From 8752c6cbb361f3609dd40ec11f482a540f9fb72b Mon Sep 17 00:00:00 2001 From: dfdossantos Date: Tue, 28 May 2024 15:17:39 +0000 Subject: [PATCH 001/356] Add support for ```google_cloudfunctions_function``` to TGC (#10805) --- .../tgc/cloudfunctions_cloud_function.go | 20 ++++++ ...xample_google_cloudfunctions_function.json | 72 ++++++++++++------- .../example_google_cloudfunctions_function.tf | 37 +++++----- 3 files changed, 83 insertions(+), 46 deletions(-) diff --git a/mmv1/third_party/tgc/cloudfunctions_cloud_function.go b/mmv1/third_party/tgc/cloudfunctions_cloud_function.go index dbc8f4f9bc27..7e5fb205cfc8 100644 --- a/mmv1/third_party/tgc/cloudfunctions_cloud_function.go +++ b/mmv1/third_party/tgc/cloudfunctions_cloud_function.go @@ -130,6 +130,18 @@ func GetCloudFunctionsCloudFunctionApiObject(d tpgresource.TerraformResourceData } else if v, ok := d.GetOkExists("trigger_http"); !tpgresource.IsEmptyValue(reflect.ValueOf(trigger_httpProp)) && (ok || !reflect.DeepEqual(v, trigger_httpProp)) { obj["trigger_http"] = trigger_httpProp } + vpcConnectorProp, err := expandCloudFunctionsCloudFunctionvpcConnector(d.Get("vpc_connector"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("vpc_connector"); !tpgresource.IsEmptyValue(reflect.ValueOf(vpcConnectorProp)) && (ok || !reflect.DeepEqual(v, vpcConnectorProp)) { + obj["vpcConnector"] = vpcConnectorProp + } + vpcConnectorEgressSettingsProp, err := expandCloudFunctionsCloudFunctionvpcConnectorEgressSettings(d.Get("vpc_connector_egress_settings"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("vpc_connector_egress_settings"); !tpgresource.IsEmptyValue(reflect.ValueOf(vpcConnectorEgressSettingsProp)) && (ok || !reflect.DeepEqual(v, vpcConnectorEgressSettingsProp)) { + obj["vpcConnectorEgressSettings"] = vpcConnectorEgressSettingsProp + } return obj, nil } @@ -278,3 +290,11 @@ func expandCloudFunctionsCloudFunctionRegion(v interface{}, d tpgresource.Terraf 
func expandCloudFunctionsCloudFunctionTriggerHttp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return v, nil } + +func expandCloudFunctionsCloudFunctionvpcConnector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCloudFunctionsCloudFunctionvpcConnectorEgressSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.json b/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.json index 4cbebd01b0aa..023ace64ebc9 100644 --- a/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.json +++ b/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.json @@ -1,29 +1,51 @@ [ - { - "name": "//cloudfunctions.googleapis.com/projects/{{.Provider.project}}/locations/australia-southeast1/functions/function-test", - "asset_type": "cloudfunctions.googleapis.com/CloudFunction", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/cloudfunctions/v1/rest", - "discovery_name": "CloudFunction", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "availableMemoryMb": 128, - "description": "My function", - "entryPoint": "helloGCS", - "environmentVariables": { - "MY_ENV_VAR": "my-env-var-value" + { + "name": "//cloudfunctions.googleapis.com/projects/{{.Provider.project}}/locations/us-east1/functions/my-cf", + "asset_type": "cloudfunctions.googleapis.com/CloudFunction", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/cloudfunctions/v1/rest", + "discovery_name": "CloudFunction", + "parent": 
"//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "availableMemoryMb": 128, + "description": "My CloudFunction", + "entryPoint": "helloGCS", + "environmentVariables": { + "MY_CF_ENV": "my-cf-env" + }, + "labels": { + "my-cf-label-value": "my-cf-label-value" + }, + "location": "us-east1", + "name": "my-cf", + "runtime": "nodejs14", + "timeout": 60, + "trigger_http": true, + "vpcConnector": "vpc-con-cf", + "vpcConnectorEgressSettings": "PRIVATE_RANGES_ONLY" + } }, - "labels": { - "my-label": "my-label-value" + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "ancestors": ["organizations/{{.OrgID}}"] + }, + { + "name": "//vpcaccess.googleapis.com/projects/{{.Provider.project}}/locations/us-east1/connectors/vpc-con-cf", + "asset_type": "vpcaccess.googleapis.com/Connector", + "resource": { + "version": "v1beta1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/vpcaccess/v1beta1/rest", + "discovery_name": "Connector", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "ipCidrRange": "10.8.0.0/28", + "machineType": "e2-micro", + "maxThroughput": 300, + "minThroughput": 200, + "network": "default" + } }, - "location": "australia-southeast1", - "name": "function-test", - "runtime": "nodejs14", - "timeout": 60, - "trigger_http": true - } + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "ancestors": ["organizations/{{.OrgID}}"] } - } -] +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.tf b/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.tf index 4b544666cdab..f55bed2e0c9a 100644 --- a/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.tf +++ b/mmv1/third_party/tgc/tests/data/example_google_cloudfunctions_function.tf @@ -1,19 +1,3 @@ -/** - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the 
"License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - terraform { required_providers { google = { @@ -27,9 +11,16 @@ provider "google" { {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} } +resource "google_vpc_access_connector" "connector" { + name = "vpc-con-cf" + ip_cidr_range = "10.8.0.0/28" + network = "default" + region = "us-east1" +} + resource "google_cloudfunctions_function" "function" { - name = "function-test" - description = "My function" + name = "my-cf" + description = "My CloudFunction" runtime = "nodejs14" available_memory_mb = 128 @@ -39,12 +30,16 @@ resource "google_cloudfunctions_function" "function" { timeout = 60 entry_point = "helloGCS" labels = { - my-label = "my-label-value" + my-cf-label-value = "my-cf-label-value" } + ingress_settings = "ALLOW_INTERNAL_ONLY" + vpc_connector = google_vpc_access_connector.connector.name + vpc_connector_egress_settings = "PRIVATE_RANGES_ONLY" + environment_variables = { - MY_ENV_VAR = "my-env-var-value" + MY_CF_ENV = "my-cf-env" } - region = "australia-southeast1" + region = "us-east1" } From f9109ceb7552145f97041d7e10de8e58502d6e3a Mon Sep 17 00:00:00 2001 From: roop2 <161707562+roop2@users.noreply.github.com> Date: Tue, 28 May 2024 20:51:34 +0530 Subject: [PATCH 002/356] =?UTF-8?q?Fixing=20the=20text=20on=20the=20netapp?= =?UTF-8?q?=20volume=20storage=20pool=20resource=20for=20quicks=E2=80=A6?= =?UTF-8?q?=20(#10808)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- 
mmv1/products/netapp/storagePool.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmv1/products/netapp/storagePool.yaml b/mmv1/products/netapp/storagePool.yaml index 2f9ee57296b3..529d2ea5aaae 100644 --- a/mmv1/products/netapp/storagePool.yaml +++ b/mmv1/products/netapp/storagePool.yaml @@ -27,7 +27,7 @@ description: | independent of consumption at the volume level. references: !ruby/object:Api::Resource::ReferenceLinks guides: - 'QUICKSTART_TITLE': 'https://cloud.google.com/netapp/volumes/docs/get-started/quickstarts/create-storage-pool' + 'Quickstart documentation': 'https://cloud.google.com/netapp/volumes/docs/get-started/quickstarts/create-storage-pool' api: 'https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.storagePools' base_url: projects/{{project}}/locations/{{location}}/storagePools self_link: projects/{{project}}/locations/{{location}}/storagePools/{{name}} @@ -49,7 +49,7 @@ parameters: immutable: true url_param_only: true description: | - Name of the location. Usually a region name, expect for some STANDARD service level pools which require a zone name. + Name of the location. Usually a region name, expect for some FLEX service level pools which require a zone name. - !ruby/object:Api::Type::String name: 'name' description: From 71bb70818eff5d9b038e97fb755a0212f369659c Mon Sep 17 00:00:00 2001 From: varsharmavs Date: Tue, 28 May 2024 15:47:48 +0000 Subject: [PATCH 003/356] Updated Description for parent. 
Fix for b/341928138 (#10763) Co-authored-by: Nick Elliot --- mmv1/products/privilegedaccessmanager/Entitlement.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/products/privilegedaccessmanager/Entitlement.yaml b/mmv1/products/privilegedaccessmanager/Entitlement.yaml index 06991b2c2426..3baf807ce452 100644 --- a/mmv1/products/privilegedaccessmanager/Entitlement.yaml +++ b/mmv1/products/privilegedaccessmanager/Entitlement.yaml @@ -258,7 +258,7 @@ parameters: required: true url_param_only: true description: | - Format: project/{project_id} or organization/{organization_number} or folder/{folder_number} + Format: projects/{project-id|project-number} or organizations/{organization-number} or folders/{folder-number} custom_code: !ruby/object:Provider::Terraform::CustomCode pre_update: templates/terraform/pre_update/privileged_access_manager_entitlement.go.erb constants: templates/terraform/constants/privileged_access_manager_entitlement.go.erb From e2197af707501c5a1acbd53eb6549999d88029c8 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Tue, 28 May 2024 16:57:48 +0100 Subject: [PATCH 004/356] TeamCity : Define execution mode of post VCR step (#10800) --- .../terraform/.teamcity/components/builds/vcr_build_steps.kt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/vcr_build_steps.kt b/mmv1/third_party/terraform/.teamcity/components/builds/vcr_build_steps.kt index 05bcb261e901..c6ed9059b546 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/vcr_build_steps.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/vcr_build_steps.kt @@ -7,6 +7,7 @@ package builds +import jetbrains.buildServer.configs.kotlin.BuildStep import jetbrains.buildServer.configs.kotlin.BuildSteps import jetbrains.buildServer.configs.kotlin.buildSteps.ScriptBuildStep @@ -140,6 +141,7 @@ fun BuildSteps.runVcrTestRecordingSetup() { fun 
BuildSteps.runVcrTestRecordingSaveCassettes() { step(ScriptBuildStep { name = "Tasks after running VCR tests: if in RECORDING mode, push new cassettes to GCS" + executionMode = BuildStep.ExecutionMode.RUN_ON_FAILURE scriptContent = """ #!/bin/bash echo "VCR Testing: Post-test steps" From a244eb757d35e51a83609c7d0c3dd272f1d1b26d Mon Sep 17 00:00:00 2001 From: dfdossantos Date: Tue, 28 May 2024 19:05:03 +0000 Subject: [PATCH 005/356] Add support for ```google_dataproc_cluster``` to TGC (#10803) --- mmv1/provider/terraform_tgc.rb | 4 +- mmv1/templates/tgc/resource_converters.go.erb | 1 + mmv1/third_party/tgc/dataproc_cluster.go | 1176 +++++++++++++++++ ...le_google_dataproc_autoscaling_policy.json | 40 +- .../data/example_google_dataproc_cluster.json | 68 + .../data/example_google_dataproc_cluster.tf | 60 + 6 files changed, 1344 insertions(+), 5 deletions(-) create mode 100644 mmv1/third_party/tgc/dataproc_cluster.go create mode 100644 mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.tf diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index e493f4ea58d0..6355a3fdd9aa 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -313,7 +313,9 @@ def copy_common_files(output_folder, generate_code, _generate_docs) ['converters/google/resources/service_account_key.go', 'third_party/tgc/service_account_key.go'], ['converters/google/resources/compute_target_pool.go', - 'third_party/tgc/compute_target_pool.go'] + 'third_party/tgc/compute_target_pool.go'], + ['converters/google/resources/dataproc_cluster.go', + 'third_party/tgc/dataproc_cluster.go'] ]) end diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index e19b984b91b6..12e0a514ba4c 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -66,6 +66,7 @@ func 
ResourceConverters() map[string][]cai.ResourceConverter { "google_compute_target_pool": {resourceConverterComputeTargetPool()}, "google_dataflow_job": {resourceDataflowJob()}, "google_dataproc_autoscaling_policy": {dataproc.ResourceConverterDataprocAutoscalingPolicy()}, + "google_dataproc_cluster": {resourceConverterDataprocCluster()}, "google_dns_managed_zone": {dns.ResourceConverterDNSManagedZone()}, "google_dns_policy": {dns.ResourceConverterDNSPolicy()}, "google_kms_key_ring_import_job": {kms.ResourceConverterKMSKeyRingImportJob()}, diff --git a/mmv1/third_party/tgc/dataproc_cluster.go b/mmv1/third_party/tgc/dataproc_cluster.go new file mode 100644 index 000000000000..e33b8134617a --- /dev/null +++ b/mmv1/third_party/tgc/dataproc_cluster.go @@ -0,0 +1,1176 @@ +package google + +import ( + "reflect" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const DataprocClusterAssetType string = "dataproc.googleapis.com/Cluster" + +func resourceConverterDataprocCluster() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: DataprocClusterAssetType, + Convert: GetDataprocClusterCaiObject, + } +} + +func GetDataprocClusterCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//compute.googleapis.com/projects/{{project}}/regions/{{region}}/clusters/{{name}}") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetDataprocClusterApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: DataprocClusterAssetType, + Resource: &cai.AssetResource{ + Version: "v1", + DiscoveryDocumentURI: 
"https://dataproc.googleapis.com/$discovery/rest?version=v1", + DiscoveryName: "Cluster", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetDataprocClusterApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := make(map[string]interface{}) + + projectIdProp, err := expandDataprocClusterProjectId(d.Get("project"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("project"); !tpgresource.IsEmptyValue(reflect.ValueOf(projectIdProp)) && (ok || !reflect.DeepEqual(v, projectIdProp)) { + obj["projectId"] = projectIdProp + } + + clusterNameProp, err := expandDataprocClusterName(d.Get("name"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(clusterNameProp)) && (ok || !reflect.DeepEqual(v, clusterNameProp)) { + obj["clusterName"] = clusterNameProp + } + + configProp, err := expandDataprocClusterConfig(d.Get("cluster_config"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("cluster_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { + obj["config"] = configProp + } + + virtualClusterConfigProp, err := expandDataprocVirtualClusterConfig(d.Get("virtual_cluster_config"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("virtual_cluster_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(virtualClusterConfigProp)) && (ok || !reflect.DeepEqual(v, virtualClusterConfigProp)) { + obj["virtualClusterConfig"] = virtualClusterConfigProp + } + + labelsProp, err := expandDataprocClusterLabels(d.Get("effective_labels"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp 
+ } + + return obj, nil +} + +func expandDataprocClusterProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConfigBucket, err := expandDataprocClusterConfigBucket(original["staging_bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConfigBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["configBucket"] = transformedConfigBucket + } + + transformedTempBucket, err := expandDataprocClusterTempBucket(original["temp_bucket"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTempBucket); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tempBucket"] = transformedTempBucket + } + + transformedGceClusterConfig, err := expandDataprocClusterConfigGceClusterConfig(original["gce_cluster_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["gceClusterConfig"] = transformedGceClusterConfig + } + + transformedMasterConfig, err := expandDataprocClusterConfigMasterConfig(original["master_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["masterConfig"] = transformedMasterConfig + } + + transformedWorkerConfig, err := expandDataprocClusterConfigWorkerConfig(original["worker_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["workerConfig"] = transformedWorkerConfig + } + + 
transformedSecondaryWorkerConfig, err := expandDataprocClusterConfigSecondaryWorkerConfig(original["preemptible_worker_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["secondaryWorkerConfig"] = transformedSecondaryWorkerConfig + } + + transformedSoftwareConfig, err := expandDataprocClusterConfigSoftwareConfig(original["software_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["softwareConfig"] = transformedSoftwareConfig + } + + transformedSecurityConfig, err := expandDataprocClusterConfigSecurityConfig(original["security_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["securityConfig"] = transformedSecurityConfig + } + + transformedAutoscalingConfig, err := expandDataprocClusterConfigAutoscalingConfig(original["autoscaling_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["autoscalingConfig"] = transformedAutoscalingConfig + } + + transformedNodeInitializationAction, err := expandDataprocClusterConfigNodeInitializationAction(original["initialization_action"], d, config) + if err != nil { + return nil, err + } else { + transformed["initializationActions"] = transformedNodeInitializationAction + } + + transformedEncryptionConfig, err := expandDataprocClusterConfigEncryptionConfig(original["encryption_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["encryptionConfig"] = transformedEncryptionConfig + } + + transformedLifecycleConfig, err := expandDataprocClusterConfigLifecycleConfig(original["lifecycle_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["lifecycleConfig"] = transformedLifecycleConfig + } + + transformedEndpointConfig, err := expandDataprocClusterConfigEndpointConfig(original["endpoint_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["endpointConfig"] = transformedEndpointConfig + } + + transformedDataprocMetricConfig, err := 
expandDataprocClusterConfigDataprocMetricConfig(original["dataproc_metric_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["dataprocMetricConfig"] = transformedDataprocMetricConfig + } + + transformedAuxiliaryNodeGroups, err := expandDataprocClusterConfigAuxiliaryNodeGroups(original["auxiliary_node_groups"], d, config) + if err != nil { + return nil, err + } else { + transformed["auxiliaryNodeGroups"] = transformedAuxiliaryNodeGroups + } + + transformedMetastoreConfig, err := expandDataprocClusterConfigMetastoreConfig(original["metastore_config"], d, config) + if err != nil { + return nil, err + } else { + transformed["metastoreConfig"] = transformedMetastoreConfig + } + + return transformed, nil +} + +func expandDataprocClusterConfigBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterTempBucket(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedZone, err := expandDataprocClusterConfigGceClusterConfigZone(original["zone"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedZone); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["zoneUri"] = transformedZone + } + + transformedNetwork, err := expandDataprocClusterConfigGceClusterConfigNetwork(original["network"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["networkUri"] = 
transformedNetwork + } + + transformedSubnetwork, err := expandDataprocClusterConfigGceClusterConfigSubnetwork(original["subnetwork"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubnetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subnetworkUri"] = transformedSubnetwork + } + + transformedServiceAccount, err := expandDataprocClusterConfigGceClusterConfigServiceAccount(original["service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccount"] = transformedServiceAccount + } + + transformedServiceAccountScopes, err := expandDataprocClusterConfigGceClusterConfigServiceAccountScopes(original["service_account_scopes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccountScopes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["serviceAccountScopes"] = transformedServiceAccountScopes + } + + transformedTags, err := expandDataprocClusterConfigGceClusterConfigTags(original["tags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tags"] = transformedTags + } + + transformedInternalIpOnly, err := expandDataprocClusterConfigGceClusterConfigInternalIpOnly(original["internal_ip_only"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInternalIpOnly); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["internalIpOnly"] = transformedInternalIpOnly + } + + transformedMetadata, err := expandDataprocClusterConfigGceClusterConfigMetadata(original["metadata"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetadata); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["metadata"] = transformedMetadata + } + + transformedReservationAffinity, err := expandDataprocClusterConfigGceClusterConfigReservationAffinity(original["reservation_affinity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedReservationAffinity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["reservationAffinity"] = transformedReservationAffinity + } + + transformedNodeGroupAffinity, err := expandDataprocClusterConfigGceClusterConfigNodeGroupAffinity(original["node_group_affinity"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeGroupAffinity); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeGroupAffinity"] = transformedNodeGroupAffinity + } + + transformedShieldedInstanceConfig, err := expandDataprocClusterConfigGceClusterConfigShieldedInstanceConfig(original["shielded_instance_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedShieldedInstanceConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["shieldedInstanceConfig"] = transformedShieldedInstanceConfig + } + + return transformed, nil +} + +func expandDataprocClusterConfigGceClusterConfigZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigSubnetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataprocClusterConfigGceClusterConfigServiceAccountScopes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigInternalIpOnly(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigMetadata(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocClusterConfigGceClusterConfigReservationAffinity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConsumeReservationType, err := expandDataprocClusterConfigGceClusterConfigReservationAffinityConsumeReservationType(original["consume_reservation_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConsumeReservationType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["consumeReservationType"] = transformedConsumeReservationType + } + + transformedKey, err := expandDataprocClusterConfigGceClusterConfigReservationAffinityKey(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKey); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["key"] = transformedKey + } + + transformedValues, err := expandDataprocClusterConfigGceClusterConfigReservationAffinityValues(original["values"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValues); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["values"] = transformedValues + } + + return transformed, nil +} + +func expandDataprocClusterConfigGceClusterConfigReservationAffinityConsumeReservationType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigReservationAffinityKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigReservationAffinityValues(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigNodeGroupAffinity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNodeGroupUri, err := expandDataprocClusterConfigGceClusterConfigNodeGroupAffinityNodeGroupUri(original["node_group_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeGroupUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeGroupUri"] = transformedNodeGroupUri + } + + return transformed, nil +} + +func expandDataprocClusterConfigGceClusterConfigNodeGroupAffinityNodeGroupUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataprocClusterConfigGceClusterConfigShieldedInstanceConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnableSecureBoot, err := expandDataprocClusterConfigGceClusterConfigShieldedInstanceConfigEnableSecureBoot(original["enable_secure_boot"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableSecureBoot); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableSecureBoot"] = transformedEnableSecureBoot + } + + transformedEnableVtpm, err := expandDataprocClusterConfigGceClusterConfigShieldedInstanceConfigEnableVtpm(original["enable_vtpm"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableVtpm); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableVtpm"] = transformedEnableVtpm + } + + transformedEnableIntegrityMonitoring, err := expandDataprocClusterConfigGceClusterConfigShieldedInstanceConfigEnableIntegrityMonitoring(original["enable_integrity_monitoring"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableIntegrityMonitoring); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableIntegrityMonitoring"] = transformedEnableIntegrityMonitoring + } + + return transformed, nil +} + +func expandDataprocClusterConfigGceClusterConfigShieldedInstanceConfigEnableSecureBoot(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigGceClusterConfigShieldedInstanceConfigEnableVtpm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandDataprocClusterConfigGceClusterConfigShieldedInstanceConfigEnableIntegrityMonitoring(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNumInstances, err := expandDataprocClusterConfigMasterConfigNumInstances(original["num_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numInstances"] = transformedNumInstances + } + + transformedMachineType, err := expandDataprocClusterConfigMasterConfigMachineType(original["machine_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineType"] = transformedMachineType + } + + transformedMinCpuPlatform, err := expandDataprocClusterConfigMasterConfigMinCpuPlatform(original["min_cpu_platform"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinCpuPlatform); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minCpuPlatform"] = transformedMinCpuPlatform + } + + transformedImageUri, err := expandDataprocClusterConfigMasterConfigImageUri(original["image_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImageUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imageUri"] = transformedImageUri + } + + transformedDiskConfig, err := expandDataprocClusterConfigMasterConfigDiskConfig(original["disk_config"], d, config) + if err != 
nil { + return nil, err + } else if val := reflect.ValueOf(transformedDiskConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["diskConfig"] = transformedDiskConfig + } + + transformedAcceleratorConfig, err := expandDataprocClusterConfigMasterConfigAcceleratorConfig(original["accelerators"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accelerators"] = transformedAcceleratorConfig + } + + return transformed, nil +} + +func expandDataprocClusterConfigMasterConfigNumInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfigMachineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfigMinCpuPlatform(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfigImageUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfigDiskConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBootDiskType, err := expandDataprocClusterConfigMasterConfigDiskConfigBootDiskType(original["boot_disk_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBootDiskType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bootDiskType"] = transformedBootDiskType + } + + 
transformedBootDiskSizeGb, err := expandDataprocClusterConfigMasterConfigDiskConfigBootDiskSizeGb(original["boot_disk_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBootDiskSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bootDiskSizeGb"] = transformedBootDiskSizeGb + } + + transformedNumLocalSsds, err := expandDataprocClusterConfigMasterConfigDiskConfigNumLocalSsds(original["num_local_ssds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumLocalSsds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numLocalSsds"] = transformedNumLocalSsds + } + + transformedLocalSsdInterface, err := expandDataprocClusterConfigMasterConfigDiskConfigLocalSsdInterface(original["local_ssd_interface"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLocalSsdInterface); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["localSsdInterface"] = transformedLocalSsdInterface + } + + return transformed, nil +} + +func expandDataprocClusterConfigMasterConfigDiskConfigBootDiskType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfigDiskConfigBootDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfigDiskConfigNumLocalSsds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfigDiskConfigLocalSsdInterface(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfigAcceleratorConfig(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAcceleratorTypeUri, err := expandDataprocClusterConfigMasterConfigAcceleratorConfigAcceleratorTypeUri(original["accelerator_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorTypeUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["acceleratorTypeUri"] = transformedAcceleratorTypeUri + } + + transformedAcceleratorCount, err := expandDataprocClusterConfigMasterConfigAcceleratorConfigAcceleratorCount(original["accelerator_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["acceleratorCount"] = transformedAcceleratorCount + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandDataprocClusterConfigMasterConfigAcceleratorConfigAcceleratorTypeUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigMasterConfigAcceleratorConfigAcceleratorCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNumInstances, err := 
expandDataprocClusterConfigWorkerConfigNumInstances(original["num_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numInstances"] = transformedNumInstances + } + + transformedMachineType, err := expandDataprocClusterConfigWorkerConfigMachineType(original["machine_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineType"] = transformedMachineType + } + + transformedMinCpuPlatform, err := expandDataprocClusterConfigWorkerConfigMinCpuPlatform(original["min_cpu_platform"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinCpuPlatform); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minCpuPlatform"] = transformedMinCpuPlatform + } + + transformedMinNumInstances, err := expandDataprocClusterConfigWorkerConfigMinNumInstances(original["min_num_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinNumInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minNumInstances"] = transformedMinNumInstances + } + + transformedImageUri, err := expandDataprocClusterConfigWorkerConfigImageUri(original["image_uri"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImageUri); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imageUri"] = transformedImageUri + } + + transformedDiskConfig, err := expandDataprocClusterConfigWorkerConfigDiskConfig(original["disk_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDiskConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["diskConfig"] = transformedDiskConfig + } + + transformedAcceleratorConfig, err := 
expandDataprocClusterConfigWorkerConfigAcceleratorConfig(original["accelerators"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accelerators"] = transformedAcceleratorConfig + } + + return transformed, nil +} + +func expandDataprocClusterConfigWorkerConfigNumInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfigMachineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfigMinCpuPlatform(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfigMinNumInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfigImageUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfigDiskConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBootDiskType, err := expandDataprocClusterConfigWorkerConfigDiskConfigBootDiskType(original["boot_disk_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBootDiskType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bootDiskType"] = transformedBootDiskType + } + + transformedBootDiskSizeGb, err := 
expandDataprocClusterConfigWorkerConfigDiskConfigBootDiskSizeGb(original["boot_disk_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBootDiskSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bootDiskSizeGb"] = transformedBootDiskSizeGb + } + + transformedNumLocalSsds, err := expandDataprocClusterConfigWorkerConfigDiskConfigNumLocalSsds(original["num_local_ssds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumLocalSsds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numLocalSsds"] = transformedNumLocalSsds + } + + return transformed, nil +} + +func expandDataprocClusterConfigWorkerConfigDiskConfigBootDiskType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfigDiskConfigBootDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfigDiskConfigNumLocalSsds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfigAcceleratorConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAcceleratorTypeUri, err := expandDataprocClusterConfigWorkerConfigAcceleratorConfigAcceleratorTypeUri(original["accelerator_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorTypeUri); val.IsValid() && 
!tpgresource.IsEmptyValue(val) { + transformed["acceleratorTypeUri"] = transformedAcceleratorTypeUri + } + + transformedAcceleratorCount, err := expandDataprocClusterConfigWorkerConfigAcceleratorConfigAcceleratorCount(original["accelerator_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["acceleratorCount"] = transformedAcceleratorCount + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandDataprocClusterConfigWorkerConfigAcceleratorConfigAcceleratorTypeUri(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigWorkerConfigAcceleratorConfigAcceleratorCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigSecondaryWorkerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNumInstances, err := expandDataprocClusterConfigSecondaryWorkerConfigNumInstances(original["num_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numInstances"] = transformedNumInstances + } + + transformedPreemptibility, err := expandDataprocClusterConfigSecondaryWorkerConfigPreemptibility(original["preemptibility"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPreemptibility); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["preemptibility"] = transformedPreemptibility + } + + 
transformedDiskConfig, err := expandDataprocClusterConfigSecondaryWorkerConfigDiskConfig(original["disk_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDiskConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["diskConfig"] = transformedDiskConfig + } + + transformedInstanceFlexibilityPolicy, err := expandDataprocClusterConfigSecondaryWorkerConfigInstanceFlexibilityPolicy(original["instance_flexibility_policy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstanceFlexibilityPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instanceFlexibilityPolicy"] = transformedInstanceFlexibilityPolicy + } + + return transformed, nil +} + +func expandDataprocClusterConfigSecondaryWorkerConfigNumInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigSecondaryWorkerConfigPreemptibility(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigSecondaryWorkerConfigDiskConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBootDiskType, err := expandDataprocClusterConfigSecondaryWorkerConfigDiskConfigBootDiskType(original["boot_disk_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBootDiskType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bootDiskType"] = transformedBootDiskType + } + + transformedBootDiskSizeGb, err := 
expandDataprocClusterConfigSecondaryWorkerConfigDiskConfigBootDiskSizeGb(original["boot_disk_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBootDiskSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["bootDiskSizeGb"] = transformedBootDiskSizeGb + } + + transformedNumLocalSsds, err := expandDataprocClusterConfigSecondaryWorkerConfigDiskConfigNumLocalSsds(original["num_local_ssds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumLocalSsds); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numLocalSsds"] = transformedNumLocalSsds + } + + return transformed, nil +} + +func expandDataprocClusterConfigSecondaryWorkerConfigDiskConfigBootDiskType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigSecondaryWorkerConfigDiskConfigBootDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigSecondaryWorkerConfigDiskConfigNumLocalSsds(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigSecondaryWorkerConfigInstanceFlexibilityPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigSoftwareConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedImageVersion, err := expandDataprocClusterConfigSoftwareConfigImageVersion(original["image_version"], d, config) + 
if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImageVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["imageVersion"] = transformedImageVersion + } + + transformedProperties, err := expandDataprocClusterConfigSoftwareConfigProperties(original["override_properties"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedProperties); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["properties"] = transformedProperties + } + + transformedOptionalComponents, err := expandDataprocClusterConfigSoftwareConfigOptionalComponents(original["optional_components"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOptionalComponents); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["optionalComponents"] = transformedOptionalComponents + } + + return transformed, nil +} + +func expandDataprocClusterConfigSoftwareConfigImageVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigSoftwareConfigProperties(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocClusterConfigSoftwareConfigOptionalComponents(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandDataprocClusterConfigSecurityConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigAutoscalingConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigNodeInitializationAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigLifecycleConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigEndpointConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigDataprocMetricConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMetrics, err := expandDataprocClusterConfigDataprocMetricConfigMetrics(original["metrics"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetrics); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["metrics"] = transformedMetrics + } + + return transformed, nil +} + +func expandDataprocClusterConfigDataprocMetricConfigMetrics(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMetricSource, err := expandDataprocClusterConfigDataprocMetricConfigMetricsMetricSource(original["metric_source"], d, config) + if err != nil { + return nil, err + } 
else if val := reflect.ValueOf(transformedMetricSource); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["metricSource"] = transformedMetricSource + } + + transformedMetricOverrides, err := expandDataprocClusterConfigDataprocMetricConfigMetricsMetricOverrides(original["metric_overrides"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMetricOverrides); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["metricOverrides"] = transformedMetricOverrides + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandDataprocClusterConfigDataprocMetricConfigMetricsMetricSource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigDataprocMetricConfigMetricsMetricOverrides(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNodeGroup, err := expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroup(original["node_group"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeGroup); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeGroup"] = transformedNodeGroup + } + + return transformed, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroup(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := 
raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedRoles, err := expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupRoles(original["roles"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRoles); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["roles"] = transformedRoles + } + + transformedNodeGroupConfig, err := expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfig(original["node_group_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeGroupConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeGroupConfig"] = transformedNodeGroupConfig + } + + return transformed, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupRoles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNumInstances, err := expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigNumInstances(original["num_instances"], d, config) + if err 
!= nil { + return nil, err + } else if val := reflect.ValueOf(transformedNumInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["numInstances"] = transformedNumInstances + } + + transformedMachineType, err := expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigMachineType(original["machine_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineType"] = transformedMachineType + } + + transformedMinCpuPlatform, err := expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigMinCpuPlatform(original["min_cpu_platform"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinCpuPlatform); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minCpuPlatform"] = transformedMinCpuPlatform + } + + transformedDiskConfig, err := expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigDiskConfig(original["disk_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDiskConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["diskConfig"] = transformedDiskConfig + } + + transformedAccelerators, err := expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigAccelerators(original["accelerators"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAccelerators); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["accelerators"] = transformedAccelerators + } + + return transformed, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigNumInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigMachineType(v interface{}, 
d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigMinCpuPlatform(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigDiskConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterConfigAuxiliaryNodeGroupsNodeGroupNodeGroupConfigAccelerators(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandDataprocClusterConfigMetastoreConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandDataprocClusterLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandDataprocVirtualClusterConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/tgc/tests/data/example_google_dataproc_autoscaling_policy.json b/mmv1/third_party/tgc/tests/data/example_google_dataproc_autoscaling_policy.json index 9b4efab8a4be..e634f62eb2d6 100644 --- a/mmv1/third_party/tgc/tests/data/example_google_dataproc_autoscaling_policy.json +++ b/mmv1/third_party/tgc/tests/data/example_google_dataproc_autoscaling_policy.json @@ -1,8 +1,41 @@ [ + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/regions/us-central1/clusters/dataproc-policy1", + "asset_type": 
"dataproc.googleapis.com/Cluster", + "resource": { + "version": "v1", + "discovery_document_uri": "https://dataproc.googleapis.com/$discovery/rest?version=v1", + "discovery_name": "Cluster", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "clusterName": "dataproc-policy1", + "config": { + "autoscalingConfig": [ + null + ], + "auxiliaryNodeGroups": null, + "dataprocMetricConfig": null, + "encryptionConfig": [], + "endpointConfig": [], + "gceClusterConfig": null, + "initializationActions": [], + "lifecycleConfig": [], + "masterConfig": null, + "metastoreConfig": [], + "secondaryWorkerConfig": null, + "securityConfig": [], + "softwareConfig": null, + "workerConfig": null + }, + "projectId": "{{.Provider.project}}" + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + }, { "name": "//dataproc.googleapis.com/projects/{{.Provider.project}}/locations/us-central1/autoscalingPolicies/dataproc-policy1", "asset_type": "dataproc.googleapis.com/AutoscalingPolicy", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", "resource": { "version": "v1beta2", "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/dataproc/v1beta2/rest", @@ -25,8 +58,7 @@ } } }, - "ancestors": [ - "organizations/{{.OrgID}}" - ] + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" } ] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.json b/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.json new file mode 100644 index 000000000000..0b99f3ae9b79 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.json @@ -0,0 +1,68 @@ +[ + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/regions/us-central1/clusters/my-new-cluster", + "asset_type": "dataproc.googleapis.com/Cluster", + "resource": { + 
"version": "v1", + "discovery_document_uri": "https://dataproc.googleapis.com/$discovery/rest?version=v1", + "discovery_name": "Cluster", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "clusterName": "my-new-cluster", + "config": { + "autoscalingConfig": [], + "auxiliaryNodeGroups": null, + "dataprocMetricConfig": null, + "encryptionConfig": [], + "endpointConfig": [], + "gceClusterConfig": { + "tags": [ + "bar", + "foo" + ] + }, + "initializationActions": [ + { + "script": "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh", + "timeout_sec": 500 + } + ], + "lifecycleConfig": [], + "masterConfig": { + "diskConfig": { + "bootDiskSizeGb": 30, + "bootDiskType": "pd-ssd" + }, + "numInstances": 1 + }, + "metastoreConfig": [], + "secondaryWorkerConfig": { + "preemptibility": "PREEMPTIBLE" + }, + "securityConfig": [], + "softwareConfig": { + "imageVersion": "2.0.35-debian10", + "properties": { + "dataproc:dataproc.allow.zero.workers": "true" + } + }, + "workerConfig": { + "diskConfig": { + "bootDiskSizeGb": 30, + "bootDiskType": "pd-standard", + "numLocalSsds": 1 + }, + "minCpuPlatform": "Intel Skylake", + "numInstances": 2 + } + }, + "labels": { + "foo": "bar" + }, + "projectId": "{{.Provider.project}}" + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.tf b/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.tf new file mode 100644 index 000000000000..989f76bdcebf --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_dataproc_cluster.tf @@ -0,0 +1,60 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource 
"google_dataproc_cluster" "mycluster" { + name = "my-new-cluster" + region = "us-central1" + graceful_decommission_timeout = "120s" + labels = { + foo = "bar" + } + + cluster_config { + master_config { + num_instances = 1 + disk_config { + boot_disk_type = "pd-ssd" + boot_disk_size_gb = 30 + } + } + + worker_config { + num_instances = 2 + min_cpu_platform = "Intel Skylake" + disk_config { + boot_disk_size_gb = 30 + num_local_ssds = 1 + } + } + + preemptible_worker_config { + num_instances = 0 + } + + software_config { + image_version = "2.0.35-debian10" + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + + gce_cluster_config { + tags = ["foo", "bar"] + } + + initialization_action { + script = "gs://dataproc-initialization-actions/stackdriver/stackdriver.sh" + timeout_sec = 500 + } + } +} \ No newline at end of file From 5792aa80f336d7da84eb6ad6fe6cd4f93f17d9ff Mon Sep 17 00:00:00 2001 From: dfdossantos Date: Tue, 28 May 2024 19:05:14 +0000 Subject: [PATCH 006/356] Add support for ```google_storage_bucket``` to TGC (#10807) --- mmv1/third_party/tgc/storage_bucket.go | 1 + .../tests/data/example_storage_bucket.json | 58 ++++++++++--------- .../tgc/tests/data/example_storage_bucket.tf | 8 ++- 3 files changed, 36 insertions(+), 31 deletions(-) diff --git a/mmv1/third_party/tgc/storage_bucket.go b/mmv1/third_party/tgc/storage_bucket.go index 79261a0bd83a..1bacf205a812 100644 --- a/mmv1/third_party/tgc/storage_bucket.go +++ b/mmv1/third_party/tgc/storage_bucket.go @@ -303,5 +303,6 @@ func expandIamConfiguration(d tpgresource.TerraformResourceData) *storage.Bucket Enabled: d.Get("uniform_bucket_level_access").(bool), ForceSendFields: []string{"Enabled"}, }, + PublicAccessPrevention: d.Get("public_access_prevention").(string), } } diff --git a/mmv1/third_party/tgc/tests/data/example_storage_bucket.json b/mmv1/third_party/tgc/tests/data/example_storage_bucket.json index 6086117f63a3..186d965cf4ee 100644 --- 
a/mmv1/third_party/tgc/tests/data/example_storage_bucket.json +++ b/mmv1/third_party/tgc/tests/data/example_storage_bucket.json @@ -1,31 +1,33 @@ [ - { - "name": "//storage.googleapis.com/image-store-bucket", - "asset_type": "storage.googleapis.com/Bucket", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/storage/v1/rest", - "discovery_name": "Bucket", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "iamConfiguration": { - "uniformBucketLevelAccess": { - "enabled": false - } + { + "name": "//storage.googleapis.com/new-bucket-test-tf", + "asset_type": "storage.googleapis.com/Bucket", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/storage/v1/rest", + "discovery_name": "Bucket", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "iamConfiguration": { + "publicAccessPrevention": "enforced", + "uniformBucketLevelAccess": { + "enabled": false + } + }, + "lifecycle": { + "rule": [] + }, + "location": "EU", + "name": "new-bucket-test-tf", + "project": "{{.Provider.project}}", + "storageClass": "STANDARD", + "website": { + "mainPageSuffix": "index.html", + "notFoundPage": "404.html" + } + } }, - "lifecycle": { - "rule": [] - }, - "location": "EU", - "name": "image-store-bucket", - "project": "{{.Provider.project}}", - "storageClass": "STANDARD", - "website": { - "mainPageSuffix": "index.html", - "notFoundPage": "404.html" - } - } + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" } - } -] +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_storage_bucket.tf b/mmv1/third_party/tgc/tests/data/example_storage_bucket.tf index 7c256bdb3128..1384935d5b0b 100644 --- 
a/mmv1/third_party/tgc/tests/data/example_storage_bucket.tf +++ b/mmv1/third_party/tgc/tests/data/example_storage_bucket.tf @@ -13,8 +13,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - -terraform { + + terraform { required_providers { google = { source = "hashicorp/google-beta" @@ -28,9 +28,11 @@ provider "google" { } resource "google_storage_bucket" "image-store" { - name = "image-store-bucket" + name = "new-bucket-test-tf" location = "EU" + public_access_prevention = "enforced" + website { main_page_suffix = "index.html" not_found_page = "404.html" From f2ecfbee07a9ab66136c3326c0bd0b6cd3f6fd56 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Tue, 28 May 2024 13:53:25 -0700 Subject: [PATCH 007/356] Convert primary_resource_name in yaml files (#10812) --- mmv1/products/datafusion/go_instance.yaml | 7 +------ mmv1/products/pubsub/go_Schema.yaml | 3 +-- mmv1/products/pubsub/go_Subscription.yaml | 7 ------- mmv1/products/pubsub/go_Topic.yaml | 5 +---- mmv1/templates/terraform/yaml_conversion.erb | 4 ++-- 5 files changed, 5 insertions(+), 21 deletions(-) diff --git a/mmv1/products/datafusion/go_instance.yaml b/mmv1/products/datafusion/go_instance.yaml index 33cfefff7faf..952b4ccb7a29 100644 --- a/mmv1/products/datafusion/go_instance.yaml +++ b/mmv1/products/datafusion/go_instance.yaml @@ -53,7 +53,7 @@ custom_code: examples: - name: 'data_fusion_instance_basic' primary_resource_id: 'basic_instance' - primary_resource_name: 'basic_instance' + primary_resource_name: 'fmt.Sprintf("tf-test-my-instance%s", context["random_suffix"])' vars: instance_name: 'my-instance' prober_test_run: '' @@ -61,7 +61,6 @@ examples: 'prober_test_run': '`options = { prober_test_run = "true" }`' - name: 'data_fusion_instance_full' primary_resource_id: 'extended_instance' - primary_resource_name: 'extended_instance' vars: instance_name: 'my-instance' ip_alloc: 'datafusion-ip-alloc' @@ -71,12 +70,10 @@ examples: 'prober_test_run': 
'`options = { prober_test_run = "true" }`' - name: 'data_fusion_instance_cmek' primary_resource_id: 'cmek' - primary_resource_name: 'cmek' vars: instance_name: 'my-instance' - name: 'data_fusion_instance_enterprise' primary_resource_id: 'enterprise_instance' - primary_resource_name: 'enterprise_instance' vars: instance_name: 'my-instance' prober_test_run: '' @@ -84,12 +81,10 @@ examples: 'prober_test_run': '`options = { prober_test_run = "true" }`' - name: 'data_fusion_instance_event' primary_resource_id: 'event' - primary_resource_name: 'event' vars: instance_name: 'my-instance' - name: 'data_fusion_instance_zone' primary_resource_id: 'zone' - primary_resource_name: 'zone' vars: instance_name: 'my-instance' parameters: diff --git a/mmv1/products/pubsub/go_Schema.yaml b/mmv1/products/pubsub/go_Schema.yaml index fb0222e11343..463cac379b16 100644 --- a/mmv1/products/pubsub/go_Schema.yaml +++ b/mmv1/products/pubsub/go_Schema.yaml @@ -46,12 +46,11 @@ custom_code: examples: - name: 'pubsub_schema_basic' primary_resource_id: 'example' - primary_resource_name: 'example' + primary_resource_name: 'fmt.Sprintf("tf-test-example-schema%s", context["random_suffix"])' vars: schema_name: 'example-schema' - name: 'pubsub_schema_protobuf' primary_resource_id: 'example' - primary_resource_name: 'example' vars: schema_name: 'example' test_env_vars: diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index a1c1fa36e5d2..141f00def5af 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -46,25 +46,21 @@ custom_code: examples: - name: 'pubsub_subscription_push' primary_resource_id: 'example' - primary_resource_name: 'example' vars: topic_name: 'example-topic' subscription_name: 'example-subscription' - name: 'pubsub_subscription_pull' primary_resource_id: 'example' - primary_resource_name: 'example' vars: topic_name: 'example-topic' subscription_name: 'example-subscription' - name: 
'pubsub_subscription_dead_letter' primary_resource_id: 'example' - primary_resource_name: 'example' vars: topic_name: 'example-topic' subscription_name: 'example-subscription' - name: 'pubsub_subscription_push_bq' primary_resource_id: 'example' - primary_resource_name: 'example' vars: topic_name: 'example-topic' subscription_name: 'example-subscription' @@ -72,7 +68,6 @@ examples: table_id: 'example_table' - name: 'pubsub_subscription_push_bq_table_schema' primary_resource_id: 'example' - primary_resource_name: 'example' vars: topic_name: 'example-topic' subscription_name: 'example-subscription' @@ -80,14 +75,12 @@ examples: table_id: 'example_table' - name: 'pubsub_subscription_push_cloudstorage' primary_resource_id: 'example' - primary_resource_name: 'example' vars: topic_name: 'example-topic' subscription_name: 'example-subscription' bucket_name: 'example-bucket' - name: 'pubsub_subscription_push_cloudstorage_avro' primary_resource_id: 'example' - primary_resource_name: 'example' vars: topic_name: 'example-topic' subscription_name: 'example-subscription' diff --git a/mmv1/products/pubsub/go_Topic.yaml b/mmv1/products/pubsub/go_Topic.yaml index d7754f28b4bf..229a9bbe29cf 100644 --- a/mmv1/products/pubsub/go_Topic.yaml +++ b/mmv1/products/pubsub/go_Topic.yaml @@ -51,12 +51,11 @@ error_retry_predicates: examples: - name: 'pubsub_topic_basic' primary_resource_id: 'example' - primary_resource_name: 'example' + primary_resource_name: 'fmt.Sprintf("tf-test-example-topic%s", context["random_suffix"])' vars: topic_name: 'example-topic' - name: 'pubsub_topic_cmek' primary_resource_id: 'example' - primary_resource_name: 'example' vars: topic_name: 'example-topic' key_name: 'example-key' @@ -64,12 +63,10 @@ examples: skip_test: true - name: 'pubsub_topic_geo_restricted' primary_resource_id: 'example' - primary_resource_name: 'example' vars: topic_name: 'example-topic' - name: 'pubsub_topic_schema_settings' primary_resource_id: 'example' - primary_resource_name: 'example' 
vars: topic_name: 'example-topic' schema_name: 'example' diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index ef68a53b5b20..9420dd3f85fd 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -459,8 +459,8 @@ examples: <% unless example.primary_resource_id.nil? -%> primary_resource_id: '<%= example.primary_resource_id %>' <% end -%> -<% unless example.primary_resource_id.nil? -%> - primary_resource_name: '<%= example.primary_resource_id %>' +<% unless example.primary_resource_name.nil? -%> + primary_resource_name: '<%= example.primary_resource_name %>' <% end -%> <% unless example.min_version.nil? -%> min_version: '<%= example.min_version %>' From be365d7a2466aebe55fafcb2fa317ff60eb87b45 Mon Sep 17 00:00:00 2001 From: Alex Jones <97421092+alexrjones@users.noreply.github.com> Date: Wed, 29 May 2024 05:04:13 +0800 Subject: [PATCH 008/356] fix: Verify `google_cloud_run_v2_{service,job}` 'timeout' field with regex (#10611) --- mmv1/products/cloudrunv2/Job.yaml | 2 ++ mmv1/products/cloudrunv2/Service.yaml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/mmv1/products/cloudrunv2/Job.yaml b/mmv1/products/cloudrunv2/Job.yaml index c8e58edf624d..f28ed56409ce 100644 --- a/mmv1/products/cloudrunv2/Job.yaml +++ b/mmv1/products/cloudrunv2/Job.yaml @@ -523,6 +523,8 @@ properties: A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". 
default_from_api: true + validation: !ruby/object:Provider::Terraform::Validation + regex: '^[0-9]+(?:\.[0-9]{1,9})?s$' - !ruby/object:Api::Type::String name: 'serviceAccount' description: |- diff --git a/mmv1/products/cloudrunv2/Service.yaml b/mmv1/products/cloudrunv2/Service.yaml index 88929c429ed7..1b9e8f12a268 100644 --- a/mmv1/products/cloudrunv2/Service.yaml +++ b/mmv1/products/cloudrunv2/Service.yaml @@ -361,6 +361,8 @@ properties: A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". default_from_api: true + validation: !ruby/object:Provider::Terraform::Validation + regex: '^[0-9]+(?:\.[0-9]{1,9})?s$' - !ruby/object:Api::Type::String name: 'serviceAccount' description: |- From 86935162103d83c8b9c3cdeae8833a1173ea9fb2 Mon Sep 17 00:00:00 2001 From: chasevedder Date: Tue, 28 May 2024 14:23:38 -0700 Subject: [PATCH 009/356] Add a sleep `google_service_account` to reduce the likelihood of eventual consistency errors (#10813) --- .../resourcemanager/resource_google_service_account.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mmv1/third_party/terraform/services/resourcemanager/resource_google_service_account.go b/mmv1/third_party/terraform/services/resourcemanager/resource_google_service_account.go index 99a61fa63512..9640ca08df0a 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/resource_google_service_account.go +++ b/mmv1/third_party/terraform/services/resourcemanager/resource_google_service_account.go @@ -158,6 +158,11 @@ func resourceGoogleServiceAccountCreate(d *schema.ResourceData, meta interface{} return err } + // We can't guarantee complete consistency even after polling, + // so sleep for some additional time to reduce the likelihood of + // eventual consistency failures. 
+ time.Sleep(10 * time.Second) + return resourceGoogleServiceAccountRead(d, meta) } From 9bd4bb5045659c0474741a053b50c6ccc4ffe5a4 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Tue, 28 May 2024 14:51:42 -0700 Subject: [PATCH 010/356] Added 4hr timeout to vcr nightly recording (#10814) --- .ci/scripts/go-plus/vcr-cassette-update/vcr_cassette_update.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/scripts/go-plus/vcr-cassette-update/vcr_cassette_update.sh b/.ci/scripts/go-plus/vcr-cassette-update/vcr_cassette_update.sh index 5e26731fd5c3..8baf28e037f7 100755 --- a/.ci/scripts/go-plus/vcr-cassette-update/vcr_cassette_update.sh +++ b/.ci/scripts/go-plus/vcr-cassette-update/vcr_cassette_update.sh @@ -115,7 +115,7 @@ if [[ -n $FAILED_TESTS_PATTERN ]]; then FAILED_TESTS=$(grep "^--- FAIL: TestAcc" replaying_test.log | awk '{print $3}') # test_exit_code=0 - parallel --jobs 16 TF_LOG=DEBUG TF_LOG_PATH_MASK=$local_path/testlog/recording/%s.log TF_ACC=1 TF_SCHEMA_PANIC_ON_ERROR=1 go test {1} -parallel 1 -v -run="{2}$" -timeout 240m -ldflags="-X=github.com/hashicorp/terraform-provider-google-beta/version.ProviderVersion=acc" ">>" testlog/recording_build/{2}_recording_test.log ::: $GOOGLE_TEST_DIRECTORY ::: $FAILED_TESTS + timeout 4h parallel --jobs 16 TF_LOG=DEBUG TF_LOG_PATH_MASK=$local_path/testlog/recording/%s.log TF_ACC=1 TF_SCHEMA_PANIC_ON_ERROR=1 go test {1} -parallel 1 -v -run="{2}$" -timeout 240m -ldflags="-X=github.com/hashicorp/terraform-provider-google-beta/version.ProviderVersion=acc" ">>" testlog/recording_build/{2}_recording_test.log ::: $GOOGLE_TEST_DIRECTORY ::: $FAILED_TESTS test_exit_code=$? 
From 3963065a1e467a9482cee7e3ac5134dfd95fd558 Mon Sep 17 00:00:00 2001 From: vmiglani <142545940+vmiglani@users.noreply.github.com> Date: Tue, 28 May 2024 16:29:51 -0700 Subject: [PATCH 011/356] [AlloyDB] Private Service Connect Support (#10783) --- mmv1/products/alloydb/Cluster.yaml | 9 +++ mmv1/products/alloydb/Instance.yaml | 26 +++++++ .../alloydb/resource_alloydb_cluster_test.go | 36 +++++++++ .../alloydb/resource_alloydb_instance_test.go | 77 +++++++++++++++++++ 4 files changed, 148 insertions(+) diff --git a/mmv1/products/alloydb/Cluster.yaml b/mmv1/products/alloydb/Cluster.yaml index 67b4d85f02fd..502547cd6df8 100644 --- a/mmv1/products/alloydb/Cluster.yaml +++ b/mmv1/products/alloydb/Cluster.yaml @@ -208,6 +208,7 @@ properties: exactly_one_of: - network - network_config.0.network + - psc_config.0.psc_enabled default_from_api: true deprecation_message: >- `network` is deprecated and will be removed in a future major release. Instead, use `network_config` to define the network configuration. @@ -227,6 +228,7 @@ properties: exactly_one_of: - network - network_config.0.network + - psc_config.0.psc_enabled description: | The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster. It is specified in the form: "projects/{projectNumber}/global/networks/{network_id}". @@ -264,6 +266,13 @@ properties: default_from_api: true description: | The database engine major version. This is an optional field and it's populated at the Cluster creation time. This field cannot be changed after cluster creation. + - !ruby/object:Api::Type::NestedObject + name: 'pscConfig' + description: 'Configuration for Private Service Connect (PSC) for the cluster.' + properties: + - !ruby/object:Api::Type::Boolean + name: 'pscEnabled' + description: 'Create an instance that allows connections from Private Service Connect endpoints to the instance.' 
- !ruby/object:Api::Type::NestedObject name: 'initialUser' description: | diff --git a/mmv1/products/alloydb/Instance.yaml b/mmv1/products/alloydb/Instance.yaml index 3204fef7377e..e5c0adb30934 100644 --- a/mmv1/products/alloydb/Instance.yaml +++ b/mmv1/products/alloydb/Instance.yaml @@ -279,6 +279,32 @@ properties: values: - :ENCRYPTED_ONLY - :ALLOW_UNENCRYPTED_AND_ENCRYPTED + - !ruby/object:Api::Type::NestedObject + name: 'pscInstanceConfig' + description: | + Configuration for Private Service Connect (PSC) for the instance. + properties: + - !ruby/object:Api::Type::String + name: 'serviceAttachmentLink' + output: true + description: | + The service attachment created when Private Service Connect (PSC) is enabled for the instance. + The name of the resource will be in the format of + `projects//regions//serviceAttachments/` + - !ruby/object:Api::Type::Array + name: allowedConsumerProjects + item_type: Api::Type::String + description: | + List of consumer projects that are allowed to create PSC endpoints to service-attachments to this instance. + These should be specified as project numbers only. + item_validation: !ruby/object:Provider::Terraform::Validation + regex: '^\d+$' + - !ruby/object:Api::Type::String + name: 'pscDnsName' + output: true + description: | + The DNS name of the instance for PSC connectivity. 
+ Name convention: ...alloydb-psc.goog - !ruby/object:Api::Type::NestedObject name: 'networkConfig' description: | diff --git a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_cluster_test.go b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_cluster_test.go index c620a8c9bcbc..7950ec9885fe 100644 --- a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_cluster_test.go +++ b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_cluster_test.go @@ -1295,3 +1295,39 @@ resource "google_compute_network" "default" { data "google_project" "project" {} `, context) } + +// Ensures cluster creation succeeds for a Private Service Connect enabled cluster. +func TestAccAlloydbCluster_withPrivateServiceConnect(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbCluster_withPrivateServiceConnect(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_alloydb_cluster.default", "psc_config.0.psc_enabled", "true"), + ), + }, + }, + }) +} + +func testAccAlloydbCluster_withPrivateServiceConnect(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + psc_config { + psc_enabled = true + } +} +data "google_project" "project" {} +`, context) +} diff --git a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go index 0fa2c319df8a..f6475ce10bc0 100644 --- 
a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go +++ b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go @@ -704,3 +704,80 @@ data "google_compute_network" "default" { } `, context) } + +func TestAccAlloydbInstance_updatePscInstanceConfig(t *testing.T) { + t.Parallel() + + random_suffix := acctest.RandString(t, 10) + context := map[string]interface{}{ + "random_suffix": random_suffix, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAlloydbInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAlloydbInstance_pscInstanceConfig(context), + }, + { + Config: testAccAlloydbInstance_updatePscInstanceConfigAllowlist(context), + }, + }, + }) +} + +func testAccAlloydbInstance_pscInstanceConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + machine_config { + cpu_count = 2 + } + psc_instance_config { + allowed_consumer_projects = ["${data.google_project.project.number}"] + } +} +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + psc_config { + psc_enabled = true + } + initial_user { + password = "tf-test-alloydb-cluster%{random_suffix}" + } +} +data "google_project" "project" {} +`, context) +} + +func testAccAlloydbInstance_updatePscInstanceConfigAllowlist(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_alloydb_instance" "default" { + cluster = google_alloydb_cluster.default.name + instance_id = "tf-test-alloydb-instance%{random_suffix}" + instance_type = "PRIMARY" + machine_config { + cpu_count = 2 + } + psc_instance_config { + 
allowed_consumer_projects = ["${data.google_project.project.number}", "1044355742748"] + } +} +resource "google_alloydb_cluster" "default" { + cluster_id = "tf-test-alloydb-cluster%{random_suffix}" + location = "us-central1" + psc_config { + psc_enabled = true + } + initial_user { + password = "tf-test-alloydb-cluster%{random_suffix}" + } +} +data "google_project" "project" {} +`, context) +} From 093b6b035cd851d9083a643152040367748d6a4d Mon Sep 17 00:00:00 2001 From: dfdossantos Date: Wed, 29 May 2024 07:28:13 +0000 Subject: [PATCH 012/356] Add support for ```google_container_cluster``` to TGC (#10810) --- mmv1/third_party/tgc/container.go | 22 ++- .../tests/data/example_container_cluster.json | 134 ++++++++++-------- .../tests/data/example_container_cluster.tf | 25 +++- 3 files changed, 115 insertions(+), 66 deletions(-) diff --git a/mmv1/third_party/tgc/container.go b/mmv1/third_party/tgc/container.go index 65fdf748fbcb..4c7caa378740 100644 --- a/mmv1/third_party/tgc/container.go +++ b/mmv1/third_party/tgc/container.go @@ -183,14 +183,12 @@ func GetContainerClusterApiObject(d tpgresource.TerraformResourceData, config *t } else if v, ok := d.GetOkExists("private_cluster_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(privateClusterConfigProp)) && (ok || !reflect.DeepEqual(v, privateClusterConfigProp)) { obj["privateClusterConfig"] = privateClusterConfigProp } - workloadIdentityConfigProp, err := expandContainerClusterWorkloadIdentityConfig(d.Get("workload_identity_config"), d, config) if err != nil { return nil, err } else if v, ok := d.GetOkExists("workload_identity_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(workloadIdentityConfigProp)) && (ok || !reflect.DeepEqual(v, workloadIdentityConfigProp)) { obj["workloadIdentityConfig"] = workloadIdentityConfigProp } - clusterIpv4CidrProp, err := expandContainerClusterClusterIpv4Cidr(d.Get("cluster_ipv4_cidr"), d, config) if err != nil { return nil, err @@ -293,6 +291,18 @@ func 
GetContainerClusterApiObject(d tpgresource.TerraformResourceData, config *t } else if v, ok := d.GetOkExists("kubectl_context"); !tpgresource.IsEmptyValue(reflect.ValueOf(kubectlContextProp)) && (ok || !reflect.DeepEqual(v, kubectlContextProp)) { obj["kubectlContext"] = kubectlContextProp } + databaseEncryptionProp, err := expandContainerClusterDatabaseEncryption(d.Get("database_encryption"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("database_encryption"); !tpgresource.IsEmptyValue(reflect.ValueOf(databaseEncryptionProp)) && (ok || !reflect.DeepEqual(v, databaseEncryptionProp)) { + obj["databaseEncryption"] = databaseEncryptionProp + } + releaseChannelProp, err := expandContainerClusterReleaseChannel(d.Get("release_channel"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("release_channel"); !tpgresource.IsEmptyValue(reflect.ValueOf(releaseChannelProp)) && (ok || !reflect.DeepEqual(v, releaseChannelProp)) { + obj["releaseChannel"] = releaseChannelProp + } return obj, nil } @@ -1169,6 +1179,14 @@ func expandContainerClusterKubectlContext(v interface{}, d tpgresource.Terraform return v, nil } +func expandContainerClusterDatabaseEncryption(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandContainerClusterReleaseChannel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + func GetContainerNodePoolCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { name, err := cai.AssetName(d, config, "//container.googleapis.com/projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/nodePools/{{name}}") if err != nil { diff --git a/mmv1/third_party/tgc/tests/data/example_container_cluster.json b/mmv1/third_party/tgc/tests/data/example_container_cluster.json index e863d272877f..c39ccef2fddd 100644 
--- a/mmv1/third_party/tgc/tests/data/example_container_cluster.json +++ b/mmv1/third_party/tgc/tests/data/example_container_cluster.json @@ -1,61 +1,79 @@ [ - { - "name": "//container.googleapis.com/projects/{{.Provider.project}}/locations/us-central1/clusters/my-gke-cluster", - "asset_type": "container.googleapis.com/Cluster", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/container/v1/rest", - "discovery_name": "Cluster", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "initialNodeCount": 1, - "location": "us-central1", - "name": "my-gke-cluster", - "network": "projects/{{.Provider.project}}/global/networks/default" - } - } - }, - { - "name": "//container.googleapis.com/projects/{{.Provider.project}}/locations/us-central1/clusters/my-gke-cluster/nodePools/my-node-pool", - "asset_type": "container.googleapis.com/NodePool", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/container/v1/rest", - "discovery_name": "NodePool", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "cluster": "projects/{{.Provider.project}}/global/clusters/my-gke-cluster", - "config": { - "machineType": "n1-standard-1", - "metadata": { - "disable-legacy-endpoints": "true" - }, - "oauthScopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ], - "preemptible": true + { + "name": "//container.googleapis.com/projects/{{.Provider.project}}/locations/us-central1/clusters/cluster-test", + "asset_type": "container.googleapis.com/Cluster", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/container/v1/rest", + "discovery_name": "Cluster", + "parent": 
"//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "databaseEncryption": [ + { + "key_name": "", + "state": "DECRYPTED" + } + ], + "initialNodeCount": 3, + "location": "us-central1", + "name": "cluster-test", + "network": "projects/{{.Provider.project}}/global/networks/default", + "releaseChannel": [ + { + "channel": "RAPID" + } + ] + } }, - "location": "us-central1", - "name": "my-node-pool" - } - } - }, - { - "name": "//iam.googleapis.com/projects/{{.Provider.project}}/serviceAccounts/placeholder-unique-id", - "asset_type": "iam.googleapis.com/ServiceAccount", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://iam.googleapis.com/$discovery/rest", - "discovery_name": "ServiceAccount", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "displayName": "Service Account", - "email": "service-account-id@{{.Provider.project}}.iam.gserviceaccount.com", - "projectId": "{{.Provider.project}}" - } + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + }, + { + "name": "//container.googleapis.com/projects/{{.Provider.project}}/locations/us-central1/clusters/cluster-test/nodePools/node-pool-test", + "asset_type": "container.googleapis.com/NodePool", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/container/v1/rest", + "discovery_name": "NodePool", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "cluster": "projects/{{.Provider.project}}/global/clusters/cluster-test", + "config": { + "machineType": "n1-standard-1", + "metadata": { + "disable-legacy-endpoints": "true" + }, + "oauthScopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ], + "preemptible": true + }, + "location": "us-central1", + "management": { + "autoRepair": true, + 
"autoUpgrade": true + }, + "name": "node-pool-test" + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + }, + { + "name": "//iam.googleapis.com/projects/{{.Provider.project}}/serviceAccounts/placeholder-QJgu5Gv8", + "asset_type": "iam.googleapis.com/ServiceAccount", + "resource": { + "version": "v1", + "discovery_document_uri": "https://iam.googleapis.com/$discovery/rest", + "discovery_name": "ServiceAccount", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "displayName": "Service Account", + "email": "service-account-cc@{{.Provider.project}}.iam.gserviceaccount.com", + "projectId": "{{.Provider.project}}" + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" } - } -] +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_container_cluster.tf b/mmv1/third_party/tgc/tests/data/example_container_cluster.tf index d0c4ee600403..f7294a4c1303 100644 --- a/mmv1/third_party/tgc/tests/data/example_container_cluster.tf +++ b/mmv1/third_party/tgc/tests/data/example_container_cluster.tf @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - + terraform { required_providers { google = { @@ -28,26 +28,34 @@ provider "google" { } resource "google_service_account" "default" { - account_id = "service-account-id" + account_id = "service-account-cc" display_name = "Service Account" } resource "google_container_cluster" "primary" { - name = "my-gke-cluster" + name = "cluster-test" location = "us-central1" # We can't create a cluster with no node pool defined, but we want to only use # separately managed node pools. So we create the smallest possible default # node pool and immediately delete it. 
remove_default_node_pool = true - initial_node_count = 1 + initial_node_count = 3 + + database_encryption { + state = "DECRYPTED" + } + + release_channel { + channel = "RAPID" + } } resource "google_container_node_pool" "primary_preemptible_nodes" { - name = "my-node-pool" + name = "node-pool-test" location = "us-central1" cluster = google_container_cluster.primary.name - node_count = 1 + node_count = 3 node_config { preemptible = true @@ -62,5 +70,10 @@ resource "google_container_node_pool" "primary_preemptible_nodes" { "https://www.googleapis.com/auth/cloud-platform", ] } + + management { + auto_repair = true + auto_upgrade = true + } } From f155232464286a924acfe5fce3503b5fa409a3ec Mon Sep 17 00:00:00 2001 From: avijitagarwal195 <167739835+avijitagarwal195@users.noreply.github.com> Date: Wed, 29 May 2024 21:22:42 +0530 Subject: [PATCH 013/356] Terraform Restore Improvements for GKEBackup (#10562) --- mmv1/products/gkebackup/BackupPlan.yaml | 23 ++ mmv1/products/gkebackup/RestorePlan.yaml | 130 +++++++++++ .../gkebackup_backupplan_permissive.tf.erb | 44 ++++ .../gkebackup_restoreplan_gitops_mode.tf.erb | 43 ++++ ...gkebackup_restoreplan_restore_order.tf.erb | 65 ++++++ .../gkebackup_restoreplan_volume_res.tf.erb | 47 ++++ ...esource_gke_backup_backup_plan_test.go.erb | 45 ++++ ...source_gke_backup_restore_plan_test.go.erb | 208 ++++++++++++++++++ 8 files changed, 605 insertions(+) create mode 100644 mmv1/templates/terraform/examples/gkebackup_backupplan_permissive.tf.erb create mode 100644 mmv1/templates/terraform/examples/gkebackup_restoreplan_gitops_mode.tf.erb create mode 100644 mmv1/templates/terraform/examples/gkebackup_restoreplan_restore_order.tf.erb create mode 100644 mmv1/templates/terraform/examples/gkebackup_restoreplan_volume_res.tf.erb create mode 100644 mmv1/third_party/terraform/services/gkebackup/resource_gke_backup_restore_plan_test.go.erb diff --git a/mmv1/products/gkebackup/BackupPlan.yaml b/mmv1/products/gkebackup/BackupPlan.yaml index 
78d744326cba..91cd5c513ff4 100644 --- a/mmv1/products/gkebackup/BackupPlan.yaml +++ b/mmv1/products/gkebackup/BackupPlan.yaml @@ -105,6 +105,23 @@ examples: deletion_protection: 'false' test_env_vars: project: :PROJECT_NAME + - !ruby/object:Provider::Terraform::Examples + name: 'gkebackup_backupplan_permissive' + primary_resource_id: 'permissive' + vars: + name: 'permissive-plan' + cluster_name: 'permissive-cluster' + deletion_protection: 'true' + network_name: 'default' + subnetwork_name: 'default' + test_vars_overrides: + deletion_protection: 'false' + network_name: 'acctest.BootstrapSharedTestNetwork(t, "gke-cluster")' + subnetwork_name: 'acctest.BootstrapSubnet(t, "gke-cluster", acctest.BootstrapSharedTestNetwork(t, "gke-cluster"))' + oics_vars_overrides: + deletion_protection: 'false' + test_env_vars: + project: :PROJECT_NAME - !ruby/object:Provider::Terraform::Examples name: 'gkebackup_backupplan_rpo_daily_window' primary_resource_id: 'rpo_daily_window' @@ -441,6 +458,12 @@ properties: required: true description: | The name of a Kubernetes Resource. + - !ruby/object:Api::Type::Boolean + name: permissiveMode + description: | + This flag specifies whether Backups will not fail when + Backup for GKE detects Kubernetes configuration that is + non-standard or requires additional setup to restore. 
- !ruby/object:Api::Type::Integer name: protectedPodCount output: true diff --git a/mmv1/products/gkebackup/RestorePlan.yaml b/mmv1/products/gkebackup/RestorePlan.yaml index 38bcb5216b5c..edb5f9747bbb 100644 --- a/mmv1/products/gkebackup/RestorePlan.yaml +++ b/mmv1/products/gkebackup/RestorePlan.yaml @@ -134,6 +134,54 @@ examples: subnetwork_name: 'acctest.BootstrapSubnet(t, "gke-cluster", acctest.BootstrapSharedTestNetwork(t, "gke-cluster"))' oics_vars_overrides: deletion_protection: 'false' + - !ruby/object:Provider::Terraform::Examples + name: 'gkebackup_restoreplan_gitops_mode' + primary_resource_id: 'gitops_mode' + vars: + name: 'gitops-mode' + network_name: 'default' + subnetwork_name: 'default' + test_env_vars: + project: :PROJECT_NAME + deletion_protection: 'true' + test_vars_overrides: + deletion_protection: 'false' + network_name: 'acctest.BootstrapSharedTestNetwork(t, "gke-cluster")' + subnetwork_name: 'acctest.BootstrapSubnet(t, "gke-cluster", acctest.BootstrapSharedTestNetwork(t, "gke-cluster"))' + oics_vars_overrides: + deletion_protection: 'false' + - !ruby/object:Provider::Terraform::Examples + name: 'gkebackup_restoreplan_restore_order' + primary_resource_id: 'restore_order' + vars: + name: 'restore-order' + network_name: 'default' + subnetwork_name: 'default' + test_env_vars: + project: :PROJECT_NAME + deletion_protection: 'true' + test_vars_overrides: + deletion_protection: 'false' + network_name: 'acctest.BootstrapSharedTestNetwork(t, "gke-cluster")' + subnetwork_name: 'acctest.BootstrapSubnet(t, "gke-cluster", acctest.BootstrapSharedTestNetwork(t, "gke-cluster"))' + oics_vars_overrides: + deletion_protection: 'false' + - !ruby/object:Provider::Terraform::Examples + name: 'gkebackup_restoreplan_volume_res' + primary_resource_id: 'volume_res' + vars: + name: 'volume-res' + network_name: 'default' + subnetwork_name: 'default' + test_env_vars: + project: :PROJECT_NAME + deletion_protection: 'true' + test_vars_overrides: + deletion_protection: 
'false' + network_name: 'acctest.BootstrapSharedTestNetwork(t, "gke-cluster")' + subnetwork_name: 'acctest.BootstrapSubnet(t, "gke-cluster", acctest.BootstrapSharedTestNetwork(t, "gke-cluster"))' + oics_vars_overrides: + deletion_protection: 'false' parameters: - !ruby/object:Api::Type::String name: 'location' @@ -285,6 +333,9 @@ properties: values: - :DELETE_AND_RESTORE - :FAIL_ON_CONFLICT + - :MERGE_SKIP_ON_CONFLICT + - :MERGE_REPLACE_VOLUME_ON_CONFLICT + - :MERGE_REPLACE_ON_CONFLICT - !ruby/object:Api::Type::Enum name: volumeDataRestorePolicy description: | @@ -496,6 +547,85 @@ properties: description: | A string that specifies the desired value in string format to use for transformation. + - !ruby/object:Api::Type::Array + name: volumeDataRestorePolicyBindings + description: | + A table that binds volumes by their scope to a restore policy. Bindings + must have a unique scope. Any volumes not scoped in the bindings are + subject to the policy defined in volume_data_restore_policy. + item_type: !ruby/object:Api::Type::NestedObject + required: true + properties: + - !ruby/object:Api::Type::Enum + name: policy + required: true + description: | + Specifies the mechanism to be used to restore this volume data. + See https://cloud.google.com/kubernetes-engine/docs/add-on/backup-for-gke/reference/rest/v1/RestoreConfig#VolumeDataRestorePolicy + for more information on each policy option. + values: + - :RESTORE_VOLUME_DATA_FROM_BACKUP + - :REUSE_VOLUME_HANDLE_FROM_BACKUP + - :NO_VOLUME_DATA_RESTORATION + - !ruby/object:Api::Type::Enum + name: volumeType + required: true + description: | + The volume type, as determined by the PVC's + bound PV, to apply the policy to. + values: + - :GCE_PERSISTENT_DISK + - !ruby/object:Api::Type::NestedObject + name: restoreOrder + description: | + It contains custom ordering to use on a Restore. 
+ properties: + - !ruby/object:Api::Type::Array + name: groupKindDependencies + required: true + description: | + A list of group kind dependency pairs + that is used by Backup for GKE to + generate a group kind restore order. + item_type: !ruby/object:Api::Type::NestedObject + required: true + properties: + - !ruby/object:Api::Type::NestedObject + name: satisfying + required: true + description: | + The satisfying group kind must be restored first + in order to satisfy the dependency. + properties: + - !ruby/object:Api::Type::String + name: resourceGroup + description: | + API Group of a Kubernetes resource, e.g. + "apiextensions.k8s.io", "storage.k8s.io", etc. + Use empty string for core group. + - !ruby/object:Api::Type::String + name: resourceKind + description: | + Kind of a Kubernetes resource, e.g. + "CustomResourceDefinition", "StorageClass", etc. + - !ruby/object:Api::Type::NestedObject + name: requiring + required: true + description: | + The requiring group kind requires that the satisfying + group kind be restored first. + properties: + - !ruby/object:Api::Type::String + name: resourceGroup + description: | + API Group of a Kubernetes resource, e.g. + "apiextensions.k8s.io", "storage.k8s.io", etc. + Use empty string for core group. + - !ruby/object:Api::Type::String + name: resourceKind + description: | + Kind of a Kubernetes resource, e.g. + "CustomResourceDefinition", "StorageClass", etc. 
- !ruby/object:Api::Type::String name: state output: true diff --git a/mmv1/templates/terraform/examples/gkebackup_backupplan_permissive.tf.erb b/mmv1/templates/terraform/examples/gkebackup_backupplan_permissive.tf.erb new file mode 100644 index 000000000000..764f82a37c61 --- /dev/null +++ b/mmv1/templates/terraform/examples/gkebackup_backupplan_permissive.tf.erb @@ -0,0 +1,44 @@ +resource "google_container_cluster" "primary" { + name = "<%= ctx[:vars]['cluster_name'] %>" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "<%= ctx[:test_env_vars]['project'] %>.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" + network = "<%= ctx[:vars]['network_name'] %>" + subnetwork = "<%= ctx[:vars]['subnetwork_name'] %>" +} + +resource "google_gke_backup_backup_plan" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['name'] %>" + cluster = google_container_cluster.primary.id + location = "us-central1" + retention_policy { + backup_delete_lock_days = 30 + backup_retain_days = 180 + } + backup_schedule { + cron_schedule = "0 9 * * 1" + } + backup_config { + include_volume_data = true + include_secrets = true + permissive_mode = true + selected_applications { + namespaced_names { + name = "app1" + namespace = "ns1" + } + namespaced_names { + name = "app2" + namespace = "ns2" + } + } + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/gkebackup_restoreplan_gitops_mode.tf.erb b/mmv1/templates/terraform/examples/gkebackup_restoreplan_gitops_mode.tf.erb new file mode 100644 index 000000000000..42fc0f25c73e --- /dev/null +++ b/mmv1/templates/terraform/examples/gkebackup_restoreplan_gitops_mode.tf.erb @@ -0,0 +1,43 @@ +resource "google_container_cluster" "primary" { + name = "<%= ctx[:vars]['name'] %>-cluster" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + 
workload_pool = "<%= ctx[:test_env_vars]['project'] %>.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" + network = "<%= ctx[:vars]['network_name'] %>" + subnetwork = "<%= ctx[:vars]['subnetwork_name'] %>" +} + +resource "google_gke_backup_backup_plan" "basic" { + name = "<%= ctx[:vars]['name'] %>" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = true + include_secrets = true + all_namespaces = true + } +} + +resource "google_gke_backup_restore_plan" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['name'] %>" + location = "us-central1" + backup_plan = google_gke_backup_backup_plan.basic.id + cluster = google_container_cluster.primary.id + restore_config { + all_namespaces = true + namespaced_resource_restore_mode = "MERGE_SKIP_ON_CONFLICT" + volume_data_restore_policy = "RESTORE_VOLUME_DATA_FROM_BACKUP" + cluster_resource_restore_scope { + all_group_kinds = true + } + cluster_resource_conflict_policy = "USE_EXISTING_VERSION" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/gkebackup_restoreplan_restore_order.tf.erb b/mmv1/templates/terraform/examples/gkebackup_restoreplan_restore_order.tf.erb new file mode 100644 index 000000000000..69649f0106cc --- /dev/null +++ b/mmv1/templates/terraform/examples/gkebackup_restoreplan_restore_order.tf.erb @@ -0,0 +1,65 @@ +resource "google_container_cluster" "primary" { + name = "<%= ctx[:vars]['name'] %>-cluster" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "<%= ctx[:test_env_vars]['project'] %>.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" + network = "<%= ctx[:vars]['network_name'] %>" + subnetwork = "<%= ctx[:vars]['subnetwork_name'] %>" +} + 
+resource "google_gke_backup_backup_plan" "basic" { + name = "<%= ctx[:vars]['name'] %>" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = true + include_secrets = true + all_namespaces = true + } +} + +resource "google_gke_backup_restore_plan" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['name'] %>" + location = "us-central1" + backup_plan = google_gke_backup_backup_plan.basic.id + cluster = google_container_cluster.primary.id + restore_config { + all_namespaces = true + namespaced_resource_restore_mode = "FAIL_ON_CONFLICT" + volume_data_restore_policy = "RESTORE_VOLUME_DATA_FROM_BACKUP" + cluster_resource_restore_scope { + all_group_kinds = true + } + cluster_resource_conflict_policy = "USE_EXISTING_VERSION" + restore_order { + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindA" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + } + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindC" + } + } + } + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/gkebackup_restoreplan_volume_res.tf.erb b/mmv1/templates/terraform/examples/gkebackup_restoreplan_volume_res.tf.erb new file mode 100644 index 000000000000..e607fa94d6e1 --- /dev/null +++ b/mmv1/templates/terraform/examples/gkebackup_restoreplan_volume_res.tf.erb @@ -0,0 +1,47 @@ +resource "google_container_cluster" "primary" { + name = "<%= ctx[:vars]['name'] %>-cluster" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "<%= ctx[:test_env_vars]['project'] %>.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" + network = "<%= 
ctx[:vars]['network_name'] %>" + subnetwork = "<%= ctx[:vars]['subnetwork_name'] %>" +} + +resource "google_gke_backup_backup_plan" "basic" { + name = "<%= ctx[:vars]['name'] %>" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = true + include_secrets = true + all_namespaces = true + } +} + +resource "google_gke_backup_restore_plan" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['name'] %>" + location = "us-central1" + backup_plan = google_gke_backup_backup_plan.basic.id + cluster = google_container_cluster.primary.id + restore_config { + all_namespaces = true + namespaced_resource_restore_mode = "FAIL_ON_CONFLICT" + volume_data_restore_policy = "NO_VOLUME_DATA_RESTORATION" + cluster_resource_restore_scope { + all_group_kinds = true + } + cluster_resource_conflict_policy = "USE_EXISTING_VERSION" + volume_data_restore_policy_bindings { + policy = "RESTORE_VOLUME_DATA_FROM_BACKUP" + volume_type = "GCE_PERSISTENT_DISK" + } + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/gkebackup/resource_gke_backup_backup_plan_test.go.erb b/mmv1/third_party/terraform/services/gkebackup/resource_gke_backup_backup_plan_test.go.erb index 8e5781246f10..883ce78ef09d 100644 --- a/mmv1/third_party/terraform/services/gkebackup/resource_gke_backup_backup_plan_test.go.erb +++ b/mmv1/third_party/terraform/services/gkebackup/resource_gke_backup_backup_plan_test.go.erb @@ -34,6 +34,15 @@ func TestAccGKEBackupBackupPlan_update(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, }, + { + Config: testAccGKEBackupBackupPlan_permissive(context), + }, + { + ResourceName: "google_gke_backup_backup_plan.backupplan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, { Config: testAccGKEBackupBackupPlan_full(context), }, @@ -109,6 +118,42 @@ resource 
"google_gke_backup_backup_plan" "backupplan" { `, context) } +func testAccGKEBackupBackupPlan_permissive(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-testcluster%{random_suffix}" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = false + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" "backupplan" { + name = "tf-test-testplan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = false + include_secrets = false + all_namespaces = true + permissive_mode = true + } + labels = { + "some-key-1": "some-value-1" + } +} +`, context) +} + func testAccGKEBackupBackupPlan_full(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_container_cluster" "primary" { diff --git a/mmv1/third_party/terraform/services/gkebackup/resource_gke_backup_restore_plan_test.go.erb b/mmv1/third_party/terraform/services/gkebackup/resource_gke_backup_restore_plan_test.go.erb new file mode 100644 index 000000000000..68e90ba7d81c --- /dev/null +++ b/mmv1/third_party/terraform/services/gkebackup/resource_gke_backup_restore_plan_test.go.erb @@ -0,0 +1,208 @@ + + +<% autogen_exception -%> +package gkebackup_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + + +func TestAccGKEBackupRestorePlan_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "deletion_protection": false, + "network_name": acctest.BootstrapSharedTestNetwork(t, 
"gke-cluster"), + "subnetwork_name": acctest.BootstrapSubnet(t, "gke-cluster", acctest.BootstrapSharedTestNetwork(t, "gke-cluster")), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEBackupRestorePlan_full(context), + }, + { + ResourceName: "google_gke_backup_restore_plan.restore_plan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "terraform_labels"}, + }, + { + Config: testAccGKEBackupRestorePlan_update(context), + }, + { + ResourceName: "google_gke_backup_restore_plan.restore_plan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "terraform_labels"}, + }, + }, + }) +} + +func testAccGKEBackupRestorePlan_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-restore-plan%{random_suffix}-cluster" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = "%{deletion_protection}" + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" "basic" { + name = "tf-test-restore-plan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = true + include_secrets = true + all_namespaces = true + } +} + +resource "google_gke_backup_restore_plan" "restore_plan" { + name = "tf-test-restore-plan%{random_suffix}" + location = "us-central1" + backup_plan = google_gke_backup_backup_plan.basic.id + cluster = google_container_cluster.primary.id + restore_config { + all_namespaces = 
true + namespaced_resource_restore_mode = "MERGE_SKIP_ON_CONFLICT" + volume_data_restore_policy = "RESTORE_VOLUME_DATA_FROM_BACKUP" + cluster_resource_restore_scope { + all_group_kinds = true + } + cluster_resource_conflict_policy = "USE_EXISTING_VERSION" + restore_order { + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindA" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + } + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindC" + } + } + } + volume_data_restore_policy_bindings { + policy = "RESTORE_VOLUME_DATA_FROM_BACKUP" + volume_type = "GCE_PERSISTENT_DISK" + } + } +} +`, context) +} + +func testAccGKEBackupRestorePlan_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-restore-plan%{random_suffix}-cluster" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = "%{deletion_protection}" + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" "basic" { + name = "tf-test-restore-plan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = true + include_secrets = true + all_namespaces = true + } +} + +resource "google_gke_backup_restore_plan" "restore_plan" { + name = "tf-test-restore-plan%{random_suffix}" + location = "us-central1" + backup_plan = google_gke_backup_backup_plan.basic.id + cluster = google_container_cluster.primary.id + restore_config { + all_namespaces = true + namespaced_resource_restore_mode = "MERGE_REPLACE_VOLUME_ON_CONFLICT" + 
volume_data_restore_policy = "RESTORE_VOLUME_DATA_FROM_BACKUP" + cluster_resource_restore_scope { + all_group_kinds = true + } + cluster_resource_conflict_policy = "USE_EXISTING_VERSION" + restore_order { + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindA" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + } + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindC" + } + } + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindC" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindD" + } + } + } + volume_data_restore_policy_bindings { + policy = "REUSE_VOLUME_HANDLE_FROM_BACKUP" + volume_type = "GCE_PERSISTENT_DISK" + } + } +} +`, context) +} \ No newline at end of file From b96ca8356fd19de4bbd54f2dedee31d9e56a350c Mon Sep 17 00:00:00 2001 From: Mikhail Zholobov Date: Wed, 29 May 2024 19:05:19 +0200 Subject: [PATCH 014/356] doc: Fix the description for attribute skip_initial_version_creation of the resource google_kms_crypto_key (#10014) --- mmv1/products/kms/CryptoKey.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mmv1/products/kms/CryptoKey.yaml b/mmv1/products/kms/CryptoKey.yaml index 2cad986a77fb..3bb03dcf7eb6 100644 --- a/mmv1/products/kms/CryptoKey.yaml +++ b/mmv1/products/kms/CryptoKey.yaml @@ -69,7 +69,8 @@ parameters: name: 'skipInitialVersionCreation' description: | If set to true, the request will create a CryptoKey without any CryptoKeyVersions. - You must use the `google_kms_key_ring_import_job` resource to import the CryptoKeyVersion. + You must use the `google_kms_crypto_key_version` resource to create a new CryptoKeyVersion + or `google_kms_key_ring_import_job` resource to import the CryptoKeyVersion. 
immutable: true url_param_only: true properties: From e60e70df3eecf73f6a9ba2e8fc6575e45f8246ed Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Wed, 29 May 2024 11:51:27 -0700 Subject: [PATCH 015/356] Reduced VCR nightly recording timeout for tests (#10826) --- .../go-plus/vcr-cassette-update/vcr_cassette_update.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/scripts/go-plus/vcr-cassette-update/vcr_cassette_update.sh b/.ci/scripts/go-plus/vcr-cassette-update/vcr_cassette_update.sh index 8baf28e037f7..691430c63fa4 100755 --- a/.ci/scripts/go-plus/vcr-cassette-update/vcr_cassette_update.sh +++ b/.ci/scripts/go-plus/vcr-cassette-update/vcr_cassette_update.sh @@ -115,7 +115,7 @@ if [[ -n $FAILED_TESTS_PATTERN ]]; then FAILED_TESTS=$(grep "^--- FAIL: TestAcc" replaying_test.log | awk '{print $3}') # test_exit_code=0 - timeout 4h parallel --jobs 16 TF_LOG=DEBUG TF_LOG_PATH_MASK=$local_path/testlog/recording/%s.log TF_ACC=1 TF_SCHEMA_PANIC_ON_ERROR=1 go test {1} -parallel 1 -v -run="{2}$" -timeout 240m -ldflags="-X=github.com/hashicorp/terraform-provider-google-beta/version.ProviderVersion=acc" ">>" testlog/recording_build/{2}_recording_test.log ::: $GOOGLE_TEST_DIRECTORY ::: $FAILED_TESTS + timeout 3h parallel --jobs 16 TF_LOG=DEBUG TF_LOG_PATH_MASK=$local_path/testlog/recording/%s.log TF_ACC=1 TF_SCHEMA_PANIC_ON_ERROR=1 go test {1} -parallel 1 -v -run="{2}$" -timeout 240m -ldflags="-X=github.com/hashicorp/terraform-provider-google-beta/version.ProviderVersion=acc" ">>" testlog/recording_build/{2}_recording_test.log ::: $GOOGLE_TEST_DIRECTORY ::: $FAILED_TESTS test_exit_code=$? 
@@ -195,4 +195,4 @@ else fi fi -set -e \ No newline at end of file +set -e From b8b4ffee424f838306af817902876337c05aff54 Mon Sep 17 00:00:00 2001 From: delimaneto <167232526+delimaneto@users.noreply.github.com> Date: Wed, 29 May 2024 18:52:02 +0000 Subject: [PATCH 016/356] Add support for `google_compute_node_group` to TGC (#10635) Co-authored-by: Stephen Lewis (Burrows) --- mmv1/templates/tgc/resource_converters.go.erb | 1 + .../example_google_compute_node_group.json | 50 +++++++++++++++++++ .../data/example_google_compute_node_group.tf | 38 ++++++++++++++ 3 files changed, 89 insertions(+) create mode 100644 mmv1/third_party/tgc/tests/data/example_google_compute_node_group.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_compute_node_group.tf diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index 12e0a514ba4c..3a48907fcaaf 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -107,6 +107,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_storage_bucket_iam_policy": {resourceConverterStorageBucketIamPolicy()}, "google_storage_bucket_iam_binding": {resourceConverterStorageBucketIamBinding()}, "google_storage_bucket_iam_member": {resourceConverterStorageBucketIamMember()}, + "google_compute_node_group": {compute.ResourceConverterComputeNodeGroup()}, "google_cloud_tasks_queue": {cloudtasks.ResourceConverterCloudTasksQueue()}, "google_pubsub_topic": {pubsub.ResourceConverterPubsubTopic()}, "google_kms_crypto_key": {kms.ResourceConverterKMSCryptoKey()}, diff --git a/mmv1/third_party/tgc/tests/data/example_google_compute_node_group.json b/mmv1/third_party/tgc/tests/data/example_google_compute_node_group.json new file mode 100644 index 000000000000..55f1d8fec834 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_compute_node_group.json @@ -0,0 +1,50 @@ +[ + { + "name": 
"//compute.googleapis.com/projects/{{.Provider.project}}/regions/us-central1/nodeTemplates/soletenant-tmpl", + "asset_type": "compute.googleapis.com/NodeTemplate", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "beta", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/beta/rest", + "discovery_name": "NodeTemplate", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "cpuOvercommitType": "NONE", + "name": "soletenant-tmpl", + "nodeType": "n1-node-96-624", + "region": "projects/{{.Provider.project}}/global/regions/us-central1" + } + }, + "ancestors": [ + "organizations/{{.OrgID}}" + ] + }, + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-f/nodeGroups/soletenant-group", + "asset_type": "compute.googleapis.com/NodeGroup", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "beta", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/beta/rest", + "discovery_name": "NodeGroup", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "autoscalingPolicy": { + "maxNodes": 10, + "minNodes": 1, + "mode": "ONLY_SCALE_OUT" + }, + "description": "example google_compute_node_group for Terraform Google Provider", + "maintenancePolicy": "RESTART_IN_PLACE", + "maintenanceWindow": { + "startTime": "08:00" + }, + "name": "soletenant-group", + "zone": "projects/{{.Provider.project}}/global/zones/us-central1-f" + } + }, + "ancestors": [ + "organizations/{{.OrgID}}" + ] + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_compute_node_group.tf b/mmv1/third_party/tgc/tests/data/example_google_compute_node_group.tf new file mode 100644 index 000000000000..7ea33cd000e3 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_compute_node_group.tf @@ -0,0 +1,38 @@ 
+terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_compute_node_template" "soletenant-tmpl" { + name = "soletenant-tmpl" + region = "us-central1" + node_type = "n1-node-96-624" +} + +resource "google_compute_node_group" "nodes" { + project = "{{.Provider.project}}" + provider = google-beta + name = "soletenant-group" + zone = "us-central1-f" + description = "example google_compute_node_group for Terraform Google Provider" + maintenance_policy = "RESTART_IN_PLACE" + maintenance_window { + start_time = "08:00" + } + + initial_size = 1 + node_template = google_compute_node_template.soletenant-tmpl.id + autoscaling_policy { + mode = "ONLY_SCALE_OUT" + min_nodes = 1 + max_nodes = 10 + } +} \ No newline at end of file From d60a11db46f4e4911abf32f6d93f9eb5029803bb Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Wed, 29 May 2024 15:09:50 -0500 Subject: [PATCH 017/356] Add 6.0.0 upgrade guide template (#10811) --- .../make-a-breaking-change.md | 6 +- .../guides/version_6_upgrade.html.markdown | 109 ++++++++++++++++++ 2 files changed, 112 insertions(+), 3 deletions(-) create mode 100644 mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown diff --git a/docs/content/develop/breaking-changes/make-a-breaking-change.md b/docs/content/develop/breaking-changes/make-a-breaking-change.md index b9f28f9f860c..a7ead0be56bf 100644 --- a/docs/content/develop/breaking-changes/make-a-breaking-change.md +++ b/docs/content/develop/breaking-changes/make-a-breaking-change.md @@ -1,6 +1,6 @@ --- -majorVersion: "5.0.0" -upgradeGuide: "version_5_upgrade.html.markdown" +majorVersion: "6.0.0" +upgradeGuide: "version_6_upgrade.html.markdown" title: "Make a breaking change" summary: "Guidance on making a breaking changes" weight: 20 @@ -177,7 +177,7 @@ Entries 
should focus on the changes that users need to make when upgrading to `{{% param "majorVersion" %}}`, rather than how to write configurations after upgrading. -See [Terraform provider for Google Cloud 4.0.0 Upgrade Guide](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/version_4_upgrade) +See [Terraform provider for Google Cloud 5.0.0 Upgrade Guide](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/version_5_upgrade) and other upgrade guides for examples. The upgrade guide and the actual breaking change will be merged only after both are completed. diff --git a/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown new file mode 100644 index 000000000000..48044048fffc --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown @@ -0,0 +1,109 @@ +--- +page_title: "Terraform Google Provider 6.0.0 Upgrade Guide" +description: |- + Terraform Google Provider 6.0.0 Upgrade Guide +--- + +# Terraform Google Provider 6.0.0 Upgrade Guide + +The `6.0.0` release of the Google provider for Terraform is a major version and +includes some changes that you will need to consider when upgrading. This guide +is intended to help with that process and focuses only on the changes necessary +to upgrade from the final `5.X` series release to `6.0.0`. + +Most of the changes outlined in this guide have been previously marked as +deprecated in the Terraform `plan`/`apply` output throughout previous provider +releases, up to and including the final `5.X` series release. These changes, +such as deprecation notices, can always be found in the CHANGELOG of the +affected providers. 
[google](https://github.com/hashicorp/terraform-provider-google/blob/main/CHANGELOG.md) +[google-beta](https://github.com/hashicorp/terraform-provider-google-beta/blob/main/CHANGELOG.md) + +## I accidentally upgraded to 6.0.0, how do I downgrade to `5.X`? + +If you've inadvertently upgraded to `6.0.0`, first see the +[Provider Version Configuration Guide](#provider-version-configuration) to lock +your provider version; if you've constrained the provider to a lower version +such as shown in the previous version example in that guide, Terraform will pull +in a `5.X` series release on `terraform init`. + +If you've only ran `terraform init` or `terraform plan`, your state will not +have been modified and downgrading your provider is sufficient. + +If you've ran `terraform refresh` or `terraform apply`, Terraform may have made +state changes in the meantime. + +* If you're using a local state, or a remote state backend that does not support +versioning, `terraform refresh` with a downgraded provider is likely sufficient +to revert your state. The Google provider generally refreshes most state +information from the API, and the properties necessary to do so have been left +unchanged. + +* If you're using a remote state backend that supports versioning such as +[Google Cloud Storage](https://developer.hashicorp.com/terraform/language/settings/backends/gcs), +you can revert the Terraform state file to a previous version. If you do +so and Terraform had created resources as part of a `terraform apply` in the +meantime, you'll need to either delete them by hand or `terraform import` them +so Terraform knows to manage them. 
+ +## Provider Version Configuration + +-> Before upgrading to version 6.0.0, it is recommended to upgrade to the most +recent `5.X` series release of the provider, make the changes noted in this guide, +and ensure that your environment successfully runs +[`terraform plan`](https://developer.hashicorp.com/terraform/cli/commands/plan) +without unexpected changes or deprecation notices. + +It is recommended to use [version constraints](https://developer.hashicorp.com/terraform/language/providers/requirements#requiring-providers) +when configuring Terraform providers. If you are following that recommendation, +update the version constraints in your Terraform configuration and run +[`terraform init`](https://developer.hashicorp.com/terraform/cli/commands/init) to download +the new version. + +If you aren't using version constraints, you can use `terraform init -upgrade` +in order to upgrade your provider to the latest released version. + +For example, given this previous configuration: + +```hcl +terraform { + required_providers { + google = { + version = "~> 5.30.0" + } + } +} +``` + +An updated configuration: + +```hcl +terraform { + required_providers { + google = { + version = "~> 6.0.0" + } + } +} +``` + +## Provider + +### Provider-level change example header + +Description of the change and how users should adjust their configuration (if needed). + +## Datasources + +## Datasource: `google_product_datasource` + +### Datasource-level change example header + +Description of the change and how users should adjust their configuration (if needed). + +## Resources + +## Resource: `google_product_resource` + +### Resource-level change example header + +Description of the change and how users should adjust their configuration (if needed). 
\ No newline at end of file From 1a1e62129ec2a6171019fcde13eea97c801c7e4b Mon Sep 17 00:00:00 2001 From: Gustavo Kotarsky <167570715+kotarsky@users.noreply.github.com> Date: Wed, 29 May 2024 20:32:37 +0000 Subject: [PATCH 018/356] Add support for google_compute_url_map to TGC (#10540) --- mmv1/templates/tgc/resource_converters.go.erb | 1 + .../tests/data/example_compute_url_map.json | 19 ++++++++++ .../tgc/tests/data/example_compute_url_map.tf | 20 ++++++++++ ...mple_google_compute_target_http_proxy.json | 37 +++++++++++++++++++ 4 files changed, 77 insertions(+) create mode 100644 mmv1/third_party/tgc/tests/data/example_compute_url_map.json create mode 100644 mmv1/third_party/tgc/tests/data/example_compute_url_map.tf diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index 3a48907fcaaf..e5068b51c248 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -60,6 +60,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_compute_subnetwork": {compute.ResourceConverterComputeSubnetwork()}, "google_compute_ssl_policy": {compute.ResourceConverterComputeSslPolicy()}, "google_compute_ssl_certificate": {compute.ResourceConverterComputeSslCertificate()}, + "google_compute_url_map": {compute.ResourceConverterComputeUrlMap()}, "google_compute_target_http_proxy": {compute.ResourceConverterComputeTargetHttpProxy()}, "google_compute_target_https_proxy": {compute.ResourceConverterComputeTargetHttpsProxy()}, "google_compute_target_ssl_proxy": {compute.ResourceConverterComputeTargetSslProxy()}, diff --git a/mmv1/third_party/tgc/tests/data/example_compute_url_map.json b/mmv1/third_party/tgc/tests/data/example_compute_url_map.json new file mode 100644 index 000000000000..fa42bbbfc670 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_compute_url_map.json @@ -0,0 +1,19 @@ +[ + { + "name": 
"//compute.googleapis.com/projects/{{.Provider.project}}/global/urlMaps/urlmap", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "asset_type": "compute.googleapis.com/UrlMap", + "resource": { + "version": "beta", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/beta/rest", + "discovery_name": "UrlMap", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "defaultService": "projects/{{.Provider.project}}/global/backendServices/default_service", + "description": "a description", + "name": "urlmap" + } + }, + "ancestors": ["organizations/{{.OrgID}}"] + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_compute_url_map.tf b/mmv1/third_party/tgc/tests/data/example_compute_url_map.tf new file mode 100644 index 000000000000..56ce3fcefb40 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_compute_url_map.tf @@ -0,0 +1,20 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_compute_url_map" "urlmap" { + name = "urlmap" + description = "a description" + + default_service = "default_service" + +} diff --git a/mmv1/third_party/tgc/tests/data/example_google_compute_target_http_proxy.json b/mmv1/third_party/tgc/tests/data/example_google_compute_target_http_proxy.json index 9835b902692d..5c0c8d1ad48b 100644 --- a/mmv1/third_party/tgc/tests/data/example_google_compute_target_http_proxy.json +++ b/mmv1/third_party/tgc/tests/data/example_google_compute_target_http_proxy.json @@ -15,5 +15,42 @@ "ancestors": [ "organizations/{{.OrgID}}" ] + }, + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/global/urlMaps/url-map", + "asset_type": "compute.googleapis.com/UrlMap", + "ancestry_path": 
"{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "beta", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/beta/rest", + "discovery_name": "UrlMap", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "hostRules": [ + { + "hosts": [ + "mysite.com" + ], + "pathMatcher": "allpaths" + } + ], + "name": "url-map", + "pathMatchers": [ + { + "name": "allpaths", + "pathRules": [ + { + "paths": [ + "/*" + ] + } + ] + } + ] + } + }, + "ancestors": [ + "organizations/{{.OrgID}}" + ] } ] \ No newline at end of file From 36e829bdebed31cc09dbf3b2a484d75ba3a21baf Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Wed, 29 May 2024 16:27:44 -0500 Subject: [PATCH 019/356] Fix universe_domain for ADC and access token auth cases (#10517) Co-authored-by: Riley Karson --- .../terraform/provider/provider.go.erb | 44 +++++-------- .../terraform/transport/config.go.erb | 63 ++++++++++++++----- 2 files changed, 63 insertions(+), 44 deletions(-) diff --git a/mmv1/third_party/terraform/provider/provider.go.erb b/mmv1/third_party/terraform/provider/provider.go.erb index fd3b7927461b..2400c070bc7d 100644 --- a/mmv1/third_party/terraform/provider/provider.go.erb +++ b/mmv1/third_party/terraform/provider/provider.go.erb @@ -3,7 +3,6 @@ package provider import ( "context" - "encoding/json" "fmt" "os" "strings" @@ -279,35 +278,11 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr }) } - // set universe_domain based on the service account key file. 
- if config.Credentials != "" { - contents, _, err := verify.PathOrContents(config.Credentials) - if err != nil { - return nil, diag.FromErr(fmt.Errorf("error loading service account credentials: %s", err)) - } - var content map[string]any - - if err := json.Unmarshal([]byte(contents), &content); err != nil { - return nil, diag.FromErr(err) - } - - if content["universe_domain"] != nil { - config.UniverseDomain = content["universe_domain"].(string) - } - } - - // Check if the user provided a value from the universe_domain field other than the default - if v, ok := d.GetOk("universe_domain"); ok && v.(string) != "googleapis.com" { - if config.UniverseDomain == "" { - return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' supplied directly to Terraform with no matching universe domain in credentials. Credentials with no 'universe_domain' set are assumed to be in the default universe.", v)) - } else if v.(string) != config.UniverseDomain { - if _, err := os.Stat(config.Credentials); err == nil { - return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' does not match the universe domain '%s' already set in the credential file '%s'. The 'universe_domain' provider configuration can not be used to override the universe domain that is defined in the active credential. Set the 'universe_domain' provider configuration when universe domain information is not already available in the credential, e.g. when authenticating with a JWT token.", v, config.UniverseDomain, config.Credentials)) - } else { - return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' does not match the universe domain '%s' supplied directly to Terraform. The 'universe_domain' provider configuration can not be used to override the universe domain that is defined in the active credential. Set the 'universe_domain' provider configuration when universe domain information is not already available in the credential, e.g. 
when authenticating with a JWT token.", v, config.UniverseDomain)) - } - } + // Set the universe domain to the configured value, if any + if v, ok := d.GetOk("universe_domain"); ok { + config.UniverseDomain = v.(string) } + // Configure DCL basePath transport_tpg.ProviderDCLConfigure(d, &config) @@ -406,6 +381,17 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr return nil, diag.FromErr(err) } + // Verify that universe domains match between credentials and configuration + if v, ok := d.GetOk("universe_domain"); ok { + if config.UniverseDomain == "" && v.(string) != "googleapis.com" { // v can't be "", as it wouldn't pass `ok` above + return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' supplied directly to Terraform with no matching universe domain in credentials. Credentials with no 'universe_domain' set are assumed to be in the default universe.", v)) + } else if v.(string) != config.UniverseDomain && !(config.UniverseDomain == "" && v.(string) == "googleapis.com") { + return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: '%s' does not match the universe domain '%s' supplied directly to Terraform. The 'universe_domain' provider configuration must match the universe domain supplied by credentials.", config.UniverseDomain, v)) + } + } else if config.UniverseDomain != "" && config.UniverseDomain != "googleapis.com" { + return nil, diag.FromErr(fmt.Errorf("Universe domain mismatch: Universe domain '%s' was found in credentials without a corresponding 'universe_domain' provider configuration set. 
Please set 'universe_domain' to '%s' or use different credentials.", config.UniverseDomain, config.UniverseDomain)) + } + return &config, nil } diff --git a/mmv1/third_party/terraform/transport/config.go.erb b/mmv1/third_party/terraform/transport/config.go.erb index 3bace4f1d380..7c9667635655 100644 --- a/mmv1/third_party/terraform/transport/config.go.erb +++ b/mmv1/third_party/terraform/transport/config.go.erb @@ -1136,6 +1136,7 @@ type StaticTokenSource struct { // If initialCredentialsOnly is true, don't follow the impersonation settings and return the initial set of creds // instead. func (c *Config) GetCredentials(clientScopes []string, initialCredentialsOnly bool) (googleoauth.Credentials, error) { + // UniverseDomain is assumed to be the previously set provider-configured value for access tokens if c.AccessToken != "" { contents, _, err := verify.PathOrContents(c.AccessToken) if err != nil { @@ -1159,12 +1160,25 @@ func (c *Config) GetCredentials(clientScopes []string, initialCredentialsOnly bo }, nil } + // UniverseDomain is set by the credential file's "universe_domain" field if c.Credentials != "" { contents, _, err := verify.PathOrContents(c.Credentials) if err != nil { return googleoauth.Credentials{}, fmt.Errorf("error loading credentials: %s", err) } + var content map[string]any + if err := json.Unmarshal([]byte(contents), &content); err != nil { + return googleoauth.Credentials{}, fmt.Errorf("error unmarshaling credentials: %s", err) + } + + if content["universe_domain"] != nil { + c.UniverseDomain = content["universe_domain"].(string) + } else { + // Unset UniverseDomain if not found in credentials file + c.UniverseDomain = "" + } + if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { opts := []option.ClientOption{option.WithCredentialsJSON([]byte(contents)), option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...), option.WithScopes(clientScopes...)} creds, err := 
transport.Creds(context.TODO(), opts...) @@ -1194,33 +1208,52 @@ func (c *Config) GetCredentials(clientScopes []string, initialCredentialsOnly bo } } + var creds *googleoauth.Credentials + var err error if c.ImpersonateServiceAccount != "" && !initialCredentialsOnly { opts := option.ImpersonateCredentials(c.ImpersonateServiceAccount, c.ImpersonateServiceAccountDelegates...) - creds, err := transport.Creds(context.TODO(), opts, option.WithScopes(clientScopes...)) + creds, err = transport.Creds(context.TODO(), opts, option.WithScopes(clientScopes...)) if err != nil { return googleoauth.Credentials{}, err } + } else { + log.Printf("[INFO] Authenticating using DefaultClient...") + log.Printf("[INFO] -- Scopes: %s", clientScopes) - return *creds, nil + if c.UniverseDomain != "" && c.UniverseDomain != "googleapis.com" { + log.Printf("[INFO] -- Sending JwtWithScope option") + creds, err = transport.Creds(context.Background(), option.WithScopes(clientScopes...), internaloption.EnableJwtWithScope()) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'. Original error: %w", err) + } + } else { + creds, err = transport.Creds(context.Background(), option.WithScopes(clientScopes...)) + if err != nil { + return googleoauth.Credentials{}, fmt.Errorf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'. 
Original error: %w", err) + } + } } - log.Printf("[INFO] Authenticating using DefaultClient...") - log.Printf("[INFO] -- Scopes: %s", clientScopes) - - if c.UniverseDomain != "" && c.UniverseDomain != "googleapis.com" { - log.Printf("[INFO] -- Sending JwtWithScope option") - creds, err := transport.Creds(context.Background(), option.WithScopes(clientScopes...), internaloption.EnableJwtWithScope()) + if creds.JSON != nil { + var content map[string]any + if err := json.Unmarshal([]byte(creds.JSON), &content); err != nil { + log.Printf("[WARN] error unmarshaling credentials, skipping Universe Domain detection") + c.UniverseDomain = "" + } else if content["universe_domain"] != nil { + c.UniverseDomain = content["universe_domain"].(string) + } else { + // Unset UniverseDomain if not found in ADC credentials file + c.UniverseDomain = "" + } + } else { + // creds.GetUniverseDomain may retrieve a domain from the metadata server + ud, err := creds.GetUniverseDomain() if err != nil { - return googleoauth.Credentials{}, fmt.Errorf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'. Original error: %w", err) + log.Printf("[WARN] Error retrieving universe domain: %s", err) } - return *creds, nil + c.UniverseDomain = ud } - creds, err := transport.Creds(context.Background(), option.WithScopes(clientScopes...)) - if err != nil { - return googleoauth.Credentials{}, fmt.Errorf("Attempted to load application default credentials since neither `credentials` nor `access_token` was set in the provider block. No credentials loaded. To use your gcloud credentials, run 'gcloud auth application-default login'. 
Original error: %w", err) - } - return *creds, nil } From 1c8ae12dd8d210da0059465e09015c252408f2b4 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Wed, 29 May 2024 14:28:55 -0700 Subject: [PATCH 020/356] Convert iam test template with Go (#10824) --- mmv1/api/resource.go | 81 ++- mmv1/api/resource/iam_policy.go | 8 +- mmv1/provider/template_data.go | 5 + .../datasource_iam.html.markdown.tmpl | 2 +- .../base_configs/iam_test_file.go.tmpl | 674 ++++++++++++++++++ mmv1/templates/terraform/iam_policy.go.tmpl | 4 +- .../terraform/resource_iam.html.markdown.tmpl | 2 +- 7 files changed, 764 insertions(+), 12 deletions(-) create mode 100644 mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index a363e6d05765..a750741ecce9 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -1014,6 +1014,9 @@ func (r Resource) IsInIdentity(t Type) bool { return false } +// ==================== +// Iam Methods +// ==================== func (r Resource) IamParentResourceName() string { var parentResourceName string @@ -1028,6 +1031,7 @@ func (r Resource) IamParentResourceName() string { return parentResourceName } +// For example: "projects/{{project}}/schemas/{{name}}" func (r Resource) IamResourceUri() string { var resourceUri string if r.IamPolicy != nil { @@ -1039,13 +1043,15 @@ func (r Resource) IamResourceUri() string { return resourceUri } -func (r Resource) IamImportUrl() string { - r.IamResourceUri() +// For example: "projects/%s/schemas/%s" +func (r Resource) IamResourceUriFormat() string { return regexp.MustCompile(`\{\{%?(\w+)\}\}`).ReplaceAllString(r.IamResourceUri(), "%s") } +// For example: the uri "projects/{{project}}/schemas/{{name}}" +// The paramerters are "project", "schema". 
func (r Resource) IamResourceParams() []string { - resourceUri := strings.ReplaceAll(r.IamResourceUri(), "{{name}}", fmt.Sprintf("{{%s}}}", r.IamParentResourceName())) + resourceUri := strings.ReplaceAll(r.IamResourceUri(), "{{name}}", fmt.Sprintf("{{%s}}", r.IamParentResourceName())) return r.ExtractIdentifiers(resourceUri) } @@ -1054,7 +1060,9 @@ func (r Resource) IsInIamResourceParams(param string) bool { return slices.Contains(r.IamResourceParams(), param) } -func (r Resource) IamStringQualifiers() string { +// For example: for the uri "projects/{{project}}/schemas/{{name}}", +// the string qualifiers are "u.project, u.schema" +func (r Resource) IamResourceUriStringQualifiers() string { var transformed []string for _, param := range r.IamResourceParams() { transformed = append(transformed, fmt.Sprintf("u.%s", google.Camelize(param, "lower"))) @@ -1062,6 +1070,8 @@ func (r Resource) IamStringQualifiers() string { return strings.Join(transformed[:], ", ") } +// For example, for the url "projects/{{project}}/schemas/{{schema}}", +// the identifiers are "project", "schema". 
// def extract_identifiers(url) func (r Resource) ExtractIdentifiers(url string) []string { matches := regexp.MustCompile(`\{\{%?(\w+)\}\}`).FindAllStringSubmatch(url, -1) @@ -1072,6 +1082,7 @@ func (r Resource) ExtractIdentifiers(url string) []string { return result } +// For example, "projects/{{project}}/schemas/{{name}}", "{{project}}/{{name}}", "{{name}}" func (r Resource) RawImportIdFormatsFromIam() []string { var importFormat []string @@ -1085,6 +1096,7 @@ func (r Resource) RawImportIdFormatsFromIam() []string { return ImportIdFormats(importFormat, r.Identity, r.BaseUrl) } +// For example, projects/(?P[^/]+)/schemas/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+) func (r Resource) ImportIdRegexesFromIam() string { var transformed []string @@ -1098,6 +1110,7 @@ func (r Resource) ImportIdRegexesFromIam() string { return strings.Join(transformed[:], "\", \"") } +// For example, "projects/{{project}}/schemas/{{name}}", "{{project}}/{{name}}", "{{name}}" func (r Resource) ImportIdFormatsFromIam() []string { importIdFormats := r.RawImportIdFormatsFromIam() var transformed []string @@ -1107,6 +1120,7 @@ func (r Resource) ImportIdFormatsFromIam() []string { return transformed } +// For example, projects/{{project}}/schemas/{{schema}} func (r Resource) FirstIamImportIdFormat() string { importIdFormats := r.ImportIdFormatsFromIam() if len(importIdFormats) == 0 { @@ -1133,6 +1147,7 @@ func (r Resource) IamSelfLinkIdentifiers() []string { return r.ExtractIdentifiers(selfLink) } +// Returns the resource properties that are idenfifires in the selflink url func (r Resource) IamSelfLinkProperties() []*Type { params := r.IamSelfLinkIdentifiers() @@ -1143,6 +1158,7 @@ func (r Resource) IamSelfLinkProperties() []*Type { return urlProperties } +// Returns the attributes from the selflink url func (r Resource) IamAttributes() []string { var attributes []string ids := r.IamSelfLinkIdentifiers() @@ -1161,6 +1177,19 @@ func (r Resource) IamAttributes() []string { return attributes } 
+// Since most resources define a "basic" config as their first example, +// we can reuse that config to create a resource to test IAM resources with. +func (r Resource) FirstTestExample() resource.Examples { + examples := google.Reject(r.Examples, func(e resource.Examples) bool { + return e.SkipTest + }) + examples = google.Reject(examples, func(e resource.Examples) bool { + return (r.ProductMetadata.VersionObjOrClosest(r.TargetVersionName).CompareTo(r.ProductMetadata.VersionObjOrClosest(e.MinVersion)) < 0) + }) + + return examples[0] +} + func (r Resource) ExamplePrimaryResourceId() string { examples := google.Reject(r.Examples, func(e resource.Examples) bool { return e.SkipTest @@ -1185,6 +1214,50 @@ func (r Resource) IamParentSourceType() string { return t } +func (r Resource) IamImportQualifiersForTest() string { + var importFormat string + if len(r.IamPolicy.ImportFormat) > 0 { + importFormat = r.IamPolicy.ImportFormat[0] + } else { + importFormat = r.IamPolicy.SelfLink + if importFormat == "" { + importFormat = r.SelfLinkUrl() + } + } + + params := r.ExtractIdentifiers(importFormat) + var importQualifiers []string + for i, param := range params { + if param == "project" { + if i != len(params)-1 { + // If the last parameter is project then we want to create a new project to use for the test, so don't default from the environment + importQualifiers = append(importQualifiers, "envvar.GetTestProjectFromEnv()") + } else { + importQualifiers = append(importQualifiers, `context["project_id"]`) + } + } else if param == "zone" && r.IamPolicy.SubstituteZoneValue { + importQualifiers = append(importQualifiers, "envvar.GetTestZoneFromEnv()") + } else if param == "region" || param == "location" { + example := r.FirstTestExample() + if example.RegionOverride == "" { + importQualifiers = append(importQualifiers, "envvar.GetTestRegionFromEnv()") + } else { + importQualifiers = append(importQualifiers, example.RegionOverride) + } + } else if param == "universe_domain" { + 
importQualifiers = append(importQualifiers, "envvar.GetTestUniverseDomainFromEnv()") + } else { + break + } + } + + if len(importQualifiers) == 0 { + return "" + } + + return strings.Join(importQualifiers, ", ") +} + func OrderProperties(props []*Type) []*Type { req := google.Select(props, func(p *Type) bool { return p.Required diff --git a/mmv1/api/resource/iam_policy.go b/mmv1/api/resource/iam_policy.go index 84f90f7e0c8f..812e18ab8170 100644 --- a/mmv1/api/resource/iam_policy.go +++ b/mmv1/api/resource/iam_policy.go @@ -40,9 +40,9 @@ type IamPolicy struct { // While Compute subnetwork uses {resource}/getIamPolicy MethodNameSeparator string `yaml:"method_name_separator"` - // The terraform type of the parent resource if it is not the same as the - // IAM resource. The IAP product needs these as its IAM policies refer - // to compute resources + // The terraform type (e.g. 'google_endpoints_service') of the parent resource + // if it is not the same as the IAM resource. The IAP product needs these + // as its IAM policies refer to compute resources. ParentResourceType string `yaml:"parent_resource_type"` // Some resources allow retrieving the IAM policy with GET requests, @@ -84,7 +84,7 @@ type IamPolicy struct { // Some resources (IAP) use fields named differently from the parent resource. // We need to use the parent's attributes to create an IAM policy, but they may not be - // named as the IAM IAM resource expects. + // named as the IAM resource expects. // This allows us to specify a file (relative to MM root) containing a partial terraform // config with the test/example attributes of the IAM resource. 
ExampleConfigBody string `yaml:"example_config_body"` diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index ba75aea2e1e4..63ab416c88f8 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -185,6 +185,11 @@ func (td *TemplateData) GenerateIamDatasourceDocumentationFile(filePath string, } func (td *TemplateData) GenerateIamPolicyTestFile(filePath string, resource api.Resource) { + templatePath := "templates/terraform/examples/base_configs/iam_test_file.go.tmpl" + templates := []string{ + templatePath, + } + td.GenerateFile(filePath, templatePath, resource, false, templates...) } func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, goFormat bool, templates ...string) { diff --git a/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl b/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl index 4bc96a6a7851..db0d361cc133 100644 --- a/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl +++ b/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl @@ -69,7 +69,7 @@ data "{{ $.IamTerraformName }}_policy" "policy" { {{ if eq $.MinVersionObj.Name "beta" }} provider = google-beta {{- end }} -{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody}} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} } ``` diff --git a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl new file mode 100644 index 000000000000..044f937ee1cc --- /dev/null +++ b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl @@ -0,0 +1,674 @@ +{{/* <% if hc_downstream */}} +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package {{ lower $.ProductMetadata.Name }}_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "{{ $.ImportPath }}/acctest" + "{{ $.ImportPath }}/envvar" + "{{ $.ImportPath }}/tpgresource" +) +{{ $example := $.FirstTestExample }} +func TestAcc{{ $.ResourceName }}IamBindingGenerated(t *testing.T) { + t.Parallel() + +{{/* iam_context.go.erb */}} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, +{{- if eq $.MinVersionObj.Name "beta" }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), +{{- else }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + {{- if $example.ExternalProviders }} + ExternalProviders: map[string]resource.ExternalProvider{ + {{- range $provider := $example.ExternalProviders }} + "{{$provider}}": {}, + {{- end }} + }, + {{- end }} +{{- end }} + Steps: []resource.TestStep{ + { + Config: testAcc{{ $.ResourceName }}IamBinding_basicGenerated(context), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_binding.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + { + // Test Iam Binding update + Config: testAcc{{ $.ResourceName 
}}IamBinding_updateGenerated(context), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_binding.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + }, + }) +} + +func TestAcc{{ $.ResourceName }}IamMemberGenerated(t *testing.T) { + t.Parallel() + +{{/* iam_context.go.erb */}} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, +{{- if eq $.MinVersionObj.Name "beta" }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), +{{- else }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), +{{- end }} +{{- if $example.ExternalProviders }} + ExternalProviders: map[string]resource.ExternalProvider{ + {{- range $provider := $example.ExternalProviders }} + "{{$provider}}": {}, + {{- end }} + }, +{{- end }} + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAcc{{ $.ResourceName }}IamMember_basicGenerated(context), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_member.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + }, + }) +} + +func TestAcc{{ $.ResourceName }}IamPolicyGenerated(t *testing.T) { + t.Parallel() + +{{- if $.IamPolicy.AdminIamRole }} + // This may skip test, so do it first + sa := envvar.GetTestServiceAccountFromEnv(t) +{{- end }} +{{/* iam_context.go.erb */}} +{{- if $.IamPolicy.AdminIamRole }} + context["service_account"] = sa +{{- end }} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, +{{- if 
eq $.MinVersionObj.Name "beta" }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), +{{- else }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), +{{- end }} +{{- if $example.ExternalProviders }} + ExternalProviders: map[string]resource.ExternalProvider{ + {{- range $provider := $example.ExternalProviders }} + "{{$provider}}": {}, + {{- end }} + }, +{{- end }} + Steps: []resource.TestStep{ + { + Config: testAcc{{ $.ResourceName }}IamPolicy_basicGenerated(context), + Check: resource.TestCheckResourceAttrSet("data.{{ $.IamTerraformName }}_policy.foo", "policy_data"), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_policy.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + { + Config: testAcc{{ $.ResourceName }}IamPolicy_emptyBinding(context), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_policy.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + }, + }) +} + +{{- if $.IamPolicy.IamConditionsRequestType }} +func TestAcc{{ $.ResourceName }}IamBindingGenerated_withCondition(t *testing.T) { + t.Parallel() + +{{/* iam_context.go.erb */}} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, +{{- if eq $.MinVersionObj.Name "beta" }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), +{{- else }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), +{{- end }} +{{- if $example.ExternalProviders }} + ExternalProviders: map[string]resource.ExternalProvider{ + {{- range $provider := $example.ExternalProviders }} + "{{$provider}}": {}, + {{- end }} + }, +{{- end }} + Steps: 
[]resource.TestStep{ + { + Config: testAcc{{ $.ResourceName }}IamBinding_withConditionGenerated(context), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_binding.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + }, + }) +} + +func TestAcc{{ $.ResourceName }}IamBindingGenerated_withAndWithoutCondition(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + +{{/* iam_context.go.erb */}} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, +{{- if eq $.MinVersionObj.Name "beta" }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), +{{- else }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), +{{- end }} +{{- if $example.ExternalProviders }} + ExternalProviders: map[string]resource.ExternalProvider{ + {{- range $provider := $example.ExternalProviders }} + "{{$provider}}": {}, + {{- end }} + }, +{{- end }} + Steps: []resource.TestStep{ + { + Config: testAcc{{ $.ResourceName }}IamBinding_withAndWithoutConditionGenerated(context), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_binding.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "{{ $.IamTerraformName }}_binding.foo2", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "{{ $.IamTerraformName 
}}_binding.foo3", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title_no_desc"]), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + }, + }) +} + +func TestAcc{{ $.ResourceName }}IamMemberGenerated_withCondition(t *testing.T) { + t.Parallel() + +{{/* iam_context.go.erb */}} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, +{{- if eq $.MinVersionObj.Name "beta" }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), +{{- else }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), +{{- end }} +{{- if $example.ExternalProviders }} + ExternalProviders: map[string]resource.ExternalProvider{ + {{- range $provider := $example.ExternalProviders }} + "{{$provider}}": {}, + {{- end }} + }, +{{- end }} + Steps: []resource.TestStep{ + { + Config: testAcc{{ $.ResourceName }}IamMember_withConditionGenerated(context), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_member.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + }, + }) +} + +func TestAcc{{ $.ResourceName }}IamMemberGenerated_withAndWithoutCondition(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + +{{/* iam_context.go.erb */}} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, +{{- if eq $.MinVersionObj.Name "beta" }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), +{{- else }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), +{{- end }} +{{- if $example.ExternalProviders }} + ExternalProviders: 
map[string]resource.ExternalProvider{ + {{- range $provider := $example.ExternalProviders }} + "{{$provider}}": {}, + {{- end }} + }, +{{- end }} + Steps: []resource.TestStep{ + { + Config: testAcc{{ $.ResourceName }}IamMember_withAndWithoutConditionGenerated(context), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_member.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "{{ $.IamTerraformName }}_member.foo2", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "{{ $.IamTerraformName }}_member.foo3", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title_no_desc"]), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + }, + }) +} + +func TestAcc{{ $.ResourceName }}IamPolicyGenerated_withCondition(t *testing.T) { + t.Parallel() + +{{- if $.IamPolicy.AdminIamRole }} + // This may skip test, so do it first + sa := envvar.GetTestServiceAccountFromEnv(t) +{{- end }} +{{/* iam_context.go.erb */}} +{{- if $.IamPolicy.AdminIamRole }} + context["service_account"] = sa +{{- end }} + +{{- if $.IamPolicy.AdminIamRole }} + // Test should have 2 bindings: one with a description and one without. Any < chars are converted to a unicode character by the API. 
+ expectedPolicyData := acctest.Nprintf(`{"bindings":[{"condition":{"description":"%{condition_desc}","expression":"%{condition_expr}","title":"%{condition_title}"},"members":["user:admin@hashicorptest.com"],"role":"%{role}"},{"condition":{"expression":"%{condition_expr}","title":"%{condition_title}-no-description"},"members":["user:admin@hashicorptest.com"],"role":"%{role}"}]}`, context) +{{- else }} + // Test should have 3 bindings: one with a description and one without, and a third for an admin role. Any < chars are converted to a unicode character by the API. + expectedPolicyData := acctest.Nprintf(`{"bindings":[{"members":["serviceAccount:%{service_account}"],"role":"%{admin_role}"},{"condition":{"description":"%{condition_desc}","expression":"%{condition_expr}","title":"%{condition_title}"},"members":["user:admin@hashicorptest.com"],"role":"%{role}"},{"condition":{"expression":"%{condition_expr}","title":"%{condition_title}-no-description"},"members":["user:admin@hashicorptest.com"],"role":"%{role}"}]}`, context) +{{- end }} + expectedPolicyData = strings.Replace(expectedPolicyData, "<", "\\u003c", -1) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, +{{- if eq $.MinVersionObj.Name "beta" }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), +{{- else }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), +{{- end }} +{{- if $example.ExternalProviders }} + ExternalProviders: map[string]resource.ExternalProvider{ + {{- range $provider := $example.ExternalProviders }} + "{{$provider}}": {}, + {{- end }} + }, +{{- end }} + Steps: []resource.TestStep{ + { + Config: testAcc{{ $.ResourceName }}IamPolicy_withConditionGenerated(context), + Check: resource.ComposeAggregateTestCheckFunc( + // TODO(SarahFrench) - uncomment once https://github.com/GoogleCloudPlatform/magic-modules/pull/6466 merged + // resource.TestCheckResourceAttr("data.google_iam_policy.foo", "policy_data", 
expectedPolicyData), + resource.TestCheckResourceAttr("{{ $.IamTerraformName }}_policy.foo", "policy_data", expectedPolicyData), + resource.TestCheckResourceAttrWith("data.google_iam_policy.foo", "policy_data", tpgresource.CheckGoogleIamPolicy), + ), + }, +{{- if not $.IamPolicy.SkipImportTest }} + { + ResourceName: "{{ $.IamTerraformName }}_policy.foo", + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportState: true, + ImportStateVerify: true, + }, +{{- end }} + }, + }) +} +{{- end }} + +func testAcc{{ $.ResourceName }}IamMember_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +resource "{{ $.IamTerraformName }}_member" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAcc{{ $.ResourceName }}IamPolicy_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +data "google_iam_policy" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +{{- if $.IamPolicy.AdminIamRole }} + binding { + role = "%{admin_role}" + members = ["serviceAccount:%{service_account}"] + } +{{- end }} +} + +resource "{{ $.IamTerraformName }}_policy" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + policy_data = data.google_iam_policy.foo.policy_data +} + +data "{{ $.IamTerraformName }}_policy" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + depends_on = [ + {{ 
$.IamTerraformName }}_policy.foo + ] +} +`, context) +} + +func testAcc{{ $.ResourceName }}IamPolicy_emptyBinding(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +data "google_iam_policy" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +} + +resource "{{ $.IamTerraformName }}_policy" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAcc{{ $.ResourceName }}IamBinding_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +resource "{{ $.IamTerraformName }}_binding" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAcc{{ $.ResourceName }}IamBinding_updateGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +resource "{{ $.IamTerraformName }}_binding" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} + +{{- if $.IamPolicy.IamConditionsRequestType }} +func testAcc{{ $.ResourceName }}IamBinding_withConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +resource "{{ $.IamTerraformName }}_binding" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + members = 
["user:admin@hashicorptest.com"] + condition { + title = "%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } +} +`, context) +} + +func testAcc{{ $.ResourceName }}IamBinding_withAndWithoutConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +resource "{{ $.IamTerraformName }}_binding" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} + +resource "{{ $.IamTerraformName }}_binding" "foo2" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + condition { + title = "%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } +} + +resource "{{ $.IamTerraformName }}_binding" "foo3" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + condition { + # Check that lack of description doesn't cause any issues + # Relates to issue : https://github.com/hashicorp/terraform-provider-google/issues/8701 + title = "%{condition_title_no_desc}" + expression = "%{condition_expr_no_desc}" + } +} +`, context) +} + +func testAcc{{ $.ResourceName }}IamMember_withConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +resource "{{ $.IamTerraformName }}_member" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + member = "user:admin@hashicorptest.com" + condition { + title = 
"%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } +} +`, context) +} + +func testAcc{{ $.ResourceName }}IamMember_withAndWithoutConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +resource "{{ $.IamTerraformName }}_member" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + member = "user:admin@hashicorptest.com" +} + +resource "{{ $.IamTerraformName }}_member" "foo2" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + member = "user:admin@hashicorptest.com" + condition { + title = "%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } +} + +resource "{{ $.IamTerraformName }}_member" "foo3" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + role = "%{role}" + member = "user:admin@hashicorptest.com" + condition { + # Check that lack of description doesn't cause any issues + # Relates to issue : https://github.com/hashicorp/terraform-provider-google/issues/8701 + title = "%{condition_title_no_desc}" + expression = "%{condition_expr_no_desc}" + } +} +`, context) +} + +func testAcc{{ $.ResourceName }}IamPolicy_withConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +{{/* example.config_test_body */}} + +data "google_iam_policy" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + condition { + # Check that lack of description doesn't cause any issues + # Relates to issue : https://github.com/hashicorp/terraform-provider-google/issues/8701 + title = 
"%{condition_title_no_desc}" + expression = "%{condition_expr_no_desc}" + } + } + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + condition { + title = "%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } + } +{{- if $.IamPolicy.AdminIamRole }} + binding { + role = "%{admin_role}" + members = ["serviceAccount:%{service_account}"] + } +{{- end }} +} + +resource "{{ $.IamTerraformName }}_policy" "foo" { +{{- if eq $.MinVersionObj.Name "beta" }} + provider = google-beta +{{- end }} +{{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} +{{- end }} diff --git a/mmv1/templates/terraform/iam_policy.go.tmpl b/mmv1/templates/terraform/iam_policy.go.tmpl index 5979b02aab47..1c476b0631c6 100644 --- a/mmv1/templates/terraform/iam_policy.go.tmpl +++ b/mmv1/templates/terraform/iam_policy.go.tmpl @@ -309,7 +309,7 @@ func (u *{{ $.ResourceName }}IamUpdater) SetResourceIamPolicy(policy *cloudresou } func (u *{{ $.ResourceName }}IamUpdater) qualify{{ $.Name }}Url(methodIdentifier string) (string, error) { - urlTemplate := fmt.Sprintf("{{"{{"}}{{ $.ProductMetadata.Name }}BasePath{{"}}"}}%s{{ $.IamPolicy.MethodNameSeparator }}%s", fmt.Sprintf("{{ $.IamImportUrl }}", {{ $.IamStringQualifiers }}), methodIdentifier) + urlTemplate := fmt.Sprintf("{{"{{"}}{{ $.ProductMetadata.Name }}BasePath{{"}}"}}%s{{ $.IamPolicy.MethodNameSeparator }}%s", fmt.Sprintf("{{ $.IamResourceUriFormat }}", {{ $.IamResourceUriStringQualifiers }}), methodIdentifier) url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) if err != nil { return "", err @@ -318,7 +318,7 @@ func (u *{{ $.ResourceName }}IamUpdater) qualify{{ $.Name }}Url(methodIdentifier } func (u *{{ $.ResourceName }}IamUpdater) GetResourceId() string { - return fmt.Sprintf("{{ $.IamImportUrl }}", {{ $.IamStringQualifiers }}) + return fmt.Sprintf("{{ $.IamResourceUriFormat }}", {{ 
$.IamResourceUriStringQualifiers }}) } func (u *{{ $.ResourceName }}IamUpdater) GetMutexKey() string { diff --git a/mmv1/templates/terraform/resource_iam.html.markdown.tmpl b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl index bd98136f76da..88bd230b9359 100644 --- a/mmv1/templates/terraform/resource_iam.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl @@ -204,7 +204,7 @@ The following arguments are supported: {{ range $param := $.IamSelfLinkProperties }} {{- if eq $param.Name "name" -}} -* `{{if $.IamPolicy.ParentResourceAttribute}}{{$.IamPolicy.ParentResourceAttribute}}{{else}}{{underscore $.Name}}{{end}}` - (Required) Used to find the parent resource to bind the IAM policy to +* `{{ $.IamParentResourceName }}` - (Required) Used to find the parent resource to bind the IAM policy to {{- else if or (eq (underscore $param.Name) "region") (eq (underscore $param.Name) "zone") }} * `{{ underscore $param.Name }}` - (Optional) {{ $param.Description }} Used to find the parent resource to bind the IAM policy to. If not specified, the value will be parsed from the identifier of the parent resource. If no {{ underscore $param.Name }} is provided in the parent identifier and no From 382815b0f9e28398d840b7ed503c97a811280f42 Mon Sep 17 00:00:00 2001 From: SizzleHsu Date: Wed, 29 May 2024 21:33:01 +0000 Subject: [PATCH 021/356] Add support for specifying storage pool when creating a disk. 
(#10793) --- mmv1/products/compute/Disk.yaml | 11 ++ .../compute/resource_compute_disk_test.go.erb | 109 ++++++++++++++++++ 2 files changed, 120 insertions(+) diff --git a/mmv1/products/compute/Disk.yaml b/mmv1/products/compute/Disk.yaml index 686bc6fcbd04..c848552852f3 100644 --- a/mmv1/products/compute/Disk.yaml +++ b/mmv1/products/compute/Disk.yaml @@ -513,3 +513,14 @@ properties: resource: 'License' imports: 'selfLink' custom_expand: 'templates/terraform/custom_expand/array_resourceref_with_validation.go.erb' + - !ruby/object:Api::Type::String + name: 'storagePool' + required: false + immutable: true + description: | + The URL of the storage pool in which the new disk is created. + For example: + * https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/storagePools/{storagePool} + * /projects/{project}/zones/{zone}/storagePools/{storagePool} + diff_suppress_func: 'tpgresource.CompareResourceNames' + custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb index cab535e74b9b..d116108ebd69 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb @@ -3,14 +3,17 @@ package compute_test import ( "fmt" + "net/http" "os" "testing" + "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" <% if version == "ga" -%> "google.golang.org/api/compute/v1" @@ -1516,3 +1519,109 @@ resource 
"google_compute_disk" "foobar" { `, add, strategy, diskName) } +func TestAccComputeDisk_storagePoolSpecified(t *testing.T) { + t.Parallel() + + storagePoolName := fmt.Sprintf("tf-test-storage-pool-%s", acctest.RandString(t, 10)) + storagePoolUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePools/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), storagePoolName) + diskName := fmt.Sprintf("tf-test-disk-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + PreConfig: setupTestingStoragePool(t, storagePoolName), + Config: testAccComputeDisk_storagePoolSpecified(diskName, storagePoolUrl), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk.foobar", "storage_pool", storagePoolName), + ), + }, + { + ResourceName: "google_compute_disk.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) + + cleanupTestingStoragePool(t, storagePoolName) +} + +func setupTestingStoragePool(t *testing.T, storagePoolName string) func() { + return func() { + config := acctest.GoogleProviderConfig(t) + headers := make(http.Header) + project := envvar.GetTestProjectFromEnv() + zone := envvar.GetTestZoneFromEnv() + url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools", config.ComputeBasePath, project, zone) + storagePoolTypeUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePoolTypes/hyperdisk-throughput", project, zone) + defaultTimeout := 20 * time.Minute + obj := make(map[string]interface{}) + obj["name"] = storagePoolName + obj["poolProvisionedCapacityGb"] = 10240 + obj["poolProvisionedThroughput"] = 180 + obj["storagePoolType"] = storagePoolTypeUrl + obj["capacityProvisioningType"] = "ADVANCED" + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + 
UserAgent: config.UserAgent, + Body: obj, + Timeout: defaultTimeout, + Headers: headers, + }) + if err != nil { + t.Errorf("Error creating StoragePool: %s", err) + } + + err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Creating StoragePool", config.UserAgent, defaultTimeout) + if err != nil { + t.Errorf("Error waiting to create StoragePool: %s", err) + } + } +} + +func cleanupTestingStoragePool(t *testing.T, storagePoolName string) { + config := acctest.GoogleProviderConfig(t) + headers := make(http.Header) + project := envvar.GetTestProjectFromEnv() + zone := envvar.GetTestZoneFromEnv() + url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools/%s", config.ComputeBasePath, project, zone, storagePoolName) + defaultTimeout := 20 * time.Minute + var obj map[string]interface{} + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: url, + UserAgent: config.UserAgent, + Body: obj, + Timeout: defaultTimeout, + Headers: headers, + }) + if err != nil { + t.Errorf("Error deleting StoragePool: %s", err) + } + + err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Deleting StoragePool", config.UserAgent, defaultTimeout) + if err != nil { + t.Errorf("Error waiting to delete StoragePool: %s", err) + } +} + +func testAccComputeDisk_storagePoolSpecified(diskName, storagePoolUrl string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + type = "hyperdisk-throughput" + size = 2048 + provisioned_throughput = 140 + storage_pool = "%s" +} +`, diskName, storagePoolUrl) +} From 229dc05f8aa1deb55c20080dd47489aa70490779 Mon Sep 17 00:00:00 2001 From: Feng Zhe Date: Wed, 29 May 2024 14:34:50 -0700 Subject: [PATCH 022/356] Add deprecation notice for require_ssl field (#10797) --- .../services/sql/resource_sql_database_instance.go.erb | 3 ++- .../website/docs/r/sql_database_instance.html.markdown | 2 +- 2 files changed, 3 insertions(+), 2 
deletions(-) diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb index 47d160befc20..825f8c2c5adc 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb @@ -443,6 +443,7 @@ is set to true. Defaults to ZONAL.`, Optional: true, AtLeastOneOf: ipConfigurationKeys, Description: `Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in ssl_mode if it has been set too.`, + Deprecated: "`require_ssl` will be fully deprecated in a future major release. For now, please use `ssl_mode` with a compatible `require_ssl` value instead.", }, "private_network": { Type: schema.TypeString, @@ -492,7 +493,7 @@ is set to true. Defaults to ZONAL.`, Optional: true, Computed: true, ValidateFunc: validation.StringInSlice([]string{"ALLOW_UNENCRYPTED_AND_ENCRYPTED", "ENCRYPTED_ONLY", "TRUSTED_CLIENT_CERTIFICATE_REQUIRED"}, false), - Description: `Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcment options compared to require_ssl. To change this field, also set the correspoding value in require_ssl.`, + Description: `Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcment options compared to require_ssl. 
To change this field, also set the correspoding value in require_ssl until next major release.`, AtLeastOneOf: ipConfigurationKeys, }, }, diff --git a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown index 0be73458f650..caea50a3601b 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown @@ -373,7 +373,7 @@ Specifying a network enables private IP. At least `ipv4_enabled` must be enabled or a `private_network` must be configured. This setting can be updated, but it cannot be removed after it is set. -* `require_ssl` - (Optional) Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in `ssl_mode`. +* `require_ssl` - (Optional, Deprecated) Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in `ssl_mode`. It will be fully deprecated in a future major release. For now, please use `ssl_mode` with a compatible `require_ssl` value instead. * `ssl_mode` - (Optional) Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcment options compared to `require_ssl`. To change this field, also set the correspoding value in `require_ssl`. * For PostgreSQL instances, the value pairs are listed in the [API reference doc](https://cloud.google.com/sql/docs/postgres/admin-api/rest/v1beta4/instances#ipconfiguration) for `ssl_mode` field. From 65036311d09c8aa076d92a72e23ed78d89f358c8 Mon Sep 17 00:00:00 2001 From: chayan kumar roy Date: Wed, 29 May 2024 22:15:15 +0000 Subject: [PATCH 023/356] Add standby_policy along with suspended / stopped target size. 
(#10776) --- ...urce_compute_instance_group_manager.go.erb | 104 ++++++++++++ ...compute_instance_group_manager_test.go.erb | 130 +++++++++++++++ ...mpute_region_instance_group_manager.go.erb | 78 +++++++++ ..._region_instance_group_manager_test.go.erb | 148 ++++++++++++++++++ ...mpute_instance_group_manager.html.markdown | 37 +++++ ...egion_instance_group_manager.html.markdown | 36 +++++ 6 files changed, 533 insertions(+) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb index 3f84c5a81488..043806126e5a 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb @@ -216,6 +216,49 @@ func ResourceComputeInstanceGroupManager() *schema.Resource { }, }, + <% unless version == "ga" -%> + "standby_policy": { + Computed: true, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Standby policy for stopped and suspended instances.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "initial_delay_sec": { + Computed: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 3600), + Description: `Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.`, + }, + + "mode": { + Computed: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"MANUAL", "SCALE_OUT_POOL"}, true), + Description: `Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. 
The default mode is "MANUAL".`, + }, + }, + }, + }, + + "target_suspended_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of suspended instances for this managed instance group.`, + }, + + "target_stopped_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of stopped instances for this managed instance group.`, + }, + <% end -%> + "update_policy": { Computed: true, Type: schema.TypeList, @@ -609,6 +652,11 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte TargetPools: tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)), AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), Versions: expandVersions(d.Get("version").([]interface{})), + <% unless version == "ga" -%> + StandbyPolicy: expandStandbyPolicy(d), + TargetSuspendedSize: int64(d.Get("target_suspended_size").(int)), + TargetStoppedSize: int64(d.Get("target_stopped_size").(int)), + <% end -%> UpdatePolicy: expandUpdatePolicy(d.Get("update_policy").([]interface{})), InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})), AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), @@ -832,6 +880,17 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf if err := d.Set("version", flattenVersions(manager.Versions)); err != nil { return err } + <% unless version == "ga" -%> + if err = d.Set("standby_policy", flattenStandbyPolicy(manager.StandbyPolicy)); err != nil { + return fmt.Errorf("Error setting standby_policy in state: %s", err.Error()) + } + if err := d.Set("target_suspended_size", manager.TargetSuspendedSize); err != nil { + return fmt.Errorf("Error setting target_suspended_size: %s", err) + } + if err := d.Set("target_stopped_size", manager.TargetStoppedSize); err != nil { + return fmt.Errorf("Error 
setting target_stopped_size: %s", err) + } + <% end -%> if err = d.Set("update_policy", flattenUpdatePolicy(manager.UpdatePolicy)); err != nil { return fmt.Errorf("Error setting update_policy in state: %s", err.Error()) } @@ -903,6 +962,25 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte change = true } + <% unless version == "ga" -%> + if d.HasChange("standby_policy") { + updatedManager.StandbyPolicy = expandStandbyPolicy(d) + change = true + } + + if d.HasChange("target_suspended_size") { + updatedManager.TargetSuspendedSize = int64(d.Get("target_suspended_size").(int)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetSuspendedSize") + change = true + } + + if d.HasChange("target_stopped_size") { + updatedManager.TargetStoppedSize = int64(d.Get("target_stopped_size").(int)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetStoppedSize") + change = true + } + <% end -%> + if d.HasChange("update_policy") { updatedManager.UpdatePolicy = expandUpdatePolicy(d.Get("update_policy").([]interface{})) change = true @@ -1227,6 +1305,19 @@ func expandInstanceLifecyclePolicy(configured []interface{}) *compute.InstanceGr return instanceLifecyclePolicy } +<% unless version == "ga" -%> +func expandStandbyPolicy(d *schema.ResourceData) *compute.InstanceGroupManagerStandbyPolicy { + standbyPolicy := &compute.InstanceGroupManagerStandbyPolicy{} + for _, sp := range d.Get("standby_policy").([]any) { + spData := sp.(map[string]any) + standbyPolicy.InitialDelaySec = int64(spData["initial_delay_sec"].(int)) + standbyPolicy.ForceSendFields = []string{"InitialDelaySec"} + standbyPolicy.Mode = spData["mode"].(string) + } + return standbyPolicy +} +<% end -%> + func expandUpdatePolicy(configured []interface{}) *compute.InstanceGroupManagerUpdatePolicy { updatePolicy := &compute.InstanceGroupManagerUpdatePolicy{} @@ -1363,6 +1454,19 @@ func flattenStatefulPolicyStatefulIps(d 
*schema.ResourceData, ipfieldName string return sorted } +<% unless version == "ga" -%> +func flattenStandbyPolicy(standbyPolicy *compute.InstanceGroupManagerStandbyPolicy) []map[string]any{ + results := []map[string]any{} + if standbyPolicy != nil { + sp := map[string]any{} + sp["initial_delay_sec"] = standbyPolicy.InitialDelaySec + sp["mode"] = standbyPolicy.Mode + results = append(results, sp) + } + return results +} +<% end -%> + func flattenUpdatePolicy(updatePolicy *compute.InstanceGroupManagerUpdatePolicy) []map[string]interface{} { results := []map[string]interface{}{} if updatePolicy != nil { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager_test.go.erb index ab1cf72bf58a..e7ad65047b09 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager_test.go.erb @@ -392,6 +392,42 @@ func TestAccInstanceGroupManager_stateful(t *testing.T) { }) } +<% unless version == "ga" -%> +func TestAccInstanceGroupManager_stoppedSuspendedTargetSize(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_stoppedSuspendedTargetSize(template, network, igm), + }, + { + ResourceName: "google_compute_instance_group_manager.sr-igm", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, 
+ { + Config: testAccInstanceGroupManager_stoppedSuspendedTargetSizeUpdate(template, network, igm), + }, + { + ResourceName: "google_compute_instance_group_manager.sr-igm", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} +<% end -%> + func TestAccInstanceGroupManager_waitForStatus(t *testing.T) { t.Parallel() @@ -1705,6 +1741,100 @@ resource "google_compute_instance_group_manager" "igm-basic" { `, network, template, target, igm) } +<% unless version == "ga" -%> +func testAccInstanceGroupManager_stoppedSuspendedTargetSize(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "sr-igm" { + name = "%s" +} + +resource "google_compute_instance_template" "sr-igm" { + name = "%s" + machine_type = "e2-medium" + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + network_interface { + network = "default" + } +} + +resource "google_compute_instance_group_manager" "sr-igm" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + base_instance_name = "tf-test-sr-igm" + zone = "us-central1-c" + target_size = 2 + standby_policy { + initial_delay_sec = 20 + mode = "MANUAL" + } + target_suspended_size = 2 + target_stopped_size = 1 +} +`, network, template, igm) +} + +func testAccInstanceGroupManager_stoppedSuspendedTargetSizeUpdate(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "sr-igm" { + name = "%s" +} + +resource "google_compute_instance_template" "sr-igm" { + name = "%s" + machine_type = "e2-medium" + disk { + 
source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + network_interface { + network = "default" + } +} + +resource "google_compute_instance_group_manager" "sr-igm" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + base_instance_name = "tf-test-sr-igm" + zone = "us-central1-c" + target_size = 2 + standby_policy { + mode = "SCALE_OUT_POOL" + } + target_suspended_size = 1 +} +`, network, template, igm) +} +<% end -%> + func testAccInstanceGroupManager_waitForStatus(template, target, igm, perInstanceConfig string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb index 548f447c61b8..e6d363fd1fe8 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb @@ -281,6 +281,49 @@ func ResourceComputeRegionInstanceGroupManager() *schema.Resource { }, }, + <% unless version == "ga" -%> + "standby_policy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + MaxItems: 1, + Description: `Standby policy for stopped and suspended instances.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "initial_delay_sec": { + Computed: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 3600), + Description: `Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. 
The value of initial delay must be between 0 and 3600 seconds. The default value is 0.`, + }, + + "mode": { + Computed: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"MANUAL", "SCALE_OUT_POOL"}, true), + Description: `Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. The default mode is "MANUAL".`, + }, + }, + }, + }, + + "target_suspended_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of suspended instances for this managed instance group.`, + }, + + "target_stopped_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of stopped instances for this managed instance group.`, + }, + <% end -%> + "update_policy": { Type: schema.TypeList, Computed: true, @@ -583,6 +626,11 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met TargetPools: tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)), AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), Versions: expandVersions(d.Get("version").([]interface{})), + <% unless version == "ga" -%> + StandbyPolicy: expandStandbyPolicy(d), + TargetSuspendedSize: int64(d.Get("target_suspended_size").(int)), + TargetStoppedSize: int64(d.Get("target_stopped_size").(int)), + <% end -%> UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})), InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})), AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), @@ -775,6 +823,17 @@ func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta if err := d.Set("version", flattenVersions(manager.Versions)); err != nil { return err } + <% unless version == "ga" -%> + if err = d.Set("standby_policy", 
flattenStandbyPolicy(manager.StandbyPolicy)); err != nil { + return fmt.Errorf("Error setting standby_policy in state: %s", err.Error()) + } + if err := d.Set("target_suspended_size", manager.TargetSuspendedSize); err != nil { + return fmt.Errorf("Error setting target_suspended_size: %s", err) + } + if err := d.Set("target_stopped_size", manager.TargetStoppedSize); err != nil { + return fmt.Errorf("Error setting target_stopped_size: %s", err) + } + <% end -%> if err := d.Set("update_policy", flattenRegionUpdatePolicy(manager.UpdatePolicy)); err != nil { return fmt.Errorf("Error setting update_policy in state: %s", err.Error()) } @@ -853,6 +912,25 @@ func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, met change = true } + <% unless version == "ga" -%> + if d.HasChange("standby_policy") { + updatedManager.StandbyPolicy = expandStandbyPolicy(d) + change = true + } + + if d.HasChange("target_suspended_size") { + updatedManager.TargetSuspendedSize = int64(d.Get("target_suspended_size").(int)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetSuspendedSize") + change = true + } + + if d.HasChange("target_stopped_size") { + updatedManager.TargetStoppedSize = int64(d.Get("target_stopped_size").(int)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetStoppedSize") + change = true + } + <% end -%> + if d.HasChange("update_policy") { updatedManager.UpdatePolicy = expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})) change = true diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.erb index a74e7c0d4bad..80d62f7435cc 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.erb +++ 
b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager_test.go.erb @@ -377,6 +377,43 @@ func TestAccRegionInstanceGroupManager_stateful(t *testing.T) { }) } + +<% unless version == "ga" -%> +func TestAccRegionInstanceGroupManager_stoppedSuspendedTargetSize(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_stoppedSuspendedTargetSize(template, network, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.sr-igm", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_stoppedSuspendedTargetSizeUpdate(template, network, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.sr-igm", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} +<% end -%> + func TestAccRegionInstanceGroupManager_APISideListRecordering(t *testing.T) { t.Parallel() @@ -1715,6 +1752,117 @@ resource "google_compute_region_instance_group_manager" "igm-basic" { `, context) } +<% unless version == "ga" -%> +func testAccRegionInstanceGroupManager_stoppedSuspendedTargetSize(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "sr-igm" { + name = "%s" +} + +resource "google_compute_instance_template" "sr-igm" { + name = "%s" + machine_type 
= "e2-medium" + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + network_interface { + network = "default" + } +} + +resource "google_compute_region_instance_group_manager" "sr-igm" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + base_instance_name = "tf-test-sr-igm" + region = "us-central1" + target_size = 2 + distribution_policy_target_shape = "ANY_SINGLE_ZONE" + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } + standby_policy { + initial_delay_sec = 20 + mode = "SCALE_OUT_POOL" + } + target_suspended_size = 2 + target_stopped_size = 1 +} +`, network, template, igm) +} + +func testAccRegionInstanceGroupManager_stoppedSuspendedTargetSizeUpdate(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "sr-igm" { + name = "%s" +} + +resource "google_compute_instance_template" "sr-igm" { + name = "%s" + machine_type = "e2-medium" + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + network_interface { + network = "default" + } +} + +resource "google_compute_region_instance_group_manager" "sr-igm" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + base_instance_name = "tf-test-sr-igm" + region = "us-central1" + target_size = 2 + distribution_policy_target_shape = "ANY_SINGLE_ZONE" + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + 
minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } + standby_policy { + initial_delay_sec = 30 + } + target_suspended_size = 1 + target_stopped_size = 2 +} +`, network, template, igm) +} +<% end -%> + <% unless version == "ga" -%> func testAccRegionInstanceGroupManager_resourceManagerTags(template_name, tag_name, igm_name, project_id string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance_group_manager.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance_group_manager.html.markdown index abdbd20091c3..7f59c534c5a4 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance_group_manager.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance_group_manager.html.markdown @@ -89,6 +89,31 @@ resource "google_compute_instance_group_manager" "appserver" { } ``` +## Example Usage with standby policy (`google-beta` provider) +```hcl +resource "google_compute_instance_group_manager" "igm-sr" { + provider = google-beta + name = "tf-sr-igm" + + base_instance_name = "tf-sr-igm-instance" + zone = "us-central1-a" + + target_size = 5 + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + standby_policy { + initial_delay_sec = 30 + mode = "MANUAL" + } + target_suspended_size = 2 + target_stopped_size = 1 +} +``` + ## Argument Reference The following arguments are supported: @@ -158,6 +183,12 @@ group. You can specify only one value. Structure is [documented below](#nested_a allInstancesConfig on the group, you must update the group's instances to apply the configuration. +* `standby_policy` - (Optional [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The standby policy for stopped and suspended instances. Structure is documented below. 
For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/suspended-and-stopped-vms-in-mig) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch) + +* `target_suspended_size` - (Optional [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The target number of suspended instances for this managed instance group. + +* `target_stopped_size` - (Optional [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The target number of stopped instances for this managed instance group. + * `stateful_disk` - (Optional) Disks created on the instances that will be preserved on instance delete, update, etc. Structure is [documented below](#nested_stateful_disk). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). * `stateful_internal_ip` - (Optional) Internal network IPs assigned to the instances that will be preserved on instance delete, update, etc. This map is keyed with the network interface name. Structure is [documented below](#nested_stateful_internal_ip). @@ -170,6 +201,12 @@ group. You can specify only one value. Structure is [documented below](#nested_a - - - +The `standby_policy` block supports: + +* `initial_delay_sec` - (Optional) - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. +* `mode` - (Optional) - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: `MANUAL`, `SCALE_OUT_POOL`. If `MANUAL`(default), you have full control over which VMs are stopped and suspended in the MIG. 
If `SCALE_OUT_POOL`, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. +- - - + The `update_policy` block supports: ```hcl diff --git a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown index 7ca78744a793..4a4e7be0e559 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_group_manager.html.markdown @@ -91,6 +91,30 @@ resource "google_compute_region_instance_group_manager" "appserver" { } } ``` +## Example Usage with standby policy (`google-beta` provider) +```hcl +resource "google_compute_region_instance_group_manager" "igm-sr" { + provider = google-beta + name = "tf-sr-igm" + + base_instance_name = "tf-sr-igm-instance" + region = "us-central1" + + target_size = 5 + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + standby_policy { + initial_delay_sec = 50 + mode = "SCALE_OUT_POOL" + } + target_suspended_size = 1 + target_stopped_size = 1 +} +``` ## Argument Reference @@ -160,6 +184,12 @@ group. You can specify only one value. Structure is documented below. For more i allInstancesConfig on the group, you must update the group's instances to apply the configuration. +* `standby_policy` - (Optional [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The standby policy for stopped and suspended instances. Structure is documented below. 
For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/suspended-and-stopped-vms-in-mig) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch) + +* `target_suspended_size` - (Optional [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The target number of suspended instances for this managed instance group. + +* `target_stopped_size` - (Optional [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) The target number of stopped instances for this managed instance group. + * `update_policy` - (Optional) The update policy for this managed instance group. Structure is [documented below](#nested_update_policy). For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch) * `distribution_policy_zones` - (Optional) The distribution policy for this managed instance @@ -177,6 +207,12 @@ group. You can specify one or more values. For more information, see the [offici - - - +The `standby_policy` block supports: + +* `initial_delay_sec` - (Optional) - Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0. +* `mode` - (Optional) - Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. Valid options are: `MANUAL`, `SCALE_OUT_POOL`. If `MANUAL`(default), you have full control over which VMs are stopped and suspended in the MIG. 
If `SCALE_OUT_POOL`, the MIG uses the VMs from the standby pools to accelerate the scale out by resuming or starting them and then automatically replenishes the standby pool with new VMs to maintain the target sizes. +- - - + The `update_policy` block supports: ```hcl From ebf018ad38d3905fa11e83c3d695b15e9b191d1d Mon Sep 17 00:00:00 2001 From: Feng Zhe Date: Thu, 30 May 2024 10:08:43 -0700 Subject: [PATCH 024/356] Add 6.0.0 upgrade guide for the deprecated `require_ssl` field. (#10833) --- .../website/docs/guides/version_6_upgrade.html.markdown | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown index 48044048fffc..b0530361aeb0 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown @@ -106,4 +106,10 @@ Description of the change and how users should adjust their configuration (if ne ### Resource-level change example header -Description of the change and how users should adjust their configuration (if needed). \ No newline at end of file +Description of the change and how users should adjust their configuration (if needed). + +## Resource: `google_sql_database_instance` + +### `settings.ip_configuration.require_ssl` is now removed + +Removed in favor of field `settings.ip_configuration.ssl_mode`. 
From fb80d263894fc7a9339554714841f4a9560bd870 Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Thu, 30 May 2024 10:54:53 -0700 Subject: [PATCH 025/356] Go push jobs (#10659) Co-authored-by: Stephen Lewis (Burrows) --- .ci/gcb-push-downstream.yml | 60 +++++++++++--------------- .ci/magician/cmd/sync_branch.go | 67 +++++++++++++++++++++++++++++ .ci/magician/cmd/wait_for_commit.go | 21 ++++++--- 3 files changed, 107 insertions(+), 41 deletions(-) create mode 100644 .ci/magician/cmd/sync_branch.go diff --git a/.ci/gcb-push-downstream.yml b/.ci/gcb-push-downstream.yml index 3794feccf8bd..c2081f601a6f 100644 --- a/.ci/gcb-push-downstream.yml +++ b/.ci/gcb-push-downstream.yml @@ -88,18 +88,15 @@ steps: - 'ga' - $COMMIT_SHA - - name: 'gcr.io/cloud-builders/git' + - name: 'gcr.io/graphite-docker-images/go-plus' waitFor: ["tpg-push"] secretEnv: ["GITHUB_TOKEN_CLASSIC"] - entrypoint: 'bash' + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' args: - - -c - - | - if [ "$BRANCH_NAME" == "main" ]; then - git push https://modular-magician:$$GITHUB_TOKEN_CLASSIC@github.com/GoogleCloudPlatform/magic-modules $COMMIT_SHA:tpg-sync - else - git push https://modular-magician:$$GITHUB_TOKEN_CLASSIC@github.com/GoogleCloudPlatform/magic-modules $COMMIT_SHA:tpg-sync-$BRANCH_NAME - fi + - 'sync-branch' + - 'tpg-sync' + - $BRANCH_NAME + - $COMMIT_SHA # TPGB - name: 'gcr.io/graphite-docker-images/build-environment' @@ -116,18 +113,15 @@ steps: - 'beta' - $COMMIT_SHA - - name: 'gcr.io/cloud-builders/git' + - name: 'gcr.io/graphite-docker-images/go-plus' waitFor: ["tpgb-push"] secretEnv: ["GITHUB_TOKEN_CLASSIC"] - entrypoint: 'bash' + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' args: - - -c - - | - if [ "$BRANCH_NAME" == "main" ]; then - git push https://modular-magician:$$GITHUB_TOKEN_CLASSIC@github.com/GoogleCloudPlatform/magic-modules $COMMIT_SHA:tpgb-sync - else - git push 
https://modular-magician:$$GITHUB_TOKEN_CLASSIC@github.com/GoogleCloudPlatform/magic-modules $COMMIT_SHA:tpgb-sync-$BRANCH_NAME - fi + - 'sync-branch' + - 'tpgb-sync' + - $BRANCH_NAME + - $COMMIT_SHA # TGC - name: 'gcr.io/graphite-docker-images/build-environment' @@ -144,18 +138,15 @@ steps: - 'beta' - $COMMIT_SHA - - name: 'gcr.io/cloud-builders/git' + - name: 'gcr.io/graphite-docker-images/go-plus' waitFor: ["tgc-push"] secretEnv: ["GITHUB_TOKEN_CLASSIC"] - entrypoint: 'bash' + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' args: - - -c - - | - if [ "$BRANCH_NAME" == "main" ]; then - git push https://modular-magician:$$GITHUB_TOKEN_CLASSIC@github.com/GoogleCloudPlatform/magic-modules $COMMIT_SHA:tgc-sync - else - git push https://modular-magician:$$GITHUB_TOKEN_CLASSIC@github.com/GoogleCloudPlatform/magic-modules $COMMIT_SHA:tgc-sync-$BRANCH_NAME - fi + - 'sync-branch' + - 'tgc-sync' + - $BRANCH_NAME + - $COMMIT_SHA # TF-OICS - name: 'gcr.io/graphite-docker-images/build-environment' @@ -172,18 +163,15 @@ steps: - 'beta' - $COMMIT_SHA - - name: 'gcr.io/cloud-builders/git' + - name: 'gcr.io/graphite-docker-images/go-plus' waitFor: ["tf-oics-push"] secretEnv: ["GITHUB_TOKEN_CLASSIC"] - entrypoint: 'bash' + entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' args: - - -c - - | - if [ "$BRANCH_NAME" == "main" ]; then - git push https://modular-magician:$$GITHUB_TOKEN_CLASSIC@github.com/GoogleCloudPlatform/magic-modules $COMMIT_SHA:tf-oics-sync - else - git push https://modular-magician:$$GITHUB_TOKEN_CLASSIC@github.com/GoogleCloudPlatform/magic-modules $COMMIT_SHA:tf-oics-sync-$BRANCH_NAME - fi + - 'sync-branch' + - 'tf-oics-sync' + - $BRANCH_NAME + - $COMMIT_SHA - name: 'gcr.io/graphite-docker-images/go-plus' entrypoint: '/workspace/.ci/scripts/go-plus/magician/exec.sh' diff --git a/.ci/magician/cmd/sync_branch.go b/.ci/magician/cmd/sync_branch.go new file mode 100644 index 000000000000..7722cc4f4c83 --- /dev/null +++ 
b/.ci/magician/cmd/sync_branch.go @@ -0,0 +1,67 @@ +/* +* Copyright 2024 Google LLC. All Rights Reserved. +* +* Licensed under the Apache License, Version 2.0 (the "License"); +* you may not use this file except in compliance with the License. +* You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. + */ +package cmd + +import ( + "fmt" + "magician/exec" + "magician/source" + "os" + + "github.com/spf13/cobra" +) + +var syncBranchCmd = &cobra.Command{ + Use: "sync-branch", + Short: "Push the given commit to the given sync branch", + Long: `This command updates the given sync branch with the given commit SHA. + + It expects the following parameters: + 1. SYNC_BRANCH_PREFIX + 2. BASE_BRANCH + 3. SHA + + It also expects the following environment variables: + 1. 
GITHUB_TOKEN_CLASSIC`, + RunE: func(cmd *cobra.Command, args []string) error { + syncBranchPrefix := args[0] + baseBranch := args[1] + sha := args[2] + + githubToken, ok := os.LookupEnv("GITHUB_TOKEN_CLASSIC") + if !ok { + return fmt.Errorf("did not provide GITHUB_TOKEN_CLASSIC environment variable") + } + + rnr, err := exec.NewRunner() + if err != nil { + return fmt.Errorf("error creating Runner: %s", err) + } + return execSyncBranchCmd(syncBranchPrefix, baseBranch, sha, githubToken, rnr) + }, +} + +func execSyncBranchCmd(syncBranchPrefix, baseBranch, sha, githubToken string, runner source.Runner) error { + syncBranch := getSyncBranch(syncBranchPrefix, baseBranch) + fmt.Println("SYNC_BRANCH: ", syncBranch) + + _, err := runner.Run("git", []string{"push", fmt.Sprintf("https://modular-magician:%s@github.com/GoogleCloudPlatform/magic-modules", githubToken), fmt.Sprintf("%s:%s", sha, syncBranch)}, nil) + return err +} + +func init() { + rootCmd.AddCommand(syncBranchCmd) +} diff --git a/.ci/magician/cmd/wait_for_commit.go b/.ci/magician/cmd/wait_for_commit.go index ae25dc153b2e..7aec749f2449 100644 --- a/.ci/magician/cmd/wait_for_commit.go +++ b/.ci/magician/cmd/wait_for_commit.go @@ -46,13 +46,10 @@ var waitFunc = func() { } func execWaitForCommit(syncBranchPrefix, baseBranch, sha string, runner source.Runner) error { - syncBranch := syncBranchPrefix + "-" + baseBranch - if baseBranch == "main" { - syncBranch = syncBranchPrefix - } + syncBranch := getSyncBranch(syncBranchPrefix, baseBranch) fmt.Println("SYNC_BRANCH: ", syncBranch) - if _, err := runner.Run("git", []string{"merge-base", "--is-ancestor", sha, "origin/" + syncBranch}, nil); err == nil { + if syncBranchHasCommit(sha, syncBranch, runner) { return fmt.Errorf("found %s in history of %s - dying to avoid double-generating that commit", sha, syncBranch) } @@ -97,6 +94,20 @@ func execWaitForCommit(syncBranchPrefix, baseBranch, sha string, runner source.R } } +func getSyncBranch(syncBranchPrefix, baseBranch 
string) string { + if baseBranch == "main" { + return syncBranchPrefix + } + return fmt.Sprintf("%s-%s", syncBranchPrefix, baseBranch) +} + +func syncBranchHasCommit(sha, syncBranch string, runner source.Runner) bool { + if _, err := runner.Run("git", []string{"merge-base", "--is-ancestor", sha, "origin/" + syncBranch}, nil); err == nil { + return true + } + return false +} + func gitRevParse(target string, runner source.Runner) (string, error) { return runner.Run("git", []string{"rev-parse", "--short", target}, nil) } From 64ac7343f38762501c3e6e40f409e99353eeb8b2 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Thu, 30 May 2024 14:47:25 -0500 Subject: [PATCH 026/356] Temporarily remove instructions for feature branch work (#10831) --- .../make-a-breaking-change.md | 37 +++---------------- 1 file changed, 6 insertions(+), 31 deletions(-) diff --git a/docs/content/develop/breaking-changes/make-a-breaking-change.md b/docs/content/develop/breaking-changes/make-a-breaking-change.md index a7ead0be56bf..7fe9526760fd 100644 --- a/docs/content/develop/breaking-changes/make-a-breaking-change.md +++ b/docs/content/develop/breaking-changes/make-a-breaking-change.md @@ -63,7 +63,7 @@ The general process for contributing a breaking change to the 1. Make the `main` branch forwards-compatible with the major release 2. Add deprecations and warnings to the `main` branch of `magic-modules` 3. Add upgrade guide entries to the `main` branch of `magic-modules` -4. Make the breaking change on `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` +4. Make the breaking change on ~~`FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}`~~ `main` temporarily These are covered in more detail in the following sections. The upgrade guide and the actual breaking change will be merged only after both are completed. 
@@ -184,36 +184,11 @@ The upgrade guide and the actual breaking change will be merged only after both ### Make the breaking change on `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` -When working on your breaking change, make sure that your base branch -is `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}`. This -means that you will follow the standard -[contribution process]({{< ref "/get-started/contribution-process" >}}) -with the following changes: - -1. Before you start, check out and sync your local `magic-modules` and provider - repositories with the upstream major release branches. - ```bash - cd ~/magic-modules - git checkout FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} - git pull --ff-only origin FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} - cd $GOPATH/src/github.com/hashicorp/terraform-provider-google - git checkout FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} - git pull --ff-only origin FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} - cd $GOPATH/src/github.com/hashicorp/terraform-provider-google-beta - git checkout FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} - git pull --ff-only origin FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} - ``` -1. Make sure that any deprecation notices and warnings that you added in previous sections - are present on the major release branch. Changes to the `main` branch will be - merged into the major release branch every Monday. -1. Make the breaking change. -1. Remove any deprecation notices and warnings (including in documentation) not already removed by the breaking change. -1. When you create your pull request, - [change the base branch](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-base-branch-of-a-pull-request) - to `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` -1. 
To resolve merge conflicts with `git rebase` or `git merge`, use `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` instead of `main`. - -The upgrade guide and the actual breaking change will be merged only after both are completed. +> [!CAUTION] +> `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` is not yet ready. If you want to make your +> breaking change ahead of time (possibly for early review), please submit a PR on `main` with the title prefix "6.0.0 - ". +> Ensure that a Github Issue is created as per all PR's, and our team will manually switch your PR over to +> `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} when it is ready. ## What's next? From fd55ea7f75759d64fb2874ba8528e6ef0632e0a1 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Thu, 30 May 2024 14:48:46 -0700 Subject: [PATCH 027/356] Add async type when converting yaml files (#10832) --- mmv1/products/datafusion/go_instance.yaml | 2 +- mmv1/products/pubsub/go_Schema.yaml | 2 +- mmv1/products/pubsub/go_Subscription.yaml | 2 +- mmv1/products/pubsub/go_Topic.yaml | 2 +- mmv1/templates/terraform/yaml_conversion.erb | 2 ++ 5 files changed, 6 insertions(+), 4 deletions(-) diff --git a/mmv1/products/datafusion/go_instance.yaml b/mmv1/products/datafusion/go_instance.yaml index 952b4ccb7a29..fecffa4352c2 100644 --- a/mmv1/products/datafusion/go_instance.yaml +++ b/mmv1/products/datafusion/go_instance.yaml @@ -30,7 +30,7 @@ timeouts: delete_minutes: 50 autogen_async: true async: - type: "OpAsync" + type: 'OpAsync' operation: base_url: '{{op_id}}' path: 'name' diff --git a/mmv1/products/pubsub/go_Schema.yaml b/mmv1/products/pubsub/go_Schema.yaml index 463cac379b16..c8642d31b3bc 100644 --- a/mmv1/products/pubsub/go_Schema.yaml +++ b/mmv1/products/pubsub/go_Schema.yaml @@ -32,7 +32,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: - type: "PollAsync" + type: 'PollAsync' check_response_func_existence: 'transport_tpg.PollCheckForExistence' check_response_func_absence: 
'transport_tpg.PollCheckForAbsence' suppress_error: false diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index 141f00def5af..6475ecb3fc16 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -35,7 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: - type: "PollAsync" + type: 'PollAsync' check_response_func_existence: 'transport_tpg.PollCheckForExistence' suppress_error: true actions: ['create'] diff --git a/mmv1/products/pubsub/go_Topic.yaml b/mmv1/products/pubsub/go_Topic.yaml index 229a9bbe29cf..f22cb68f4d7e 100644 --- a/mmv1/products/pubsub/go_Topic.yaml +++ b/mmv1/products/pubsub/go_Topic.yaml @@ -34,7 +34,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: - type: "PollAsync" + type: 'PollAsync' check_response_func_existence: 'transport_tpg.PollCheckForExistence' check_response_func_absence: 'transport_tpg.PollCheckForAbsence' suppress_error: true diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 9420dd3f85fd..63497552b8b7 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -168,6 +168,7 @@ autogen_async: <%= object.autogen_async %> <% unless object.async.nil? -%> async: <% if object.async.is_a? Provider::Terraform::PollAsync -%> + type: 'PollAsync' <% unless object.async.check_response_func_existence.nil? -%> check_response_func_existence: '<%= object.async.check_response_func_existence %>' <% end -%> @@ -185,6 +186,7 @@ async: <% end -%> <% end -%> <% if object.async.is_a? Api::OpAsync -%> + type: 'OpAsync' <% #async.operation %> <% unless object.async.operation.nil? 
-%> operation: From 78c55099c2b854c121c17080a66bdcba68d91065 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Thu, 30 May 2024 17:05:43 -0500 Subject: [PATCH 028/356] go rewrite - fix some more documentation diffs (#10834) --- mmv1/api/resource.go | 12 ++ mmv1/api/resource/examples.go | 142 ++++-------------- mmv1/products/pubsub/go_Subscription.yaml | 6 +- mmv1/products/pubsub/go_Topic.yaml | 6 +- mmv1/provider/template_data.go | 29 ++-- .../property_documentation.html.markdown.tmpl | 26 ++-- mmv1/templates/terraform/resource.go.tmpl | 2 +- .../terraform/resource.html.markdown.tmpl | 11 +- 8 files changed, 79 insertions(+), 155 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index a750741ecce9..6d9ad9bdf4c7 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -1293,6 +1293,18 @@ func (r Resource) GetPropertyUpdateMasksGroups() map[string][]string { return maskGroups } +// Formats whitespace in the style of the old Ruby generator's descriptions in documentation +func FormatDocDescription(desc string) string { + returnString := strings.ReplaceAll(desc, "\n\n", "\n") + + returnString = strings.ReplaceAll(returnString, "\n", "\n ") + + // fix removing for ruby -> go transition diffs + returnString = strings.ReplaceAll(returnString, "\n \n **Note**: This field is non-authoritative,", "\n\n **Note**: This field is non-authoritative,") + + return strings.TrimSuffix(returnString, "\n ") +} + func (r Resource) CustomTemplate(templatePath string, appendNewline bool) string { return resource.ExecuteTemplate(&r, templatePath, appendNewline) } diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index d5a5dde96cf8..7c6c45530140 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -179,13 +179,33 @@ func (e *Examples) UnmarshalYAML(n *yaml.Node) error { // Executes example templates for documentation and tests func (e *Examples) SetHCLText() { - e.DocumentationHCLText = ExecuteTemplate(e, 
e.ConfigPath, true) + docCopy := e + testCopy := e + docs_defaults := map[string]string{ + "PROJECT_NAME": "my-project-name", + "CREDENTIALS": "my/credentials/filename.json", + "REGION": "us-west1", + "ORG_ID": "123456789", + "ORG_DOMAIN": "example.com", + "ORG_TARGET": "123456789", + "BILLING_ACCT": "000000-0000000-0000000-000000", + "MASTER_BILLING_ACCT": "000000-0000000-0000000-000000", + "SERVICE_ACCT": "my@service-account.com", + "CUST_ID": "A01b123xz", + "IDENTITY_USER": "cloud_identity_user", + "PAP_DESCRIPTION": "description", + } + + // Apply doc defaults to test_env_vars from YAML + for key := range docCopy.TestEnvVars { + docCopy.TestEnvVars[key] = docs_defaults[docCopy.TestEnvVars[key]] + } + e.DocumentationHCLText = ExecuteTemplate(docCopy, docCopy.ConfigPath, true) - copy := e // Override vars to inject test values into configs - will have // - "a-example-var-value%{random_suffix}"" // - "%{my_var}" for overrides that have custom Golang values - for key, value := range copy.Vars { + for key, value := range testCopy.Vars { var newVal string if strings.Contains(value, "-") { newVal = fmt.Sprintf("tf-test-%s", value) @@ -199,15 +219,15 @@ func (e *Examples) SetHCLText() { if len(newVal) > 54 { newVal = newVal[:54] } - copy.Vars[key] = fmt.Sprintf("%s%%{random_suffix}", newVal) + testCopy.Vars[key] = fmt.Sprintf("%s%%{random_suffix}", newVal) } // Apply overrides from YAML - for key := range copy.TestVarsOverrides { - copy.Vars[key] = fmt.Sprintf("%%{%s}", key) + for key := range testCopy.TestVarsOverrides { + testCopy.Vars[key] = fmt.Sprintf("%%{%s}", key) } - e.TestHCLText = ExecuteTemplate(copy, copy.ConfigPath, true) + e.TestHCLText = ExecuteTemplate(testCopy, testCopy.ConfigPath, true) } func ExecuteTemplate(e any, templatePath string, appendNewline bool) string { @@ -235,114 +255,6 @@ func ExecuteTemplate(e any, templatePath string, appendNewline bool) string { return rs } -// func (e *Examples) config_documentation(pwd) { -// docs_defaults = { -// 
PROJECT_NAME: 'my-project-name', -// CREDENTIALS: 'my/credentials/filename.json', -// REGION: 'us-west1', -// ORG_ID: '123456789', -// ORG_DOMAIN: 'example.com', -// ORG_TARGET: '123456789', -// BILLING_ACCT: '000000-0000000-0000000-000000', -// MASTER_BILLING_ACCT: '000000-0000000-0000000-000000', -// SERVICE_ACCT: 'my@service-account.com', -// CUST_ID: 'A01b123xz', -// IDENTITY_USER: 'cloud_identity_user', -// PAP_DESCRIPTION: 'description' -// } -// @vars ||= {} -// @test_env_vars ||= {} -// body = lines(compile_file( -// { -// vars:, -// test_env_vars: test_env_vars.to_h { |k, v| [k, docs_defaults[v]] }, -// primary_resource_id: -// }, -// "//{pwd}///{config_path}" -// )) - -// // Remove region tags -// body = body.gsub(/// \[[a-zA-Z_ ]+\]\n/, '') -// body = body.gsub(/\n// \[[a-zA-Z_ ]+\]/, '') -// lines(compile_file( -// { content: body }, -// "//{pwd}/templates/terraform/examples/base_configs/documentation.tf.erb" -// )) -// } - -// func (e *Examples) config_test(pwd) { -// body = config_test_body(pwd) -// lines(compile_file( -// { -// content: body -// }, -// "//{pwd}/templates/terraform/examples/base_configs/test_body.go.erb" -// )) -// } - -// rubocop:disable Style/FormatStringToken -// func (e *Examples) config_test_body(pwd) { -// @vars ||= {} -// @test_env_vars ||= {} -// @test_vars_overrides ||= {} - -// // Construct map for vars to inject into config - will have -// // - "a-example-var-value%{random_suffix}"" -// // - "%{my_var}" for overrides that have custom Golang values -// rand_vars = vars.map do |k, v| -// // Some resources only allow underscores. 
-// testv = if v.include?('-') -// "tf-test-//{v}" -// elsif v.include?('_') -// "tf_test_//{v}" -// else -// // Some vars like descriptions shouldn't have prefix -// v -// end -// // Random suffix is 10 characters and standard name length <= 64 -// testv = "//{testv[0...54]}%{random_suffix}" -// [k, testv] -// end - -// rand_vars = rand_vars.to_h -// overrides = test_vars_overrides.to_h { |k, _| [k, "%{//{k}}"] } -// body = lines(compile_file( -// { -// vars: rand_vars.merge(overrides), -// test_env_vars: test_env_vars.to_h { |k, _| [k, "%{//{k}}"] }, -// primary_resource_id:, -// primary_resource_type: -// }, -// "//{pwd}///{config_path}" -// )) - -// // Remove region tags -// body = body.gsub(/// \[[a-zA-Z_ ]+\]\n/, '') -// body = body.gsub(/\n// \[[a-zA-Z_ ]+\]/, '') -// substitute_test_paths body -// } - -// func (e *Examples) config_oics(pwd) { -// @vars ||= [] -// @oics_vars_overrides ||= {} - -// rand_vars = vars.to_h { |k, str| [k, "//{str}-${local.name_suffix}"] } - -// // Examples with test_env_vars are skipped elsewhere -// body = lines(compile_file( -// { -// vars: rand_vars.merge(oics_vars_overrides), -// primary_resource_id: -// }, -// "//{pwd}///{config_path}" -// )) - -// // Remove region tags -// body = body.gsub(/// \[[a-zA-Z_ ]+\]\n/, '') -// body = body.gsub(/\n// \[[a-zA-Z_ ]+\]/, '') -// substitute_example_paths body -// } - func (e *Examples) OiCSLink() string { v := url.Values{} // TODO Q2: Values.Encode() sorts the values by key alphabetically. 
This will produce diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index 6475ecb3fc16..e21927fbab71 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -22,9 +22,9 @@ references: 'Managing Subscriptions': 'https://cloud.google.com/pubsub/docs/admin#managing_subscriptions' api: 'https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions' docs: - note: 'You can retrieve the email of the Google Managed Pub/Sub Service Account used for forwarding -by using the `google_project_service_identity` resource. -' + note: | + You can retrieve the email of the Google Managed Pub/Sub Service Account used for forwarding + by using the `google_project_service_identity` resource. base_url: 'projects/{{project}}/subscriptions' create_verb: 'PUT' update_url: 'projects/{{project}}/subscriptions/{{name}}' diff --git a/mmv1/products/pubsub/go_Topic.yaml b/mmv1/products/pubsub/go_Topic.yaml index f22cb68f4d7e..8bf902dd89c2 100644 --- a/mmv1/products/pubsub/go_Topic.yaml +++ b/mmv1/products/pubsub/go_Topic.yaml @@ -21,9 +21,9 @@ references: 'Managing Topics': 'https://cloud.google.com/pubsub/docs/admin#managing_topics' api: 'https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics' docs: - note: 'You can retrieve the email of the Google Managed Pub/Sub Service Account used for forwarding -by using the `google_project_service_identity` resource. -' + note: | + You can retrieve the email of the Google Managed Pub/Sub Service Account used for forwarding + by using the `google_project_service_identity` resource. 
base_url: 'projects/{{project}}/topics' create_verb: 'PUT' update_url: 'projects/{{project}}/topics/{{name}}' diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index 63ab416c88f8..797064d39689 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -71,20 +71,21 @@ func subtract(a, b int) int { } var TemplateFunctions = template.FuncMap{ - "title": google.SpaceSeparatedTitle, - "replace": strings.Replace, - "camelize": google.Camelize, - "underscore": google.Underscore, - "plural": google.Plural, - "contains": strings.Contains, - "join": strings.Join, - "lower": strings.ToLower, - "upper": strings.ToUpper, - "dict": wrapMultipleParams, - "format2regex": google.Format2Regex, - "orderProperties": api.OrderProperties, - "hasPrefix": strings.HasPrefix, - "sub": subtract, + "title": google.SpaceSeparatedTitle, + "replace": strings.Replace, + "camelize": google.Camelize, + "underscore": google.Underscore, + "plural": google.Plural, + "contains": strings.Contains, + "join": strings.Join, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "dict": wrapMultipleParams, + "format2regex": google.Format2Regex, + "orderProperties": api.OrderProperties, + "hasPrefix": strings.HasPrefix, + "sub": subtract, + "formatDocDescription": api.FormatDocDescription, } var GA_VERSION = "ga" diff --git a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl index e791ca08df37..60dc8df96638 100644 --- a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl @@ -2,36 +2,36 @@ * `{{ underscore $.Name }}` - {{- if and (eq $.MinVersion "beta") (not (eq $.ResourceMetadata.MinVersion "beta")) }} {{- if $.Required }} - (Required, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html){{ if $.DeprecationMessage }}, Deprecated {{ end }}) + (Required, 
[Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html){{ if $.DeprecationMessage }}, Deprecated{{ end }}) {{- else if not $.Output }} - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html){{ if $.DeprecationMessage }}, Deprecated {{ end }}) + (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html){{ if $.DeprecationMessage }}, Deprecated{{ end }}) {{- else if and $.Output $.ParentMetadata }} - (Output, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html){{ if $.DeprecationMessage }}, Deprecated {{ end }}) + (Output, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html){{ if $.DeprecationMessage }}, Deprecated{{ end }}) {{- else }} - ([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html){{ if $.DeprecationMessage }}, Deprecated {{ end }}) + ([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html){{ if $.DeprecationMessage }}, Deprecated{{ end }}) {{- end}} {{- else }} {{- if $.Required }} - (Required{{ if $.DeprecationMessage }}, Deprecated {{ end }}) + (Required{{ if $.DeprecationMessage }}, Deprecated{{ end }}) {{- else if not $.Output }} - (Optional{{ if $.DeprecationMessage }}, Deprecated {{ end }}) + (Optional{{ if $.DeprecationMessage }}, Deprecated{{ end }}) {{- else if and $.Output $.ParentMetadata }} - (Output{{ if $.DeprecationMessage }}, Deprecated {{ end }}) + (Output{{ if $.DeprecationMessage }}, Deprecated{{ end }}) {{- else if $.DeprecationMessage }} (Deprecated) {{- end}} {{- end }} - {{ $.Description }} + {{ formatDocDescription $.Description -}} {{- if and (and ($.IsA "Array") ($.ItemType.IsA "Enum")) (and (not $.Output) (not $.ItemType.SkipDocsValues))}} - {{- if not (or $.ItemType.DefaultValue (eq $.ItemType.DefaultValue "")) }} - Default value is [`{{ $.ItemType.DefaultValue }}`]. 
+ {{- if $.ItemType.DefaultValue }} + Default value is `{{ $.ItemType.DefaultValue }}`. {{- end }} Each value may be one of: {{ $.ItemType.EnumValuesToString "`" false }}. {{- else if and ($.IsA "Enum") (and (not $.Output) (not (and $.ItemType $.ItemType.SkipDocsValues)))}} - {{- if not (or $.DefaultValue (eq $.DefaultValue "")) }} - Default value is [`{{ $.DefaultValue }}`]. + {{- if $.DefaultValue }} + Default value is `{{ $.DefaultValue }}`. {{- end }} - Possible values are: {{ $.EnumValuesToString "`" false }}. + Possible values are: {{ $.EnumValuesToString "`" false }}. {{- end }} {{- if $.Sensitive }} **Note**: This property is sensitive and will not be displayed in the plan. diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index c60a8ff2fe1a..54a9854728c0 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -742,7 +742,7 @@ func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Updating {{ $.Name }} %q: %#v", d.Id(), obj) headers := make(http.Header) -{{ if $.UpdateMask -}} +{{- if $.UpdateMask -}} {{template "UpdateMask" $ -}} {{end}} {{- if $.CustomCode.PreUpdate -}} diff --git a/mmv1/templates/terraform/resource.html.markdown.tmpl b/mmv1/templates/terraform/resource.html.markdown.tmpl index 06d22f525203..a9bb6ee8ab17 100644 --- a/mmv1/templates/terraform/resource.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource.html.markdown.tmpl @@ -27,7 +27,7 @@ # ---------------------------------------------------------------------------- subcategory: "{{$.ProductMetadata.DisplayName}}" description: |- - {{$.Description -}} + {{ formatDocDescription $.Description }} --- # {{$.TerraformName}} @@ -54,16 +54,15 @@ To get more information about {{$.Name}}, see: * [{{$title}}]({{$link}}) {{- end }} {{- end }} -{{- end }} +{{ end }} {{- if $.Docs.Warning}} ~> **Warning:** {{$.Docs.Warning}} {{- end }} {{- if 
$.Docs.Note}} - -~> **Note:** {{$.Docs.Note}} +~> **Note:** {{$.Docs.Note }} {{- end }} -{{ if $.SensitiveProps }} +{{- if $.SensitiveProps }} ~> **Warning:** All arguments including the following potentially sensitive values will be stored in the raw state as plain text: {{ $.SensitivePropsToString }}. [Read more about sensitive data in state](https://www.terraform.io/language/state/sensitive-data). @@ -188,4 +187,4 @@ $ terraform import {{$.TerraformName}}.default {{$idFormat}} ## User Project Overrides This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). -{{- end }} \ No newline at end of file +{{ end }} \ No newline at end of file From f630747146b8a3ad8adc6b775abfb311de9401d8 Mon Sep 17 00:00:00 2001 From: patrickmoy <53500820+patrickmoy@users.noreply.github.com> Date: Fri, 31 May 2024 07:05:07 -0700 Subject: [PATCH 029/356] Removed required tag on maxFindingsPerInfoType.infoType (#10818) --- mmv1/products/dlp/InspectTemplate.yaml | 6 ++++- ...te_max_infotype_per_finding_default.tf.erb | 22 +++++++++++++++++++ 2 files changed, 27 insertions(+), 1 deletion(-) create mode 100644 mmv1/templates/terraform/examples/dlp_inspect_template_max_infotype_per_finding_default.tf.erb diff --git a/mmv1/products/dlp/InspectTemplate.yaml b/mmv1/products/dlp/InspectTemplate.yaml index 221868367ef2..d20a7a0468bd 100644 --- a/mmv1/products/dlp/InspectTemplate.yaml +++ b/mmv1/products/dlp/InspectTemplate.yaml @@ -55,6 +55,11 @@ examples: test_env_vars: project: :PROJECT_NAME skip_docs: true + - !ruby/object:Provider::Terraform::Examples + name: 'dlp_inspect_template_max_infotype_per_finding_default' + primary_resource_id: 'max_infotype_per_finding_default' + test_env_vars: + project: :PROJECT_NAME custom_code: !ruby/object:Provider::Terraform::CustomCode decoder: templates/terraform/decoders/dlp_template_id.go.erb encoder: 
templates/terraform/encoders/wrap_object_with_template_id.go.erb @@ -144,7 +149,6 @@ properties: properties: - !ruby/object:Api::Type::NestedObject name: 'infoType' - required: true description: | Type of information the findings limit applies to. Only one limit per infoType should be provided. If InfoTypeLimit does not have an infoType, the DLP API applies the limit against all infoTypes that are found but not diff --git a/mmv1/templates/terraform/examples/dlp_inspect_template_max_infotype_per_finding_default.tf.erb b/mmv1/templates/terraform/examples/dlp_inspect_template_max_infotype_per_finding_default.tf.erb new file mode 100644 index 000000000000..d0b2925ae465 --- /dev/null +++ b/mmv1/templates/terraform/examples/dlp_inspect_template_max_infotype_per_finding_default.tf.erb @@ -0,0 +1,22 @@ +resource "google_data_loss_prevention_inspect_template" "<%= ctx[:primary_resource_id] %>" { + parent = "projects/<%= ctx[:test_env_vars]['project'] %>" + + inspect_config { + info_types { + name = "EMAIL_ADDRESS" + } + info_types { + name = "PERSON_NAME" + } + + min_likelihood = "UNLIKELY" + limits { + max_findings_per_request = 333 + max_findings_per_item = 222 + max_findings_per_info_type { + # Entry with no info_type specifies the default value used by all info_types that don't specify their own limit + max_findings = 111 + } + } + } +} From ccfaa124026ca9c21b6e2d4dbcdf556dda00e5d3 Mon Sep 17 00:00:00 2001 From: roop2 <161707562+roop2@users.noreply.github.com> Date: Fri, 31 May 2024 19:45:59 +0530 Subject: [PATCH 030/356] Adding support for backup_config in netapp volumes volume resource (#10822) --- mmv1/products/netapp/volume.yaml | 23 ++ .../netapp/resource_netapp_volume_test.go | 268 +++++++++++++++++- 2 files changed, 289 insertions(+), 2 deletions(-) diff --git a/mmv1/products/netapp/volume.yaml b/mmv1/products/netapp/volume.yaml index a856038bcc72..1073e7525c17 100644 --- a/mmv1/products/netapp/volume.yaml +++ b/mmv1/products/netapp/volume.yaml @@ -452,6 +452,29 
@@ properties: description: |- Set the day or days of the month to make a snapshot (1-31). Accepts a comma separated number of days. Defaults to '1'. default_value: '1' + - !ruby/object:Api::Type::NestedObject + name: 'backupConfig' + description: |- + Backup configuration for the volume. + update_mask_fields: + - 'backup_config.backup_policies' + - 'backup_config.backup_vault' + - 'backup_config.scheduled_backup_enabled' + properties: + - !ruby/object:Api::Type::Array + name: 'backupPolicies' + description: |- + Specify a single backup policy ID for scheduled backups. Format: `projects/{{projectId}}/locations/{{location}}/backupPolicies/{{backupPolicyName}}` + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'backupVault' + description: |- + ID of the backup vault to use. A backup vault is reqired to create manual or scheduled backups. + Format: `projects/{{projectId}}/locations/{{location}}/backupVaults/{{backupVaultName}}` + - !ruby/object:Api::Type::Boolean + name: 'scheduledBackupEnabled' + description: |- + When set to true, scheduled backup is enabled on the volume. Omit if no backup_policy is specified. 
virtual_fields: - !ruby/object:Api::Type::Enum name: 'deletion_policy' diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go index 81574ec62d00..8b4d0b06ec07 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_volume_test.go @@ -4,16 +4,21 @@ package netapp_test import ( + "fmt" + "sort" "testing" + "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/services/netapp" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func TestAccNetappVolume_netappVolumeBasicExample_update(t *testing.T) { - t.Parallel() - context := map[string]interface{}{ "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog")), "random_suffix": acctest.RandString(t, 10), @@ -23,6 +28,9 @@ func TestAccNetappVolume_netappVolumeBasicExample_update(t *testing.T) { PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckNetappVolumeDestroyProducer(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, Steps: []resource.TestStep{ { Config: testAccNetappVolume_volumeBasicExample_basic(context), @@ -68,6 +76,34 @@ func TestAccNetappVolume_netappVolumeBasicExample_update(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"restore_parameters", "location", "name", "deletion_policy", "labels", "terraform_labels"}, }, + { + Config: 
testAccNetappVolume_volumeBasicExample_createBackupConfig(context), + }, + { + ResourceName: "google_netapp_volume.test_volume", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"restore_parameters", "location", "name", "deletion_policy", "labels", "terraform_labels"}, + }, + { + Config: testAccNetappVolume_volumeBasicExample_updateBackupConfigRemoveBackupPolicy(context), + Check: testAccNetappVolume_volumeBasicExample_cleanupScheduledBackup(t, "google_netapp_backup_vault.backup-vault"), + }, + { + ResourceName: "google_netapp_volume.test_volume", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"restore_parameters", "location", "name", "deletion_policy", "labels", "terraform_labels"}, + }, + { + Config: testAccNetappVolume_volumeBasicExample_updateBackupConfigRemoveBackupVault(context), + }, + { + ResourceName: "google_netapp_volume.test_volume", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"restore_parameters", "location", "name", "deletion_policy", "labels", "terraform_labels"}, + }, }, }) } @@ -395,3 +431,231 @@ data "google_compute_network" "default" { } `, context) } + +// Tests creating a volume with backup config +func testAccNetappVolume_volumeBasicExample_createBackupConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_netapp_storage_pool" "default2" { + name = "tf-test-pool%{random_suffix}" + location = "us-west2" + service_level = "EXTREME" + capacity_gib = "2048" + network = data.google_compute_network.default.id +} + +resource "google_netapp_volume" "test_volume" { + location = "us-west2" + name = "tf-test-test-volume%{random_suffix}" + capacity_gib = "200" + share_name = "tf-test-test-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default2.name + protocols = ["NFSV3"] + security_style = "UNIX" + # Delete protection only gets active after an NFS client mounts. 
+ # Setting it here is save, volume can still be deleted. + restricted_actions = ["DELETE"] + deletion_policy = "FORCE" + backup_config { + backup_policies = [ + google_netapp_backup_policy.backup-policy.id + ] + backup_vault = google_netapp_backup_vault.backup-vault.id + scheduled_backup_enabled = true + } +} + +resource "time_sleep" "wait_30_minutes" { + depends_on = [google_netapp_volume.test_volume] + create_duration = "30m" +} + +resource "google_netapp_backup_vault" "backup-vault" { + location = "us-west2" + name = "tf-test-vault%{random_suffix}" +} + +resource "google_netapp_backup_policy" "backup-policy" { + name = "tf-test-backup-policy%{random_suffix}" + location = "us-west2" + daily_backup_limit = 2 + weekly_backup_limit = 0 + monthly_backup_limit = 0 + enabled = true +} + +data "google_compute_network" "default" { + name = "%{network_name}" +} + `, context) +} + +// Tests updating the volume backup config +func testAccNetappVolume_volumeBasicExample_updateBackupConfigRemoveBackupPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_netapp_storage_pool" "default2" { + name = "tf-test-pool%{random_suffix}" + location = "us-west2" + service_level = "EXTREME" + capacity_gib = "2048" + network = data.google_compute_network.default.id +} + +resource "google_netapp_volume" "test_volume" { + location = "us-west2" + name = "tf-test-test-volume%{random_suffix}" + capacity_gib = "200" + share_name = "tf-test-test-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default2.name + protocols = ["NFSV3"] + security_style = "UNIX" + # Delete protection only gets active after an NFS client mounts. + # Setting it here is save, volume can still be deleted. 
+ restricted_actions = ["DELETE"] + deletion_policy = "FORCE" + backup_config { + backup_vault = google_netapp_backup_vault.backup-vault.id + } +} + +resource "time_sleep" "wait_30_minutes" { + depends_on = [google_netapp_volume.test_volume] + create_duration = "30m" +} + +resource "google_netapp_backup_vault" "backup-vault" { + location = "us-west2" + name = "tf-test-vault%{random_suffix}" +} + +resource "google_netapp_backup_policy" "backup-policy" { + name = "tf-test-backup-policy%{random_suffix}" + location = "us-west2" + daily_backup_limit = 2 + weekly_backup_limit = 0 + monthly_backup_limit = 0 + enabled = true +} + +data "google_compute_network" "default" { + name = "%{network_name}" +} + `, context) +} + +// Tests updating the volume to no backup config +func testAccNetappVolume_volumeBasicExample_updateBackupConfigRemoveBackupVault(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_netapp_storage_pool" "default2" { + name = "tf-test-pool%{random_suffix}" + location = "us-west2" + service_level = "EXTREME" + capacity_gib = "2048" + network = data.google_compute_network.default.id +} + +resource "google_netapp_volume" "test_volume" { + location = "us-west2" + name = "tf-test-test-volume%{random_suffix}" + capacity_gib = "200" + share_name = "tf-test-test-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default2.name + protocols = ["NFSV3"] + security_style = "UNIX" + # Delete protection only gets active after an NFS client mounts. + # Setting it here is save, volume can still be deleted. 
+ restricted_actions = ["DELETE"] + deletion_policy = "FORCE" +} + +resource "time_sleep" "wait_30_minutes" { + depends_on = [google_netapp_volume.test_volume] + create_duration = "30m" +} + +resource "google_netapp_backup_vault" "backup-vault" { + location = "us-west2" + name = "tf-test-vault%{random_suffix}" +} + +resource "google_netapp_backup_policy" "backup-policy" { + name = "tf-test-backup-policy%{random_suffix}" + location = "us-west2" + daily_backup_limit = 2 + weekly_backup_limit = 0 + monthly_backup_limit = 0 + enabled = true +} + +data "google_compute_network" "default" { + name = "%{network_name}" +} + `, context) +} + +// Cleanup the created backup of the test +func testAccNetappVolume_volumeBasicExample_cleanupScheduledBackup(t *testing.T, vault string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + rs, ok := s.RootModule().Resources[vault] + if !ok { + return fmt.Errorf("Not found: %v", vault) + } + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/backupVaults/{{name}}/backups") + if err != nil { + return fmt.Errorf("Error : %v", err) + } + response, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: config.UserAgent, + }) + backups := response["backups"].([]interface{}) + if len(backups) == 0 { + return nil + } + type BackupData struct { + name string + createTime time.Time + } + var backupDataList []BackupData + for i, _ := range backups { + backup := backups[i].(map[string]interface{}) + backupName := backup["name"].(string) + backupCreateTimeStr := backup["createTime"].(string) + backupCreateTime, err := time.Parse(time.RFC3339, backupCreateTimeStr) + if err != nil { + fmt.Errorf("Failed to parse backup create time : %v", err) + } + backupData := BackupData{ + name: backupName, + createTime: backupCreateTime, + } + backupDataList = 
append(backupDataList, backupData) + } + sort.Slice(backupDataList, func(i, j int) bool { + return backupDataList[i].createTime.After(backupDataList[j].createTime) + }) + for i, _ := range backupDataList { + baseUrl, err := tpgresource.ReplaceVarsForTest(config, rs, "{{NetappBasePath}}") + if err != nil { + return fmt.Errorf("Error : %v", err) + } + backupUrl := baseUrl + backupDataList[i].name + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + RawURL: backupUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + return fmt.Errorf("Delete Request Error : %v", err) + } + err = netapp.NetappOperationWaitTime(config, res, config.Project, "Deleting Backup", config.UserAgent, 10*time.Minute) + if err != nil { + return fmt.Errorf("Delete LRO Error : %v", err) + } + } + return nil + } +} From 3ef465fb745effaa0212ca6ad3f9ea4dae8632bd Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Fri, 31 May 2024 08:29:57 -0700 Subject: [PATCH 031/356] Force MM Upstream nightly sweepers to use hashicorp vcs root (#10836) --- .../.teamcity/components/constants.kt | 2 + .../projects/google_beta_subproject.kt | 2 +- .../projects/google_ga_subproject.kt | 2 +- .../components/projects/reused/mm_upstream.kt | 14 ++- .../tests/build_configuration_features.kt | 3 +- .../terraform/.teamcity/tests/sweepers.kt | 101 ++++++++++++------ 6 files changed, 81 insertions(+), 43 deletions(-) diff --git a/mmv1/third_party/terraform/.teamcity/components/constants.kt b/mmv1/third_party/terraform/.teamcity/components/constants.kt index 8383ea4ae762..0f9b6ef92c54 100644 --- a/mmv1/third_party/terraform/.teamcity/components/constants.kt +++ b/mmv1/third_party/terraform/.teamcity/components/constants.kt @@ -37,6 +37,8 @@ const val SharedResourceNameVcr = "ci-test-project-188019 Service Lock" // Build configuration names referenced in multiple places const val ServiceSweeperName = "Service Sweeper" +const val ServiceSweeperCronName 
= "$ServiceSweeperName - Cron" +const val ServiceSweeperManualName = "$ServiceSweeperName - Manual" const val ProjectSweeperName = "Project Sweeper" const val NightlyTestsProjectId = "NightlyTests" const val MMUpstreamProjectId = "MMUpstreamTests" diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt b/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt index 72233a701534..6b6e694f1866 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt @@ -38,7 +38,7 @@ fun googleSubProjectBeta(allConfig: AllContextParameters): Project { subProject(nightlyTests(betaId, ProviderNameBeta, HashiCorpVCSRootBeta, betaConfig)) // MM Upstream project that uses modular-magician/terraform-provider-google-beta - subProject(mmUpstream(betaId, ProviderNameBeta, ModularMagicianVCSRootBeta, vcrConfig)) + subProject(mmUpstream(betaId, ProviderNameBeta, ModularMagicianVCSRootBeta, HashiCorpVCSRootBeta, vcrConfig)) // VCR recording project that allows VCR recordings to be made using hashicorp/terraform-provider-google-beta OR modular-magician/terraform-provider-google-beta // This is only present for the Beta provider, as only TPGB VCR recordings are used. 
diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt b/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt index 19b4395d1c29..0f0605766ea6 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt @@ -37,7 +37,7 @@ fun googleSubProjectGa(allConfig: AllContextParameters): Project { subProject(nightlyTests(gaId, ProviderNameGa, HashiCorpVCSRootGa, gaConfig)) // MM Upstream project that uses modular-magician/terraform-provider-google - subProject(mmUpstream(gaId, ProviderNameGa, ModularMagicianVCSRootGa, vcrConfig)) + subProject(mmUpstream(gaId, ProviderNameGa, ModularMagicianVCSRootGa, HashiCorpVCSRootGa, vcrConfig)) params { readOnlySettings() diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt b/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt index aa04cab704ed..83efcf91f109 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt @@ -10,7 +10,8 @@ package projects.reused import MMUpstreamProjectId import ProviderNameBeta import ProviderNameGa -import ServiceSweeperName +import ServiceSweeperCronName +import ServiceSweeperManualName import SharedResourceNameVcr import builds.* import generated.PackagesListBeta @@ -24,7 +25,7 @@ import jetbrains.buildServer.configs.kotlin.Project import jetbrains.buildServer.configs.kotlin.vcs.GitVcsRoot import replaceCharsId -fun mmUpstream(parentProject: String, providerName: String, vcsRoot: GitVcsRoot, config: AccTestConfiguration): Project { +fun mmUpstream(parentProject: String, providerName: String, vcsRoot: GitVcsRoot, cronSweeperVcsRoot: GitVcsRoot, config: AccTestConfiguration): Project { // Create unique ID for the dynamically-created project 
var projectId = "${parentProject}_${MMUpstreamProjectId}" @@ -44,9 +45,11 @@ fun mmUpstream(parentProject: String, providerName: String, vcsRoot: GitVcsRoot, ProviderNameBeta -> sweepersList = SweepersListBeta else -> throw Exception("Provider name not supplied when generating a nightly test subproject") } - val serviceSweeperConfig = BuildConfigurationForServiceSweeper(providerName, ServiceSweeperName, sweepersList, projectId, vcsRoot, sharedResources, config) + val serviceSweeperManualConfig = BuildConfigurationForServiceSweeper(providerName, ServiceSweeperManualName, sweepersList, projectId, vcsRoot, sharedResources, config) + + val serviceSweeperCronConfig = BuildConfigurationForServiceSweeper(providerName, ServiceSweeperCronName, sweepersList, projectId, cronSweeperVcsRoot, sharedResources, config) val trigger = NightlyTriggerConfiguration(startHour=12) - serviceSweeperConfig.addTrigger(trigger) // Only the sweeper is on a schedule in this project + serviceSweeperCronConfig.addTrigger(trigger) // Only the sweeper is on a schedule in this project return Project { id(projectId) @@ -57,7 +60,8 @@ fun mmUpstream(parentProject: String, providerName: String, vcsRoot: GitVcsRoot, packageBuildConfigs.forEach { buildConfiguration: BuildType -> buildType(buildConfiguration) } - buildType(serviceSweeperConfig) + buildType(serviceSweeperManualConfig) + buildType(serviceSweeperCronConfig) params{ configureGoogleSpecificTestParameters(config) diff --git a/mmv1/third_party/terraform/.teamcity/tests/build_configuration_features.kt b/mmv1/third_party/terraform/.teamcity/tests/build_configuration_features.kt index 5624a5651479..ac63532dfac1 100644 --- a/mmv1/third_party/terraform/.teamcity/tests/build_configuration_features.kt +++ b/mmv1/third_party/terraform/.teamcity/tests/build_configuration_features.kt @@ -7,6 +7,7 @@ package tests +import ServiceSweeperName import builds.UseTeamCityGoTest import org.junit.Assert.assertTrue import org.junit.Assert.fail @@ -75,7 +76,7 @@ 
class BuildConfigurationFeatureTests { } } // service sweeper does not contain push artifacts to GCS step - if (bt.name != "Service Sweeper") { + if (!bt.name.startsWith(ServiceSweeperName)) { assertTrue("Build configuration `${bt.name}` contains a build step that pushes artifacts to GCS", found) } } diff --git a/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt b/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt index e4f55a2a7a82..727675e09731 100644 --- a/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt +++ b/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt @@ -9,6 +9,8 @@ package tests import ProjectSweeperName import ServiceSweeperName +import ServiceSweeperCronName +import ServiceSweeperManualName import jetbrains.buildServer.configs.kotlin.BuildType import jetbrains.buildServer.configs.kotlin.Project import jetbrains.buildServer.configs.kotlin.triggers.ScheduleTrigger @@ -19,76 +21,105 @@ import projects.googleCloudRootProject class SweeperTests { @Test - fun projectSweeperDoesNotSkipProjectSweep() { + fun projectSweeperConfig() { val root = googleCloudRootProject(testContextParameters()) // Find Project sweeper project val projectSweeperProject = getSubProject(root, projectSweeperProjectName) - // For the project sweeper to be skipped, SKIP_PROJECT_SWEEPER needs a value + // SKIP_PROJECT_SWEEPER should be empty so project sweepers will be run // See https://github.com/GoogleCloudPlatform/magic-modules/blob/501429790939717ca6dce76dbf4b1b82aef4e9d9/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_sweeper.go#L18-L26 projectSweeperProject.buildTypes.forEach{bt -> - val value = bt.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value - assertTrue("env.SKIP_PROJECT_SWEEPER should be set to an empty value, so project sweepers are NOT skipped in the ${projectSweeperProject.name} project. 
Value = `${value}` ", value == "") + val skipProjectSweeper = bt.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value + assertTrue("env.SKIP_PROJECT_SWEEPER should be set to an empty value, so project sweepers are NOT skipped in the ${projectSweeperProject.name} project. Value = `${skipProjectSweeper}` ", skipProjectSweeper == "") } } @Test - fun serviceSweepersSkipProjectSweeper() { + fun gaNightlyTestsServiceSweeperConfig() { val root = googleCloudRootProject(testContextParameters()) // Find GA nightly test project - val gaNightlyTestProject = getNestedProjectFromRoot(root, gaProjectName, nightlyTestsProjectName) - // Find GA MM Upstream project - val gaMmUpstreamProject = getNestedProjectFromRoot(root, gaProjectName, mmUpstreamProjectName) + val project = getNestedProjectFromRoot(root, gaProjectName, nightlyTestsProjectName) - // Find Beta nightly test project - val betaNightlyTestProject = getNestedProjectFromRoot(root, betaProjectName, nightlyTestsProjectName) - // Find Beta MM Upstream project - val betaMmUpstreamProject = getNestedProjectFromRoot(root, betaProjectName, mmUpstreamProjectName) - - val allProjects: ArrayList = arrayListOf(gaNightlyTestProject, gaMmUpstreamProject, betaNightlyTestProject, betaMmUpstreamProject) - allProjects.forEach{ project -> - // Find sweeper inside - val sweeper = getBuildFromProject(project, ServiceSweeperName) + // Find sweeper inside + val sweeper = getBuildFromProject(project, ServiceSweeperName) - // For the project sweeper to be skipped, SKIP_PROJECT_SWEEPER needs a value - // See https://github.com/GoogleCloudPlatform/magic-modules/blob/501429790939717ca6dce76dbf4b1b82aef4e9d9/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_sweeper.go#L18-L26 + // Check PACKAGE_PATH is in google (not google-beta) + val value = sweeper.params.findRawParam("PACKAGE_PATH")!!.value + assertEquals("./google/sweeper", value) - val value = sweeper.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value - 
assertTrue("env.SKIP_PROJECT_SWEEPER should be set to a non-empty string so project sweepers are skipped in the ${project.name} project. Value = `${value}` ", value != "") - } + // SKIP_PROJECT_SWEEPER should have a value so project sweepers will be skipped + // See https://github.com/GoogleCloudPlatform/magic-modules/blob/501429790939717ca6dce76dbf4b1b82aef4e9d9/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_sweeper.go#L18-L26 + val skipProjectSweeper = sweeper.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value + assertTrue("env.SKIP_PROJECT_SWEEPER should be set to a non-empty string so project sweepers are skipped in the ${project.name} project (${sweeper.name}). Value = `${skipProjectSweeper}` ", skipProjectSweeper != "") } @Test - fun gaNightlyProjectServiceSweeperRunsInGoogle() { + fun betaNightlyTestsServiceSweeperConfig() { val root = googleCloudRootProject(testContextParameters()) - // Find GA nightly test project - val gaNightlyTestProject = getNestedProjectFromRoot(root, gaProjectName, nightlyTestsProjectName) + // Find Beta nightly test project + val project = getNestedProjectFromRoot(root, betaProjectName, nightlyTestsProjectName) // Find sweeper inside - val sweeper = getBuildFromProject(gaNightlyTestProject, ServiceSweeperName) + val sweeper: BuildType = getBuildFromProject(project, ServiceSweeperName) - // Check PACKAGE_PATH is in google (not google-beta) + // Check PACKAGE_PATH is in google-beta val value = sweeper.params.findRawParam("PACKAGE_PATH")!!.value - assertEquals("./google/sweeper", value) + assertEquals("./google-beta/sweeper", value) + + // SKIP_PROJECT_SWEEPER should have a value so project sweepers will be skipped + // See https://github.com/GoogleCloudPlatform/magic-modules/blob/501429790939717ca6dce76dbf4b1b82aef4e9d9/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_sweeper.go#L18-L26 + val skipProjectSweeper = 
sweeper.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value + assertTrue("env.SKIP_PROJECT_SWEEPER should be set to a non-empty string so project sweepers are skipped in the ${project.name} project (${sweeper.name}). Value = `${skipProjectSweeper}` ", skipProjectSweeper != "") } @Test - fun betaNightlyProjectServiceSweeperRunsInGoogleBeta() { + fun gaMmUpstreamServiceSweeperConfig() { val root = googleCloudRootProject(testContextParameters()) // Find Beta nightly test project - val betaNightlyTestProject = getNestedProjectFromRoot(root, betaProjectName, nightlyTestsProjectName) + val project = getNestedProjectFromRoot(root, gaProjectName, mmUpstreamProjectName) + + // Find sweepers inside + val cronSweeper = getBuildFromProject(project, ServiceSweeperCronName) + val manualSweeper = getBuildFromProject(project, ServiceSweeperManualName) + val allSweepers: ArrayList = arrayListOf(cronSweeper, manualSweeper) + allSweepers.forEach{ sweeper -> + // Check PACKAGE_PATH is in google-beta + val value = sweeper.params.findRawParam("PACKAGE_PATH")!!.value + assertEquals("./google/sweeper", value) + + // SKIP_PROJECT_SWEEPER should have a value so project sweepers will be skipped + // See https://github.com/GoogleCloudPlatform/magic-modules/blob/501429790939717ca6dce76dbf4b1b82aef4e9d9/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_sweeper.go#L18-L26 + val skipProjectSweeper = sweeper.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value + assertTrue("env.SKIP_PROJECT_SWEEPER should be set to a non-empty string so project sweepers are skipped in the ${project.name} project (${sweeper.name}). 
Value = `${skipProjectSweeper}` ", skipProjectSweeper != "") + } + } - // Find sweeper inside - val sweeper: BuildType = getBuildFromProject(betaNightlyTestProject, ServiceSweeperName) + @Test + fun betaMmUpstreamServiceSweeperConfig() { + val root = googleCloudRootProject(testContextParameters()) - // Check PACKAGE_PATH is in google-beta - val value = sweeper.params.findRawParam("PACKAGE_PATH")!!.value - assertEquals("./google-beta/sweeper", value) + // Find Beta nightly test project + val project = getNestedProjectFromRoot(root, betaProjectName, mmUpstreamProjectName) + + // Find sweepers inside + val cronSweeper = getBuildFromProject(project, ServiceSweeperCronName) + val manualSweeper = getBuildFromProject(project, ServiceSweeperManualName) + val allSweepers: ArrayList = arrayListOf(cronSweeper, manualSweeper) + allSweepers.forEach{ sweeper -> + // Check PACKAGE_PATH is in google-beta + val value = sweeper.params.findRawParam("PACKAGE_PATH")!!.value + assertEquals("./google-beta/sweeper", value) + + // SKIP_PROJECT_SWEEPER should have a value so project sweepers will be skipped + // See https://github.com/GoogleCloudPlatform/magic-modules/blob/501429790939717ca6dce76dbf4b1b82aef4e9d9/mmv1/third_party/terraform/services/resourcemanager/resource_google_project_sweeper.go#L18-L26 + val skipProjectSweeper = sweeper.params.findRawParam("env.SKIP_PROJECT_SWEEPER")!!.value + assertTrue("env.SKIP_PROJECT_SWEEPER should be set to a non-empty string so project sweepers are skipped in the ${project.name} project (${sweeper.name}). 
Value = `${skipProjectSweeper}` ", skipProjectSweeper != "") + } } @Test From cb2aeabefe84e25a306ebf397237dbc118333f19 Mon Sep 17 00:00:00 2001 From: Iris Chen <10179943+iyabchen@users.noreply.github.com> Date: Fri, 31 May 2024 10:28:59 -0700 Subject: [PATCH 032/356] chore(ci): exit 1 on generate commit if run into other errors (#10796) --- .ci/magician/cmd/generate_downstream.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.ci/magician/cmd/generate_downstream.go b/.ci/magician/cmd/generate_downstream.go index 8cdeeccd99e4..79201986aba0 100644 --- a/.ci/magician/cmd/generate_downstream.go +++ b/.ci/magician/cmd/generate_downstream.go @@ -153,6 +153,9 @@ func execGenerateDownstream(baseBranch, command, repo, version, ref string, gh G scratchCommitSha, commitErr := createCommit(scratchRepo, commitMessage, rnr) if commitErr != nil { fmt.Println("Error creating commit: ", commitErr) + if !strings.Contains(commitErr.Error(), "nothing to commit") { + os.Exit(1) + } } if _, err := rnr.Run("git", []string{"push", ctlr.URL(scratchRepo), scratchRepo.Branch, "-f"}, nil); err != nil { From cc33342daaf84c8289461150c4de413e12e06915 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 31 May 2024 18:49:21 +0100 Subject: [PATCH 033/356] Update TeamCity configs version to "2024.03" (#10846) --- mmv1/third_party/terraform/.teamcity/settings.kts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/.teamcity/settings.kts b/mmv1/third_party/terraform/.teamcity/settings.kts index a87b2b8c5d19..518323d7bdef 100644 --- a/mmv1/third_party/terraform/.teamcity/settings.kts +++ b/mmv1/third_party/terraform/.teamcity/settings.kts @@ -9,7 +9,7 @@ import projects.googleCloudRootProject import builds.AllContextParameters import jetbrains.buildServer.configs.kotlin.* -version = "2023.11" +version = "2024.03" // The code below pulls context parameters from the TeamCity project. 
// Context parameters aren't stored in VCS, and are managed manually. From df1a3052e64a64a44c8705b75ee45626f651c33d Mon Sep 17 00:00:00 2001 From: Joshua Gibeon Date: Sat, 1 Jun 2024 01:36:29 +0700 Subject: [PATCH 034/356] add default-domains-netblock for google_netblock_ip_ranges data source (#10821) --- mmv1/third_party/terraform/go.mod | 1 + mmv1/third_party/terraform/go.mod.erb | 1 + mmv1/third_party/terraform/go.sum | 2 + .../data_source_google_netblock_ip_ranges.go | 73 ++++++++++++++++++- ...a_source_google_netblock_ip_ranges_test.go | 24 ++++++ 5 files changed, 99 insertions(+), 2 deletions(-) diff --git a/mmv1/third_party/terraform/go.mod b/mmv1/third_party/terraform/go.mod index bb0b2e89a736..e4e0f48fe054 100644 --- a/mmv1/third_party/terraform/go.mod +++ b/mmv1/third_party/terraform/go.mod @@ -97,6 +97,7 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/crypto v0.22.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect diff --git a/mmv1/third_party/terraform/go.mod.erb b/mmv1/third_party/terraform/go.mod.erb index a219a8b2da06..253dea2bebb9 100644 --- a/mmv1/third_party/terraform/go.mod.erb +++ b/mmv1/third_party/terraform/go.mod.erb @@ -98,6 +98,7 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect diff --git a/mmv1/third_party/terraform/go.sum b/mmv1/third_party/terraform/go.sum index 0c641ff41dd8..9a5f52c621b1 100644 --- a/mmv1/third_party/terraform/go.sum +++ b/mmv1/third_party/terraform/go.sum @@ -287,6 +287,8 @@ go.opentelemetry.io/otel/trace v1.24.0/go.mod 
h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= diff --git a/mmv1/third_party/terraform/services/resourcemanager/data_source_google_netblock_ip_ranges.go b/mmv1/third_party/terraform/services/resourcemanager/data_source_google_netblock_ip_ranges.go index cec7d95cdc86..4a4f9ceebd3d 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/data_source_google_netblock_ip_ranges.go +++ b/mmv1/third_party/terraform/services/resourcemanager/data_source_google_netblock_ip_ranges.go @@ -5,8 +5,11 @@ import ( "fmt" "io/ioutil" "net/http" + "net/netip" + "sort" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "go4.org/netipx" ) type googRanges struct { @@ -20,6 +23,11 @@ type prefixes struct { Ipv6Prefix string `json:"ipv6Prefix"` } +const ( + CLOUD_NETBLOCK_URL = "https://www.gstatic.com/ipranges/cloud.json" + GOOGLE_NETBLOCK_URL = "https://www.gstatic.com/ipranges/goog.json" +) + func DataSourceGoogleNetblockIpRanges() *schema.Resource { return &schema.Resource{ Read: dataSourceGoogleNetblockIpRangesRead, @@ -58,7 +66,6 @@ func dataSourceGoogleNetblockIpRangesRead(d *schema.ResourceData, meta interface // Dynamic ranges case "cloud-netblocks": // https://cloud.google.com/compute/docs/faq#find_ip_range 
- const CLOUD_NETBLOCK_URL = "https://www.gstatic.com/ipranges/cloud.json" CidrBlocks, err := getCidrBlocksFromUrl(CLOUD_NETBLOCK_URL) if err != nil { @@ -75,12 +82,35 @@ func dataSourceGoogleNetblockIpRangesRead(d *schema.ResourceData, meta interface } case "google-netblocks": // https://cloud.google.com/vpc/docs/configure-private-google-access?hl=en#ip-addr-defaults - const GOOGLE_NETBLOCK_URL = "https://www.gstatic.com/ipranges/goog.json" CidrBlocks, err := getCidrBlocksFromUrl(GOOGLE_NETBLOCK_URL) if err != nil { return err } + if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { + return fmt.Errorf("Error setting cidr_blocks: %s", err) + } + if err := d.Set("cidr_blocks_ipv4", CidrBlocks["cidr_blocks_ipv4"]); err != nil { + return fmt.Errorf("Error setting cidr_blocks_ipv4: %s", err) + } + if err := d.Set("cidr_blocks_ipv6", CidrBlocks["cidr_blocks_ipv6"]); err != nil { + return fmt.Errorf("Error setting cidr_blocks_ipv6: %s", err) + } + case "default-domains-netblocks": + // https: //cloud.google.com/vpc/docs/configure-private-google-access#ip-addr-defaults + googleBlocks, err := getCidrBlocksFromUrl(GOOGLE_NETBLOCK_URL) + if err != nil { + return err + } + cloudBlocks, err := getCidrBlocksFromUrl(CLOUD_NETBLOCK_URL) + if err != nil { + return err + } + CidrBlocks, err := getCidrsDifference(googleBlocks, cloudBlocks) + if err != nil { + return err + } + if err := d.Set("cidr_blocks", CidrBlocks["cidr_blocks"]); err != nil { return fmt.Errorf("Error setting cidr_blocks: %s", err) } @@ -199,3 +229,42 @@ func getCidrBlocksFromUrl(url string) (map[string][]string, error) { return cidrBlocks, nil } + +func getCidrsDifference(reference, excluded map[string][]string) (map[string][]string, error) { + result := make(map[string][]string) + + for blockName := range reference { + var ipSetBuilder netipx.IPSetBuilder + for _, cidr := range reference[blockName] { + net, err := netip.ParsePrefix(cidr) + if err != nil { + return result, err + } + 
ipSetBuilder.AddPrefix(net) + } + + for _, cidr := range excluded[blockName] { + net, err := netip.ParsePrefix(cidr) + if err != nil { + return result, err + } + ipSetBuilder.RemovePrefix(net) + } + + ipSet, err := ipSetBuilder.IPSet() + if err != nil { + return result, err + } + + var ipRangeStrings []string + for _, ipRange := range ipSet.Prefixes() { + ipRangeStrings = append(ipRangeStrings, ipRange.String()) + } + + sort.Strings(ipRangeStrings) + + result[blockName] = ipRangeStrings + } + + return result, nil +} diff --git a/mmv1/third_party/terraform/services/resourcemanager/data_source_google_netblock_ip_ranges_test.go b/mmv1/third_party/terraform/services/resourcemanager/data_source_google_netblock_ip_ranges_test.go index 035f1b071087..8c81f5cedc59 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/data_source_google_netblock_ip_ranges_test.go +++ b/mmv1/third_party/terraform/services/resourcemanager/data_source_google_netblock_ip_ranges_test.go @@ -49,6 +49,24 @@ func TestAccDataSourceGoogleNetblockIpRanges_basic(t *testing.T) { "cidr_blocks_ipv6.0", regexp.MustCompile("^(?:[0-9a-fA-F]{1,4}:){1,2}.*/[0-9]{1,3}$")), ), }, + { + Config: testAccNetblockIpRangesConfig_defaultdomains, + Check: resource.ComposeTestCheckFunc( + // Default domains netblocks + resource.TestMatchResourceAttr("data.google_netblock_ip_ranges.defaultdomains", + "cidr_blocks.#", regexp.MustCompile(("^[1-9]+[0-9]*$"))), + resource.TestMatchResourceAttr("data.google_netblock_ip_ranges.defaultdomains", + "cidr_blocks.0", regexp.MustCompile("^(?:[0-9a-fA-F./:]{1,4}){1,2}.*/[0-9]{1,3}$")), + resource.TestMatchResourceAttr("data.google_netblock_ip_ranges.defaultdomains", + "cidr_blocks_ipv4.#", regexp.MustCompile(("^[1-9]+[0-9]*$"))), + resource.TestMatchResourceAttr("data.google_netblock_ip_ranges.defaultdomains", + "cidr_blocks_ipv4.0", regexp.MustCompile("^(?:[0-9]{1,3}.){3}[0-9]{1,3}/[0-9]{1,2}$")), + 
resource.TestMatchResourceAttr("data.google_netblock_ip_ranges.defaultdomains", + "cidr_blocks_ipv6.#", regexp.MustCompile(("^[1-9]+[0-9]*$"))), + resource.TestMatchResourceAttr("data.google_netblock_ip_ranges.defaultdomains", + "cidr_blocks_ipv6.0", regexp.MustCompile("^(?:[0-9a-fA-F]{1,4}:){1,2}.*/[0-9]{1,3}$")), + ), + }, { Config: testAccNetblockIpRangesConfig_restricted, Check: resource.ComposeTestCheckFunc( @@ -141,6 +159,12 @@ data "google_netblock_ip_ranges" "google" { } ` +const testAccNetblockIpRangesConfig_defaultdomains = ` +data "google_netblock_ip_ranges" "defaultdomains" { + range_type = "default-domains-netblocks" +} +` + const testAccNetblockIpRangesConfig_restricted = ` data "google_netblock_ip_ranges" "restricted" { range_type = "restricted-googleapis" From bf05ed0325f5611d5421163b032cb34fd537d387 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 31 May 2024 19:38:09 +0100 Subject: [PATCH 035/356] Add force_destroy field to google_bigtable_instance resource (#10799) --- .../bigtable/resource_bigtable_instance.go | 84 ++++++++ .../resource_bigtable_instance_test.go | 188 ++++++++++++++++++ .../docs/r/bigtable_instance.html.markdown | 4 +- 3 files changed, 275 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go index e5a7cef9b16b..3940082bb731 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "google.golang.org/api/iterator" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg 
"github.com/hashicorp/terraform-provider-google/google/transport" @@ -17,6 +18,23 @@ import ( "cloud.google.com/go/bigtable" ) +// resourceBigtableInstanceVirtualUpdate identifies if an update to the resource includes only virtual field updates +func resourceBigtableInstanceVirtualUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Schema) bool { + // force_destroy is the only virtual field + if d.HasChange("force_destroy") { + for field := range resourceSchema { + if field == "force_destroy" { + continue + } + if d.HasChange(field) { + return false + } + } + return true + } + return false +} + func ResourceBigtableInstance() *schema.Resource { return &schema.Resource{ Create: resourceBigtableInstanceCreate, @@ -153,6 +171,13 @@ func ResourceBigtableInstance() *schema.Resource { Deprecated: `It is recommended to leave this field unspecified since the distinction between "DEVELOPMENT" and "PRODUCTION" instances is going away, and all instances will become "PRODUCTION" instances. This means that new and existing "DEVELOPMENT" instances will be converted to "PRODUCTION" instances. It is recommended for users to use "PRODUCTION" instances in any case, since a 1-node "PRODUCTION" instance is functionally identical to a "DEVELOPMENT" instance, but without the accompanying restrictions.`, }, + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `When deleting a BigTable instance, this boolean option will delete all backups within the instance.`, + }, + "deletion_protection": { Type: schema.TypeBool, Optional: true, @@ -341,6 +366,13 @@ func resourceBigtableInstanceRead(d *schema.ResourceData, meta interface{}) erro // Don't set instance_type: we don't want to detect drift on it because it can // change under-the-hood. 
+ // Explicitly set virtual fields to default values if unset + if _, ok := d.GetOkExists("force_destroy"); !ok { + if err := d.Set("force_destroy", false); err != nil { + return fmt.Errorf("Error setting force_destroy: %s", err) + } + } + return nil } @@ -389,6 +421,18 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er return err } + log.Printf("[DEBUG] Updating BigTable instance %q: %#v", d.Id(), conf) + + // Handle scenario where the update includes only updating force_destroy + if resourceBigtableInstanceVirtualUpdate(d, ResourceBigtableInstance().Schema) { + if d.Get("force_destroy") != nil { + if err := d.Set("force_destroy", d.Get("force_destroy")); err != nil { + return fmt.Errorf("error reading Instance: %s", err) + } + } + return nil + } + ctxWithTimeout, cancel := context.WithTimeout(ctx, d.Timeout(schema.TimeoutUpdate)) defer cancel() if _, err := bigtable.UpdateInstanceAndSyncClusters(ctxWithTimeout, c, conf); err != nil { @@ -399,6 +443,7 @@ func resourceBigtableInstanceUpdate(d *schema.ResourceData, meta interface{}) er } func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Deleting BigTable instance %q", d.Id()) if d.Get("deletion_protection").(bool) { return fmt.Errorf("cannot destroy instance without setting deletion_protection=false and running `terraform apply`") } @@ -423,6 +468,40 @@ func resourceBigtableInstanceDestroy(d *schema.ResourceData, meta interface{}) e defer c.Close() name := d.Get("name").(string) + + // If force_destroy is set, delete all backups and unblock deletion of the instance + if d.Get("force_destroy").(bool) { + adminClient, err := config.BigTableClientFactory(userAgent).NewAdminClient(project, name) + if err != nil { + return fmt.Errorf("error starting admin client. %s", err) + } + + // Iterate over clusters to get all backups + // Need to get backup data per cluster because when you delete a backup the name must be provided. 
+ // If we get all backups in an instance at once the information about the cluster a backup belongs to isn't present. + clusters, err := c.Clusters(ctx, name) + if err != nil { + return fmt.Errorf("error retrieving cluster data for instance %s: %s", name, err) + } + for _, cluster := range clusters { + it := adminClient.Backups(ctx, cluster.Name) + for { + backup, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return fmt.Errorf("error iterating over backups in cluster %s: %s", cluster.Name, err) + } + log.Printf("[DEBUG] Deleting backup %s from cluster %s", backup.Name, cluster.Name) + err = adminClient.DeleteBackup(ctx, cluster.Name, backup.Name) + if err != nil { + return fmt.Errorf("error backup %s from cluster %s: %s", backup.Name, cluster.Name, err) + } + } + } + } + err = c.DeleteInstance(ctx, name) if err != nil { return fmt.Errorf("Error deleting instance. %s", err) @@ -733,5 +812,10 @@ func resourceBigtableInstanceImport(d *schema.ResourceData, meta interface{}) ([ } d.SetId(id) + // Explicitly set virtual fields to default values on import + if err := d.Set("force_destroy", false); err != nil { + return nil, fmt.Errorf("error setting force_destroy: %s", err) + } + return []*schema.ResourceData{d}, nil } diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_test.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_test.go index 3e67663e66a9..f1d0cc9323d0 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_test.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_test.go @@ -438,6 +438,62 @@ func TestAccBigtableInstance_MultipleClustersSameID(t *testing.T) { }) } +func TestAccBigtableInstance_forceDestroyBackups(t *testing.T) { + // bigtable instance does not use the shared HTTP client, this test creates an instance + acctest.SkipIfVcr(t) + t.Parallel() + + randomString := acctest.RandString(t, 10) + region := 
envvar.GetTestRegionFromEnv() + context := map[string]interface{}{ + "instance_name": fmt.Sprintf("tf-test-instance-%s", randomString), + "cluster_name_1": fmt.Sprintf("tf-test-cluster-%s-1", randomString), + "cluster_name_2": fmt.Sprintf("tf-test-cluster-%s-2", randomString), + "cluster_zone_1": fmt.Sprintf("%s-a", region), + "cluster_zone_2": fmt.Sprintf("%s-b", region), + "table_name": fmt.Sprintf("tf-test-table-%s", randomString), + "force_destroy": true, // Overridden in test steps + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "http": {}, + "time": {}, + }, + CheckDestroy: testAccCheckBigtableInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Create force_destroy = false + Config: testAccBigtableInstance_forceDestroy(context, false), + }, + { + ResourceName: "google_bigtable_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type", "labels", "terraform_labels"}, // we don't read instance type back + Check: resource.ComposeTestCheckFunc( + // Make sure field is set, and is set to false after import + resource.TestCheckResourceAttr("google_bigtable_instance.instance", "force_destroy", "false"), + ), + }, + { + // Try to delete the instance after force_destroy = false was set before + Config: testAccBigtableInstance_forceDestroy_deleteInstance(), + ExpectError: regexp.MustCompile("until all user backups have been deleted"), + }, + { + // Update force_destroy = true + Config: testAccBigtableInstance_forceDestroy(context, true), + }, + { + // Try to delete the instance after force_destroy = true was set before + Config: testAccBigtableInstance_forceDestroy_deleteInstance(), + }, + }, + }) +} + func testAccBigtableInstance_multipleClustersSameID(instanceName string) string { return 
fmt.Sprintf(` resource "google_bigtable_instance" "instance" { @@ -742,3 +798,135 @@ func autoscalingClusterConfigWithStorageTarget(instanceName string, min int, max }`, instanceName, instanceName, min, max, cpuTarget, storageTarget) } + +func testAccBigtableInstance_forceDestroy(context map[string]interface{}, forceDestroy bool) string { + context["force_destroy"] = forceDestroy + + return acctest.Nprintf(` +provider "google" { + alias = "http_auth" +} + +resource "google_bigtable_instance" "instance" { + name = "%{instance_name}" + cluster { + cluster_id = "%{cluster_name_1}" + num_nodes = 1 + storage_type = "HDD" + zone = "%{cluster_zone_1}" + } + cluster { + cluster_id = "%{cluster_name_2}" + num_nodes = 1 + storage_type = "HDD" + zone = "%{cluster_zone_2}" + } + force_destroy = %{force_destroy} + deletion_protection = false + labels = { + env = "default" + } +} + +resource "google_bigtable_table" "table" { + name = "%{table_name}" + instance_name = google_bigtable_instance.instance.id + split_keys = ["a", "b", "c"] +} + +data "google_client_config" "current" { + provider = google.http_auth +} + +locals { + project = google_bigtable_instance.instance.project + instance = google_bigtable_instance.instance.name + cluster_1 = google_bigtable_instance.instance.cluster[0].cluster_id + cluster_2 = google_bigtable_instance.instance.cluster[1].cluster_id + backup = "backup-1" +} + +data "http" "make_backup_1" { + url = "https://bigtableadmin.googleapis.com/v2/projects/${local.project}/instances/${local.instance}/clusters/${local.cluster_1}/backups?backupId=${local.backup}" + method = "POST" + + request_headers = { + Content-Type = "application/json" + Authorization = "Bearer ${data.google_client_config.current.access_token}" + } + + request_body = < Date: Fri, 31 May 2024 13:28:07 -0700 Subject: [PATCH 036/356] Upgrade DCL to 1.67 (#10838) --- mmv1/third_party/terraform/go.mod | 2 +- mmv1/third_party/terraform/go.mod.erb | 2 +- mmv1/third_party/terraform/go.sum | 4 
++-- .../api/clouddeploy/samples/update_execution_env.target.json | 3 ++- tpgtools/go.mod | 2 +- tpgtools/go.sum | 4 ++-- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/mmv1/third_party/terraform/go.mod b/mmv1/third_party/terraform/go.mod index e4e0f48fe054..26e6f46d5d43 100644 --- a/mmv1/third_party/terraform/go.mod +++ b/mmv1/third_party/terraform/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( cloud.google.com/go/bigtable v1.23.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.66.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 diff --git a/mmv1/third_party/terraform/go.mod.erb b/mmv1/third_party/terraform/go.mod.erb index 253dea2bebb9..0e4b61503874 100644 --- a/mmv1/third_party/terraform/go.mod.erb +++ b/mmv1/third_party/terraform/go.mod.erb @@ -5,7 +5,7 @@ go 1.21 require ( cloud.google.com/go/bigtable v1.23.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.66.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 diff --git a/mmv1/third_party/terraform/go.sum b/mmv1/third_party/terraform/go.sum index 9a5f52c621b1..029cf013033d 100644 --- a/mmv1/third_party/terraform/go.sum +++ b/mmv1/third_party/terraform/go.sum @@ -18,8 +18,6 @@ cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.66.0 h1:9C++tMcDcwgT8QTaq2bRtuAB5Tg4o4I4CkDQD57i914= -github.com/GoogleCloudPlatform/declarative-resource-client-library 
v1.66.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= @@ -430,3 +428,5 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 h1:FBKsgWIOEdtpx2YuF+aBH33K0Ih25D3xuKyp9peH4jc= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= diff --git a/tpgtools/api/clouddeploy/samples/update_execution_env.target.json b/tpgtools/api/clouddeploy/samples/update_execution_env.target.json index 4f85f5293903..3ef13cb859ed 100644 --- a/tpgtools/api/clouddeploy/samples/update_execution_env.target.json +++ b/tpgtools/api/clouddeploy/samples/update_execution_env.target.json @@ -21,7 +21,8 @@ { "usages": ["RENDER"], "serviceAccount": "other-owner@{{project}}.iam.gserviceaccount.com", - "artifactStorage": "gs://other-bucket/other-dir" + "artifactStorage": "gs://other-bucket/other-dir", + "verbose": true }, { "usages": ["DEPLOY"], diff --git a/tpgtools/go.mod b/tpgtools/go.mod index f2c9231bd6eb..67f7b6571bd9 100644 --- a/tpgtools/go.mod +++ b/tpgtools/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( bitbucket.org/creachadair/stringset v0.0.11 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.66.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 github.com/golang/glog v1.1.2 github.com/hashicorp/hcl v1.0.0 
github.com/kylelemons/godebug v1.1.0 diff --git a/tpgtools/go.sum b/tpgtools/go.sum index b03ba910fa6e..0e90e5744d3a 100644 --- a/tpgtools/go.sum +++ b/tpgtools/go.sum @@ -6,8 +6,8 @@ cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdi cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.66.0 h1:9C++tMcDcwgT8QTaq2bRtuAB5Tg4o4I4CkDQD57i914= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.66.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 h1:FBKsgWIOEdtpx2YuF+aBH33K0Ih25D3xuKyp9peH4jc= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= From 48dec2af5cf1b80049cdfe22b02eaea8ad9c1146 Mon Sep 17 00:00:00 2001 From: Anjali Soni Date: Fri, 31 May 2024 20:42:05 +0000 Subject: [PATCH 037/356] Introduce endpoint global access field in endpoint attachment resource (#10632) --- mmv1/products/integrationconnectors/EndpointAttachment.yaml | 4 ++++ ...esource_integration_connectors_endpoint_attachment_test.go | 2 ++ 2 files changed, 6 insertions(+) diff --git a/mmv1/products/integrationconnectors/EndpointAttachment.yaml b/mmv1/products/integrationconnectors/EndpointAttachment.yaml index 7039e2dc8800..c4a6320629ea 100644 --- 
a/mmv1/products/integrationconnectors/EndpointAttachment.yaml +++ b/mmv1/products/integrationconnectors/EndpointAttachment.yaml @@ -105,3 +105,7 @@ properties: description: | The Private Service Connect connection endpoint ip. output: true + - !ruby/object:Api::Type::Boolean + name: "endpointGlobalAccess" + description: | + Enable global access for endpoint attachment. diff --git a/mmv1/third_party/terraform/services/integrationconnectors/resource_integration_connectors_endpoint_attachment_test.go b/mmv1/third_party/terraform/services/integrationconnectors/resource_integration_connectors_endpoint_attachment_test.go index 609f5bc0a2a7..8f3b0d79a210 100644 --- a/mmv1/third_party/terraform/services/integrationconnectors/resource_integration_connectors_endpoint_attachment_test.go +++ b/mmv1/third_party/terraform/services/integrationconnectors/resource_integration_connectors_endpoint_attachment_test.go @@ -56,6 +56,7 @@ resource "google_integration_connectors_endpoint_attachment" "sampleendpointatta labels = { foo = "bar" } + endpoint_global_access = false } `, context) } @@ -74,6 +75,7 @@ resource "google_integration_connectors_endpoint_attachment" "sampleendpointatta labels = { bar = "foo" } + endpoint_global_access = true } `, context) } From 8d5eac137cb5ca51dc764d18d6c7038018b97222 Mon Sep 17 00:00:00 2001 From: Luca Prete Date: Sat, 1 Jun 2024 00:53:50 +0300 Subject: [PATCH 038/356] [#11127] Fixes ability to add secret manager secrets IAM bindings with conditions (#10844) Co-authored-by: Luca Prete --- mmv1/products/secretmanager/Secret.yaml | 1 + .../iam_secret_manager_secret_test.go.erb | 119 ++++++++++++++++++ 2 files changed, 120 insertions(+) create mode 100644 mmv1/third_party/terraform/services/secretmanager/iam_secret_manager_secret_test.go.erb diff --git a/mmv1/products/secretmanager/Secret.yaml b/mmv1/products/secretmanager/Secret.yaml index b1842ac1a775..472d4dd4149a 100644 --- a/mmv1/products/secretmanager/Secret.yaml +++ 
b/mmv1/products/secretmanager/Secret.yaml @@ -22,6 +22,7 @@ iam_policy: !ruby/object:Api::Resource::IamPolicy parent_resource_attribute: secret_id method_name_separator: ':' allowed_iam_role: roles/secretmanager.secretAccessor + iam_conditions_request_type: :QUERY_PARAM_NESTED references: !ruby/object:Api::Resource::ReferenceLinks api: 'https://cloud.google.com/secret-manager/docs/reference/rest/v1/projects.secrets' description: | diff --git a/mmv1/third_party/terraform/services/secretmanager/iam_secret_manager_secret_test.go.erb b/mmv1/third_party/terraform/services/secretmanager/iam_secret_manager_secret_test.go.erb new file mode 100644 index 000000000000..8be70a31ae36 --- /dev/null +++ b/mmv1/third_party/terraform/services/secretmanager/iam_secret_manager_secret_test.go.erb @@ -0,0 +1,119 @@ +<% autogen_exception -%> +package secretmanager_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccSecretManagerSecretIam_iamMemberConditionUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/secretmanager.secretAccessor", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecretIam_iamMemberCondition_basic(context), + }, + { + ResourceName: "google_secret_manager_secret_iam_member.default", + ImportStateId: fmt.Sprintf("projects/%s/secrets/%s %s serviceAccount:%s %s", envvar.GetTestProjectFromEnv(), fmt.Sprintf("tf-test-secret-%s", context["random_suffix"]), context["role"], fmt.Sprintf("tf-test-sa-%s@%s.iam.gserviceaccount.com", 
context["random_suffix"], envvar.GetTestProjectFromEnv()), fmt.Sprintf("tf-test-condition-%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSecretManagerSecretIam_iamMemberCondition_update(context), + }, + { + ResourceName: "google_secret_manager_secret_iam_member.default", + ImportStateId: fmt.Sprintf("projects/%s/secrets/%s %s serviceAccount:%s %s", envvar.GetTestProjectFromEnv(), fmt.Sprintf("tf-test-secret-%s", context["random_suffix"]), context["role"], fmt.Sprintf("tf-test-sa-%s@%s.iam.gserviceaccount.com", context["random_suffix"], envvar.GetTestProjectFromEnv()), fmt.Sprintf("tf-test-condition-new-%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccSecretManagerSecretIam_iamMemberCondition_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "default" { + account_id = "tf-test-sa-%{random_suffix}" + display_name = "Secret manager IAM testing account" +} + +resource "google_secret_manager_secret" "default" { + secret_id = "tf-test-secret-%{random_suffix}" + ttl = "3600s" + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} + +resource "google_secret_manager_secret_iam_member" "default" { + secret_id = google_secret_manager_secret.default.id + role = "%{role}" + member = "serviceAccount:${google_service_account.default.email}" + condition { + title = "tf-test-condition-%{random_suffix}" + description = "test condition" + expression = "request.time < timestamp(\"2022-03-01T00:00:00Z\")" + } +} +`, context) +} + +func testAccSecretManagerSecretIam_iamMemberCondition_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "default" { + account_id = "tf-test-sa-%{random_suffix}" + display_name = "Secret manager IAM testing account" +} + +resource 
"google_secret_manager_secret" "default" { + secret_id = "tf-test-secret-%{random_suffix}" + ttl = "3600s" + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} + +resource "google_secret_manager_secret_iam_member" "default" { + secret_id = google_secret_manager_secret.default.id + role = "%{role}" + member = "serviceAccount:${google_service_account.default.email}" + condition { + title = "tf-test-condition-new-%{random_suffix}" + description = "test new condition" + expression = "request.time < timestamp(\"2024-03-01T00:00:00Z\")" + } +} +`, context) +} From 7c6c2b7517578d7da20ce8eae99d37f34aa20a05 Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Fri, 31 May 2024 16:07:03 -0700 Subject: [PATCH 039/356] Fix some wrong and missing GKE network config docs (#10849) Co-authored-by: Zhenhua Li --- .../website/docs/r/container_cluster.html.markdown | 6 ------ .../website/docs/r/container_node_pool.html.markdown | 11 +++++++++++ 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 3e0228f168d0..3b7c6f6d39cf 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -973,12 +973,6 @@ sole_tenant_config { * `enable_nested_virtualization`- (Optional) Defines whether the instance should have nested virtualization enabled. Defaults to false. -* `network_performance_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Network bandwidth tier configuration. - -The `network_performance_config` block supports: - -* `total_egress_bandwidth_tier` (Required) - Specifies the total network bandwidth tier for the NodePool. 
- The `ephemeral_storage_config` block supports: * `local_ssd_count` (Required) - Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD is 375 GB in size. If zero, it means to disable using local SSDs as ephemeral storage. diff --git a/mmv1/third_party/terraform/website/docs/r/container_node_pool.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_node_pool.html.markdown index 0df385456be5..0eef08600637 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_node_pool.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_node_pool.html.markdown @@ -224,6 +224,9 @@ cluster. * `additional_pod_network_configs` - (Optional, Beta) We specify the additional pod networks for this node pool using this list. Each pod network corresponds to an additional alias IP range for the node. Structure is [documented below](#nested_additional_pod_network_configs) +* `pod_cidr_overprovision_config` - (Optional) Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited. Structure is [documented below](#pod_cidr_overprovision_config). + +* `network_performance_config` - (Optional) Network bandwidth tier configuration. Structure is [documented below](#network_performance_config). The `additional_node_network_configs` block supports: @@ -239,6 +242,14 @@ cluster. * `max_pods_per_node` - The maximum number of pods per node which use this pod network. +The `network_performance_config` block supports: + +* `total_egress_bandwidth_tier` (Required) - Specifies the total network bandwidth tier for the NodePool. + +The `pod_cidr_overprovision_config` block supports: + +* `disabled` (Required) - Whether pod cidr overprovision is disabled. 
+ The `upgrade_settings` block supports: * `max_surge` - (Optional) The number of additional nodes that can be added to the node pool during From cf980e17b2aca585a0b9b5816f6eaa453fd6c2e5 Mon Sep 17 00:00:00 2001 From: KeisukeYamashita <19yamashita15@gmail.com> Date: Mon, 3 Jun 2024 18:49:27 +0200 Subject: [PATCH 040/356] Add missing closing backtick for `gae_app` in monitoring uptime check resource (#10819) Signed-off-by: KeisukeYamashita <19yamashita15@gmail.com> --- mmv1/products/monitoring/UptimeCheckConfig.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/products/monitoring/UptimeCheckConfig.yaml b/mmv1/products/monitoring/UptimeCheckConfig.yaml index fddd4a0bcf9f..192bbd998a9a 100644 --- a/mmv1/products/monitoring/UptimeCheckConfig.yaml +++ b/mmv1/products/monitoring/UptimeCheckConfig.yaml @@ -438,7 +438,7 @@ properties: uptime checks: * `aws_ec2_instance` * `aws_elb_load_balancer` - * `gae_app + * `gae_app` * `gce_instance` * `k8s_service` * `servicedirectory_service` From d7ffcbad998968ec74d556b72cf5a39d5caaf449 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Mon, 3 Jun 2024 09:49:51 -0700 Subject: [PATCH 041/356] Convert iam_context file with Go (#10837) --- mmv1/provider/template_data.go | 6 +- mmv1/provider/terraform.go | 2 +- .../datasource_iam.html.markdown.tmpl | 3 +- .../terraform/env_var_context.go.tmpl | 33 +++++++++++ .../base_configs/iam_test_file.go.tmpl | 58 +++++++------------ .../examples/base_configs/test_file.go.tmpl | 32 +--------- .../terraform/iam/go/iam_context.go.tmpl | 25 ++++++++ 7 files changed, 87 insertions(+), 72 deletions(-) create mode 100644 mmv1/templates/terraform/env_var_context.go.tmpl create mode 100644 mmv1/templates/terraform/iam/go/iam_context.go.tmpl diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index 797064d39689..d265a29f3dd5 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -137,7 +137,7 @@ func (td *TemplateData) 
GenerateDocumentationFile(filePath string, resource api. func (td *TemplateData) GenerateTestFile(filePath string, resource api.Resource) { templatePath := "templates/terraform/examples/base_configs/test_file.go.tmpl" templates := []string{ - // "templates/terraform//env_var_context.go.tmpl", + "templates/terraform/env_var_context.go.tmpl", templatePath, } tmplInput := TestInput{ @@ -189,8 +189,10 @@ func (td *TemplateData) GenerateIamPolicyTestFile(filePath string, resource api. templatePath := "templates/terraform/examples/base_configs/iam_test_file.go.tmpl" templates := []string{ templatePath, + "templates/terraform/env_var_context.go.tmpl", + "templates/terraform/iam/go/iam_context.go.tmpl", } - td.GenerateFile(filePath, templatePath, resource, false, templates...) + td.GenerateFile(filePath, templatePath, resource, true, templates...) } func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, goFormat bool, templates ...string) { diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index e10868c8cb78..e610c44769d8 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -241,7 +241,7 @@ func (t *Terraform) GenerateIamDocumentation(object api.Resource, templateData T if err := os.MkdirAll(datasourceDocFolder, os.ModePerm); err != nil { log.Println(fmt.Errorf("error creating parent directory %v: %v", datasourceDocFolder, err)) } - targetFilePath = path.Join(datasourceDocFolder, fmt.Sprintf("%s_iam.html.markdown", t.FullResourceName(object))) + targetFilePath = path.Join(datasourceDocFolder, fmt.Sprintf("%s_iam_policy.html.markdown", t.FullResourceName(object))) templateData.GenerateIamDatasourceDocumentationFile(targetFilePath, object) } diff --git a/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl b/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl index db0d361cc133..78e75ee81683 100644 --- a/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl +++ 
b/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl @@ -56,7 +56,6 @@ description: |- # `{{ $.IamTerraformName }}_policy` Retrieves the current IAM policy data for {{ lower $.Name }} - {{ if or (eq $.MinVersionObj.Name "beta") (eq $.IamPolicy.MinVersion "beta") }} ~> **Warning:** This datasource is in beta, and should be used with the terraform-provider-google-beta provider. See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. @@ -66,7 +65,7 @@ See [Provider Versions](https://terraform.io/docs/providers/google/guides/provid ```hcl data "{{ $.IamTerraformName }}_policy" "policy" { -{{ if eq $.MinVersionObj.Name "beta" }} +{{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta {{- end }} {{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} diff --git a/mmv1/templates/terraform/env_var_context.go.tmpl b/mmv1/templates/terraform/env_var_context.go.tmpl new file mode 100644 index 000000000000..9cf8a91260f6 --- /dev/null +++ b/mmv1/templates/terraform/env_var_context.go.tmpl @@ -0,0 +1,33 @@ +{{- define "EnvVarContext" }} + {{- range $varKey, $varVal := $.TestEnvVars }} + {{- if eq $varVal $.ORGID }} + "{{$varKey}}": envvar.GetTestOrgFromEnv(t), + {{- else if eq $varVal $.ORG_DOMAIN }} + "{{$varKey}}": envvar.GetTestOrgDomainFromEnv(t), + {{- else if eq $varVal $.CREDENTIALS }} + "{{$varKey}}": envvar.GetTestCredsFromEnv(t), + {{- else if eq $varVal $.REGION }} + "{{$varKey}}": envvar.GetTestRegionFromEnv(), + {{- else if eq $varVal $.ORG_TARGET }} + "{{$varKey}}": envvar.GetTestOrgTargetFromEnv(t), + {{- else if eq $varVal $.BILLING_ACCT }} + "{{$varKey}}": envvar.GetTestBillingAccountFromEnv(t), + {{- else if eq $varVal $.MASTER_BILLING_ACCT }} + "{{$varKey}}": envvar.GetTestMasterBillingAccountFromEnv(t), + {{- else if eq $varVal $.SERVICE_ACCT }} + "{{$varKey}}": envvar.GetTestServiceAccountFromEnv(t), + {{- else if eq $varVal $.PROJECT_NAME }} + "{{$varKey}}": 
envvar.GetTestProjectFromEnv(), + {{- else if eq $varVal $.PROJECT_NUMBER }} + "{{$varKey}}": envvar.GetTestProjectNumberFromEnv(), + {{- else if eq $varVal $.CUST_ID }} + "{{$varKey}}": envvar.GetTestCustIdFromEnv(t), + {{- else if eq $varVal $.IDENTITY_USER }} + "{{$varKey}}": envvar.GetTestIdentityUserFromEnv(t), + {{- else if eq $varVal $.PAP_DESCRIPTION }} + "{{$varKey}}": envvar.GetTestPublicAdvertisedPrefixDescriptionFromEnv(t), + {{- else if eq $varVal $.ZONE }} + "{{$varKey}}": envvar.GetTestZoneFromEnv(), + {{- end }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl index 044f937ee1cc..748e9d1ddcd5 100644 --- a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl +++ b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl @@ -1,4 +1,4 @@ -{{/* <% if hc_downstream */}} +{{/* <% if hc_downstream */ -}} // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 @@ -26,13 +26,14 @@ import ( "{{ $.ImportPath }}/acctest" "{{ $.ImportPath }}/envvar" + {{- if $.IamPolicy.IamConditionsRequestType }} "{{ $.ImportPath }}/tpgresource" + {{- end }} ) {{ $example := $.FirstTestExample }} func TestAcc{{ $.ResourceName }}IamBindingGenerated(t *testing.T) { t.Parallel() - -{{/* iam_context.go.erb */}} +{{ template "IamContext" $ }} acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -78,8 +79,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated(t *testing.T) { func TestAcc{{ $.ResourceName }}IamMemberGenerated(t *testing.T) { t.Parallel() - -{{/* iam_context.go.erb */}} +{{ template "IamContext" $ }} acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -119,7 +119,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated(t *testing.T) { // This may skip test, so do it first sa := envvar.GetTestServiceAccountFromEnv(t) {{- end }} -{{/* iam_context.go.erb */}} +{{ template "IamContext" $ }} {{- if $.IamPolicy.AdminIamRole }} context["service_account"] = sa {{- end }} @@ -169,8 +169,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated(t *testing.T) { {{- if $.IamPolicy.IamConditionsRequestType }} func TestAcc{{ $.ResourceName }}IamBindingGenerated_withCondition(t *testing.T) { t.Parallel() - -{{/* iam_context.go.erb */}} +{{ template "IamContext" $ }} acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -206,8 +205,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated_withAndWithoutCondition(t *t // Multiple fine-grained resources acctest.SkipIfVcr(t) t.Parallel() - -{{/* iam_context.go.erb */}} +{{ template "IamContext" $ }} acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -253,8 +251,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated_withAndWithoutCondition(t *t func TestAcc{{ $.ResourceName }}IamMemberGenerated_withCondition(t 
*testing.T) { t.Parallel() - -{{/* iam_context.go.erb */}} +{{ template "IamContext" $ }} acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -290,8 +287,7 @@ func TestAcc{{ $.ResourceName }}IamMemberGenerated_withAndWithoutCondition(t *te // Multiple fine-grained resources acctest.SkipIfVcr(t) t.Parallel() - -{{/* iam_context.go.erb */}} +{{ template "IamContext" $ }} acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -342,7 +338,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated_withCondition(t *testing.T) { // This may skip test, so do it first sa := envvar.GetTestServiceAccountFromEnv(t) {{- end }} -{{/* iam_context.go.erb */}} +{{- template "IamContext" $ }} {{- if $.IamPolicy.AdminIamRole }} context["service_account"] = sa {{- end }} @@ -395,8 +391,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated_withCondition(t *testing.T) { func testAcc{{ $.ResourceName }}IamMember_basicGenerated(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} resource "{{ $.IamTerraformName }}_member" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -410,8 +405,7 @@ resource "{{ $.IamTerraformName }}_member" "foo" { func testAcc{{ $.ResourceName }}IamPolicy_basicGenerated(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} data "google_iam_policy" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -450,8 +444,7 @@ data "{{ $.IamTerraformName }}_policy" "foo" { func testAcc{{ $.ResourceName }}IamPolicy_emptyBinding(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} data "google_iam_policy" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -470,8 +463,7 @@ resource "{{ $.IamTerraformName 
}}_policy" "foo" { func testAcc{{ $.ResourceName }}IamBinding_basicGenerated(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} resource "{{ $.IamTerraformName }}_binding" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -485,8 +477,7 @@ resource "{{ $.IamTerraformName }}_binding" "foo" { func testAcc{{ $.ResourceName }}IamBinding_updateGenerated(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} resource "{{ $.IamTerraformName }}_binding" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -501,8 +492,7 @@ resource "{{ $.IamTerraformName }}_binding" "foo" { {{- if $.IamPolicy.IamConditionsRequestType }} func testAcc{{ $.ResourceName }}IamBinding_withConditionGenerated(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} resource "{{ $.IamTerraformName }}_binding" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -521,8 +511,7 @@ resource "{{ $.IamTerraformName }}_binding" "foo" { func testAcc{{ $.ResourceName }}IamBinding_withAndWithoutConditionGenerated(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} resource "{{ $.IamTerraformName }}_binding" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -565,8 +554,7 @@ resource "{{ $.IamTerraformName }}_binding" "foo3" { func testAcc{{ $.ResourceName }}IamMember_withConditionGenerated(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} resource "{{ $.IamTerraformName }}_member" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -585,8 +573,7 @@ resource "{{ $.IamTerraformName }}_member" "foo" { func 
testAcc{{ $.ResourceName }}IamMember_withAndWithoutConditionGenerated(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} resource "{{ $.IamTerraformName }}_member" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -629,8 +616,7 @@ resource "{{ $.IamTerraformName }}_member" "foo3" { func testAcc{{ $.ResourceName }}IamPolicy_withConditionGenerated(context map[string]interface{}) string { return acctest.Nprintf(` -{{/* example.config_test_body */}} - +{{ $example.TestHCLText }} data "google_iam_policy" "foo" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta @@ -671,4 +657,4 @@ resource "{{ $.IamTerraformName }}_policy" "foo" { } `, context) } -{{- end }} +{{- end }}{{/* if $.IamPolicy.IamConditionsRequestType */}} diff --git a/mmv1/templates/terraform/examples/base_configs/test_file.go.tmpl b/mmv1/templates/terraform/examples/base_configs/test_file.go.tmpl index a1f8a923df7a..f0472fbe4430 100644 --- a/mmv1/templates/terraform/examples/base_configs/test_file.go.tmpl +++ b/mmv1/templates/terraform/examples/base_configs/test_file.go.tmpl @@ -44,37 +44,7 @@ func TestAcc{{ $e.TestSlug $.Res.ProductMetadata.Name $.Res.Name }}(t *testing.T t.Parallel() context := map[string]interface{}{ - {{- range $varKey, $varVal := $e.TestEnvVars }} - {{- if eq $varVal $.ORGID }} - "{{$varKey}}": envvar.GetTestOrgFromEnv(t), - {{- else if eq $varVal $.ORG_DOMAIN }} - "{{$varKey}}": envvar.GetTestOrgDomainFromEnv(t), - {{- else if eq $varVal $.CREDENTIALS }} - "{{$varKey}}": envvar.GetTestCredsFromEnv(t), - {{- else if eq $varVal $.REGION }} - "{{$varKey}}": envvar.GetTestRegionFromEnv(), - {{- else if eq $varVal $.ORG_TARGET }} - "{{$varKey}}": envvar.GetTestOrgTargetFromEnv(t), - {{- else if eq $varVal $.BILLING_ACCT }} - "{{$varKey}}": envvar.GetTestBillingAccountFromEnv(t), - {{- else if eq $varVal $.MASTER_BILLING_ACCT }} - "{{$varKey}}": 
envvar.GetTestMasterBillingAccountFromEnv(t), - {{- else if eq $varVal $.SERVICE_ACCT }} - "{{$varKey}}": envvar.GetTestServiceAccountFromEnv(t), - {{- else if eq $varVal $.PROJECT_NAME }} - "{{$varKey}}": envvar.GetTestProjectFromEnv(), - {{- else if eq $varVal $.PROJECT_NUMBER }} - "{{$varKey}}": envvar.GetTestProjectNumberFromEnv(), - {{- else if eq $varVal $.CUST_ID }} - "{{$varKey}}": envvar.GetTestCustIdFromEnv(t), - {{- else if eq $varVal $.IDENTITY_USER }} - "{{$varKey}}": envvar.GetTestIdentityUserFromEnv(t), - {{- else if eq $varVal $.PAP_DESCRIPTION }} - "{{$varKey}}": envvar.GetTestPublicAdvertisedPrefixDescriptionFromEnv(t), - {{- else if eq $varVal $.ZONE }} - "{{$varKey}}": envvar.GetTestZoneFromEnv(), - {{- end }} - {{- end }} + {{- template "EnvVarContext" $e }} {{- range $varKey, $varVal := $e.TestVarsOverrides }} "{{$varKey}}": {{$varVal}}, {{- end }} diff --git a/mmv1/templates/terraform/iam/go/iam_context.go.tmpl b/mmv1/templates/terraform/iam/go/iam_context.go.tmpl new file mode 100644 index 000000000000..54d105314463 --- /dev/null +++ b/mmv1/templates/terraform/iam/go/iam_context.go.tmpl @@ -0,0 +1,25 @@ +{{- define "IamContext" }} + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "{{ $.IamPolicy.AllowedIamRole }}", +{{- if $.IamPolicy.AdminIamRole }} + "admin_role": "{{ $.IamPolicy.AdminIamRole }}", +{{- end }} +{{- if $.IamPolicy.TestProjectName }} + "project_id" : fmt.Sprintf("{{ $.IamPolicy.TestProjectName }}%s", acctest.RandString(t, 10)), +{{- end }} +{{- template "EnvVarContext" $.FirstTestExample }} +{{- if $.FirstTestExample.TestVarsOverrides }} +{{- range $varName, $override := $.FirstTestExample.TestVarsOverrides }} + "{{ $varName }}": {{ $override }}, +{{- end }} +{{- end }} +{{- if $.IamPolicy.IamConditionsRequestType }} + "condition_title": "expires_after_2019_12_31", + "condition_expr": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + "condition_desc": "Expiring at midnight of 
2019-12-31", + "condition_title_no_desc": "expires_after_2019_12_31-no-description", + "condition_expr_no_desc": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, +{{- end }} + } +{{- end }} \ No newline at end of file From 3fc405dc19dd59b313710b2d8cd95ac57c109329 Mon Sep 17 00:00:00 2001 From: Abhijeet Dargude <144316709+dargudear-google@users.noreply.github.com> Date: Tue, 4 Jun 2024 00:05:44 +0530 Subject: [PATCH 042/356] Add support secret manager csi addon in beta (#10841) --- .../resource_container_cluster.go.erb | 103 ++++++++++++++++++ .../resource_container_cluster_test.go.erb | 88 +++++++++++++++ .../docs/r/container_cluster.html.markdown | 8 ++ 3 files changed, 199 insertions(+) diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index 38028f27021d..85808489e5d6 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -1501,6 +1501,24 @@ func ResourceContainerCluster() *schema.Resource { }, }, <% end -%> +<% unless version == "ga" -%> + "secret_manager_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the Secret Manager feature.`, + MaxItems: 1, + DiffSuppressFunc: SecretManagerCfgSuppress, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Enable the Secret manager csi component.`, + }, + }, + }, + }, +<% end -%> "project": { Type: schema.TypeString, @@ -2273,6 +2291,9 @@ func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) er IpAllocationPolicy: ipAllocationBlock, <% unless version == "ga" -%> PodSecurityPolicyConfig: expandPodSecurityPolicyConfig(d.Get("pod_security_policy_config")), +<% end -%> +<% unless version == "ga" -%> + SecretManagerConfig: 
expandSecretManagerConfig(d.Get("secret_manager_config")), <% end -%> Autoscaling: expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), BinaryAuthorization: expandBinaryAuthorization(d.Get("binary_authorization")), @@ -2915,6 +2936,10 @@ func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) erro if err := d.Set("cluster_telemetry", flattenClusterTelemetry(cluster.ClusterTelemetry)); err != nil { return err } + + if err := d.Set("secret_manager_config", flattenSecretManagerConfig(cluster.SecretManagerConfig)); err != nil { + return err + } <% end -%> if err := d.Set("resource_labels", cluster.ResourceLabels); err != nil { @@ -3901,6 +3926,35 @@ func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) er } <% end -%> +<% unless version == 'ga' -%> + if d.HasChange("secret_manager_config") { + c := d.Get("secret_manager_config") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredSecretManagerConfig: expandSecretManagerConfig(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating secret manager csi driver config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s secret manager csi add-on has been updated", d.Id()) + } +<% end -%> + if d.HasChange("workload_identity_config") { // Because GKE uses a non-RESTful update function, when removing the // feature you need to specify a fairly full request body or it fails: @@ -5274,6 +5328,21 @@ 
func expandPodSecurityPolicyConfig(configured interface{}) *container.PodSecurit } <% end -%> +<% unless version == 'ga' -%> +func expandSecretManagerConfig(configured interface{}) *container.SecretManagerConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.SecretManagerConfig{ + Enabled: config["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } +} +<% end -%> + func expandDefaultMaxPodsConstraint(v interface{}) *container.MaxPodsConstraint { if v == nil { return nil @@ -6140,6 +6209,24 @@ func flattenPodSecurityPolicyConfig(c *container.PodSecurityPolicyConfig) []map[ <% end -%> +<% unless version == 'ga' -%> +func flattenSecretManagerConfig(c *container.SecretManagerConfig) []map[string]interface{} { + if c == nil { + return []map[string]interface{}{ + { + "enabled": false, + }, + } + } + return []map[string]interface{}{ + { + "enabled": c.Enabled, + }, + } +} + +<% end -%> + func flattenResourceUsageExportConfig(c *container.ResourceUsageExportConfig) []map[string]interface{} { if c == nil { return nil @@ -6589,6 +6676,22 @@ func podSecurityPolicyCfgSuppress(k, old, new string, r *schema.ResourceData) bo } <% end -%> +<% unless version == 'ga' -%> +func SecretManagerCfgSuppress(k, old, new string, r *schema.ResourceData) bool { + if k == "secret_manager_config.#" && old == "1" && new == "0" { + if v, ok := r.GetOk("secret_manager_config"); ok { + cfgList := v.([]interface{}) + if len(cfgList) > 0 { + d := cfgList[0].(map[string]interface{}) + // Suppress if old value was {enabled == false} + return !d["enabled"].(bool) + } + } + } + return false +} +<% end -%> + func containerClusterNetworkPolicyDiffSuppress(k, old, new string, r *schema.ResourceData) bool { // if network_policy configuration is empty, we store it as populated and enabled=false, and // provider=PROVIDER_UNSPECIFIED, in the case that it was previously stored with this state, diff 
--git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb index a1b47beaa7d2..e8cad2c7d0e0 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb @@ -3223,6 +3223,59 @@ func TestAccContainerCluster_withIdentityServiceConfig(t *testing.T) { }) } +<% unless version == 'ga' -%> +func TestAccContainerCluster_withSecretManagerConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withSecretManagerConfigEnabled(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withSecretManagerConfigUpdated(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_basic(clusterName, networkName, 
subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} +<% end -%> + func TestAccContainerCluster_withLoggingConfig(t *testing.T) { t.Parallel() @@ -9227,6 +9280,41 @@ resource "google_container_cluster" "primary" { `, name, networkName, subnetworkName) } +<% unless version == 'ga' -%> +func testAccContainerCluster_withSecretManagerConfigEnabled(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + secret_manager_config { + enabled = true + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withSecretManagerConfigUpdated(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + secret_manager_config { + enabled = false + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} +<% end -%> + func testAccContainerCluster_withLoggingConfigEnabled(name, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "primary" { diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 3b7c6f6d39cf..94443c5c1d4a 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -292,6 +292,10 @@ region are guaranteed to support the same version. [PodSecurityPolicy](https://cloud.google.com/kubernetes-engine/docs/how-to/pod-security-policies) feature. 
Structure is [documented below](#nested_pod_security_policy_config). +* `secret_manager_config` - (Optional, [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)) Configuration for the + [SecretManagerConfig](https://cloud.google.com/secret-manager/docs/secret-manager-managed-csi-component) feature. + Structure is [documented below](#nested_secret_manager_config). + * `authenticator_groups_config` - (Optional) Configuration for the [Google Groups for GKE](https://cloud.google.com/kubernetes-engine/docs/how-to/role-based-access-control#groups-setup-gsuite) feature. Structure is [documented below](#nested_authenticator_groups_config). @@ -1106,6 +1110,10 @@ notification_config { * `enabled` (Required) - Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created. +The `secret_manager_config` block supports: + +* `enabled` (Required) - Enable the Secret Manager add-on for this cluster. + The `private_cluster_config` block supports: * `enable_private_nodes` (Optional) - Enables the private cluster feature, From a1e28b594bd0b0c411cc02dae0af02d5e4627a22 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Mon, 3 Jun 2024 12:26:28 -0700 Subject: [PATCH 043/356] Convert sweeeper template with Go (#10862) --- mmv1/api/resource.go | 26 +++ mmv1/api/resource/sweeper.go | 27 +++ mmv1/provider/template_data.go | 8 + mmv1/provider/terraform.go | 58 ++---- mmv1/templates/terraform/sweeper_file.go.tmpl | 182 ++++++++++++++++++ 5 files changed, 261 insertions(+), 40 deletions(-) create mode 100644 mmv1/api/resource/sweeper.go create mode 100644 mmv1/templates/terraform/sweeper_file.go.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 6d9ad9bdf4c7..2dab965b708c 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -222,6 +222,9 @@ type Resource struct { // If true, skip sweeper generation for this resource SkipSweeper bool `yaml:"skip_sweeper"` + // Override 
sweeper settings + Sweeper resource.Sweeper + Timeouts *Timeouts // An array of function names that determine whether an error is retryable. @@ -1308,3 +1311,26 @@ func FormatDocDescription(desc string) string { func (r Resource) CustomTemplate(templatePath string, appendNewline bool) string { return resource.ExecuteTemplate(&r, templatePath, appendNewline) } + +// Returns the key of the list of resources in the List API response +// Used to get the list of resources to sweep +func (r Resource) ResourceListKey() string { + var k string + if r.NestedQuery != nil && len(r.NestedQuery.Keys) > 0 { + k = r.NestedQuery.Keys[0] + } + + if k == "" { + k = r.CollectionUrlKey + } + + return k +} + +func (r Resource) ListUrlTemplate() string { + return strings.Replace(r.CollectionUrl(), "zones/{{zone}}", "aggregated", 1) +} + +func (r Resource) DeleteUrlTemplate() string { + return fmt.Sprintf("%s%s", r.ProductMetadata.BaseUrl, r.DeleteUri()) +} diff --git a/mmv1/api/resource/sweeper.go b/mmv1/api/resource/sweeper.go new file mode 100644 index 000000000000..7ba3e789e203 --- /dev/null +++ b/mmv1/api/resource/sweeper.go @@ -0,0 +1,27 @@ +// Copyright 2024 Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource + +type Sweeper struct { + //Google::YamlValidator + // The field checked by sweeper to determine + // eligibility for deletion for generated resources + SweepableIdentifierField string `yaml:"sweepable_identifier_field"` +} + +// def validate +// super + +// check :sweepable_identifier_field, type: String +// end diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index d265a29f3dd5..55a9a1928c74 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -195,6 +195,14 @@ func (td *TemplateData) GenerateIamPolicyTestFile(filePath string, resource api. td.GenerateFile(filePath, templatePath, resource, true, templates...) } +func (td *TemplateData) GenerateSweeperFile(filePath string, resource api.Resource) { + templatePath := "templates/terraform/sweeper_file.go.tmpl" + templates := []string{ + templatePath, + } + td.GenerateFile(filePath, templatePath, resource, false, templates...) +} + func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, goFormat bool, templates ...string) { log.Printf("Generating %s", filePath) diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index e610c44769d8..80d128e11367 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -52,7 +52,7 @@ type Terraform struct { Version product.Version - Product api.Product + Product *api.Product StartTime time.Time } @@ -61,7 +61,7 @@ func NewTerraform(product *api.Product, versionName string, startTime time.Time) t := Terraform{ ResourceCount: 0, IAMResourceCount: 0, - Product: *product, + Product: product, TargetVersionName: versionName, Version: *product.VersionObjOrClosest(versionName), StartTime: startTime, @@ -122,8 +122,7 @@ func (t *Terraform) GenerateObject(object api.Resource, outputFolder, productPat if generateCode { log.Printf("Generating %s tests", object.Name) t.GenerateResourceTests(object, *templateData, outputFolder) - // TODO Q2 - // 
generate_resource_sweepers(pwd, data.clone) + t.GenerateResourceSweeper(object, *templateData, outputFolder) } } @@ -179,6 +178,20 @@ func (t *Terraform) GenerateResourceTests(object api.Resource, templateData Temp templateData.GenerateTestFile(targetFilePath, object) } +func (t *Terraform) GenerateResourceSweeper(object api.Resource, templateData TemplateData, outputFolder string) { + if object.SkipSweeper || object.CustomCode.CustomDelete != "" || object.CustomCode.PreDelete != "" || object.CustomCode.PostDelete != "" || object.SkipDelete { + return + } + + productName := t.Product.ApiName + targetFolder := path.Join(outputFolder, t.FolderName(), "services", productName) + if err := os.MkdirAll(targetFolder, os.ModePerm); err != nil { + log.Println(fmt.Errorf("error creating parent directory %v: %v", targetFolder, err)) + } + targetFilePath := path.Join(targetFolder, fmt.Sprintf("resource_%s_sweeper.go", t.FullResourceName(object))) + templateData.GenerateSweeperFile(targetFilePath, object) +} + func (t *Terraform) GenerateOperation(outputFolder string) { // TODO Q2 @@ -245,6 +258,7 @@ func (t *Terraform) GenerateIamDocumentation(object api.Resource, templateData T templateData.GenerateIamDatasourceDocumentationFile(targetFilePath, object) } +// Finds the folder name for a given version of the terraform provider func (t *Terraform) FolderName() string { if t.TargetVersionName == "ga" { return "google" @@ -1016,42 +1030,6 @@ func languageFromFilename(filename string) string { } } -// # Finds the folder name for a given version of the terraform provider -// def folder_name(version) -// -// version == 'ga' ? 
'google' : "google-#{version}" -// -// end -// -// def generate_documentation(pwd, data) -// -// target_folder = data.output_folder -// target_folder = File.join(target_folder, 'website', 'docs', 'r') -// FileUtils.mkpath target_folder -// filepath = File.join(target_folder, "#{full_resource_name(data)}.html.markdown") -// data.generate(pwd, 'templates/terraform/resource.html.markdown.erb', filepath, self) -// -// end -// -// def generate_resource_sweepers(pwd, data) -// -// return if data.object.skip_sweeper || -// data.object.custom_code.custom_delete || -// data.object.custom_code.pre_delete || -// data.object.custom_code.post_delete || -// data.object.skip_delete -// -// product_name = @api.api_name -// target_folder = File.join(folder_name(data.version), 'services', product_name) -// file_name = -// "#{target_folder}/resource_#{full_resource_name(data)}_sweeper.go" -// FileUtils.mkpath folder_name(data.version) -// data.generate(pwd, -// 'templates/terraform/sweeper_file.go.erb', -// file_name, -// self) -// -// end // // # Returns the id format of an object, or self_link_uri if none is explicitly defined // # We prefer the long name of a resource as the id so that users can reference diff --git a/mmv1/templates/terraform/sweeper_file.go.tmpl b/mmv1/templates/terraform/sweeper_file.go.tmpl new file mode 100644 index 000000000000..e70186347b66 --- /dev/null +++ b/mmv1/templates/terraform/sweeper_file.go.tmpl @@ -0,0 +1,182 @@ +{{/* <% if hc_downstream */ -}} +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package {{ lower $.ProductMetadata.Name }} + +import ( + "context" + "log" + "strings" + "testing" + + "{{ $.ImportPath }}/envvar" + "{{ $.ImportPath }}/sweeper" + "{{ $.ImportPath }}/tpgresource" + transport_tpg "{{ $.ImportPath }}/transport" +) + +func init() { + sweeper.AddTestSweepers("{{ $.ResourceName }}", testSweep{{ $.ResourceName }}) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweep{{ $.ResourceName }}(region string) error { + resourceName := "{{ $.ResourceName }}" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("{{ $.ListUrlTemplate }}", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["{{ $.ResourceListKey }}"] + if !ok { + 
log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + {{- if contains $.ListUrlTemplate "/aggregated/" }} + var rl []interface{} + zones := resourceList.(map[string]interface{}) + // Loop through every zone in the list response + for _, zonesValue := range zones { + zone := zonesValue.(map[string]interface{}) + for k, v := range zone { + // Zone map either has resources or a warning stating there were no resources found in the zone + if k != "warning" { + resourcesInZone := v.([]interface{}) + rl = append(rl, resourcesInZone...) + } + } + } + {{- else }} + + rl := resourceList.([]interface{}) + {{- end }} + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + {{- if contains $.DeleteUrlTemplate "_id" }} + var name string + // Id detected in the delete URL, attempt to use id. + if obj["id"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["id"].(string)) + } else if obj["name"] != nil { + name = tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + } else { + log.Printf("[INFO][SWEEPER_LOG] %s resource name and id were nil", resourceName) + return nil + } + {{- else }} + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + {{- end }} + // Skip resources that shouldn't be sweeped + {{- if $.Sweeper.SweepableIdentifierField }} + if !sweeper.IsSweepableTestResource(obj["{{ $.Sweeper.SweepableIdentifierField }}"].(string)) { + {{- else }} + if !sweeper.IsSweepableTestResource(name) { + {{- end }} + nonPrefixCount++ + continue + } + + deleteTemplate := "{{ $.DeleteUrlTemplate }}" + {{- if contains $.ListUrlTemplate "/aggregated/" }} + if obj["zone"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s 
resource zone was nil", resourceName) + return nil + } + zone := tpgresource.GetResourceNameFromSelfLink(obj["zone"].(string)) + deleteTemplate = strings.Replace(deleteTemplate, "{{"{{zone}}"}}", zone, -1) + + {{- end }} + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} From 5449997c69a891356f1f417da5cc895703474955 Mon Sep 17 00:00:00 2001 From: muhammed kamal Date: Mon, 3 Jun 2024 21:37:37 +0200 Subject: [PATCH 044/356] add support for partner metadata field on instance and instance template (#10753) --- .../services/compute/metadata.go.erb | 69 +++++ .../compute/resource_compute_instance.go.erb | 69 +++++ ...te_instance_from_machine_image_test.go.erb | 106 ++++++++ ...compute_instance_from_template_test.go.erb | 248 ++++++++++++++++++ .../resource_compute_instance_template.go.erb | 35 +++ ...urce_compute_instance_template_test.go.erb | 124 +++++++++ .../resource_compute_instance_test.go.erb | 210 ++++++++++++++- ...ce_compute_region_instance_template.go.erb | 40 +++ ...mpute_region_instance_template_test.go.erb | 120 +++++++++ .../docs/r/compute_instance.html.markdown | 2 + .../r/compute_instance_template.html.markdown | 2 + ...ute_region_instance_template.html.markdown | 2 + 12 files changed, 1024 
insertions(+), 3 deletions(-) diff --git a/mmv1/third_party/terraform/services/compute/metadata.go.erb b/mmv1/third_party/terraform/services/compute/metadata.go.erb index c6cbffee3045..3b378c9bb665 100644 --- a/mmv1/third_party/terraform/services/compute/metadata.go.erb +++ b/mmv1/third_party/terraform/services/compute/metadata.go.erb @@ -2,7 +2,15 @@ package compute import ( + <% unless version == 'ga' -%> + "encoding/json" + <% end -%> "errors" + <% unless version == 'ga' -%> + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" + "reflect" + <% end -%> "sort" <% if version == "ga" -%> @@ -161,3 +169,64 @@ func resourceInstanceMetadata(d tpgresource.TerraformResourceData) (*compute.Met return m, nil } +<% unless version == 'ga' -%> + +func resourceInstancePartnerMetadata(d tpgresource.TerraformResourceData) (map[string]compute.StructuredEntries, error) { + partnerMetadata := make(map[string]compute.StructuredEntries) + partnerMetadataMap := d.Get("partner_metadata").(map[string]interface{}) + if len(partnerMetadataMap) > 0 { + for key, value := range partnerMetadataMap { + var jsonMap map[string]interface{} + err := json.Unmarshal([]byte(value.(string)), &jsonMap) + if err != nil { + return nil, err + } + structuredEntries := jsonMap["entries"].(map[string]interface{}) + structuredEntriesJson, err := json.Marshal(&structuredEntries) + if err != nil { + return nil, err + } + partnerMetadata[key] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(structuredEntriesJson), + } + } + } + return partnerMetadata, nil +} + +func resourceInstancePatchPartnerMetadata(d tpgresource.TerraformResourceData, currentPartnerMetadata map[string]compute.StructuredEntries) map[string]compute.StructuredEntries { + partnerMetadata, _ := resourceInstancePartnerMetadata(d) + for key := range currentPartnerMetadata { + if _, ok := partnerMetadata[key]; !ok { + partnerMetadata[key] = compute.StructuredEntries{} + } + } + return 
partnerMetadata + +} +func flattenPartnerMetadata(partnerMetadata map[string]compute.StructuredEntries) (map[string]string, error) { + partnerMetadataMap := make(map[string]string) + for key, value := range partnerMetadata { + + jsonString, err := json.Marshal(&value) + if err != nil { + return nil, err + } + if value.Entries != nil { + partnerMetadataMap[key] = string(jsonString) + } + + } + return partnerMetadataMap, nil +} +func ComparePartnerMetadataDiff(_, old, new string, d *schema.ResourceData) bool { + var oldJson map[string]interface{} + var newJson map[string]interface{} + json.Unmarshal([]byte(old), &oldJson) + json.Unmarshal([]byte(new), &newJson) + if reflect.DeepEqual(oldJson, newJson) { + return true + } + return false +} +<% end -%> \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 7be414a69952..97bf3c56f864 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -701,6 +701,17 @@ func ResourceComputeInstance() *schema.Resource { Description: `Metadata key/value pairs made available within the instance.`, }, + <% unless version == 'ga' -%> + "partner_metadata": { + Type: schema.TypeMap, + Optional: true, + DiffSuppressFunc: ComparePartnerMetadataDiff, + DiffSuppressOnRefresh: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Partner Metadata Map made available within the instance.`, + }, + <% end -%> + "metadata_startup_script": { Type: schema.TypeString, Optional: true, @@ -1198,7 +1209,11 @@ func getInstance(config *transport_tpg.Config, d *schema.ResourceData) (*compute if err != nil { return nil, err } + <% if version == "ga" -%> instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() + <% else -%> + 
instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).View("FULL").Do() + <% end -%> if err != nil { return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) } @@ -1283,6 +1298,13 @@ func expandComputeInstance(project string, d *schema.ResourceData, config *trans return nil, fmt.Errorf("Error creating metadata: %s", err) } + <% unless version == 'ga' -%> + PartnerMetadata, err := resourceInstancePartnerMetadata(d) + if err != nil { + return nil, fmt.Errorf("Error creating partner metadata: %s", err) + } + <% end -%> + networkInterfaces, err := expandNetworkInterfaces(d, config) if err != nil { return nil, fmt.Errorf("Error creating network interfaces: %s", err) @@ -1308,6 +1330,9 @@ func expandComputeInstance(project string, d *schema.ResourceData, config *trans Disks: disks, MachineType: machineTypeUrl, Metadata: metadata, + <% unless version == 'ga' -%> + PartnerMetadata: PartnerMetadata, + <% end -%> Name: d.Get("name").(string), NetworkInterfaces: networkInterfaces, NetworkPerformanceConfig: networkPerformanceConfig, @@ -1484,6 +1509,19 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("metadata_fingerprint", instance.Metadata.Fingerprint); err != nil { return fmt.Errorf("Error setting metadata_fingerprint: %s", err) } + + <% unless version == 'ga' -%> + if instance.PartnerMetadata != nil { + partnerMetadata, err := flattenPartnerMetadata(instance.PartnerMetadata) + if err != nil { + return fmt.Errorf("Error parsing partner metadata: %s", err) + } + if err = d.Set("partner_metadata", partnerMetadata); err != nil { + return fmt.Errorf("Error setting partner metadata: %s", err) + } + } + <% end -%> + if err := d.Set("can_ip_forward", instance.CanIpForward); err != nil { return fmt.Errorf("Error setting can_ip_forward: %s", err) } @@ -1815,6 +1853,37 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta 
interface{}) err } } +<% unless version == 'ga' -%> + if d.HasChange("partner_metadata") { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).View("FULL").Do() + if err != nil { + return fmt.Errorf("Error retrieving partner_metadata: %s", err) + } + instance.Fingerprint = instance.Fingerprint + instance.PartnerMetadata = resourceInstancePatchPartnerMetadata(d, instance.PartnerMetadata) + instance.NullFields = []string{"partnerMetadata"} + + op, err := config.NewComputeClient(userAgent).Instances.Update(project, zone, instance.Name, instance).Do() + if err != nil { + return fmt.Errorf("Error updating partner_metadata: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "partner metadata to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + return nil + }, + }) + + if err != nil { + return err + } + } + +<% end -%> if d.HasChange("tags") { tags := resourceInstanceTags(d) tagsV1 := &compute.Tags{} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb index bf62de039797..c918e8f9cabe 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb @@ -16,6 +16,9 @@ import ( <% else -%> compute "google.golang.org/api/compute/v0.beta" <% end -%> +<% unless version == 'ga' -%> + "google.golang.org/api/googleapi" +<% end -%> ) func TestAccComputeInstanceFromMachineImage_basic(t *testing.T) { @@ -135,6 +138,37 @@ func TestAccComputeInstanceFromMachineImageWithOverride_localSsdRecoveryTimeout( }) } +<% unless version == 'ga' -%> +func 
TestAccComputeInstanceFromMachineImageWithOverride_partnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + generatedInstanceName := fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_machine_image.foobar" + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromMachineImageWithOverride_partnerMetadata(instanceName, generatedInstanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + }, + }) +} +<% end -%> + func TestAccComputeInstanceFromMachineImage_overrideMetadataDotStartupScript(t *testing.T) { t.Parallel() @@ -440,6 +474,78 @@ resource "google_compute_instance_from_machine_image" "foobar" { `, instance, instance, newInstance) } +<% unless version == 'ga' -%> +func testAccComputeInstanceFromMachineImageWithOverride_partnerMetadata(instance, newInstance string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm" { + provider = google-beta + + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } + + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + } + + 
partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key = "value" + } + }) + } +} + +resource "google_compute_machine_image" "foobar" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar.self_link + + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } +} +`, instance, instance, newInstance) +} +<% end -%> + func testAccComputeInstanceFromMachineImage_overrideMetadataDotStartupScript(instanceName, generatedInstanceName string) string { return fmt.Sprintf(` resource "google_compute_instance" "vm" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb index 1d5bbd89500c..fa31de54ff2e 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_template_test.go.erb @@ -15,6 +15,9 @@ import ( <% else -%> compute "google.golang.org/api/compute/v0.beta" <% end -%> +<% unless version == 'ga' -%> + "google.golang.org/api/googleapi" +<% end -%> ) func TestAccComputeInstanceFromTemplate_basic(t *testing.T) { @@ -166,6 +169,70 @@ func TestAccComputeInstanceFromTemplateWithOverride_localSsdRecoveryTimeout(t *t }) } +<% unless version == 'ga' -%> +func TestAccComputeInstanceFromTemplate_partnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + 
templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_partnerMetadata(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromTemplateWithOverride_partnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeInstanceFromTemplateWithOverride_partnerMetadata(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + }, + }) +} +<% end -%> + <% unless version == 'ga' -%> func TestAccComputeInstanceFromRegionTemplate_basic(t *testing.T) { @@ -757,6 +824,187 @@ resource "google_compute_instance_from_template" "foobar" { `, template, template, instance) } +<% unless version == 'ga' -%> +func testAccComputeInstanceFromTemplate_partnerMetadata(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + 
source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } +} +`, template, template, instance) +} + +func testAccComputeInstanceFromTemplateWithOverride_partnerMetadata(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + } + }) + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } +} +`, template, template, instance) 
+} +<% end -%> + <% unless version == 'ga' -%> func testAccComputeInstanceFromRegionTemplate_basic(instance, template string) string { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb index 9e652212aead..6db07747edb0 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb @@ -382,6 +382,16 @@ Google Cloud KMS.`, ForceNew: true, Description: `An alternative to using the startup-script metadata key, mostly to match the compute_instance resource. This replaces the startup-script metadata key on the created instance and thus the two mechanisms are not allowed to be used simultaneously.`, }, + <% unless version == 'ga' -%> + "partner_metadata": { + Type: schema.TypeMap, + Optional: true, + DiffSuppressFunc: ComparePartnerMetadataDiff, + DiffSuppressOnRefresh: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Partner Metadata Map made available within the instance.`, + }, + <% end -%> "metadata_fingerprint": { Type: schema.TypeString, @@ -1368,6 +1378,12 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac if err != nil { return err } + <% unless version == 'ga' -%> + PartnerMetadata, err := resourceInstancePartnerMetadata(d) + if err != nil { + return err + } + <% end -%> networks, err := expandNetworkInterfaces(d, config) if err != nil { @@ -1396,6 +1412,9 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac MinCpuPlatform: d.Get("min_cpu_platform").(string), Disks: disks, Metadata: metadata, + <% unless version == 'ga' -%> + PartnerMetadata: PartnerMetadata, + <% end -%> NetworkInterfaces: networks, NetworkPerformanceConfig: networkPerformanceConfig, Scheduling: scheduling, @@ -1707,7 +1726,11 @@ func 
resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ } splits := strings.Split(idStr, "/") + <% if version == "ga" -%> instanceTemplate, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, splits[len(splits)-1]).Do() + <% else -%> + instanceTemplate, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, splits[len(splits)-1]).View("FULL").Do() + <% end -%> if err != nil { return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) } @@ -1734,6 +1757,18 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ } } + <% unless version == 'ga' -%> + if instanceTemplate.Properties.PartnerMetadata != nil { + partnerMetadata, err := flattenPartnerMetadata(instanceTemplate.Properties.PartnerMetadata) + if err != nil { + return fmt.Errorf("Error parsing partner metadata: %s", err) + } + if err = d.Set("partner_metadata", partnerMetadata); err != nil { + return fmt.Errorf("Error setting partner metadata: %s", err) + } + } + <% end -%> + // Set the tags fingerprint if there is one. 
if instanceTemplate.Properties.Tags != nil { if err = d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint); err != nil { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb index 0e5a6615d473..c7adb60d0e11 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb @@ -3,6 +3,9 @@ package compute_test import ( +<% unless version == 'ga' -%> + "encoding/json" +<% end -%> "fmt" "reflect" "regexp" @@ -22,6 +25,10 @@ import ( <% else -%> compute "google.golang.org/api/compute/v0.beta" <% end -%> + +<% unless version == 'ga' -%> + "google.golang.org/api/googleapi" +<% end -%> ) const DEFAULT_MIN_CPU_TEST_VALUE = "Intel Haswell" @@ -1250,6 +1257,43 @@ func TestAccComputeInstanceTemplate_localSsdRecoveryTimeout(t *testing.T) { }) } +<% unless version == 'ga' -%> +func TestAccComputeInstanceTemplate_partnerMetadata(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_partnerMetadata(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + 
testAccCheckComputeInstanceTemplatePartnerMetadata(&instanceTemplate, expectedPartnerMetadata), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{fmt.Sprintf("partner_metadata.%s", namespace)}, + }, + }, + }) + +} +<% end -%> + func TestAccComputeInstanceTemplate_sourceSnapshotEncryptionKey(t *testing.T) { t.Parallel() @@ -1534,8 +1578,14 @@ func testAccCheckComputeInstanceTemplateExistsInProject(t *testing.T, n, p strin splits := strings.Split(rs.Primary.ID, "/") templateName := splits[len(splits)-1] + <% if version == "ga" -%> found, err := config.NewComputeClient(config.UserAgent).InstanceTemplates.Get( p, templateName).Do() + <% else -%> + found, err := config.NewComputeClient(config.UserAgent).InstanceTemplates.Get( + p, templateName).View("FULL").Do() + <% end -%> + if err != nil { return err } @@ -1703,6 +1753,36 @@ func testAccCheckComputeInstanceTemplateLocalSsdRecoveryTimeout(instanceTemplate } } +<% unless version == 'ga' -%> +func testAccCheckComputeInstanceTemplatePartnerMetadata(instanceTemplate *compute.InstanceTemplate, expectedPartnerMetadata map[string]compute.StructuredEntries) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate == nil { + return fmt.Errorf("instance template is nil") + } + if instanceTemplate.Properties.PartnerMetadata == nil { + return fmt.Errorf("no partner metadata") + } + expectedPartnerMetadataMap := make(map[string]interface{}) + acutalPartnerMetadataMap := make(map[string]interface{}) + for key, value := range instanceTemplate.Properties.PartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + acutalPartnerMetadataMap[key] = jsonMap + } + for key, value := range expectedPartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + expectedPartnerMetadataMap[key] = jsonMap + } + if 
!reflect.DeepEqual(acutalPartnerMetadataMap, expectedPartnerMetadataMap) { + return fmt.Errorf("got the wrong instance partne metadata action: have: %+v; want: %+v", acutalPartnerMetadataMap, expectedPartnerMetadataMap) + } + return nil + + } +} +<% end -%> + func testAccCheckComputeInstanceTemplateAutomaticRestart(instanceTemplate *compute.InstanceTemplate, automaticRestart bool) resource.TestCheckFunc { return func(s *terraform.State) error { ar := instanceTemplate.Properties.Scheduling.AutomaticRestart @@ -3851,6 +3931,50 @@ resource "google_compute_instance_template" "foobar" { `, suffix) } +<% unless version == 'ga' -%> +func testAccComputeInstanceTemplate_partnerMetadata(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} +<% end -%> + func testAccComputeInstanceTemplate_sourceSnapshotEncryptionKey(context map[string]interface{}) string { return acctest.Nprintf(` data "google_kms_key_ring" "ring" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index ef606ea0654c..586fd0dd7739 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ 
b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -2,7 +2,13 @@ package compute_test import ( + <% unless version == 'ga' -%> + "encoding/json" + <% end -%> "fmt" + <% unless version == 'ga' -%> + "google.golang.org/api/googleapi" + <% end -%> "reflect" "regexp" "sort" @@ -1596,7 +1602,7 @@ func TestAccComputeInstance_guestAccelerator(t *testing.T) { Config: testAccComputeInstance_guestAccelerator(instanceName, 1), Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), - testAccCheckComputeInstanceHasGuestAccelerator(&instance, "nvidia-tesla-k80", 1), + testAccCheckComputeInstanceHasGuestAccelerator(&instance, "nvidia-tesla-t4", 1), ), }, computeInstanceImportStep("us-east1-d", instanceName, []string{"metadata.baz", "metadata.foo"}), @@ -2834,6 +2840,108 @@ func TestAccComputeInstance_localSsdRecoveryTimeout_update(t *testing.T) { }) } +<% unless version == 'ga' -%> +func TestAccComputeInstance_partnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_partnerMetadata(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + 
computeInstanceImportStep("us-central1-a", instanceName, []string{fmt.Sprintf("partner_metadata.%s", namespace)}), + }, + }) +} + +func TestAccComputeInstance_partnerMetadata_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_partnerMetadata_empty(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + + { + Config: testAccComputeInstance_partnerMetadata(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{fmt.Sprintf("partner_metadata.%s", namespace)}), + }, + }) +} + +func TestAccComputeInstance_partnerMetadata_deletePartnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_partnerMetadata(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + + { + Config: testAccComputeInstance_partnerMetadata_empty(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{fmt.Sprintf("partner_metadata.%s", namespace)}), + }, + }) +} +<% end -%> + func TestAccComputeInstance_metadataStartupScript_update(t *testing.T) { t.Parallel() @@ -3497,9 +3605,13 @@ func testAccCheckComputeInstanceExistsInProject(t *testing.T, n, p string, insta } config := acctest.GoogleProviderConfig(t) - + <% if version == "ga" -%> found, err := config.NewComputeClient(config.UserAgent).Instances.Get( p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + <% else -%> + found, err := config.NewComputeClient(config.UserAgent).Instances.Get( + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).View("FULL").Do() + <% end -%> if err != nil { return err } @@ -3655,6 +3767,36 @@ func testAccCheckComputeInstanceLocalSsdRecoveryTimeout(instance *compute.Instan } } +<% unless version == 'ga' -%> +func testAccCheckComputeInstancePartnerMetadata(instance *compute.Instance, expectedPartnerMetadata map[string]compute.StructuredEntries) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance == nil { + return fmt.Errorf("instance is nil") + } + if instance.PartnerMetadata == nil { + return fmt.Errorf("no partner metadata") + } + 
expectedPartnerMetadataMap := make(map[string]interface{}) + acutalPartnerMetadataMap := make(map[string]interface{}) + for key, value := range instance.PartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + acutalPartnerMetadataMap[key] = jsonMap + } + for key, value := range expectedPartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + expectedPartnerMetadataMap[key] = jsonMap + } + if !reflect.DeepEqual(acutalPartnerMetadataMap, expectedPartnerMetadataMap) { + return fmt.Errorf("got the wrong instance partne metadata action: have: %+v; want: %+v", acutalPartnerMetadataMap, expectedPartnerMetadataMap) + } + return nil + + } +} +<% end -%> + func testAccCheckComputeInstanceTerminationAction(instance *compute.Instance, instanceTerminationActionWant string) resource.TestCheckFunc { return func(s *terraform.State) error { if instance == nil { @@ -6697,7 +6839,7 @@ resource "google_compute_instance" "foobar" { guest_accelerator { count = %d - type = "nvidia-tesla-k80" + type = "nvidia-tesla-t4" } } `, instance, count) @@ -8263,6 +8405,68 @@ resource "google_compute_instance" "foobar" { `, instance) } +<% unless version == 'ga' -%> +func testAccComputeInstance_partnerMetadata_empty(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +}`, instance) +} + +func testAccComputeInstance_partnerMetadata(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + 
machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } +}`, instance) +} +<% end -%> + func testAccComputeInstance_metadataStartupScript(instance, machineType, metadata string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb index ca64142f0710..56b931cae358 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb @@ -354,6 +354,17 @@ Google Cloud KMS.`, Description: `Metadata key/value pairs to make available from within instances created from this template.`, }, + <% unless version == 'ga' -%> + "partner_metadata": { + Type: schema.TypeMap, + Optional: true, + DiffSuppressFunc: ComparePartnerMetadataDiff, + DiffSuppressOnRefresh: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Partner Metadata Map made available within the instance.`, + }, + <% end -%> + "metadata_startup_script": { Type: schema.TypeString, Optional: true, @@ -1064,6 +1075,13 @@ func resourceComputeRegionInstanceTemplateCreate(d *schema.ResourceData, meta in return err } + <% unless version == 'ga' -%> + PartnerMetadata, err := resourceInstancePartnerMetadata(d) + if err != nil { + return err + } + <% end -%> + networks, err := expandNetworkInterfaces(d, config) if err != nil { return err @@ -1091,6 +1109,9 @@ func resourceComputeRegionInstanceTemplateCreate(d *schema.ResourceData, meta in MinCpuPlatform: 
d.Get("min_cpu_platform").(string), Disks: disks, Metadata: metadata, + <% unless version == 'ga' -%> + PartnerMetadata: PartnerMetadata, + <% end -%> NetworkInterfaces: networks, NetworkPerformanceConfig: networkPerformanceConfig, Scheduling: scheduling, @@ -1182,6 +1203,13 @@ func resourceComputeRegionInstanceTemplateRead(d *schema.ResourceData, meta inte return err } + <% unless version == 'ga' -%> + url, err = transport_tpg.AddQueryParams(url, map[string]string{"view": "FULL"}) + if err != nil { + return err + } + <% end -%> + instanceTemplate, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "GET", @@ -1231,6 +1259,18 @@ func resourceComputeRegionInstanceTemplateRead(d *schema.ResourceData, meta inte } } + <% unless version == 'ga' -%> + if instanceProperties.PartnerMetadata != nil { + partnerMetadata, err := flattenPartnerMetadata(instanceProperties.PartnerMetadata) + if err != nil { + return fmt.Errorf("Error parsing partner metadata: %s", err) + } + if err = d.Set("partner_metadata", partnerMetadata); err != nil { + return fmt.Errorf("Error setting partner metadata: %s", err) + } + } + <% end -%> + // Set the tags fingerprint if there is one. 
if instanceProperties.Tags != nil { if err = d.Set("tags_fingerprint", instanceProperties.Tags.Fingerprint); err != nil { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb index 519616b48b55..61fa831bd9bd 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb @@ -23,6 +23,9 @@ import ( <% else -%> compute "google.golang.org/api/compute/v0.beta" <% end -%> +<% unless version == 'ga' -%> + "google.golang.org/api/googleapi" +<% end -%> ) func TestAccComputeRegionInstanceTemplate_basic(t *testing.T) { @@ -1128,6 +1131,42 @@ func TestAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(t *testing.T) }) } +<% unless version == 'ga' -%> +func TestAccComputeRegionalInstanceTemplate_partnerMetadata(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionalInstanceTemplate_partnerMetadata(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionalInstanceTemplatePartnerMetadata(&instanceTemplate, 
expectedPartnerMetadata), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{fmt.Sprintf("partner_metadata.%s", namespace)}, + }, + }, + }) + +} +<% end -%> + func TestAccComputeRegionInstanceTemplate_sourceSnapshotEncryptionKey(t *testing.T) { t.Parallel() @@ -1294,6 +1333,12 @@ func testAccCheckComputeRegionInstanceTemplateExistsInProject(t *testing.T, n, p if config.BillingProject != "" { billingProject = config.BillingProject } + <% unless version == 'ga' -%> + url, err = transport_tpg.AddQueryParams(url, map[string]string{"view": "FULL"}) + if err != nil { + return err + } + <% end -%> found, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, @@ -1467,6 +1512,36 @@ func testAccCheckComputeRegionInstanceTemplateLocalSsdRecoveryTimeout(instanceTe } } +<% unless version == 'ga' -%> +func testAccCheckComputeRegionalInstanceTemplatePartnerMetadata(instanceTemplate *compute.InstanceTemplate, expectedPartnerMetadata map[string]compute.StructuredEntries) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate == nil { + return fmt.Errorf("instance template is nil") + } + if instanceTemplate.Properties.PartnerMetadata == nil { + return fmt.Errorf("no partner metadata") + } + expectedPartnerMetadataMap := make(map[string]interface{}) + acutalPartnerMetadataMap := make(map[string]interface{}) + for key, value := range instanceTemplate.Properties.PartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + acutalPartnerMetadataMap[key] = jsonMap + } + for key, value := range expectedPartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + expectedPartnerMetadataMap[key] = jsonMap + } + if !reflect.DeepEqual(acutalPartnerMetadataMap, expectedPartnerMetadataMap) { + return fmt.Errorf("got the wrong instance partne metadata 
action: have: %+v; want: %+v", acutalPartnerMetadataMap, expectedPartnerMetadataMap) + } + return nil + + } +} +<% end -%> + func testAccCheckComputeRegionInstanceTemplateAutomaticRestart(instanceTemplate *compute.InstanceTemplate, automaticRestart bool) resource.TestCheckFunc { return func(s *terraform.State) error { ar := instanceTemplate.Properties.Scheduling.AutomaticRestart @@ -3443,6 +3518,51 @@ resource "google_compute_region_instance_template" "foobar" { `, suffix) } +<% unless version == 'ga' -%> +func testAccComputeRegionalInstanceTemplate_partnerMetadata(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} +<% end -%> + func testAccComputeRegionInstanceTemplate_sourceSnapshotEncryptionKey(context map[string]interface{}) string { return acctest.Nprintf(` data "google_kms_key_ring" "ring" { diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index fc8b157e2899..9f7553b529dd 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -196,6 +196,8 @@ is desired, you will need to modify your state file manually using in 
`guest-os-features`, and `network_interface.0.nic-type` must be `GVNIC` in order for this setting to take effect. +* `partner_metadata` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) key/value pair represents partner metadata assigned to instance where the key represents a defined namespace and the value is a JSON string representing the entries associated with the namespace. + --- The `boot_disk` block supports: diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown index 7b056c20e8ff..e1030a25b6d9 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown @@ -375,6 +375,8 @@ The following arguments are supported: * `advanced_machine_features` (Optional) - Configure Nested Virtualisation and Simultaneous Hyper Threading on this VM. Structure is [documented below](#nested_advanced_machine_features) +* `partner_metadata` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) key/value pair represents partner metadata assigned to instance template where the key represents a defined namespace and the value is a JSON string representing the entries associated with the namespace. + The `disk` block supports: * `auto_delete` - (Optional) Whether or not the disk should be auto-deleted. 
diff --git a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown index bb02214b1fb0..daf0291d5b01 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown @@ -384,6 +384,8 @@ The following arguments are supported: * `advanced_machine_features` (Optional) - Configure Nested Virtualisation and Simultaneous Hyper Threading on this VM. Structure is [documented below](#nested_advanced_machine_features) +* `partner_metadata` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) key/value pair represents partner metadata assigned to instance template where the key represents a defined namespace and the value is a JSON string representing the entries associated with the namespace. + The `disk` block supports: * `auto_delete` - (Optional) Whether or not the disk should be auto-deleted. 
From 710c06bdc931d2228e1a825af70f8afcd5110985 Mon Sep 17 00:00:00 2001 From: Aleksandr Averbukh Date: Mon, 3 Jun 2024 22:34:38 +0200 Subject: [PATCH 045/356] Healthcare Fhir Store, promote `notification_configs` field to GA, deprecation message for `notification_config` (#10851) --- mmv1/products/healthcare/FhirStore.yaml | 5 +++-- .../go/healthcare_fhir_store_basic.tf.tmpl | 2 +- ...hcare_fhir_store_notification_configs.tf.tmpl | 4 ---- .../examples/healthcare_fhir_store_basic.tf.erb | 2 +- ...thcare_fhir_store_notification_configs.tf.erb | 4 ---- .../resource_healthcare_fhir_store_test.go.erb | 16 ++++++++-------- 6 files changed, 13 insertions(+), 20 deletions(-) diff --git a/mmv1/products/healthcare/FhirStore.yaml b/mmv1/products/healthcare/FhirStore.yaml index aca33e8dfb57..8c5f2a0e7f6a 100644 --- a/mmv1/products/healthcare/FhirStore.yaml +++ b/mmv1/products/healthcare/FhirStore.yaml @@ -55,9 +55,9 @@ examples: dataset_name: 'example-dataset' fhir_store_name: 'example-fhir-store' pubsub_topic: 'fhir-notifications' + skip_docs: true - !ruby/object:Provider::Terraform::Examples name: 'healthcare_fhir_store_notification_configs' - min_version: beta primary_resource_id: 'default' vars: dataset_name: 'example-dataset' @@ -192,6 +192,8 @@ properties: - !ruby/object:Api::Type::NestedObject name: notificationConfig + deprecation_message: >- + `notification_config` is deprecated and will be removed in a future major release. Use `notification_configs` instead. required: false properties: - !ruby/object:Api::Type::String @@ -300,7 +302,6 @@ properties: name: notificationConfigs description: |- A list of notifcation configs that configure the notification for every resource mutation in this FHIR store. 
- min_version: beta item_type: !ruby/object:Api::Type::NestedObject properties: - !ruby/object:Api::Type::String diff --git a/mmv1/templates/terraform/examples/go/healthcare_fhir_store_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/healthcare_fhir_store_basic.tf.tmpl index 6e2960e38211..a61cdf68c9f5 100644 --- a/mmv1/templates/terraform/examples/go/healthcare_fhir_store_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/healthcare_fhir_store_basic.tf.tmpl @@ -10,7 +10,7 @@ resource "google_healthcare_fhir_store" "default" { enable_history_import = false default_search_handling_strict = false - notification_config { + notification_configs { pubsub_topic = google_pubsub_topic.topic.id } diff --git a/mmv1/templates/terraform/examples/go/healthcare_fhir_store_notification_configs.tf.tmpl b/mmv1/templates/terraform/examples/go/healthcare_fhir_store_notification_configs.tf.tmpl index d22b7f3634ab..219fadbc32b5 100644 --- a/mmv1/templates/terraform/examples/go/healthcare_fhir_store_notification_configs.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/healthcare_fhir_store_notification_configs.tf.tmpl @@ -1,5 +1,4 @@ resource "google_healthcare_fhir_store" "default" { - provider = google-beta name = "{{index $.Vars "fhir_store_name"}}" dataset = google_healthcare_dataset.dataset.id version = "R4" @@ -8,7 +7,6 @@ resource "google_healthcare_fhir_store" "default" { disable_referential_integrity = false disable_resource_versioning = false enable_history_import = false - enable_history_modifications = false labels = { label1 = "labelvalue1" @@ -22,12 +20,10 @@ resource "google_healthcare_fhir_store" "default" { } resource "google_pubsub_topic" "topic" { - provider = google-beta name = "{{index $.Vars "pubsub_topic"}}" } resource "google_healthcare_dataset" "dataset" { - provider = google-beta name = "{{index $.Vars "dataset_name"}}" location = "us-central1" } diff --git a/mmv1/templates/terraform/examples/healthcare_fhir_store_basic.tf.erb 
b/mmv1/templates/terraform/examples/healthcare_fhir_store_basic.tf.erb index 487f1f318b15..084363773765 100644 --- a/mmv1/templates/terraform/examples/healthcare_fhir_store_basic.tf.erb +++ b/mmv1/templates/terraform/examples/healthcare_fhir_store_basic.tf.erb @@ -10,7 +10,7 @@ resource "google_healthcare_fhir_store" "default" { enable_history_import = false default_search_handling_strict = false - notification_config { + notification_configs { pubsub_topic = google_pubsub_topic.topic.id } diff --git a/mmv1/templates/terraform/examples/healthcare_fhir_store_notification_configs.tf.erb b/mmv1/templates/terraform/examples/healthcare_fhir_store_notification_configs.tf.erb index b980058870e8..359f160b769b 100644 --- a/mmv1/templates/terraform/examples/healthcare_fhir_store_notification_configs.tf.erb +++ b/mmv1/templates/terraform/examples/healthcare_fhir_store_notification_configs.tf.erb @@ -1,5 +1,4 @@ resource "google_healthcare_fhir_store" "default" { - provider = google-beta name = "<%= ctx[:vars]['fhir_store_name'] %>" dataset = google_healthcare_dataset.dataset.id version = "R4" @@ -8,7 +7,6 @@ resource "google_healthcare_fhir_store" "default" { disable_referential_integrity = false disable_resource_versioning = false enable_history_import = false - enable_history_modifications = false labels = { label1 = "labelvalue1" @@ -22,12 +20,10 @@ resource "google_healthcare_fhir_store" "default" { } resource "google_pubsub_topic" "topic" { - provider = google-beta name = "<%= ctx[:vars]['pubsub_topic']%>" } resource "google_healthcare_dataset" "dataset" { - provider = google-beta name = "<%= ctx[:vars]['dataset_name'] %>" location = "us-central1" } diff --git a/mmv1/third_party/terraform/services/healthcare/resource_healthcare_fhir_store_test.go.erb b/mmv1/third_party/terraform/services/healthcare/resource_healthcare_fhir_store_test.go.erb index acc3706515f1..1390f9e83cd2 100644 --- 
a/mmv1/third_party/terraform/services/healthcare/resource_healthcare_fhir_store_test.go.erb +++ b/mmv1/third_party/terraform/services/healthcare/resource_healthcare_fhir_store_test.go.erb @@ -156,19 +156,16 @@ resource "google_healthcare_fhir_store" "default" { version = "R4" - notification_config { - pubsub_topic = google_pubsub_topic.topic.id - } - -<% unless version == "ga" -%> notification_configs { pubsub_topic = google_pubsub_topic.topic.id send_full_resource = true send_previous_resource_on_delete = true } +<% unless version == "ga" -%> enable_history_modifications = true <% end -%> + labels = { label1 = "labelvalue1" } @@ -221,9 +218,12 @@ func testAccCheckGoogleHealthcareFhirStoreUpdate(t *testing.T, pubsubTopic strin return fmt.Errorf("fhirStore labels not updated: %s", gcpResourceUri) } - topicName := path.Base(response.NotificationConfig.PubsubTopic) - if topicName != pubsubTopic { - return fmt.Errorf("fhirStore 'NotificationConfig' not updated ('%s' != '%s'): %s", topicName, pubsubTopic, gcpResourceUri) + notifications := response.NotificationConfigs + if len(notifications) > 0 { + topicName := path.Base(notifications[0].PubsubTopic) + if topicName != pubsubTopic { + return fmt.Errorf("fhirStore 'NotificationConfig' not updated ('%s' != '%s'): %s", topicName, pubsubTopic, gcpResourceUri) + } } } From 40e5c97e051e9b4ac75cc1704c1e5a92eaa7bad5 Mon Sep 17 00:00:00 2001 From: vickramp Date: Mon, 3 Jun 2024 14:13:25 -0700 Subject: [PATCH 046/356] Adding zone distribution mode in the Cluster resource for Memorystore Redis cluster (#10458) --- mmv1/products/redis/Cluster.yaml | 31 +++++++ .../examples/go/redis_cluster_ha.tf.tmpl | 3 + .../examples/redis_cluster_ha.tf.erb | 3 + .../redis_cluster_ha_single_zone.tf.erb | 42 ++++++++++ .../redis/resource_redis_cluster_test.go.erb | 81 ++++++++++++++----- 5 files changed, 142 insertions(+), 18 deletions(-) create mode 100644 mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb diff --git 
a/mmv1/products/redis/Cluster.yaml b/mmv1/products/redis/Cluster.yaml index e4fdc0649c05..82077ea2985a 100644 --- a/mmv1/products/redis/Cluster.yaml +++ b/mmv1/products/redis/Cluster.yaml @@ -62,6 +62,19 @@ examples: prevent_destroy: 'false' oics_vars_overrides: prevent_destroy: 'false' + - !ruby/object:Provider::Terraform::Examples + name: "redis_cluster_ha_single_zone" + primary_resource_id: "cluster-ha-single-zone" + vars: + cluster_name: "ha-cluster-single-zone" + policy_name: "mypolicy" + subnet_name: "mysubnet" + network_name: "mynetwork" + prevent_destroy: 'true' + test_vars_overrides: + prevent_destroy: 'false' + oics_vars_overrides: + prevent_destroy: 'false' properties: - !ruby/object:Api::Type::Time name: createTime @@ -123,6 +136,24 @@ properties: default_from_api: true immutable: true required: false + - !ruby/object:Api::Type::NestedObject + name: zoneDistributionConfig + description: Immutable. Zone distribution config for Memorystore Redis cluster. + immutable: true + properties: + - !ruby/object:Api::Type::Enum + name: mode + description: | + Immutable. The mode for zone distribution for Memorystore Redis cluster. + If not provided, MULTI_ZONE will be used as default + values: + - :MULTI_ZONE + - :SINGLE_ZONE + default_from_api: true + - !ruby/object:Api::Type::String + name: zone + description: | + Immutable. The zone for single zone Memorystore Redis cluster. 
- !ruby/object:Api::Type::Array name: 'pscConfigs' description: | diff --git a/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl b/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl index b7e542f03441..799c11f9108b 100644 --- a/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl @@ -9,6 +9,9 @@ resource "google_redis_cluster" "{{$.PrimaryResourceId}}" { node_type = "REDIS_SHARED_CORE_NANO" transit_encryption_mode = "TRANSIT_ENCRYPTION_MODE_DISABLED" authorization_mode = "AUTH_MODE_DISABLED" + zone_distribution_config { + mode = "MULTI_ZONE" + } depends_on = [ google_network_connectivity_service_connection_policy.default ] diff --git a/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb b/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb index 6f97550fe142..9420df6abb47 100644 --- a/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb +++ b/mmv1/templates/terraform/examples/redis_cluster_ha.tf.erb @@ -12,6 +12,9 @@ resource "google_redis_cluster" "<%= ctx[:primary_resource_id] %>" { redis_configs = { maxmemory-policy = "volatile-ttl" } + zone_distribution_config { + mode = "MULTI_ZONE" + } depends_on = [ google_network_connectivity_service_connection_policy.default ] diff --git a/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb b/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb new file mode 100644 index 000000000000..63c7b20f5f81 --- /dev/null +++ b/mmv1/templates/terraform/examples/redis_cluster_ha_single_zone.tf.erb @@ -0,0 +1,42 @@ +resource "google_redis_cluster" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['cluster_name'] %>" + shard_count = 3 + psc_configs { + network = google_compute_network.producer_net.id + } + region = "us-central1" + zone_distribution_config { + mode = "SINGLE_ZONE" + zone = "us-central1-f" + } + depends_on = [ + 
google_network_connectivity_service_connection_policy.default + ] + + lifecycle { + prevent_destroy = <%= ctx[:vars]['prevent_destroy'] %> + } +} + +resource "google_network_connectivity_service_connection_policy" "default" { + name = "<%= ctx[:vars]['policy_name'] %>" + location = "us-central1" + service_class = "gcp-memorystore-redis" + description = "my basic service connection policy" + network = google_compute_network.producer_net.id + psc_config { + subnetworks = [google_compute_subnetwork.producer_subnet.id] + } +} + +resource "google_compute_subnetwork" "producer_subnet" { + name = "<%= ctx[:vars]['subnet_name'] %>" + ip_cidr_range = "10.0.0.248/29" + region = "us-central1" + network = google_compute_network.producer_net.id +} + +resource "google_compute_network" "producer_net" { + name = "<%= ctx[:vars]['network_name'] %>" + auto_create_subnetworks = false +} diff --git a/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go.erb b/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go.erb index 4397f6e5a5a7..d932cd0e4cf1 100644 --- a/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/redis/resource_redis_cluster_test.go.erb @@ -23,7 +23,7 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, nodeType: "REDIS_STANDARD_SMALL"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -33,12 +33,42 @@ func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, 
shardCount: 3, preventDestroy: false, nodeType: "REDIS_STANDARD_SMALL"}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), }, }, }) } + +// Validate zone distribution for the cluster. +func TestAccRedisCluster_createClusterWithZoneDistribution(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckRedisClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster with replica count 1 + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: "SINGLE_ZONE", zone: "us-central1-b"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // clean up the resource + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: "SINGLE_ZONE", zone: "us-central1-b"}), + }, + }, + }) +} + // Validate that replica count is updated for the cluster func TestAccRedisCluster_updateReplicaCount(t *testing.T) { t.Parallel() @@ -52,7 +82,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with replica count 1 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -62,7 +92,7 @@ func 
TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // update replica count to 2 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, preventDestroy: true}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -72,11 +102,11 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: false}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE"}), }, { // update replica count to 0 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: true}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -86,7 +116,7 @@ func TestAccRedisCluster_updateReplicaCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE"}), }, }, }) @@ -105,7 +135,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { Steps: []resource.TestStep{ { // create cluster with shard count 3 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: 
"google_redis_cluster.test", @@ -115,7 +145,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // update shard count to 5 - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: true}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), }, { ResourceName: "google_redis_cluster.test", @@ -125,7 +155,7 @@ func TestAccRedisCluster_updateShardCount(t *testing.T) { }, { // clean up the resource - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: false}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE"}), }, }, }) @@ -147,6 +177,7 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { Config: createOrUpdateRedisCluster(&ClusterParams{ name: name, shardCount: 3, + zoneDistributionMode: "MULTI_ZONE", redisConfigs: map[string]string{ "maxmemory-policy": "volatile-ttl", }}), @@ -162,6 +193,7 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { Config: createOrUpdateRedisCluster(&ClusterParams{ name: name, shardCount: 3, + zoneDistributionMode: "MULTI_ZONE", redisConfigs: map[string]string{ "maxmemory-policy": "allkeys-lru", "maxmemory-clients": "90%", @@ -175,7 +207,7 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { }, { // remove all redis configs - Config: createOrUpdateRedisCluster(&ClusterParams{name: name, shardCount: 3}), + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE"}), }, }, @@ -183,12 +215,14 @@ func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { } type ClusterParams struct { - name string - replicaCount int - shardCount int - preventDestroy bool - nodeType string - redisConfigs map[string]string + name string + 
replicaCount int + shardCount int + preventDestroy bool + nodeType string + redisConfigs map[string]string + zoneDistributionMode string + zone string } func createOrUpdateRedisCluster(params *ClusterParams) string { @@ -204,6 +238,16 @@ func createOrUpdateRedisCluster(params *ClusterParams) string { strBuilder.WriteString(fmt.Sprintf("%s = \"%s\"\n", key, value)) } + zoneDistributionConfigBlock := `` + if params.zoneDistributionMode != "" { + zoneDistributionConfigBlock = fmt.Sprintf(` + zone_distribution_config { + mode = "%s" + zone = "%s" + } + `, params.zoneDistributionMode, params.zone) + } + return fmt.Sprintf(` resource "google_redis_cluster" "test" { provider = google-beta @@ -218,6 +262,7 @@ resource "google_redis_cluster" "test" { redis_configs = { %s } + %s depends_on = [ google_network_connectivity_service_connection_policy.default ] @@ -249,7 +294,7 @@ resource "google_compute_network" "producer_net" { name = "%s" auto_create_subnetworks = false } -`, params.name, params.replicaCount, params.shardCount, params.nodeType, strBuilder.String(), lifecycleBlock, params.name, params.name, params.name) +`, params.name, params.replicaCount, params.shardCount, params.nodeType, strBuilder.String(), zoneDistributionConfigBlock, lifecycleBlock, params.name, params.name, params.name) } <% end -%> From daa35c8bc1d58207ded0c4fa431121a9c5058a26 Mon Sep 17 00:00:00 2001 From: joelkattapuram <46967875+joelkattapuram@users.noreply.github.com> Date: Tue, 4 Jun 2024 08:33:47 -0700 Subject: [PATCH 047/356] add nested virtualization, boot disk size, and pool size to boost (#10864) --- .../workstations/WorkstationConfig.yaml | 18 ++++++++++++++++++ .../examples/workstation_config_boost.tf.erb | 7 +++++-- ...workstations_workstation_config_test.go.erb | 7 +++++-- 3 files changed, 28 insertions(+), 4 deletions(-) diff --git a/mmv1/products/workstations/WorkstationConfig.yaml b/mmv1/products/workstations/WorkstationConfig.yaml index 65a868209b9a..ff575c7801f5 100644 --- 
a/mmv1/products/workstations/WorkstationConfig.yaml +++ b/mmv1/products/workstations/WorkstationConfig.yaml @@ -342,6 +342,24 @@ properties: name: 'machineType' description: | The type of machine that boosted VM instances will use—for example, e2-standard-4. For more information about machine types that Cloud Workstations supports, see the list of available machine types https://cloud.google.com/workstations/docs/available-machine-types. Defaults to e2-standard-4. + - !ruby/object:Api::Type::Integer + name: 'bootDiskSizeGb' + immutable: true + description: |- + Size of the boot disk in GB. The minimum boot disk size is `30` GB. Defaults to `50` GB. + default_from_api: true + - !ruby/object:Api::Type::Boolean + name: 'enableNestedVirtualization' + description: | + Whether to enable nested virtualization on the Compute Engine VMs backing boosted Workstations. + + See https://cloud.google.com/workstations/docs/reference/rest/v1beta/projects.locations.workstationClusters.workstationConfigs#GceInstance.FIELDS.enable_nested_virtualization + default_from_api: true + - !ruby/object:Api::Type::Integer + name: 'poolSize' + description: |- + Number of instances to pool for faster workstation boosting. 
+ default_from_api: true - !ruby/object:Api::Type::Array name: 'accelerators' description: | diff --git a/mmv1/templates/terraform/examples/workstation_config_boost.tf.erb b/mmv1/templates/terraform/examples/workstation_config_boost.tf.erb index d498c3eb1281..a9ac67997ed6 100644 --- a/mmv1/templates/terraform/examples/workstation_config_boost.tf.erb +++ b/mmv1/templates/terraform/examples/workstation_config_boost.tf.erb @@ -48,8 +48,11 @@ resource "google_workstations_workstation_config" "<%= ctx[:primary_resource_id] } } boost_configs { - id = "boost-1" - machine_type = "e2-standard-2" + id = "boost-2" + machine_type = "n1-standard-2" + pool_size = 2 + boot_disk_size_gb = 30 + enable_nested_virtualization = true } } } diff --git a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb index 10839661da72..16fb4c655cdf 100644 --- a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb +++ b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb @@ -541,8 +541,11 @@ func testAccWorkstationsWorkstationConfig_boost(context map[string]interface{}) } } boost_configs { - id = "boost-1" - machine_type = "e2-standard-2" + id = "boost-2" + machine_type = "n1-standard-2" + pool_size = 2 + boot_disk_size_gb = 30 + enable_nested_virtualization = true } } } From a5ef92165addd3b106614c04a2389c3b52d57376 Mon Sep 17 00:00:00 2001 From: Max Portocarrero CI&T <105444618+maxi-cit@users.noreply.github.com> Date: Tue, 4 Jun 2024 12:47:20 -0500 Subject: [PATCH 048/356] add datasource to google_compute_security_policy resource (#10780) --- .../provider/provider_mmv1_resources.go.erb | 1 + .../data_source_compute_secutity_policy.go | 69 ++++++++++++++++++ ...ata_source_compute_secutity_policy_test.go | 73 +++++++++++++++++++ 
.../d/compute_security_policy.html.markdown | 40 ++++++++++ 4 files changed, 183 insertions(+) create mode 100644 mmv1/third_party/terraform/services/compute/data_source_compute_secutity_policy.go create mode 100644 mmv1/third_party/terraform/services/compute/data_source_compute_secutity_policy_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/compute_security_policy.html.markdown diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 852aac90345e..b915b63e8e73 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -95,6 +95,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_compute_router": compute.DataSourceGoogleComputeRouter(), "google_compute_router_nat": compute.DataSourceGoogleComputeRouterNat(), "google_compute_router_status": compute.DataSourceGoogleComputeRouterStatus(), + "google_compute_security_policy": compute.DataSourceGoogleComputeSecurityPolicy(), "google_compute_snapshot": compute.DataSourceGoogleComputeSnapshot(), "google_compute_ssl_certificate": compute.DataSourceGoogleComputeSslCertificate(), "google_compute_ssl_policy": compute.DataSourceGoogleComputeSslPolicy(), diff --git a/mmv1/third_party/terraform/services/compute/data_source_compute_secutity_policy.go b/mmv1/third_party/terraform/services/compute/data_source_compute_secutity_policy.go new file mode 100644 index 000000000000..d0cba3c3287b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/data_source_compute_secutity_policy.go @@ -0,0 +1,69 @@ +package compute + +import ( + "errors" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func 
DataSourceGoogleComputeSecurityPolicy() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeSecurityPolicy().Schema) + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "self_link") + + return &schema.Resource{ + Read: dataSourceComputSecurityPolicyRead, + Schema: dsSchema, + } +} + +func dataSourceComputSecurityPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + id := "" + + if name, ok := d.GetOk("name"); ok { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + id = fmt.Sprintf("projects/%s/global/securityPolicies/%s", project, name.(string)) + d.SetId(id) + } else if selfLink, ok := d.GetOk("self_link"); ok { + parsed, err := tpgresource.ParseSecurityPolicyFieldValue(selfLink.(string), d, config) + if err != nil { + return err + } + + if err := d.Set("name", parsed.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + + if err := d.Set("project", parsed.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + id = fmt.Sprintf("projects/%s/global/securityPolicies/%s", parsed.Project, parsed.Name) + d.SetId(id) + } else { + return errors.New("Must provide either `self_link` or `name`") + } + + err := resourceComputeSecurityPolicyRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/data_source_compute_secutity_policy_test.go b/mmv1/third_party/terraform/services/compute/data_source_compute_secutity_policy_test.go new file mode 100644 index 000000000000..761fef6ae86d --- /dev/null +++ 
b/mmv1/third_party/terraform/services/compute/data_source_compute_secutity_policy_test.go @@ -0,0 +1,73 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceComputeSecurityPolicy_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceComputeSecurityPolicy_basic(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_compute_security_policy.sp1", "google_compute_security_policy.policy"), + acctest.CheckDataSourceStateMatchesResourceState("data.google_compute_security_policy.sp2", "google_compute_security_policy.policy"), + ), + }, + }, + }) +} + +func testAccDataSourceComputeSecurityPolicy_basic(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "my-policy-%s" + + rule { + action = "deny(403)" + priority = "1000" + description = "Deny access to IPs in 9.9.9.0/24" + + match { + versioned_expr = "SRC_IPS_V1" + + config { + src_ip_ranges = ["9.9.9.0/24"] + } + } + } + + rule { + action = "allow" + priority = "2147483647" + description = "default rule" + + match { + versioned_expr = "SRC_IPS_V1" + + config { + src_ip_ranges = ["*"] + } + } + } +} + +data "google_compute_security_policy" "sp1" { + name = google_compute_security_policy.policy.name + project = google_compute_security_policy.policy.project +} + +data "google_compute_security_policy" "sp2" { + self_link = google_compute_security_policy.policy.self_link +} +`, suffix) +} diff --git 
a/mmv1/third_party/terraform/website/docs/d/compute_security_policy.html.markdown b/mmv1/third_party/terraform/website/docs/d/compute_security_policy.html.markdown new file mode 100644 index 000000000000..f5b201ea4059 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/compute_security_policy.html.markdown @@ -0,0 +1,40 @@ +--- +subcategory: "Compute Engine" +description: |- + Get information about a Google Compute Security Policy. +--- + +# google_compute_security_policy + +To get more information about Google Compute Security Policy, see: + +* [API documentation](https://cloud.google.com/compute/docs/reference/rest/beta/securityPolicies) +* How-to Guides + * [Official Documentation](https://cloud.google.com/armor/docs/configure-security-policies) + +## Example Usage + +```hcl +data "google_compute_security_policy" "sp1" { + name = "my-policy" + project = "my-project" +} + +data "google_compute_security_policy" "sp2" { + self_link = "https://www.googleapis.com/compute/v1/projects/my-project/global/securityPolicies/my-policy" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Optional) The name of the security policy. Provide either this or a `self_link`. + +* `project` - (Optional) The project in which the resource belongs. If it is not provided, the provider project is used. + +* `self_link` - (Optional) The self_link of the security policy. Provide either this or a `name` + +## Attributes Reference + +See [google_compute_security_policy](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_security_policy) resource for details of the available attributes. 
From 00dddb9762ed27beba42df87aa6f2a6a77b575c7 Mon Sep 17 00:00:00 2001 From: vijaykanthm Date: Tue, 4 Jun 2024 11:12:37 -0700 Subject: [PATCH 049/356] Add Resource OrganizationEventThreatDetectionCustomModule (#10769) --- ...ationEventThreatDetectionCustomModule.yaml | 111 ++++++++++++++ .../securitycentermanagement/product.yaml | 23 +++ ...vent_threat_detection_custom_module.tf.erb | 19 +++ .../components/inputs/services_beta.kt | 5 + .../components/inputs/services_ga.kt | 5 + ...ent_threat_detection_custom_module_test.go | 138 ++++++++++++++++++ 6 files changed, 301 insertions(+) create mode 100644 mmv1/products/securitycentermanagement/OrganizationEventThreatDetectionCustomModule.yaml create mode 100644 mmv1/products/securitycentermanagement/product.yaml create mode 100644 mmv1/templates/terraform/examples/scc_management_organization_event_threat_detection_custom_module.tf.erb create mode 100644 mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_event_threat_detection_custom_module_test.go diff --git a/mmv1/products/securitycentermanagement/OrganizationEventThreatDetectionCustomModule.yaml b/mmv1/products/securitycentermanagement/OrganizationEventThreatDetectionCustomModule.yaml new file mode 100644 index 000000000000..d5b4f9bc0992 --- /dev/null +++ b/mmv1/products/securitycentermanagement/OrganizationEventThreatDetectionCustomModule.yaml @@ -0,0 +1,111 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'OrganizationEventThreatDetectionCustomModule' +description: | + Represents an instance of an Event Threat Detection custom module, including + its full module name, display name, enablement state, and last updated time. + You can create a custom module at the organization level only. +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Overview of custom modules for Event Threat Detection': 'https://cloud.google.com/security-command-center/docs/custom-modules-etd-overview' + api: 'https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.eventThreatDetectionCustomModules' +base_url: 'organizations/{{organization}}/locations/{{location}}/eventThreatDetectionCustomModules' +self_link: 'organizations/{{organization}}/locations/{{location}}/eventThreatDetectionCustomModules/{{name}}' +mutex: 'organizations/{{organization}}/locations/{{location}}/eventThreatDetectionCustomModules' +update_verb: :PATCH +update_mask: true +examples: + - !ruby/object:Provider::Terraform::Examples + name: "scc_management_organization_event_threat_detection_custom_module" + primary_resource_id: "example" + # Has a handwritten update test + skip_test: true + vars: + display_name: basic_custom_module + type: 'CONFIGURABLE_BAD_IP' + test_env_vars: + org_id: :ORG_ID + +parameters: + - !ruby/object:Api::Type::String + name: 'organization' + immutable: true + required: true + url_param_only: true + description: | + Numerical ID of the parent organization. + + - !ruby/object:Api::Type::String + name: 'location' + immutable: true + required: false + url_param_only: true + default_value: 'global' + description: | + Location ID of the parent organization. Only global is supported at the moment. 
+ + +properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + custom_flatten: templates/terraform/custom_flatten/name_from_self_link.erb + description: | + The resource name of the Event Threat Detection custom module. + Its format is "organizations/{organization}/locations/{location}/eventThreatDetectionCustomModules/{eventThreatDetectionCustomModule}". + - !ruby/object:Api::Type::String + name: 'config' + required: false + custom_expand: 'templates/terraform/custom_expand/json_schema.erb' + custom_flatten: 'templates/terraform/custom_flatten/json_schema.erb' + state_func: + 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); + return s }' + description: | + Config for the module. For the resident module, its config value is defined at this level. + For the inherited module, its config value is inherited from the ancestor module. + validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.StringIsJSON' + - !ruby/object:Api::Type::Enum + name: 'enablementState' + required: false + description: | + The state of enablement for the module at the given level of the hierarchy. + values: + - :ENABLED + - :DISABLED + - !ruby/object:Api::Type::String + name: 'type' + immutable: true + required: false + description: | + Immutable. Type for the module. e.g. CONFIGURABLE_BAD_IP. + - !ruby/object:Api::Type::String + name: 'displayName' + description: | + The human readable name to be displayed for the module. + - !ruby/object:Api::Type::String + name: 'updateTime' + output: true + description: | + The time at which the custom module was last updated. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and + up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
+ - !ruby/object:Api::Type::String + name: 'lastEditor' + output: true + description: | + The editor that last updated the custom module diff --git a/mmv1/products/securitycentermanagement/product.yaml b/mmv1/products/securitycentermanagement/product.yaml new file mode 100644 index 000000000000..1f8d5f92b745 --- /dev/null +++ b/mmv1/products/securitycentermanagement/product.yaml @@ -0,0 +1,23 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Product +name: SecurityCenterManagement +display_name: Security Command Center Management (SCC) +legacy_name: scc_management +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://securitycentermanagement.googleapis.com/v1/ +scopes: + - https://www.googleapis.com/auth/cloud-platform diff --git a/mmv1/templates/terraform/examples/scc_management_organization_event_threat_detection_custom_module.tf.erb b/mmv1/templates/terraform/examples/scc_management_organization_event_threat_detection_custom_module.tf.erb new file mode 100644 index 000000000000..ad7fed178439 --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_management_organization_event_threat_detection_custom_module.tf.erb @@ -0,0 +1,19 @@ +resource "google_scc_management_organization_event_threat_detection_custom_module" "<%= ctx[:primary_resource_id] %>" { + organization = "<%= ctx[:test_env_vars]['org_id'] %>" + location = "global" + display_name = "<%= ctx[:vars]['display_name'] %>" + 
enablement_state = "ENABLED" + type = "<%= ctx[:vars]['type'] %>" + description = "My Event Threat Detection Custom Module" + config = jsonencode({ + "metadata": { + "severity": "LOW", + "description": "Flagged by Forcepoint as malicious", + "recommendation": "Contact the owner of the relevant project." + }, + "ips": [ + "192.0.2.1", + "192.0.2.0/24" + ] + }) +} diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt index 1ca3c967249c..8360ae4c1224 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt @@ -616,6 +616,11 @@ var ServicesListBeta = mapOf( "displayName" to "Securitycenter", "path" to "./google-beta/services/securitycenter" ), + "securitycentermanagement" to mapOf( + "name" to "securitycentermanagement", + "displayName" to "Securitycentermanagement", + "path" to "./google-beta/services/securitycentermanagement" + ), "securityposture" to mapOf( "name" to "securityposture", "displayName" to "Securityposture", diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt index 64b7c62fe060..b29b26b26855 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt @@ -611,6 +611,11 @@ var ServicesListGa = mapOf( "displayName" to "Securitycenter", "path" to "./google/services/securitycenter" ), + "securitycentermanagement" to mapOf( + "name" to "securitycentermanagement", + "displayName" to "Securitycentermanagement", + "path" to "./google/services/securitycentermanagement" + ), "securityposture" to mapOf( "name" to "securityposture", "displayName" to "Securityposture", diff --git 
a/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_event_threat_detection_custom_module_test.go b/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_event_threat_detection_custom_module_test.go new file mode 100644 index 000000000000..1f6354341093 --- /dev/null +++ b/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_event_threat_detection_custom_module_test.go @@ -0,0 +1,138 @@ +package securitycentermanagement_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccSecurityCenterManagementOrganizationEventThreatDetectionCustomModule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "location": "global", + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccSecurityCenterManagementOrganizationEventThreatDetectionCustomModuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterManagementOrganizationEventThreatDetectionCustomModule__sccOrganizationCustomModuleExample(context), + }, + { + ResourceName: "google_scc_management_organization_event_threat_detection_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"organization", "location"}, + }, + { + Config: 
testAccSecurityCenterManagementOrganizationEventThreatDetectionCustomModule_sccOrganizationCustomModuleUpdate(context), + }, + { + ResourceName: "google_scc_management_organization_event_threat_detection_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"organization", "location"}, + }, + }, + }) +} + +func testAccSecurityCenterManagementOrganizationEventThreatDetectionCustomModule__sccOrganizationCustomModuleExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_management_organization_event_threat_detection_custom_module" "example" { + organization = "%{org_id}" + location = "%{location}" + display_name = "tf_test_custom_module%{random_suffix}" + enablement_state = "ENABLED" + type = "CONFIGURABLE_BAD_IP" + config = < Date: Tue, 4 Jun 2024 13:57:38 -0500 Subject: [PATCH 050/356] compute target pool sweeper (#10871) --- .../resource_compute_target_pool_sweeper.go | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 mmv1/third_party/terraform/services/compute/resource_compute_target_pool_sweeper.go diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_target_pool_sweeper.go b/mmv1/third_party/terraform/services/compute/resource_compute_target_pool_sweeper.go new file mode 100644 index 000000000000..6f14e5c8a43b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/resource_compute_target_pool_sweeper.go @@ -0,0 +1,66 @@ +package compute + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +// This will sweep GCE Target Pool resources +func init() { + sweeper.AddTestSweepers("ComputeTargetPool", testSweepTargetPool) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepTargetPool(region string) error { + resourceName := "ComputeTargetPool" + 
log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + found, err := config.NewComputeClient(config.UserAgent).TargetPools.AggregatedList(config.Project).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request: %s", err) + return nil + } + + // log.Printf("cam here") + // log.Printf("%+v", found) + + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for zone, itemList := range found.Items { + for _, tp := range itemList.TargetPools { + if !sweeper.IsSweepableTestResource(tp.Name) { + nonPrefixCount++ + continue + } + + // Don't wait on operations as we may have a lot to delete + _, err := config.NewComputeClient(config.UserAgent).TargetPools.Delete(config.Project, tpgresource.GetResourceNameFromSelfLink(zone), tp.Name).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting %s resource %s : %s", resourceName, tp.Name, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, tp.Name) + } + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} From b98e4f36358aa41aafd809a31eb81d3e54f8edc7 Mon Sep 17 00:00:00 2001 From: Obada Alabbadi <76101898+obada-ab@users.noreply.github.com> Date: Tue, 4 Jun 2024 21:18:27 +0200 Subject: [PATCH 051/356] Minor contribution guide doc fixes (#10869) --- docs/content/develop/custom-code.md | 2 +- docs/content/develop/field-reference.md | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/docs/content/develop/custom-code.md 
b/docs/content/develop/custom-code.md index 5e2ed2bfdc6e..4de4ca983a77 100644 --- a/docs/content/develop/custom-code.md +++ b/docs/content/develop/custom-code.md @@ -112,7 +112,7 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode ``` -Use `custom_code.decoder` to inject code that modifies the data that will be sent in the API request. This is useful if the API expects the data to be in a significantly different structure than Terraform does - for example, if the API returns the entire object nested under a key, or uses a different name for a field in the response than in the request. The decoder will run _before_ any [`custom_flatten`]({{< ref "#custom_flatten" >}}) code. +Use `custom_code.decoder` to inject code that modifies the data received from an API response. This is useful if the API returns data in a significantly different structure than what Terraform expects - for example, if the API returns the entire object nested under a key, or uses a different name for a field in the response than in the request. The decoder will run _before_ any [`custom_flatten`]({{< ref "#custom_flatten" >}}) code. The decoder code will be wrapped in a function like: diff --git a/docs/content/develop/field-reference.md b/docs/content/develop/field-reference.md index f82ab33b65b3..daa35b6afa8a 100644 --- a/docs/content/develop/field-reference.md +++ b/docs/content/develop/field-reference.md @@ -285,6 +285,16 @@ understand how the resource maps to the underlying API. api_name: 'otherFieldName' ``` +### `url_param_only` +If true, the field is not sent in the resource body, and the provider does +not read the field value from the API response. If unset or false, the field +is sent in the resource body, and the provider reads the field value from the +API response.
+ +```yaml +url_param_only: true +``` + ## `Enum` properties ### `values` From 2adb396567cb3a607b5cfb0c6b1a2f90469bb8ce Mon Sep 17 00:00:00 2001 From: delimaneto <167232526+delimaneto@users.noreply.github.com> Date: Tue, 4 Jun 2024 19:23:03 +0000 Subject: [PATCH 052/356] Add support for `google_compute_region_commitment` to TGC (#10868) --- mmv1/provider/terraform_tgc.rb | 4 +- mmv1/templates/tgc/resource_converters.go.erb | 1 + mmv1/third_party/tgc/commitment.go | 326 ++++++++++++++++++ ...mple_google_compute_region_commitment.json | 38 ++ ...xample_google_compute_region_commitment.tf | 30 ++ 5 files changed, 398 insertions(+), 1 deletion(-) create mode 100644 mmv1/third_party/tgc/commitment.go create mode 100644 mmv1/third_party/tgc/tests/data/example_google_compute_region_commitment.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_compute_region_commitment.tf diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index 6355a3fdd9aa..6d48122f5c41 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -315,7 +315,9 @@ def copy_common_files(output_folder, generate_code, _generate_docs) ['converters/google/resources/compute_target_pool.go', 'third_party/tgc/compute_target_pool.go'], ['converters/google/resources/dataproc_cluster.go', - 'third_party/tgc/dataproc_cluster.go'] + 'third_party/tgc/dataproc_cluster.go'], + ['converters/google/resources/commitment.go', + 'third_party/tgc/commitment.go'] ]) end diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index e5068b51c248..fecfc286418c 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -65,6 +65,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_compute_target_https_proxy": {compute.ResourceConverterComputeTargetHttpsProxy()}, "google_compute_target_ssl_proxy": 
{compute.ResourceConverterComputeTargetSslProxy()}, "google_compute_target_pool": {resourceConverterComputeTargetPool()}, + "google_compute_region_commitment": {resourceConverterCommitment()}, "google_dataflow_job": {resourceDataflowJob()}, "google_dataproc_autoscaling_policy": {dataproc.ResourceConverterDataprocAutoscalingPolicy()}, "google_dataproc_cluster": {resourceConverterDataprocCluster()}, diff --git a/mmv1/third_party/tgc/commitment.go b/mmv1/third_party/tgc/commitment.go new file mode 100644 index 000000000000..26bb7d47de5d --- /dev/null +++ b/mmv1/third_party/tgc/commitment.go @@ -0,0 +1,326 @@ +package google + +import ( + "reflect" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const commitmentAssetType string = "compute.googleapis.com/Commitment" + +func resourceConverterCommitment() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: commitmentAssetType, + Convert: GetCommitmentCaiObject, + } +} + +func GetCommitmentCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//compute.googleapis.com/projects/{{project}}/regions/{{region}}/commitments") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetCommitmentApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: commitmentAssetType, + Resource: &cai.AssetResource{ + Version: "v1", + DiscoveryDocumentURI: "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + DiscoveryName: "Commitment", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetCommitmentApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := 
make(map[string]interface{}) + + nameProp, err := expandCommitmentName(d.Get("name"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + planProp, err := expandCommitmentPlan(d.Get("plan"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("plan"); !tpgresource.IsEmptyValue(reflect.ValueOf(planProp)) && (ok || !reflect.DeepEqual(v, planProp)) { + obj["plan"] = planProp + } + + descriptionProp, err := expandCommitmentDescription(d.Get("description"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + resourcesProp, err := expandCommitmentResources(d.Get("resources"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("resources"); !tpgresource.IsEmptyValue(reflect.ValueOf(resourcesProp)) && (ok || !reflect.DeepEqual(v, resourcesProp)) { + obj["resources"] = resourcesProp + } + + typeProp, err := expandCommitmentType(d.Get("type"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + obj["type"] = typeProp + } + + categoryProp, err := expandCommitmentCategory(d.Get("category"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("category"); !tpgresource.IsEmptyValue(reflect.ValueOf(categoryProp)) && (ok || !reflect.DeepEqual(v, categoryProp)) { + obj["category"] = categoryProp + } + + licenseResourceProp, err := expandCommitmentLicenseResource(d.Get("license_resource"), d, config) + if err != nil { + return nil, err + } else if v, ok := 
d.GetOkExists("license_resource"); !tpgresource.IsEmptyValue(reflect.ValueOf(licenseResourceProp)) && (ok || !reflect.DeepEqual(v, licenseResourceProp)) { + obj["licenseResource"] = licenseResourceProp + } + + autoRenewProp, err := expandCommitmentAutoRenew(d.Get("auto_renew"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("auto_renew"); !tpgresource.IsEmptyValue(reflect.ValueOf(autoRenewProp)) && (ok || !reflect.DeepEqual(v, autoRenewProp)) { + obj["autoRenew"] = autoRenewProp + } + + regionProp, err := expandCommitmentRegion(d.Get("region"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + projectProp, err := expandCommitmentProject(d.Get("project"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("project"); !tpgresource.IsEmptyValue(reflect.ValueOf(projectProp)) && (ok || !reflect.DeepEqual(v, projectProp)) { + obj["project"] = projectProp + } + + idProp, err := expandCommitmentId(d.Get("commitment_id"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("commitment_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + + idIdentifierProp, err := expandCommitmentIdIdentifier(d.Get("id"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("id"); !tpgresource.IsEmptyValue(reflect.ValueOf(idIdentifierProp)) && (ok || !reflect.DeepEqual(v, idIdentifierProp)) { + obj["id"] = idIdentifierProp + } + + creationTimestampProp, err := expandCommitmentCreationTimestamp(d.Get("creation_timestamp"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("creation_timestamp"); !tpgresource.IsEmptyValue(reflect.ValueOf(idIdentifierProp)) && (ok || !reflect.DeepEqual(v, 
idIdentifierProp)) { + obj["creationTimestamp"] = creationTimestampProp + } + + statusProp, err := expandCommitmentStatus(d.Get("status"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("status"); !tpgresource.IsEmptyValue(reflect.ValueOf(statusProp)) && (ok || !reflect.DeepEqual(v, statusProp)) { + obj["status"] = statusProp + } + + statusMessageProp, err := expandCommitmentStatusMessage(d.Get("status_message"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("status_message"); !tpgresource.IsEmptyValue(reflect.ValueOf(statusMessageProp)) && (ok || !reflect.DeepEqual(v, statusMessageProp)) { + obj["statusMessage"] = statusMessageProp + } + + startTimestampProp, err := expandCommitmentStartTimestamp(d.Get("start_timestamp"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("start_timestamp"); !tpgresource.IsEmptyValue(reflect.ValueOf(statusMessageProp)) && (ok || !reflect.DeepEqual(v, statusMessageProp)) { + obj["startTimestamp"] = startTimestampProp + } + + endTimestampProp, err := expandCommitmentEndTimestamp(d.Get("end_timestamp"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("end_timestamp"); !tpgresource.IsEmptyValue(reflect.ValueOf(endTimestampProp)) && (ok || !reflect.DeepEqual(v, endTimestampProp)) { + obj["endTimestamp"] = endTimestampProp + } + + selfLinkProp, err := expandCommitmentSelfLink(d.Get("self_link"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("self_link"); !tpgresource.IsEmptyValue(reflect.ValueOf(endTimestampProp)) && (ok || !reflect.DeepEqual(v, endTimestampProp)) { + obj["selfLink"] = selfLinkProp + } + + return obj, nil +} + +func expandCommitmentLicenseResource(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + 
original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedlicense, err := expandCommitmentLicense(original["license"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedlicense); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["license"] = transformedlicense + } + + transformedAmount, err := expandCommitmentAmount(original["amount"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAmount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["amount"] = transformedAmount + } + + transformedCoresPerLicense, err := expandCommitmentCoresPerLicense(original["cores_per_license"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCoresPerLicense); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["coresPerLicense"] = transformedCoresPerLicense + } + + return transformed, nil +} + +func expandCommitmentSelfLink(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentEndTimestamp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentStartTimestamp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentStatusMessage(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentStatus(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentCreationTimestamp(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandCommitmentIdIdentifier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/regions/{{region}}/commitments/{{name}}") + if err != nil { + return nil, err + } + + return v, nil +} + +func expandCommitmentId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentProject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentRegion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentAutoRenew(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentCoresPerLicense(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentLicense(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentCategory(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentResources(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedType, err := expandCommitmentType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = 
transformedType + } + + transformedAmount, err := expandCommitmentAmount(original["amount"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAmount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["amount"] = transformedAmount + } + + transformedAcceleratorType, err := expandCommitmentAcceleratorType(original["accelerator_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAcceleratorType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["acceleratorType"] = transformedAcceleratorType + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandCommitmentAcceleratorType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentAmount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentPlan(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandCommitmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_compute_region_commitment.json b/mmv1/third_party/tgc/tests/data/example_google_compute_region_commitment.json new file mode 100644 index 000000000000..66700f1d2f50 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_compute_region_commitment.json @@ -0,0 +1,38 @@ +[ + { + 
"name": "//compute.googleapis.com/projects/{{.Provider.project}}/regions/us-east1/commitments", + "asset_type": "compute.googleapis.com/Commitment", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "Commitment", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "autoRenew": true, + "category": "MACHINE", + "creationTimestamp": "", + "description": "some description", + "id": "projects/{{.Provider.project}}/regions/us-east1/commitments/my-full-commitment", + "name": "my-full-commitment", + "plan": "THIRTY_SIX_MONTH", + "project": "{{.Provider.project}}", + "region": "us-east1", + "resources": [ + { + "amount": "4", + "type": "VCPU" + }, + { + "amount": "9", + "type": "MEMORY" + } + ], + "type": "MEMORY_OPTIMIZED" + } + }, + "ancestors": [ + "organizations/{{.OrgID}}" + ] + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_compute_region_commitment.tf b/mmv1/third_party/tgc/tests/data/example_google_compute_region_commitment.tf new file mode 100644 index 000000000000..81b4bebb28c8 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_compute_region_commitment.tf @@ -0,0 +1,30 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_compute_region_commitment" "foobar" { + name = "my-full-commitment" + description = "some description" + plan = "THIRTY_SIX_MONTH" + type = "MEMORY_OPTIMIZED" + category = "MACHINE" + auto_renew = true + region = "us-east1" + resources { + type = "VCPU" + amount = "4" + } + resources { + type = "MEMORY" + amount = "9" + } +} \ No newline at end of file From 
d1c1137a1943963d80fb143c34d78851620d18fa Mon Sep 17 00:00:00 2001 From: Matheus Guilherme Souza Aleixo <82680416+matheusaleixo-cit@users.noreply.github.com> Date: Tue, 4 Jun 2024 17:58:41 -0300 Subject: [PATCH 053/356] Added new resource "Project Cloud Armor Tier" (#10698) --- .../compute/ProjectCloudArmorTier.yaml | 73 ++++++++ .../only_remove_from_state.go.erb | 3 + ...pute_project_cloud_armor_tier_basic.tf.erb | 3 + ...roject_cloud_armor_tier_project_set.tf.erb | 17 ++ ...e_compute_project_cloud_armor_tier_test.go | 165 ++++++++++++++++++ 5 files changed, 261 insertions(+) create mode 100644 mmv1/products/compute/ProjectCloudArmorTier.yaml create mode 100644 mmv1/templates/terraform/custom_delete/only_remove_from_state.go.erb create mode 100644 mmv1/templates/terraform/examples/compute_project_cloud_armor_tier_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/compute_project_cloud_armor_tier_project_set.tf.erb create mode 100644 mmv1/third_party/terraform/services/compute/resource_compute_project_cloud_armor_tier_test.go diff --git a/mmv1/products/compute/ProjectCloudArmorTier.yaml b/mmv1/products/compute/ProjectCloudArmorTier.yaml new file mode 100644 index 000000000000..b68e13b09890 --- /dev/null +++ b/mmv1/products/compute/ProjectCloudArmorTier.yaml @@ -0,0 +1,73 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Resource +name: 'ProjectCloudArmorTier' +base_url: 'projects/{{project}}' +create_url: 'projects/{{project}}/setCloudArmorTier' +update_url: 'projects/{{project}}/setCloudArmorTier' +read_query_params: '?fields=cloudArmorTier' +create_verb: :POST +update_verb: :POST +description: | + Sets the Cloud Armor tier of the project. +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Subscribing to Cloud Armor Enterprise': 'https://cloud.google.com/armor/docs/managed-protection-overview#subscribing_to_plus' + api: + 'https://cloud.google.com/compute/docs/reference/rest/v1/projects/setCloudArmorTier' +id_format: 'projects/{{project}}' +import_format: ['projects/{{project}}'] +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'compute_project_cloud_armor_tier_basic' + skip_test: true + primary_resource_id: 'cloud_armor_tier_config' + - !ruby/object:Provider::Terraform::Examples + name: 'compute_project_cloud_armor_tier_project_set' + skip_test: true + primary_resource_id: 'cloud_armor_tier_config' + vars: + project_id: 'your_project_id' + test_env_vars: + org_id: :ORG_ID + billing_account: :BILLING_ACCT +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + kind: 'compute#operation' + path: 'name' + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'targetLink' + status: !ruby/object:Api::OpAsync::Status + path: 'status' + complete: 'DONE' + allowed: + - 'PENDING' + - 'RUNNING' + - 'DONE' + error: !ruby/object:Api::OpAsync::Error + path: 'error/errors' + message: 'message' +custom_code: !ruby/object:Provider::Terraform::CustomCode + custom_delete: templates/terraform/custom_delete/only_remove_from_state.go.erb +properties: + - !ruby/object:Api::Type::Enum + name: 'cloudArmorTier' + required: true + description: | + Managed protection tier to be set. 
+ values: + - :CA_STANDARD + - :CA_ENTERPRISE_PAYGO diff --git a/mmv1/templates/terraform/custom_delete/only_remove_from_state.go.erb b/mmv1/templates/terraform/custom_delete/only_remove_from_state.go.erb new file mode 100644 index 000000000000..0280b5d91c1d --- /dev/null +++ b/mmv1/templates/terraform/custom_delete/only_remove_from_state.go.erb @@ -0,0 +1,3 @@ +log.Printf("[WARNING] Resource [%s] will be only removed from Terraform state, but will be left intact on GCP. %s", d.Id(), userAgent) + +return schema.RemoveFromState(d, meta) \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/compute_project_cloud_armor_tier_basic.tf.erb b/mmv1/templates/terraform/examples/compute_project_cloud_armor_tier_basic.tf.erb new file mode 100644 index 000000000000..7dcf46e41b92 --- /dev/null +++ b/mmv1/templates/terraform/examples/compute_project_cloud_armor_tier_basic.tf.erb @@ -0,0 +1,3 @@ +resource "google_compute_project_cloud_armor_tier" "<%= ctx[:primary_resource_id] %>" { + cloud_armor_tier = "CA_STANDARD" +} diff --git a/mmv1/templates/terraform/examples/compute_project_cloud_armor_tier_project_set.tf.erb b/mmv1/templates/terraform/examples/compute_project_cloud_armor_tier_project_set.tf.erb new file mode 100644 index 000000000000..16122a86a963 --- /dev/null +++ b/mmv1/templates/terraform/examples/compute_project_cloud_armor_tier_project_set.tf.erb @@ -0,0 +1,17 @@ +resource "google_project" "project" { + project_id = "<%= ctx[:vars]['project_id'] %>" + name = "<%= ctx[:vars]['project_id'] %>" + org_id = "<%= ctx[:test_env_vars]['org_id'] %>" + billing_account = "<%= ctx[:test_env_vars]['billing_account'] %>" +} + +resource "google_project_service" "compute" { + project = google_project.project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_project_cloud_armor_tier" "<%= ctx[:primary_resource_id] %>" { + project = google_project.project.project_id + cloud_armor_tier = "CA_STANDARD" + depends_on = 
[google_project_service.compute] +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_project_cloud_armor_tier_test.go b/mmv1/third_party/terraform/services/compute/resource_compute_project_cloud_armor_tier_test.go new file mode 100644 index 000000000000..35e7b03b881b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/resource_compute_project_cloud_armor_tier_test.go @@ -0,0 +1,165 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccComputeProjectCloudArmorTier_basic(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeProject_cloudArmorTier_standard(), + }, + { + ResourceName: "google_compute_project_cloud_armor_tier.cloud_armor_tier_config", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeProjectCloudArmorTier_modify(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeProject_cloudArmorTier_standard(), + }, + { + ResourceName: "google_compute_project_cloud_armor_tier.cloud_armor_tier_config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeProject_cloudArmorTier_enterprise_paygo(), + }, + { + ResourceName: "google_compute_project_cloud_armor_tier.cloud_armor_tier_config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeProject_cloudArmorTier_standard(), + }, + { + ResourceName: 
"google_compute_project_cloud_armor_tier.cloud_armor_tier_config", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeProjectCloudArmorTier_withProjectSet(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org": envvar.GetTestOrgFromEnv(t), + "billingId": envvar.GetTestBillingAccountFromEnv(t), + "projectID": fmt.Sprintf("tf-test-%d", acctest.RandInt(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeProject_cloudArmorTier_withProjectSet_standard(context), + }, + { + ResourceName: "google_compute_project_cloud_armor_tier.cloud_armor_tier_config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeProject_cloudArmorTier_withProjectSet_enterprise_paygo(context), + }, + { + ResourceName: "google_compute_project_cloud_armor_tier.cloud_armor_tier_config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeProject_cloudArmorTier_withProjectSet_standard(context), + }, + { + ResourceName: "google_compute_project_cloud_armor_tier.cloud_armor_tier_config", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeProject_cloudArmorTier_enterprise_paygo() string { + return fmt.Sprintln(` +resource "google_compute_project_cloud_armor_tier" "cloud_armor_tier_config" { + cloud_armor_tier = "CA_ENTERPRISE_PAYGO" +}`) +} + +func testAccComputeProject_cloudArmorTier_standard() string { + return fmt.Sprintln(` +resource "google_compute_project_cloud_armor_tier" "cloud_armor_tier_config" { + cloud_armor_tier = "CA_STANDARD" +}`) +} + +func testAccComputeProject_cloudArmorTier_withProjectSet_enterprise_paygo(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + project_id = "%{projectID}" + name = "%{projectID}" + 
org_id = "%{org}" + billing_account = "%{billingId}" +} + +resource "google_project_service" "compute" { + project = google_project.project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_project_cloud_armor_tier" "cloud_armor_tier_config" { + project = google_project.project.project_id + cloud_armor_tier = "CA_ENTERPRISE_PAYGO" + depends_on = [google_project_service.compute] +} +`, context) +} + +func testAccComputeProject_cloudArmorTier_withProjectSet_standard(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + project_id = "%{projectID}" + name = "%{projectID}" + org_id = "%{org}" + billing_account = "%{billingId}" +} + +resource "google_project_service" "compute" { + project = google_project.project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_project_cloud_armor_tier" "cloud_armor_tier_config" { + project = google_project.project.project_id + cloud_armor_tier = "CA_STANDARD" + depends_on = [google_project_service.compute] +} +`, context) +} From 09408819062fb5f7722c3bc62b00b4a676cca0eb Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Tue, 4 Jun 2024 15:59:11 -0500 Subject: [PATCH 054/356] Go Rewrite - resolve all current diffs (#10860) --- mmv1/api/resource.go | 22 +- mmv1/api/type.go | 14 - .../{go_instance.yaml => go_Instance.yaml} | 75 ++-- mmv1/products/pubsub/go_Schema.yaml | 6 +- mmv1/products/pubsub/go_Subscription.yaml | 330 ++++++++++-------- mmv1/products/pubsub/go_Topic.yaml | 117 +++++-- mmv1/provider/terraform.go | 17 +- .../base_configs/iam_test_file.go.tmpl | 1 - ...sub_subscription_push_cloudstorage.tf.tmpl | 5 +- ...ubscription_push_cloudstorage_avro.tf.tmpl | 7 +- ..._property_documentation.html.markdown.tmpl | 10 +- .../terraform/resource.html.markdown.tmpl | 6 +- mmv1/templates/terraform/update_mask.go.tmpl | 10 +- mmv1/third_party/terraform/go.mod | 2 +- 14 files changed, 367 insertions(+), 255 deletions(-) rename 
mmv1/products/datafusion/{go_instance.yaml => go_Instance.yaml} (79%) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 2dab965b708c..512a4bb5439c 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -14,6 +14,7 @@ package api import ( "fmt" + "maps" "regexp" "strings" @@ -1282,15 +1283,28 @@ func CompareByName(a, b *Type) int { return strings.Compare(a.Name, b.Name) } -func (r Resource) GetPropertyUpdateMasksGroups() map[string][]string { +func (r Resource) GetPropertyUpdateMasksGroupKeys(properties []*Type) []string { + keys := []string{} + for _, prop := range properties { + if prop.FlattenObject { + k := r.GetPropertyUpdateMasksGroupKeys(prop.Properties) + keys = append(keys, k...) + } else { + keys = append(keys, google.Underscore(prop.Name)) + } + } + return keys +} + +func (r Resource) GetPropertyUpdateMasksGroups(properties []*Type, maskPrefix string) map[string][]string { maskGroups := map[string][]string{} - for _, prop := range r.AllUserProperties() { + for _, prop := range properties { if prop.FlattenObject { - prop.GetNestedPropertyUpdateMasksGroups(maskGroups, prop.ApiName) + maps.Copy(maskGroups, r.GetPropertyUpdateMasksGroups(prop.Properties, prop.ApiName)) } else if len(prop.UpdateMaskFields) > 0 { maskGroups[google.Underscore(prop.Name)] = prop.UpdateMaskFields } else { - maskGroups[google.Underscore(prop.Name)] = []string{prop.ApiName} + maskGroups[google.Underscore(prop.Name)] = []string{maskPrefix + prop.ApiName} } } return maskGroups diff --git a/mmv1/api/type.go b/mmv1/api/type.go index 7cd9cccc74b1..67b110c936c4 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -1292,20 +1292,6 @@ func (t Type) NamespaceProperty() string { // // end -// new utility function for recursive calls to GetPropertyUpdateMasksGroups - -func (t Type) GetNestedPropertyUpdateMasksGroups(maskGroups map[string][]string, maskPrefix string) { - for _, prop := range t.AllProperties() { - if prop.FlattenObject { - 
prop.GetNestedPropertyUpdateMasksGroups(maskGroups, prop.ApiName) - } else if len(prop.UpdateMaskFields) > 0 { - maskGroups[google.Underscore(prop.Name)] = prop.UpdateMaskFields - } else { - maskGroups[google.Underscore(prop.Name)] = []string{maskPrefix + prop.ApiName} - } - } -} - func (t Type) CustomTemplate(templatePath string, appendNewline bool) string { return resource.ExecuteTemplate(&t, templatePath, appendNewline) } diff --git a/mmv1/products/datafusion/go_instance.yaml b/mmv1/products/datafusion/go_Instance.yaml similarity index 79% rename from mmv1/products/datafusion/go_instance.yaml rename to mmv1/products/datafusion/go_Instance.yaml index fecffa4352c2..fd14261d37be 100644 --- a/mmv1/products/datafusion/go_instance.yaml +++ b/mmv1/products/datafusion/go_Instance.yaml @@ -90,7 +90,8 @@ examples: parameters: - name: 'region' type: String - description: "The region of the Data Fusion instance." + description: | + The region of the Data Fusion instance. url_param_only: true required: false immutable: true @@ -99,14 +100,16 @@ parameters: properties: - name: 'name' type: String - description: "The ID of the instance or a fully qualified identifier for the instance." + description: | + The ID of the instance or a fully qualified identifier for the instance. required: true immutable: true custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' custom_expand: 'templates/terraform/custom_expand/go/shortname_to_url.go.tmpl' - name: 'description' type: String - description: "An optional description of the instance." + description: | + An optional description of the instance. immutable: true - name: 'type' type: Enum @@ -129,13 +132,16 @@ properties: - 'DEVELOPER' - name: 'enableStackdriverLogging' type: Boolean - description: "Option to enable Stackdriver Logging." + description: | + Option to enable Stackdriver Logging. - name: 'enableStackdriverMonitoring' type: Boolean - description: "Option to enable Stackdriver Monitoring." 
+ description: | + Option to enable Stackdriver Monitoring. - name: 'enableRbac' type: Boolean - description: "Option to enable granular role-based access control." + description: | + Option to enable granular role-based access control. - name: 'labels' type: KeyValueLabels description: | @@ -144,7 +150,8 @@ properties: immutable: false - name: 'options' type: KeyValuePairs - description: "Map of additional options used to configure the behavior of Data Fusion instance." + description: | + Map of additional options used to configure the behavior of Data Fusion instance. immutable: true default_from_api: true diff_suppress_func: 'instanceOptionsDiffSuppress' @@ -178,19 +185,23 @@ properties: - 'RESTARTING' - name: 'stateMessage' type: String - description: "Additional information about the current state of this Data Fusion instance if available." + description: | + Additional information about the current state of this Data Fusion instance if available. output: true - name: 'serviceEndpoint' type: String - description: "Endpoint on which the Data Fusion UI and REST APIs are accessible." + description: | + Endpoint on which the Data Fusion UI and REST APIs are accessible. output: true - name: 'version' type: String - description: "Current version of the Data Fusion." + description: | + Current version of the Data Fusion. default_from_api: true - name: 'serviceAccount' type: String - description: "Service account which will be used to access resources in the customer project." + description: | + Service account which will be used to access resources in the customer project. min_version: 'beta' output: true deprecation_message: '`service_account` is deprecated and will be removed in a future major release. Instead, use `tenant_project_id` to extract the tenant project ID.' 
@@ -203,19 +214,23 @@ properties: immutable: true - name: 'dataprocServiceAccount' type: String - description: "User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines." + description: | + User-managed service account to set on Dataproc when Cloud Data Fusion creates Dataproc to run data processing pipelines. immutable: true - name: 'tenantProjectId' type: String - description: "The name of the tenant project." + description: | + The name of the tenant project. output: true - name: 'gcsBucket' type: String - description: "Cloud Storage bucket generated by Data Fusion in the customer project." + description: | + Cloud Storage bucket generated by Data Fusion in the customer project. output: true - name: 'networkConfig' type: NestedObject - description: "Network configuration options. These are required when a private Data Fusion instance is to be created." + description: | + Network configuration options. These are required when a private Data Fusion instance is to be created. immutable: true properties: - name: 'ipAllocation' @@ -235,42 +250,51 @@ properties: immutable: true - name: 'zone' type: String - description: "Name of the zone in which the Data Fusion instance will be created. Only DEVELOPER instances use this field." + description: | + Name of the zone in which the Data Fusion instance will be created. Only DEVELOPER instances use this field. immutable: true default_from_api: true - name: 'displayName' type: String - description: "Display name for an instance." + description: | + Display name for an instance. immutable: true - name: 'apiEndpoint' type: String - description: "Endpoint on which the REST APIs is accessible." + description: | + Endpoint on which the REST APIs is accessible. output: true - name: 'p4ServiceAccount' type: String - description: "P4 service account for the customer project." + description: | + P4 service account for the customer project. 
output: true - name: 'cryptoKeyConfig' type: NestedObject - description: "The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature." + description: | + The crypto key configuration. This field is used by the Customer-Managed Encryption Keys (CMEK) feature. immutable: true properties: - name: 'keyReference' type: String - description: "The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of projects/*/locations/*/keyRings/*/cryptoKeys/*." + description: | + The name of the key which is used to encrypt/decrypt customer data. For key in Cloud KMS, the key should be in the format of projects/*/locations/*/keyRings/*/cryptoKeys/*. required: true immutable: true - name: 'eventPublishConfig' type: NestedObject - description: "Option to enable and pass metadata for event publishing." + description: | + Option to enable and pass metadata for event publishing. properties: - name: 'enabled' type: Boolean - description: "Option to enable Event Publishing." + description: | + Option to enable Event Publishing. required: true - name: 'topic' type: String - description: "The resource name of the Pub/Sub topic. Format: projects/{projectId}/topics/{topic_id}" + description: | + The resource name of the Pub/Sub topic. Format: projects/{projectId}/topics/{topic_id} required: true immutable: true - name: 'accelerators' @@ -284,7 +308,8 @@ properties: properties: - name: 'acceleratorType' type: Enum - description: "The type of an accelator for a CDF instance." + description: | + The type of an accelator for a CDF instance. 
required: true enum_values: - 'CDC' diff --git a/mmv1/products/pubsub/go_Schema.yaml b/mmv1/products/pubsub/go_Schema.yaml index c8642d31b3bc..46ee0533179b 100644 --- a/mmv1/products/pubsub/go_Schema.yaml +++ b/mmv1/products/pubsub/go_Schema.yaml @@ -58,7 +58,9 @@ examples: parameters: - name: 'name' type: String - description: "The ID to use for the schema, which will become the final component of the schema's resource name." + description: + The ID to use for the schema, which will become the final component of the + schema's resource name. required: true immutable: true diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' @@ -67,7 +69,7 @@ parameters: properties: - name: 'type' type: Enum - description: "The type of the schema definition" + description: The type of the schema definition default_value: TYPE_UNSPECIFIED enum_values: - 'TYPE_UNSPECIFIED' diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index e21927fbab71..3ee6f3eb70fa 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -37,7 +37,9 @@ timeouts: async: type: 'PollAsync' check_response_func_existence: 'transport_tpg.PollCheckForExistence' + check_response_func_absence: 'transport_tpg.PollCheckForAbsence' suppress_error: true + target_occurrences: 1 actions: ['create'] custom_code: constants: 'templates/terraform/constants/go/subscription.go.tmpl' @@ -89,7 +91,7 @@ parameters: properties: - name: 'name' type: String - description: "Name of the subscription." + description: 'Name of the subscription.' required: true immutable: true custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' @@ -108,75 +110,94 @@ properties: imports: 'name' - name: 'labels' type: KeyValueLabels - description: "A set of key/value label pairs to assign to this Subscription. -" + description: | + A set of key/value label pairs to assign to this Subscription. 
immutable: false - name: 'bigqueryConfig' type: NestedObject - description: "If delivery to BigQuery is used with this subscription, this field is used to configure it. -Either pushConfig, bigQueryConfig or cloudStorageConfig can be set, but not combined. -If all three are empty, then the subscriber will pull and ack messages using API methods." + description: | + If delivery to BigQuery is used with this subscription, this field is used to configure it. + Either pushConfig, bigQueryConfig or cloudStorageConfig can be set, but not combined. + If all three are empty, then the subscriber will pull and ack messages using API methods. conflicts: - push_config - cloud_storage_config properties: - name: 'table' type: String - description: "The name of the table to which to write data, of the form {projectId}:{datasetId}.{tableId}" + description: | + The name of the table to which to write data, of the form {projectId}:{datasetId}.{tableId} required: true - name: 'useTopicSchema' type: Boolean - description: "When true, use the topic's schema as the columns to write to in BigQuery, if it exists. - Only one of use_topic_schema and use_table_schema can be set." - conflicts: - - use_table_schema + description: | + When true, use the topic's schema as the columns to write to in BigQuery, if it exists. + Only one of use_topic_schema and use_table_schema can be set. + # Not present in Ruby version + # conflicts: + # - use_table_schema - name: 'useTableSchema' type: Boolean - description: "When true, use the BigQuery table's schema as the columns to write to in BigQuery. Messages - must be published in JSON format. Only one of use_topic_schema and use_table_schema can be set." - conflicts: - - use_topic_schema + description: | + When true, use the BigQuery table's schema as the columns to write to in BigQuery. Messages + must be published in JSON format. Only one of use_topic_schema and use_table_schema can be set. 
+ # Not present in Ruby version + # conflicts: + # - use_topic_schema - name: 'writeMetadata' type: Boolean - description: "When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. - The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column." + description: | + When true, write the subscription name, messageId, publishTime, attributes, and orderingKey to additional columns in the table. + The subscription name, messageId, and publishTime fields are put in their own columns while all other message properties (other than data) are written to a JSON object in the attributes column. - name: 'dropUnknownFields' type: Boolean - description: "When true and use_topic_schema or use_table_schema is true, any fields that are a part of the topic schema or message schema that - are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync - and any messages with extra fields are not written and remain in the subscription's backlog." + description: | + When true and use_topic_schema or use_table_schema is true, any fields that are a part of the topic schema or message schema that + are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync + and any messages with extra fields are not written and remain in the subscription's backlog. - name: 'cloudStorageConfig' type: NestedObject - description: "If delivery to Cloud Storage is used with this subscription, this field is used to configure it. -Either pushConfig, bigQueryConfig or cloudStorageConfig can be set, but not combined. -If all three are empty, then the subscriber will pull and ack messages using API methods." 
+ description: | + If delivery to Cloud Storage is used with this subscription, this field is used to configure it. + Either pushConfig, bigQueryConfig or cloudStorageConfig can be set, but not combined. + If all three are empty, then the subscriber will pull and ack messages using API methods. conflicts: - push_config - bigquery_config properties: - name: 'bucket' type: String - description: "User-provided name for the Cloud Storage bucket. The bucket must be created by the user. The bucket name must be without any prefix like 'gs://'." + description: | + User-provided name for the Cloud Storage bucket. The bucket must be created by the user. The bucket name must be without any prefix like "gs://". required: true - name: 'filenamePrefix' type: String - description: "User-provided prefix for Cloud Storage filename." + description: | + User-provided prefix for Cloud Storage filename. - name: 'filenameSuffix' type: String - description: "User-provided suffix for Cloud Storage filename. Must not end in '/'." + description: | + User-provided suffix for Cloud Storage filename. Must not end in "/". + - name: 'filenameDatetimeFormat' + type: String + description: | + User-provided format string specifying how to represent datetimes in Cloud Storage filenames. - name: 'maxDuration' type: String - description: "The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. - May not exceed the subscription's acknowledgement deadline. - A duration in seconds with up to nine fractional digits, ending with 's'. Example: '3.5s'." + description: | + The maximum duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. + May not exceed the subscription's acknowledgement deadline. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". 
default_value: 300s - name: 'maxBytes' type: Integer - description: "The maximum bytes that can be written to a Cloud Storage file before a new file is created. Min 1 KB, max 10 GiB. - The maxBytes limit may be exceeded in cases where messages are larger than the limit." + description: | + The maximum bytes that can be written to a Cloud Storage file before a new file is created. Min 1 KB, max 10 GiB. + The maxBytes limit may be exceeded in cases where messages are larger than the limit. - name: 'state' type: Enum - description: "An output-only field that indicates whether or not the subscription can receive messages." + description: | + An output-only field that indicates whether or not the subscription can receive messages. output: true enum_values: - 'ACTIVE' @@ -184,83 +205,93 @@ If all three are empty, then the subscriber will pull and ack messages using API - 'NOT_FOUND' - name: 'avroConfig' type: NestedObject - description: "If set, message data will be written to Cloud Storage in Avro format." + description: | + If set, message data will be written to Cloud Storage in Avro format. properties: - name: 'writeMetadata' type: Boolean - description: "When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output." + description: | + When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. - name: 'pushConfig' type: NestedObject - description: "If push delivery is used with this subscription, this field is used to -configure it. An empty pushConfig signifies that the subscriber will -pull and ack messages using API methods." + description: | + If push delivery is used with this subscription, this field is used to + configure it. An empty pushConfig signifies that the subscriber will + pull and ack messages using API methods. 
conflicts: - bigquery_config - cloud_storage_config properties: - name: 'oidcToken' type: NestedObject - description: "If specified, Pub/Sub will generate and attach an OIDC JWT token as - an Authorization header in the HTTP request for every pushed message." + description: | + If specified, Pub/Sub will generate and attach an OIDC JWT token as + an Authorization header in the HTTP request for every pushed message. properties: - name: 'serviceAccountEmail' type: String - description: "Service account email to be used for generating the OIDC token. - The caller (for subscriptions.create, subscriptions.patch, and - subscriptions.modifyPushConfig RPCs) must have the - iam.serviceAccounts.actAs permission for the service account." + description: | + Service account email to be used for generating the OIDC token. + The caller (for subscriptions.create, subscriptions.patch, and + subscriptions.modifyPushConfig RPCs) must have the + iam.serviceAccounts.actAs permission for the service account. required: true - name: 'audience' type: String - description: "Audience to be used when generating OIDC token. The audience claim - identifies the recipients that the JWT is intended for. The audience - value is a single case-sensitive string. Having multiple values (array) - for the audience field is not supported. More info about the OIDC JWT - token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 - Note: if not specified, the Push endpoint URL will be used." + description: | + Audience to be used when generating OIDC token. The audience claim + identifies the recipients that the JWT is intended for. The audience + value is a single case-sensitive string. Having multiple values (array) + for the audience field is not supported. More info about the OIDC JWT + token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 + Note: if not specified, the Push endpoint URL will be used. 
- name: 'pushEndpoint' type: String - description: "A URL locating the endpoint to which messages should be pushed. - For example, a Webhook endpoint might use - 'https://example.com/push'." + description: | + A URL locating the endpoint to which messages should be pushed. + For example, a Webhook endpoint might use + "https://example.com/push". required: true - name: 'attributes' type: KeyValuePairs - description: "Endpoint configuration attributes. + description: | + Endpoint configuration attributes. - Every endpoint has a set of API supported attributes that can - be used to control different aspects of the message delivery. + Every endpoint has a set of API supported attributes that can + be used to control different aspects of the message delivery. - The currently supported attribute is x-goog-version, which you - can use to change the format of the pushed message. This - attribute indicates the version of the data expected by - the endpoint. This controls the shape of the pushed message - (i.e., its fields and metadata). The endpoint version is - based on the version of the Pub/Sub API. + The currently supported attribute is x-goog-version, which you + can use to change the format of the pushed message. This + attribute indicates the version of the data expected by + the endpoint. This controls the shape of the pushed message + (i.e., its fields and metadata). The endpoint version is + based on the version of the Pub/Sub API. - If not present during the subscriptions.create call, - it will default to the version of the API used to make - such call. If not present during a subscriptions.modifyPushConfig - call, its value will not be changed. subscriptions.get - calls will always return a valid version, even if the - subscription was created without this attribute. + If not present during the subscriptions.create call, + it will default to the version of the API used to make + such call. 
If not present during a subscriptions.modifyPushConfig + call, its value will not be changed. subscriptions.get + calls will always return a valid version, even if the + subscription was created without this attribute. - The possible values for this attribute are: + The possible values for this attribute are: - - v1beta1: uses the push format defined in the v1beta1 Pub/Sub API. - - v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API." + - v1beta1: uses the push format defined in the v1beta1 Pub/Sub API. + - v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API. diff_suppress_func: 'tpgresource.IgnoreMissingKeyInMap("x-goog-version")' - name: 'noWrapper' type: NestedObject - description: "When set, the payload to the push endpoint is not wrapped.Sets the - `data` field as the HTTP body for delivery." + description: | + When set, the payload to the push endpoint is not wrapped.Sets the + `data` field as the HTTP body for delivery. custom_flatten: 'templates/terraform/custom_flatten/go/pubsub_no_wrapper_write_metadata_flatten.go.tmpl' properties: - name: 'writeMetadata' type: Boolean - description: "When true, writes the Pub/Sub message metadata to - `x-goog-pubsub-:` headers of the HTTP request. Writes the - Pub/Sub message attributes to `:` headers of the HTTP request." + description: | + When true, writes the Pub/Sub message metadata to + `x-goog-pubsub-:` headers of the HTTP request. Writes the + Pub/Sub message attributes to `:` headers of the HTTP request. required: true send_empty_value: true - name: 'ackDeadlineSeconds' @@ -287,124 +318,137 @@ pull and ack messages using API methods." default_from_api: true - name: 'messageRetentionDuration' type: String - description: "How long to retain unacknowledged messages in the subscription's -backlog, from the moment a message is published. 
If -retain_acked_messages is true, then this also configures the retention -of acknowledged messages, and thus configures how far back in time a -subscriptions.seek can be done. Defaults to 7 days. Cannot be more -than 7 days (`'604800s'`) or less than 10 minutes (`'600s'`). + description: | + How long to retain unacknowledged messages in the subscription's + backlog, from the moment a message is published. If + retain_acked_messages is true, then this also configures the retention + of acknowledged messages, and thus configures how far back in time a + subscriptions.seek can be done. Defaults to 7 days. Cannot be more + than 7 days (`"604800s"`) or less than 10 minutes (`"600s"`). -A duration in seconds with up to nine fractional digits, terminated -by 's'. Example: `'600.5s'`." + A duration in seconds with up to nine fractional digits, terminated + by 's'. Example: `"600.5s"`. default_value: 604800s - name: 'retainAckedMessages' type: Boolean - description: "Indicates whether to retain acknowledged messages. If `true`, then -messages are not expunged from the subscription's backlog, even if -they are acknowledged, until they fall out of the -messageRetentionDuration window." + description: | + Indicates whether to retain acknowledged messages. If `true`, then + messages are not expunged from the subscription's backlog, even if + they are acknowledged, until they fall out of the + messageRetentionDuration window. - name: 'expirationPolicy' type: NestedObject - description: "A policy that specifies the conditions for this subscription's expiration. -A subscription is considered active as long as any connected subscriber -is successfully consuming messages from the subscription or is issuing -operations on the subscription. If expirationPolicy is not set, a default -policy with ttl of 31 days will be used. If it is set but ttl is '', the -resource never expires. The minimum allowed value for expirationPolicy.ttl -is 1 day." 
+ description: | + A policy that specifies the conditions for this subscription's expiration. + A subscription is considered active as long as any connected subscriber + is successfully consuming messages from the subscription or is issuing + operations on the subscription. If expirationPolicy is not set, a default + policy with ttl of 31 days will be used. If it is set but ttl is "", the + resource never expires. The minimum allowed value for expirationPolicy.ttl + is 1 day. default_from_api: true send_empty_value: true allow_empty_object: true properties: - name: 'ttl' type: String - description: "Specifies the 'time-to-live' duration for an associated resource. The - resource expires if it is not active for a period of ttl. - If ttl is set to '', the associated resource never expires. - A duration in seconds with up to nine fractional digits, terminated by 's'. - Example - '3.5s'." + description: | + Specifies the "time-to-live" duration for an associated resource. The + resource expires if it is not active for a period of ttl. + If ttl is set to "", the associated resource never expires. + A duration in seconds with up to nine fractional digits, terminated by 's'. + Example - "3.5s". required: true diff_suppress_func: 'comparePubsubSubscriptionExpirationPolicy' - name: 'filter' type: String - description: "The subscription only delivers the messages that match the filter. -Pub/Sub automatically acknowledges the messages that don't match the filter. You can filter messages -by their attributes. The maximum length of a filter is 256 bytes. After creating the subscription, -you can't modify the filter." + description: | + The subscription only delivers the messages that match the filter. + Pub/Sub automatically acknowledges the messages that don't match the filter. You can filter messages + by their attributes. The maximum length of a filter is 256 bytes. After creating the subscription, + you can't modify the filter. 
required: false immutable: true - name: 'deadLetterPolicy' type: NestedObject - description: "A policy that specifies the conditions for dead lettering messages in -this subscription. If dead_letter_policy is not set, dead lettering -is disabled. + description: | + A policy that specifies the conditions for dead lettering messages in + this subscription. If dead_letter_policy is not set, dead lettering + is disabled. -The Cloud Pub/Sub service account associated with this subscription's -parent project (i.e., -service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have -permission to Acknowledge() messages on this subscription." + The Cloud Pub/Sub service account associated with this subscription's + parent project (i.e., + service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have + permission to Acknowledge() messages on this subscription. send_empty_value: true properties: - name: 'deadLetterTopic' type: String - description: "The name of the topic to which dead letter messages should be published. - Format is `projects/{project}/topics/{topic}`. + description: | + The name of the topic to which dead letter messages should be published. + Format is `projects/{project}/topics/{topic}`. - The Cloud Pub/Sub service account associated with the enclosing subscription's - parent project (i.e., - service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have - permission to Publish() to this topic. + The Cloud Pub/Sub service account associated with the enclosing subscription's + parent project (i.e., + service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have + permission to Publish() to this topic. - The operation will fail if the topic does not exist. - Users should ensure that there is a subscription attached to this topic - since messages published to a topic with no subscriptions are lost." + The operation will fail if the topic does not exist. 
+ Users should ensure that there is a subscription attached to this topic + since messages published to a topic with no subscriptions are lost. - name: 'maxDeliveryAttempts' type: Integer - description: "The maximum number of delivery attempts for any message. The value must be - between 5 and 100. + description: | + The maximum number of delivery attempts for any message. The value must be + between 5 and 100. - The number of delivery attempts is defined as 1 + (the sum of number of - NACKs and number of times the acknowledgement deadline has been exceeded for the message). + The number of delivery attempts is defined as 1 + (the sum of number of + NACKs and number of times the acknowledgement deadline has been exceeded for the message). - A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that - client libraries may automatically extend ack_deadlines. + A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that + client libraries may automatically extend ack_deadlines. - This field will be honored on a best effort basis. + This field will be honored on a best effort basis. - If this parameter is 0, a default value of 5 is used." + If this parameter is 0, a default value of 5 is used. - name: 'retryPolicy' type: NestedObject - description: "A policy that specifies how Pub/Sub retries message delivery for this subscription. + description: | + A policy that specifies how Pub/Sub retries message delivery for this subscription. -If not set, the default retry policy is applied. This generally implies that messages will be retried as soon as possible for healthy subscribers. -RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message" + If not set, the default retry policy is applied. This generally implies that messages will be retried as soon as possible for healthy subscribers. 
+ RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded events for a given message properties: - name: 'minimumBackoff' type: String - description: "The minimum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. - A duration in seconds with up to nine fractional digits, terminated by 's'. Example: '3.5s'." + description: | + The minimum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 10 seconds. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". default_from_api: true diff_suppress_func: 'tpgresource.DurationDiffSuppress' - name: 'maximumBackoff' type: String - description: "The maximum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds. - A duration in seconds with up to nine fractional digits, terminated by 's'. Example: '3.5s'." + description: | + The maximum delay between consecutive deliveries of a given message. Value should be between 0 and 600 seconds. Defaults to 600 seconds. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". default_from_api: true diff_suppress_func: 'tpgresource.DurationDiffSuppress' - name: 'enableMessageOrdering' type: Boolean - description: "If `true`, messages published with the same orderingKey in PubsubMessage will be delivered to -the subscribers in the order in which they are received by the Pub/Sub system. Otherwise, they -may be delivered in any order." + description: | + If `true`, messages published with the same orderingKey in PubsubMessage will be delivered to + the subscribers in the order in which they are received by the Pub/Sub system. Otherwise, they + may be delivered in any order. 
immutable: true - name: 'enableExactlyOnceDelivery' type: Boolean - description: "If `true`, Pub/Sub provides the following guarantees for the delivery -of a message with a given value of messageId on this Subscriptions': + description: | + If `true`, Pub/Sub provides the following guarantees for the delivery + of a message with a given value of messageId on this Subscriptions': -- The message sent to a subscriber is guaranteed not to be resent before the message's acknowledgement deadline expires. + - The message sent to a subscriber is guaranteed not to be resent before the message's acknowledgement deadline expires. -- An acknowledged message will not be resent to a subscriber. + - An acknowledged message will not be resent to a subscriber. -Note that subscribers may still receive multiple copies of a message when `enable_exactly_once_delivery` -is true if the message was published multiple times by a publisher client. These copies are considered distinct by Pub/Sub and have distinct messageId values" + Note that subscribers may still receive multiple copies of a message when `enable_exactly_once_delivery` + is true if the message was published multiple times by a publisher client. These copies are considered distinct by Pub/Sub and have distinct messageId values diff --git a/mmv1/products/pubsub/go_Topic.yaml b/mmv1/products/pubsub/go_Topic.yaml index 8bf902dd89c2..32c9ce54bce2 100644 --- a/mmv1/products/pubsub/go_Topic.yaml +++ b/mmv1/products/pubsub/go_Topic.yaml @@ -24,6 +24,12 @@ docs: note: | You can retrieve the email of the Google Managed Pub/Sub Service Account used for forwarding by using the `google_project_service_identity` resource. + # PubSub resources don't have operations but are negatively cached + # and eventually consistent. + # Because some users check whether the PubSub resource exists prior + # to applying a new resource, we need to add this PollAsync to GET the + # resource until it exists and the negative cached result goes away. 
+ # Context: hashicorp/terraform-provider-google#4993 base_url: 'projects/{{project}}/topics' create_verb: 'PUT' update_url: 'projects/{{project}}/topics/{{name}}' @@ -39,6 +45,7 @@ async: check_response_func_absence: 'transport_tpg.PollCheckForAbsence' suppress_error: true target_occurrences: 1 + actions: ['create'] iam_policy: method_name_separator: ':' parent_resource_attribute: 'topic' @@ -72,11 +79,15 @@ examples: schema_name: 'example' test_env_vars: project_name: 'PROJECT_NAME' + - name: 'pubsub_topic_ingestion_kinesis' + primary_resource_id: 'example' + vars: + topic_name: 'example-topic' parameters: properties: - name: 'name' type: String - description: "Name of the topic." + description: 'Name of the topic.' required: true immutable: true diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' @@ -84,62 +95,104 @@ properties: custom_expand: 'templates/terraform/custom_expand/go/resource_from_self_link.go.tmpl' - name: 'kmsKeyName' type: String - description: "The resource name of the Cloud KMS CryptoKey to be used to protect access -to messages published on this topic. Your project's PubSub service account -(`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`) must have -`roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. -The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`" + description: | + The resource name of the Cloud KMS CryptoKey to be used to protect access + to messages published on this topic. Your project's PubSub service account + (`service-{{PROJECT_NUMBER}}@gcp-sa-pubsub.iam.gserviceaccount.com`) must have + `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*` - name: 'labels' type: KeyValueLabels - description: "A set of key/value label pairs to assign to this Topic. -" + description: | + A set of key/value label pairs to assign to this Topic. 
immutable: false - name: 'messageStoragePolicy' type: NestedObject - description: "Policy constraining the set of Google Cloud Platform regions where -messages published to the topic may be stored. If not present, then no -constraints are in effect." + description: | + Policy constraining the set of Google Cloud Platform regions where + messages published to the topic may be stored. If not present, then no + constraints are in effect. default_from_api: true properties: - name: 'allowedPersistenceRegions' type: Array - description: "A list of IDs of GCP regions where messages that are published to - the topic may be persisted in storage. Messages published by - publishers running in non-allowed GCP regions (or running outside - of GCP altogether) will be routed for storage in one of the - allowed regions. An empty list means that no regions are allowed, - and is not a valid configuration." + description: | + A list of IDs of GCP regions where messages that are published to + the topic may be persisted in storage. Messages published by + publishers running in non-allowed GCP regions (or running outside + of GCP altogether) will be routed for storage in one of the + allowed regions. An empty list means that no regions are allowed, + and is not a valid configuration. required: true item_type: type: String - name: 'schemaSettings' type: NestedObject - description: "Settings for validating messages published against a schema." + description: | + Settings for validating messages published against a schema. default_from_api: true properties: - name: 'schema' type: String - description: "The name of the schema that messages published should be - validated against. Format is projects/{project}/schemas/{schema}. - The value of this field will be _deleted-schema_ - if the schema has been deleted." + description: | + The name of the schema that messages published should be + validated against. Format is projects/{project}/schemas/{schema}. 
+ The value of this field will be _deleted-schema_ + if the schema has been deleted. required: true - name: 'encoding' type: Enum - description: "The encoding of messages validated against schema." + description: The encoding of messages validated against schema. default_value: ENCODING_UNSPECIFIED enum_values: - 'ENCODING_UNSPECIFIED' - 'JSON' - 'BINARY' - - '' - name: 'messageRetentionDuration' type: String - description: "Indicates the minimum duration to retain a message after it is published -to the topic. If this field is set, messages published to the topic in -the last messageRetentionDuration are always available to subscribers. -For instance, it allows any attached subscription to seek to a timestamp -that is up to messageRetentionDuration in the past. If this field is not -set, message retention is controlled by settings on individual subscriptions. -The rotation period has the format of a decimal number, followed by the -letter `s` (seconds). Cannot be more than 31 days or less than 10 minutes." + description: | + Indicates the minimum duration to retain a message after it is published + to the topic. If this field is set, messages published to the topic in + the last messageRetentionDuration are always available to subscribers. + For instance, it allows any attached subscription to seek to a timestamp + that is up to messageRetentionDuration in the past. If this field is not + set, message retention is controlled by settings on individual subscriptions. + The rotation period has the format of a decimal number, followed by the + letter `s` (seconds). Cannot be more than 31 days or less than 10 minutes. + - name: 'ingestionDataSourceSettings' + type: NestedObject + description: | + Settings for ingestion from a data source into this topic. + properties: + - name: 'awsKinesis' + type: NestedObject + description: | + Settings for ingestion from Amazon Kinesis Data Streams. 
+ properties: + - name: 'streamArn' + type: String + description: | + The Kinesis stream ARN to ingest data from. + required: true + - name: 'consumerArn' + type: String + description: | + The Kinesis consumer ARN to used for ingestion in + Enhanced Fan-Out mode. The consumer must be already + created and ready to be used. + required: true + - name: 'awsRoleArn' + type: String + description: | + AWS role ARN to be used for Federated Identity authentication with + Kinesis. Check the Pub/Sub docs for how to set up this role and the + required permissions that need to be attached to it. + required: true + - name: 'gcpServiceAccount' + type: String + description: | + The GCP service account to be used for Federated Identity authentication + with Kinesis (via a `AssumeRoleWithWebIdentity` call for the provided + role). The `awsRoleArn` must be set up with `accounts.google.com:sub` + equals to this service account number. + required: true diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 80d128e11367..358b4ad9e33a 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -90,22 +90,7 @@ func (t *Terraform) Generate(outputFolder, productPath string, generateCode, gen func (t *Terraform) GenerateObjects(outputFolder string, generateCode, generateDocs bool) { for _, object := range t.Product.Objects { - // TODO Q2: Exclude objects - // if !types.empty? && !types.include?(object.name) - // Google::LOGGER.info "Excluding #{object.name} per user request" - // elsif types.empty? && object.exclude - // Google::LOGGER.info "Excluding #{object.name} per API catalog" - // elsif types.empty? 
&& object.not_in_version?(@version) - // Google::LOGGER.info "Excluding #{object.name} per API version" - // else - // Google::LOGGER.info "Generating #{object.name}" - // # exclude_if_not_in_version must be called in order to filter out - // # beta properties that are nested within GA resources - // object.exclude_if_not_in_version!(@version) - // - // # Make object immutable. - // object.freeze - // object.all_user_properties.each(&:freeze) + object.ExcludeIfNotInVersion(&t.Version) t.GenerateObject(*object, outputFolder, t.TargetVersionName, generateCode, generateDocs) } diff --git a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl index 748e9d1ddcd5..80e05c9afce9 100644 --- a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl +++ b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl @@ -1,4 +1,3 @@ -{{/* <% if hc_downstream */ -}} // Copyright (c) HashiCorp, Inc. 
// SPDX-License-Identifier: MPL-2.0 diff --git a/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage.tf.tmpl b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage.tf.tmpl index e86725487dc4..07543bc458d0 100644 --- a/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage.tf.tmpl @@ -17,11 +17,12 @@ resource "google_pubsub_subscription" "{{$.PrimaryResourceId}}" { filename_prefix = "pre-" filename_suffix = "-%{random_suffix}" - + filename_datetime_format = "YYYY-MM-DD/hh_mm_ssZ" + max_bytes = 1000 max_duration = "300s" } - depends_on = [ + depends_on = [ google_storage_bucket.{{$.PrimaryResourceId}}, google_storage_bucket_iam_member.admin, ] diff --git a/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage_avro.tf.tmpl b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage_avro.tf.tmpl index 273081424975..4d78207e4c9f 100644 --- a/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage_avro.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage_avro.tf.tmpl @@ -17,15 +17,16 @@ resource "google_pubsub_subscription" "{{$.PrimaryResourceId}}" { filename_prefix = "pre-" filename_suffix = "-%{random_suffix}" - + filename_datetime_format = "YYYY-MM-DD/hh_mm_ssZ" + max_bytes = 1000 max_duration = "300s" - + avro_config { write_metadata = true } } - depends_on = [ + depends_on = [ google_storage_bucket.{{$.PrimaryResourceId}}, google_storage_bucket_iam_member.admin, ] diff --git a/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl index 05c013f0a221..f451327fe851 100644 --- a/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl @@ 
-8,11 +8,11 @@ {{ if $.IsA "Map" }} * `{{ underscore $.KeyName }}` - (Required) The identifier for this object. Format specified above. {{- end}} - {{- end}} - {{- range $np := $.NestedProperties }} -{{- template "propertyDocumentation" $np -}} - {{- end}} - {{- range $np := $.NestedProperties }} + {{- range $np := $.NestedProperties }} +{{- template "propertyDocumentation" $np }} + {{- end }} + {{- range $np := $.NestedProperties }} {{- template "nestedPropertyDocumentation" $np -}} + {{- end}} {{- end}} {{- end}} \ No newline at end of file diff --git a/mmv1/templates/terraform/resource.html.markdown.tmpl b/mmv1/templates/terraform/resource.html.markdown.tmpl index a9bb6ee8ab17..ab8c9a1fe58b 100644 --- a/mmv1/templates/terraform/resource.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource.html.markdown.tmpl @@ -97,7 +97,7 @@ The following arguments are supported: {{- end }} {{- end }} -{{- range $p := $.AllNestedProperties $.RootProperties }} +{{- range $p := $.AllUserProperties }} {{- if $p.Required }} {{- template "nestedPropertyDocumentation" $p}} {{- end}} @@ -120,7 +120,7 @@ The following arguments are supported: {{ if $.Docs.OptionalProperties }} {{ $.Docs.OptionalProperties }} {{- end }} -{{- range $p := $.AllNestedProperties $.RootProperties }} +{{- range $p := $.AllUserProperties }} {{- if and (not $p.Required) (not $p.Output) }} {{- template "nestedPropertyDocumentation" $p -}} {{ end}} @@ -138,7 +138,7 @@ In addition to the arguments listed above, the following computed attributes are {{- if $.HasSelfLink }} * `self_link` - The URI of the created resource. 
{{- end }} -{{ range $p := $.AllNestedProperties $.RootProperties }} +{{ range $p := $.AllUserProperties }} {{- if $p.Output }} {{- template "nestedPropertyDocumentation" $p -}} {{- end }} diff --git a/mmv1/templates/terraform/update_mask.go.tmpl b/mmv1/templates/terraform/update_mask.go.tmpl index 98a47f4f2194..b4bca9377c55 100644 --- a/mmv1/templates/terraform/update_mask.go.tmpl +++ b/mmv1/templates/terraform/update_mask.go.tmpl @@ -12,11 +12,13 @@ */}} {{- define "UpdateMask" }} updateMask := []string{} -{{- range $field, $masks := $.GetPropertyUpdateMasksGroups }} -if d.HasChange("{{ $field }}") { - updateMask = append(updateMask, {{ join $masks ","}}) +{{- $maskGroups := $.GetPropertyUpdateMasksGroups $.UpdateBodyProperties "" }} +{{- range $key := $.GetPropertyUpdateMasksGroupKeys $.UpdateBodyProperties }} + +if d.HasChange("{{ $key }}") { + updateMask = append(updateMask, "{{ join (index $maskGroups $key) "\",\""}}") } -{{ end }} +{{- end }} // updateMask is a URL parameter but not present in the schema, so ReplaceVars // won't set it url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) diff --git a/mmv1/third_party/terraform/go.mod b/mmv1/third_party/terraform/go.mod index 26e6f46d5d43..8fa4bcaf42e6 100644 --- a/mmv1/third_party/terraform/go.mod +++ b/mmv1/third_party/terraform/go.mod @@ -97,7 +97,7 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - go4.org/netipx v0.0.0-20231129151722-fdeea329fbba + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect golang.org/x/crypto v0.22.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect From c85842ff834e4f0e19a29038aa1baea8fcb393d1 Mon Sep 17 00:00:00 2001 From: Aleksandr Averbukh Date: Tue, 4 Jun 2024 23:03:25 +0200 Subject: [PATCH 055/356] Add notification_config.send_for_bulk_import field to 
google_healthcare_dicom_store (#10855) --- mmv1/products/healthcare/DicomStore.yaml | 5 +++++ .../examples/go/healthcare_dicom_store_bq_stream.tf.tmpl | 3 ++- .../examples/healthcare_dicom_store_bq_stream.tf.erb | 3 ++- .../healthcare/resource_healthcare_dicom_store_test.go | 7 ++++++- 4 files changed, 15 insertions(+), 3 deletions(-) diff --git a/mmv1/products/healthcare/DicomStore.yaml b/mmv1/products/healthcare/DicomStore.yaml index 98378ff2998e..488e65fcc45f 100644 --- a/mmv1/products/healthcare/DicomStore.yaml +++ b/mmv1/products/healthcare/DicomStore.yaml @@ -104,6 +104,11 @@ properties: project. service-PROJECT_NUMBER@gcp-sa-healthcare.iam.gserviceaccount.com must have publisher permissions on the given Cloud Pub/Sub topic. Not having adequate permissions will cause the calls that send notifications to fail. required: true + - !ruby/object:Api::Type::Boolean + name: sendForBulkImport + required: false + description: | + Indicates whether or not to send Pub/Sub notifications on bulk import. Only supported for DICOM imports. 
- !ruby/object:Api::Type::String name: 'selfLink' description: | diff --git a/mmv1/templates/terraform/examples/go/healthcare_dicom_store_bq_stream.tf.tmpl b/mmv1/templates/terraform/examples/go/healthcare_dicom_store_bq_stream.tf.tmpl index a51cb8014d2a..c376a5c41b0e 100644 --- a/mmv1/templates/terraform/examples/go/healthcare_dicom_store_bq_stream.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/healthcare_dicom_store_bq_stream.tf.tmpl @@ -5,7 +5,8 @@ resource "google_healthcare_dicom_store" "default" { dataset = google_healthcare_dataset.dataset.id notification_config { - pubsub_topic = google_pubsub_topic.topic.id + pubsub_topic = google_pubsub_topic.topic.id + send_for_bulk_import = true } labels = { diff --git a/mmv1/templates/terraform/examples/healthcare_dicom_store_bq_stream.tf.erb b/mmv1/templates/terraform/examples/healthcare_dicom_store_bq_stream.tf.erb index a6c9d7632717..2d2b3df301cc 100644 --- a/mmv1/templates/terraform/examples/healthcare_dicom_store_bq_stream.tf.erb +++ b/mmv1/templates/terraform/examples/healthcare_dicom_store_bq_stream.tf.erb @@ -5,7 +5,8 @@ resource "google_healthcare_dicom_store" "default" { dataset = google_healthcare_dataset.dataset.id notification_config { - pubsub_topic = google_pubsub_topic.topic.id + pubsub_topic = google_pubsub_topic.topic.id + send_for_bulk_import = true } labels = { diff --git a/mmv1/third_party/terraform/services/healthcare/resource_healthcare_dicom_store_test.go b/mmv1/third_party/terraform/services/healthcare/resource_healthcare_dicom_store_test.go index 39dd582532da..b96e9fbfaa6c 100644 --- a/mmv1/third_party/terraform/services/healthcare/resource_healthcare_dicom_store_test.go +++ b/mmv1/third_party/terraform/services/healthcare/resource_healthcare_dicom_store_test.go @@ -142,7 +142,8 @@ resource "google_healthcare_dicom_store" "default" { dataset = google_healthcare_dataset.dataset.id notification_config { - pubsub_topic = google_pubsub_topic.topic.id + pubsub_topic = 
google_pubsub_topic.topic.id + send_for_bulk_import = true } labels = { @@ -190,6 +191,10 @@ func testAccCheckGoogleHealthcareDicomStoreUpdate(t *testing.T, pubsubTopic stri if topicName != pubsubTopic { return fmt.Errorf("dicomStore 'NotificationConfig' not updated ('%s' != '%s'): %s", topicName, pubsubTopic, gcpResourceUri) } + + if !response.NotificationConfig.SendForBulkImport { + return fmt.Errorf("dicomStore 'NotificationConfig.SendForBulkImport' not changed to true: %s", gcpResourceUri) + } } if !foundResource { From 9a20da4eb1553c0d5851f4b7002ac0a49f94d701 Mon Sep 17 00:00:00 2001 From: Sam Levenick Date: Tue, 4 Jun 2024 17:19:35 -0400 Subject: [PATCH 056/356] Rework comments on deletion_protection (#10767) --- mmv1/products/privateca/CertificateAuthority.yaml | 8 +++++--- mmv1/products/spanner/Database.yaml | 8 ++++++-- .../services/bigquery/resource_bigquery_table.go.erb | 2 +- .../services/bigtable/resource_bigtable_instance.go | 2 +- .../services/container/resource_container_cluster.go.erb | 2 +- .../terraform/website/docs/r/bigquery_table.html.markdown | 6 ++++-- .../website/docs/r/bigtable_instance.html.markdown | 5 +++-- .../website/docs/r/sql_database_instance.html.markdown | 6 ++++-- 8 files changed, 25 insertions(+), 14 deletions(-) diff --git a/mmv1/products/privateca/CertificateAuthority.yaml b/mmv1/products/privateca/CertificateAuthority.yaml index e16af334a022..87dd6f902da2 100644 --- a/mmv1/products/privateca/CertificateAuthority.yaml +++ b/mmv1/products/privateca/CertificateAuthority.yaml @@ -114,7 +114,7 @@ examples: - !ruby/object:Provider::Terraform::Examples name: 'privateca_certificate_authority_custom_ski' primary_resource_id: 'default' - # Multiple IAM bindings on the same key cause non-determinism + # Multiple IAM bindings on the same key cause non-determinism skip_vcr: true vars: kms_key_name: 'projects/keys-project/locations/us-central1/keyRings/key-ring/cryptoKeys/crypto-key' @@ -136,8 +136,10 @@ virtual_fields: name: 
'deletion_protection'
    default_value: true
    description: |
-      Whether or not to allow Terraform to destroy the CertificateAuthority. Unless this field is set to false
-      in Terraform state, a `terraform destroy` or `terraform apply` that would delete the instance will fail.
+      Whether Terraform will be prevented from destroying the CertificateAuthority.
+      When the field is set to true or unset in Terraform state, a `terraform apply`
+      or `terraform destroy` that would delete the CertificateAuthority will fail.
+      When the field is set to false, deleting the CertificateAuthority is allowed.
 - !ruby/object:Api::Type::Enum
   name: 'desired_state'
   description: |
diff --git a/mmv1/products/spanner/Database.yaml b/mmv1/products/spanner/Database.yaml
index bcb1c3c21033..39014777c206 100644
--- a/mmv1/products/spanner/Database.yaml
+++ b/mmv1/products/spanner/Database.yaml
@@ -74,8 +74,12 @@ virtual_fields:
     name: 'deletion_protection'
     default_value: true
     description: |
-      Whether or not to allow Terraform to destroy the database. Defaults to true. Unless this field is set to false
-      in Terraform state, a `terraform destroy` or `terraform apply` that would delete the database will fail.
+      Whether Terraform will be prevented from destroying the database. Defaults to true.
+      When a `terraform destroy` or `terraform apply` would delete the database,
+      the command will fail if this field is not set to false in Terraform state.
+      When the field is set to true or unset in Terraform state, a `terraform apply`
+      or `terraform destroy` that would delete the database will fail.
+      When the field is set to false, deleting the database is allowed.
custom_code: !ruby/object:Provider::Terraform::CustomCode constants: 'templates/terraform/constants/spanner_database.go.erb' encoder: templates/terraform/encoders/spanner_database.go.erb diff --git a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table.go.erb b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table.go.erb index 847bfaa6d224..460af934bcb6 100644 --- a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table.go.erb +++ b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_table.go.erb @@ -1248,7 +1248,7 @@ func ResourceBigQueryTable() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: true, - Description: `Whether or not to allow Terraform to destroy the instance. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.`, + Description: `Whether Terraform will be prevented from destroying the instance. When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the table will fail. When the field is set to false, deleting the table is allowed.`, }, <% unless version == 'ga' -%> diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go index 3940082bb731..b5ca76ee8b28 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance.go @@ -182,7 +182,7 @@ func ResourceBigtableInstance() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: true, - Description: `Whether or not to allow Terraform to destroy the instance. 
Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the instance will fail.`, + Description: ` When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the instance will fail. When the field is set to false, deleting the instance is allowed.`, }, "labels": { diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index 85808489e5d6..495a5e546d5e 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -285,7 +285,7 @@ func ResourceContainerCluster() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: true, - Description: `Whether or not to allow Terraform to destroy the instance. Defaults to true. Unless this field is set to false in Terraform state, a terraform destroy or terraform apply that would delete the cluster will fail.`, + Description: `When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the cluster will fail. When the field is set to false, deleting the cluster is allowed.`, }, "addons_config": { diff --git a/mmv1/third_party/terraform/website/docs/r/bigquery_table.html.markdown b/mmv1/third_party/terraform/website/docs/r/bigquery_table.html.markdown index a1aeff62a04d..48109ed6a06c 100644 --- a/mmv1/third_party/terraform/website/docs/r/bigquery_table.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/bigquery_table.html.markdown @@ -161,8 +161,10 @@ The following arguments are supported: * `materialized_view` - (Optional) If specified, configures this table as a materialized view. Structure is [documented below](#nested_materialized_view). 
-* `deletion_protection` - (Optional) Whether or not to allow Terraform to destroy the instance. Unless this field is set to false -in Terraform state, a `terraform destroy` or `terraform apply` that would delete the instance will fail. +* `deletion_protection` - (Optional) Whether Terraform will be prevented from destroying the table. + When the field is set to true or unset in Terraform state, a `terraform apply` + or `terraform destroy` that would delete the table will fail. + When the field is set to false, deleting the table is allowed.. * `table_constraints` - (Optional) Defines the primary key and foreign keys. Structure is [documented below](#nested_table_constraints). diff --git a/mmv1/third_party/terraform/website/docs/r/bigtable_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/bigtable_instance.html.markdown index af3945f5ac2c..c9b952fdefb7 100644 --- a/mmv1/third_party/terraform/website/docs/r/bigtable_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/bigtable_instance.html.markdown @@ -99,8 +99,9 @@ to default to the backend value. See [structure below](#nested_cluster). * `force_destroy` - (Optional) Deleting a BigTable instance can be blocked if any backups are present in the instance. When `force_destroy` is set to true, Terraform will delete all backups found in the BigTable instance before attempting to delete the instance itself. Defaults to false. -* `deletion_protection` - (Optional) Whether or not to allow Terraform to destroy the instance. Unless this field is set to false -in Terraform state, a `terraform destroy` or `terraform apply` that would delete the instance will fail. Defaults to true. +* `deletion_protection` - (Optional) Whether Terraform will be prevented from destroying the instance. + When the field is set to true or unset in Terraform state, a `terraform apply` or `terraform destroy` that would delete + the instance will fail. 
When the field is set to false, deleting the instance is allowed. * `labels` - (Optional) A set of key/value label pairs to assign to the resource. Label keys must follow the requirements at https://cloud.google.com/resource-manager/docs/creating-managing-labels#requirements. diff --git a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown index caea50a3601b..806bcd2bdc02 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown @@ -248,8 +248,10 @@ includes an up-to-date reference of supported versions. That service account needs the `Cloud KMS > Cloud KMS CryptoKey Encrypter/Decrypter` role on your key - please see [this step](https://cloud.google.com/sql/docs/mysql/configure-cmek#grantkey). -* `deletion_protection` - (Optional) Whether or not to allow Terraform to destroy the instance. Unless this field is set to false -in Terraform state, a `terraform destroy` or `terraform apply` command that deletes the instance will fail. Defaults to `true`. +* `deletion_protection` - (Optional) Whether Terraform will be prevented from destroying the instance. + When the field is set to true or unset in Terraform state, a `terraform apply` + or `terraform destroy` that would delete the instance will fail. + When the field is set to false, deleting the instance is allowed. ~> **NOTE:** This flag only protects instances from deletion within Terraform. To protect your instances from accidental deletion across all surfaces (API, gcloud, Cloud Console and Terraform), use the API flag `settings.deletion_protection_enabled`. 
From 496ae0d4ed8789804048199edad90d5d6e85ae47 Mon Sep 17 00:00:00 2001 From: rahul2393 Date: Wed, 5 Jun 2024 03:54:28 +0530 Subject: [PATCH 057/356] feat(spanner): Add InstanceConfig resource in Spanner product (#10341) Co-authored-by: Stephen Lewis (Burrows) --- mmv1/products/spanner/InstanceConfig.yaml | 102 ++++++++++++++- .../constants/spanner_instance_config.go.erb | 57 ++++++++ .../decoders/spanner_instance_config.go.erb | 28 ++++ .../encoders/spanner_instance_config.go.erb | 19 +++ .../go/spanner_instance_config_basic.tf.tmpl | 13 ++ .../spanner_instance_config_basic.tf.erb | 13 ++ .../spanner_instance_config_update.go.erb | 8 ++ ...esource_spanner_instance_config_sweeper.go | 122 ++++++++++++++++++ .../resource_spanner_instance_config_test.go | 65 ++++++++++ 9 files changed, 420 insertions(+), 7 deletions(-) create mode 100644 mmv1/templates/terraform/constants/spanner_instance_config.go.erb create mode 100644 mmv1/templates/terraform/decoders/spanner_instance_config.go.erb create mode 100644 mmv1/templates/terraform/encoders/spanner_instance_config.go.erb create mode 100644 mmv1/templates/terraform/examples/go/spanner_instance_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/spanner_instance_config_basic.tf.erb create mode 100644 mmv1/templates/terraform/update_encoder/spanner_instance_config_update.go.erb create mode 100644 mmv1/third_party/terraform/services/spanner/resource_spanner_instance_config_sweeper.go create mode 100644 mmv1/third_party/terraform/services/spanner/resource_spanner_instance_config_test.go diff --git a/mmv1/products/spanner/InstanceConfig.yaml b/mmv1/products/spanner/InstanceConfig.yaml index 60b5b2749c04..b7e211a3f413 100644 --- a/mmv1/products/spanner/InstanceConfig.yaml +++ b/mmv1/products/spanner/InstanceConfig.yaml @@ -11,23 +11,111 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+ --- !ruby/object:Api::Resource -name: 'InstanceConfig' -base_url: 'projects/{{project}}/instanceConfigs' +name: InstanceConfig +base_url: projects/{{project}}/instanceConfigs +update_mask: true +update_verb: :PATCH description: | A possible configuration for a Cloud Spanner instance. Configurations define the geographic placement of nodes and their replication. -readonly: true -# Used as a resource ref -exclude: true +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + Official Documentation: https://cloud.google.com/spanner/ + api: https://cloud.google.com/spanner/docs/reference/rest/v1/projects.instanceConfigs +async: !ruby/object:Api::OpAsync + actions: [create, update] + operation: !ruby/object:Api::OpAsync::Operation + base_url: '{{op_id}}' + result: !ruby/object:Api::OpAsync::Result + resource_inside_response: true +autogen_async: true +id_format: '{{project}}/{{name}}' +import_format: + - projects/{{project}}/instanceConfigs/{{name}} + - '{{project}}/{{name}}' + - '{{name}}' +examples: + - !ruby/object:Provider::Terraform::Examples + name: spanner_instance_config_basic + primary_resource_id: example + vars: + instance_config_name: '"custom-nam11-config"' + test_vars_overrides: + instance_config_name: '"custom-tf-test-nam11-config"' +# Sweeper skipped as this resource has customized deletion. +skip_sweeper: true +exclude_tgc: true +custom_code: !ruby/object:Provider::Terraform::CustomCode + encoder: templates/terraform/encoders/spanner_instance_config.go.erb + update_encoder: templates/terraform/update_encoder/spanner_instance_config_update.go.erb + constants: 'templates/terraform/constants/spanner_instance_config.go.erb' + decoder: templates/terraform/decoders/spanner_instance_config.go.erb properties: - !ruby/object:Api::Type::String - name: 'name' + name: name description: | A unique identifier for the instance configuration. 
Values are of the form projects//instanceConfigs/[a-z][-a-z0-9]* + immutable: true + required: true + default_from_api: true - !ruby/object:Api::Type::String - name: 'displayName' + name: displayName description: | The name of this instance configuration as it appears in UIs. + required: true + - !ruby/object:Api::Type::String + name: baseConfig + description: | + Base configuration name, e.g. nam3, based on which this configuration is created. + Only set for user managed configurations. + baseConfig must refer to a configuration of type GOOGLE_MANAGED in the same project as this configuration. + immutable: true + default_from_api: true + custom_expand: templates/terraform/custom_expand/spanner_instance_config.go.erb + custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' + - !ruby/object:Api::Type::String + name: configType + description: | + Output only. Whether this instance config is a Google or User Managed Configuration. output: true + - !ruby/object:Api::Type::Array + name: replicas + description: | + The geographic placement of nodes in this instance configuration and their replication properties. + immutable: true + required: true + is_set: true + set_hash_func: replicasHash + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: location + description: | + The location of the serving resources, e.g. "us-central1". + immutable: true + - !ruby/object:Api::Type::Enum + name: type + description: | + Indicates the type of replica. See the [replica types + documentation](https://cloud.google.com/spanner/docs/replication#replica_types) + for more details. + immutable: true + values: + - :READ_WRITE + - :READ_ONLY + - :WITNESS + - !ruby/object:Api::Type::Boolean + name: defaultLeaderLocation + default_value: false + immutable: true + description: |- + If true, this location is designated as the default leader location where + leader replicas are placed. 
+ - !ruby/object:Api::Type::KeyValueLabels + name: 'labels' + description: | + An object containing a list of "key": value pairs. + Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. diff --git a/mmv1/templates/terraform/constants/spanner_instance_config.go.erb b/mmv1/templates/terraform/constants/spanner_instance_config.go.erb new file mode 100644 index 000000000000..553c0c23bd7c --- /dev/null +++ b/mmv1/templates/terraform/constants/spanner_instance_config.go.erb @@ -0,0 +1,57 @@ +<%# The license inside this block applies to this file + # Copyright 2021 Google Inc. + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+-%> +func replicasHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["location"].(string)))) // ToLower just in case + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["type"].(string)))) + var isLeader interface{} + if m["defaultLeaderLocation"] != nil { + isLeader = m["defaultLeaderLocation"] + } else { + isLeader = false + } + buf.WriteString(fmt.Sprintf("%v-", isLeader.(bool))) + return tpgresource.Hashcode(buf.String()) +} + +func getBaseInstanceConfigReplicas(d *schema.ResourceData, config *transport_tpg.Config, baseConfigProp interface{}, billingProject, userAgent string) ([]interface{}, error) { + url, err := tpgresource.ReplaceVars(d, config, "{{SpannerBasePath}}") + if err != nil { + return nil, err + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: fmt.Sprintf("%s%s", url, baseConfigProp.(string)), + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return nil, fmt.Errorf("Error fetching base InstanceConfig: %s", err) + } + + data, ok := res["replicas"] + if !ok || data == nil { + log.Print("[DEBUG] No replicas in the base InstanceConfig.") + return nil, nil + } + + return data.([]interface{}), nil +} diff --git a/mmv1/templates/terraform/decoders/spanner_instance_config.go.erb b/mmv1/templates/terraform/decoders/spanner_instance_config.go.erb new file mode 100644 index 000000000000..6324f8f3da38 --- /dev/null +++ b/mmv1/templates/terraform/decoders/spanner_instance_config.go.erb @@ -0,0 +1,28 @@ +config := meta.(*transport_tpg.Config) +d.SetId(res["name"].(string)) +if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/instanceConfigs/(?P[^/]+)"}, d, config); err != nil { + return nil, err +} +res["project"] = d.Get("project").(string) +res["name"] = 
d.Get("name").(string) +id, err := tpgresource.ReplaceVars(d, config, "{{project}}/{{name}}") +if err != nil { +return nil, err +} +baseReplicas, err := getBaseInstanceConfigReplicas(d, config, res["baseConfig"], res["project"].(string), config.UserAgent) +if err != nil { + return nil, err +} +customReplica := make(map[int]interface{}) +for _, b := range baseReplicas { + customReplica[replicasHash(b)] = b +} +var cR []interface{} +for _, r := range res["replicas"].([]interface{}) { + if _, ok := customReplica[replicasHash(r)]; !ok { + cR = append(cR, r) + } +} +res["replicas"] = cR +d.SetId(id) +return res, nil \ No newline at end of file diff --git a/mmv1/templates/terraform/encoders/spanner_instance_config.go.erb b/mmv1/templates/terraform/encoders/spanner_instance_config.go.erb new file mode 100644 index 000000000000..b12071a83112 --- /dev/null +++ b/mmv1/templates/terraform/encoders/spanner_instance_config.go.erb @@ -0,0 +1,19 @@ +config := meta.(*transport_tpg.Config) +project, err := tpgresource.GetProject(d, config) +if err != nil { + return nil, err +} +newObj := make(map[string]interface{}) +if obj["name"] == nil { + return nil, fmt.Errorf("Error setting instance config name") +} +newObj["instanceConfigId"] = obj["name"] +obj["name"] = fmt.Sprintf("projects/%s/instanceConfigs/%s", project, obj["name"]) +baseReplicas, err := getBaseInstanceConfigReplicas(d, config, obj["baseConfig"], project, meta.(*transport_tpg.Config).UserAgent) +if err != nil { + return nil, err +} +r := obj["replicas"].([]interface{}) +obj["replicas"] = append(r, baseReplicas...) 
+newObj["instanceConfig"] = obj +return newObj, nil diff --git a/mmv1/templates/terraform/examples/go/spanner_instance_config_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/spanner_instance_config_basic.tf.tmpl new file mode 100644 index 000000000000..c4c8e2723258 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/spanner_instance_config_basic.tf.tmpl @@ -0,0 +1,13 @@ +resource "google_spanner_instance_config" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['instance_config_name'] %>" + display_name = "Test Spanner Instance Config" + base_config = "nam11" + replicas { + location = "us-west1" + type = "READ_ONLY" + default_leader_location = false + } + labels = { + "foo" = "bar" + } +} diff --git a/mmv1/templates/terraform/examples/spanner_instance_config_basic.tf.erb b/mmv1/templates/terraform/examples/spanner_instance_config_basic.tf.erb new file mode 100644 index 000000000000..c4c8e2723258 --- /dev/null +++ b/mmv1/templates/terraform/examples/spanner_instance_config_basic.tf.erb @@ -0,0 +1,13 @@ +resource "google_spanner_instance_config" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['instance_config_name'] %>" + display_name = "Test Spanner Instance Config" + base_config = "nam11" + replicas { + location = "us-west1" + type = "READ_ONLY" + default_leader_location = false + } + labels = { + "foo" = "bar" + } +} diff --git a/mmv1/templates/terraform/update_encoder/spanner_instance_config_update.go.erb b/mmv1/templates/terraform/update_encoder/spanner_instance_config_update.go.erb new file mode 100644 index 000000000000..6805f5d6b35d --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/spanner_instance_config_update.go.erb @@ -0,0 +1,8 @@ +project, err := tpgresource.GetProject(d, meta.(*transport_tpg.Config)) +if err != nil { +return nil, err +} +obj["name"] = fmt.Sprintf("projects/%s/instanceConfigs/%s", project, obj["name"]) +newObj := make(map[string]interface{}) +newObj["instanceConfig"] = obj +return newObj, nil 
diff --git a/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_config_sweeper.go b/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_config_sweeper.go new file mode 100644 index 000000000000..955e41a637c5 --- /dev/null +++ b/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_config_sweeper.go @@ -0,0 +1,122 @@ +package spanner + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("SpannerInstanceConfig", testSweepSpannerInstanceConfig) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepSpannerInstanceConfig(region string) error { + resourceName := "SpannerInstanceConfig" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://spanner.googleapis.com/v1/projects/{{project}}/instanceConfigs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing 
sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instanceConfigs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(strings.ReplaceAll(name, "custom-", "")) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://spanner.googleapis.com/v1/projects/{{project}}/instanceConfigs/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were 
non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_config_test.go b/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_config_test.go new file mode 100644 index 000000000000..b0a39ff656dd --- /dev/null +++ b/mmv1/third_party/terraform/services/spanner/resource_spanner_instance_config_test.go @@ -0,0 +1,65 @@ +package spanner_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +// Acceptance Tests + +func TestAccSpannerInstanceConfig_update(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("custom-tf-test-config-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSpannerInstanceConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSpannerInstanceConfig_update(name, "display name", false), + }, + { + ResourceName: "google_spanner_instance_config.updater", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccSpannerInstanceConfig_update(name, "display name updated", true), + }, + { + ResourceName: "google_spanner_instance_config.updater", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccSpannerInstanceConfig_update(name, displayName string, addLabel bool) string { + extraLabel := "" + if addLabel { + extraLabel = "\"key2\" = \"value2\"" + } + return fmt.Sprintf(` +resource "google_spanner_instance_config" "updater" { + name = "%s" + display_name = "%s-dname" + base_config = "nam11" + replicas { + location = "us-west1" + type = "READ_ONLY" + } + labels = { + 
"key1" = "value1" + %s + } +} +`, name, displayName, extraLabel) +} From 042ce9a6893952685b18d666959ab533d0763420 Mon Sep 17 00:00:00 2001 From: aston-github <39973638+aston-github@users.noreply.github.com> Date: Tue, 4 Jun 2024 15:29:14 -0700 Subject: [PATCH 058/356] feat(google_container_node_pool): Handle nil mode in secondary boot disk. (#10854) --- .../services/container/node_config.go.erb | 24 ++++++++++++++---- .../resource_container_node_pool_test.go.erb | 25 ++++++++++++++++++- 2 files changed, 43 insertions(+), 6 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb index 546661a54e3e..8dc9247c46ac 100644 --- a/mmv1/third_party/terraform/services/container/node_config.go.erb +++ b/mmv1/third_party/terraform/services/container/node_config.go.erb @@ -896,11 +896,25 @@ func expandNodeConfig(v interface{}) *container.NodeConfig { } if v, ok := nodeConfig["secondary_boot_disks"]; ok && len(v.([]interface{})) > 0 { - conf := v.([]interface{})[0].(map[string]interface{}) - nc.SecondaryBootDisks = append(nc.SecondaryBootDisks, &container.SecondaryBootDisk{ - DiskImage: conf["disk_image"].(string), - Mode: conf["mode"].(string), - }) + conf, confOK := v.([]interface{})[0].(map[string]interface{}) + if confOK { + modeValue, modeOK := conf["mode"] + diskImage := conf["disk_image"].(string) + if modeOK { + nc.SecondaryBootDisks = append(nc.SecondaryBootDisks, &container.SecondaryBootDisk{ + DiskImage: diskImage, + Mode: modeValue.(string), + }) + } else { + nc.SecondaryBootDisks = append(nc.SecondaryBootDisks, &container.SecondaryBootDisk{ + DiskImage: diskImage, + }) + } + } else { + nc.SecondaryBootDisks = append(nc.SecondaryBootDisks, &container.SecondaryBootDisk{ + DiskImage: "", + }) + } } if v, ok := nodeConfig["gcfs_config"]; ok && len(v.([]interface{})) > 0 { diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb 
b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index 612f66e0d713..735d39b34e4e 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -1665,6 +1665,11 @@ func TestAccContainerNodePool_secondaryBootDisks(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + resource.TestStep{ + ResourceName: "google_container_node_pool.np-no-mode", + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -1699,7 +1704,25 @@ resource "google_container_node_pool" "np" { } } } -`, cluster, networkName, subnetworkName, np) + +resource "google_container_node_pool" "np-no-mode" { + name = "%s-no-mode" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-8" + image_type = "COS_CONTAINERD" + gcfs_config { + enabled = true + } + secondary_boot_disks { + disk_image = "" + } + } +} +`, cluster, networkName, subnetworkName, np, np) } func TestAccContainerNodePool_gcfsConfig(t *testing.T) { From 02cf34c5dd30da27f4482b65a616c9eac823ec18 Mon Sep 17 00:00:00 2001 From: patrickmoy <53500820+patrickmoy@users.noreply.github.com> Date: Tue, 4 Jun 2024 15:31:26 -0700 Subject: [PATCH 059/356] Add secrets_discovery_target field in google_data_loss_prevention_discovery_config, as well as fields to support single-resource mode for big_query_target and cloud_sql_target (#10798) --- mmv1/products/dlp/DiscoveryConfig.yaml | 46 +++ ...a_loss_prevention_discovery_config_test.go | 272 ++++++++++++++++++ 2 files changed, 318 insertions(+) diff --git a/mmv1/products/dlp/DiscoveryConfig.yaml b/mmv1/products/dlp/DiscoveryConfig.yaml index 137a33fdba9e..bea342a48494 100644 --- a/mmv1/products/dlp/DiscoveryConfig.yaml +++ b/mmv1/products/dlp/DiscoveryConfig.yaml @@ -32,17 +32,20 @@ id_format: 
'{{parent}}/discoveryConfigs/{{name}}' examples: - !ruby/object:Provider::Terraform::Examples name: 'dlp_discovery_config_basic' + skip_test: true primary_resource_id: 'basic' test_env_vars: project: :PROJECT_NAME location: :REGION - !ruby/object:Provider::Terraform::Examples name: 'dlp_discovery_config_actions' + skip_test: true primary_resource_id: 'actions' test_env_vars: project: :PROJECT_NAME - !ruby/object:Provider::Terraform::Examples name: 'dlp_discovery_config_org_running' + skip_test: true primary_resource_id: 'org_running' test_env_vars: project: :PROJECT_NAME @@ -55,16 +58,19 @@ examples: organization: :ORG_ID - !ruby/object:Provider::Terraform::Examples name: 'dlp_discovery_config_conditions_cadence' + skip_test: true primary_resource_id: 'conditions_cadence' test_env_vars: project: :PROJECT_NAME - !ruby/object:Provider::Terraform::Examples name: 'dlp_discovery_config_filter_regexes_and_conditions' + skip_test: true primary_resource_id: 'filter_regexes_and_conditions' test_env_vars: project: :PROJECT_NAME - !ruby/object:Provider::Terraform::Examples name: 'dlp_discovery_config_cloud_sql' + skip_test: true primary_resource_id: 'cloud_sql' test_env_vars: project: :PROJECT_NAME @@ -237,6 +243,18 @@ properties: # The fields below are necessary to include the "otherTables" filter in the payload send_empty_value: true allow_empty_object: true + - !ruby/object:Api::Type::NestedObject + name: tableReference + description: The table to scan. Discovery configurations including this can only include one DiscoveryTarget (the DiscoveryTarget with this TableReference). + properties: + - !ruby/object:Api::Type::String + name: datasetId + description: Dataset ID of the table. + required: true + - !ruby/object:Api::Type::String + name: tableId + description: Name of the table. 
+ required: true - !ruby/object:Api::Type::NestedObject name: conditions description: In addition to matching the filter, these conditions must be true before a profile is generated @@ -371,6 +389,26 @@ properties: [] # Meant to be an empty object with no properties. The fields below are necessary to include the "others" filter in the payload send_empty_value: true allow_empty_object: true + - !ruby/object:Api::Type::NestedObject + name: databaseResourceReference + description: The database resource to scan. Targets including this can only include one target (the target with this database resource reference). + properties: + - !ruby/object:Api::Type::String + name: projectId + description: Required. If within a project-level config, then this must match the config's project ID. + required: true + - !ruby/object:Api::Type::String + name: instance + description: 'Required. The instance where this resource is located. For example: Cloud SQL instance ID.' + required: true + - !ruby/object:Api::Type::String + name: database + description: Required. Name of a database within the instance. + required: true + - !ruby/object:Api::Type::String + name: databaseResource + description: Required. Name of a database resource, for example, a table within the database. + required: true - !ruby/object:Api::Type::NestedObject name: conditions description: 'In addition to matching the filter, these conditions must be true before a profile is generated.' @@ -439,6 +477,14 @@ properties: # The fields below are necessary to include the "disabled" filter in the payload send_empty_value: true allow_empty_object: true + - !ruby/object:Api::Type::NestedObject + name: secretsTarget + description: Discovery target that looks for credentials and secrets stored in cloud resource metadata and reports them as vulnerabilities to Security Command Center. Only one target of this type is allowed. 
+ properties: + [] # Meant to be an empty object with no properties - see here : https://cloud.google.com/sensitive-data-protection/docs/reference/rest/v2/organizations.locations.discoveryConfigs#DiscoveryConfig.SecretsDiscoveryTarget + # The fields below are necessary to include the "secretsDiscoveryTarget" target in the payload + send_empty_value: true + allow_empty_object: true - !ruby/object:Api::Type::Array name: 'errors' description: Output only. A stream of errors encountered when the config was activated. Repeated errors may result in the config automatically being paused. Output only field. Will return the last 100 errors. Whenever the config is modified this list will be cleared. diff --git a/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_discovery_config_test.go b/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_discovery_config_test.go index 58c38d206033..dfd6d137ff81 100644 --- a/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_discovery_config_test.go +++ b/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_discovery_config_test.go @@ -16,6 +16,9 @@ func TestAccDataLossPreventionDiscoveryConfig_Update(t *testing.T) { "conditions": testAccDataLossPreventionDiscoveryConfig_ConditionsCadenceUpdate, "filter": testAccDataLossPreventionDiscoveryConfig_FilterUpdate, "cloud_sql": testAccDataLossPreventionDiscoveryConfig_CloudSqlUpdate, + "bq_single": testAccDataLossPreventionDiscoveryConfig_BqSingleTable, + "sql_single": testAccDataLossPreventionDiscoveryConfig_SqlSingleTable, + "secrets": testAccDataLossPreventionDiscoveryConfig_SecretsUpdate, } for name, tc := range testCases { // shadow the tc variable into scope so that when @@ -249,6 +252,111 @@ func testAccDataLossPreventionDiscoveryConfig_CloudSqlUpdate(t *testing.T) { }) } +func testAccDataLossPreventionDiscoveryConfig_BqSingleTable(t *testing.T) { + + context 
:= map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "location": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataLossPreventionDiscoveryConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigStart(context), + }, + { + ResourceName: "google_data_loss_prevention_discovery_config.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + }, + { + Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigBqSingleUpdate(context), + }, + { + ResourceName: "google_data_loss_prevention_discovery_config.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + }, + }, + }) +} + +func testAccDataLossPreventionDiscoveryConfig_SqlSingleTable(t *testing.T) { + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "location": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataLossPreventionDiscoveryConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigStartCloudSql(context), + }, + { + ResourceName: "google_data_loss_prevention_discovery_config.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + }, + { + Config: 
testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigCloudSqlSingleUpdate(context), + }, + { + ResourceName: "google_data_loss_prevention_discovery_config.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + }, + }, + }) +} + +func testAccDataLossPreventionDiscoveryConfig_SecretsUpdate(t *testing.T) { + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "location": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataLossPreventionDiscoveryConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigSecretsStart(context), + }, + { + ResourceName: "google_data_loss_prevention_discovery_config.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + }, + { + Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigSecretsUpdate(context), + }, + { + ResourceName: "google_data_loss_prevention_discovery_config.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + }, + }, + }) +} + func testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigStart(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_data_loss_prevention_inspect_template" "basic" { @@ -738,3 +846,167 @@ resource "google_data_loss_prevention_discovery_config" "basic" { } `, context) } + +func testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigBqSingleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource 
"google_data_loss_prevention_inspect_template" "basic" { + parent = "projects/%{project}" + description = "Description" + display_name = "Display" + + inspect_config { + info_types { + name = "EMAIL_ADDRESS" + } + } +} +resource "google_bigquery_dataset" "default" { + dataset_id = "tf_test_%{random_suffix}" + friendly_name = "terraform-test" + description = "Description for the dataset created by terraform" + location = "US" + default_table_expiration_ms = 3600000 + + labels = { + env = "default" + } +} +resource "google_bigquery_table" "default" { + dataset_id = google_bigquery_dataset.default.dataset_id + table_id = "tf_test_%{random_suffix}" + deletion_protection = false + + labels = { + env = "default" + } + + schema = < Date: Tue, 4 Jun 2024 15:35:29 -0700 Subject: [PATCH 060/356] Add flexibleRuntimeSettings to app engine flexible (#10795) --- mmv1/products/appengine/FlexibleAppVersion.yaml | 12 ++++++++++++ .../resource_app_engine_flexible_app_version_test.go | 12 ++++++++++++ .../test-fixtures/hello-world-flask/requirements.txt | 5 ++++- 3 files changed, 28 insertions(+), 1 deletion(-) diff --git a/mmv1/products/appengine/FlexibleAppVersion.yaml b/mmv1/products/appengine/FlexibleAppVersion.yaml index 5c0b94832124..ae6831824759 100644 --- a/mmv1/products/appengine/FlexibleAppVersion.yaml +++ b/mmv1/products/appengine/FlexibleAppVersion.yaml @@ -236,6 +236,18 @@ properties: name: 'runtimeChannel' description: | The channel of the runtime to use. Only available for some runtimes. + - !ruby/object:Api::Type::NestedObject + name: 'flexibleRuntimeSettings' + description: Runtime settings for App Engine flexible environment. + properties: + - !ruby/object:Api::Type::String + name: 'operatingSystem' + description: | + Operating System of the application runtime. + - !ruby/object:Api::Type::String + name: 'runtimeVersion' + description: | + The runtime version of an App Engine flexible application. 
- !ruby/object:Api::Type::KeyValuePairs name: 'betaSettings' description: | diff --git a/mmv1/third_party/terraform/services/appengine/resource_app_engine_flexible_app_version_test.go b/mmv1/third_party/terraform/services/appengine/resource_app_engine_flexible_app_version_test.go index ed49962c399f..c851367ba49a 100644 --- a/mmv1/third_party/terraform/services/appengine/resource_app_engine_flexible_app_version_test.go +++ b/mmv1/third_party/terraform/services/appengine/resource_app_engine_flexible_app_version_test.go @@ -65,6 +65,7 @@ resource "google_project_service" "appengineflex" { service = "appengineflex.googleapis.com" disable_dependent_services = false + depends_on = [google_project_service.compute] } resource "google_compute_network" "network" { @@ -140,6 +141,11 @@ resource "google_app_engine_flexible_app_version" "foo" { shell = "gunicorn -b :$PORT main:app" } + flexible_runtime_settings { + operating_system = "ubuntu22" + runtime_version = "3.11" + } + deployment { files { name = "main.py" @@ -232,6 +238,7 @@ resource "google_project_service" "appengineflex" { service = "appengineflex.googleapis.com" disable_dependent_services = false + depends_on = [google_project_service.compute] } resource "google_compute_network" "network" { @@ -307,6 +314,11 @@ resource "google_app_engine_flexible_app_version" "foo" { shell = "gunicorn -b :$PORT main:app" } + flexible_runtime_settings { + operating_system = "ubuntu22" + runtime_version = "3.11" + } + deployment { files { name = "main.py" diff --git a/mmv1/third_party/terraform/services/appengine/test-fixtures/hello-world-flask/requirements.txt b/mmv1/third_party/terraform/services/appengine/test-fixtures/hello-world-flask/requirements.txt index f358d0ad8e44..7398bca96cd4 100644 --- a/mmv1/third_party/terraform/services/appengine/test-fixtures/hello-world-flask/requirements.txt +++ b/mmv1/third_party/terraform/services/appengine/test-fixtures/hello-world-flask/requirements.txt @@ -1,2 +1,5 @@ -Flask==1.1.1 
+Flask==3.0.3; python_version > '3.6' +Flask==2.0.1; python_version < '3.7' +Werkzeug==3.0.3; python_version > '3.6' +Werkzeug==2.0.3; python_version < '3.7' gunicorn==20.0.4 \ No newline at end of file From 8f5b22e97822549c544901c6ae3e12de14eaa0d0 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Tue, 4 Jun 2024 15:45:27 -0700 Subject: [PATCH 061/356] Convert operation file template with Go (#10870) --- mmv1/api/resource.go | 4 + mmv1/provider/template_data.go | 9 ++ mmv1/provider/terraform.go | 33 +++--- mmv1/templates/terraform/operation.go.tmpl | 124 +++++++++++++++++++++ 4 files changed, 151 insertions(+), 19 deletions(-) create mode 100644 mmv1/templates/terraform/operation.go.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 512a4bb5439c..09a4de18142a 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -834,6 +834,10 @@ func (r Resource) HasProject() bool { return strings.Contains(r.BaseUrl, "{{project}}") || strings.Contains(r.CreateUrl, "{{project}}") } +func (r Resource) IncludeProjectForOperation() bool { + return strings.Contains(r.BaseUrl, "{{project}}") || (r.GetAsync().IsA("OpAsync") && r.GetAsync().IncludeProject) +} + // def region? func (r Resource) HasRegion() bool { return strings.Contains(r.BaseUrl, "{{region}}") || strings.Contains(r.CreateUrl, "{{region}}") diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index 55a9a1928c74..8096d3450e1e 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -73,6 +73,7 @@ func subtract(a, b int) int { var TemplateFunctions = template.FuncMap{ "title": google.SpaceSeparatedTitle, "replace": strings.Replace, + "replaceAll": strings.ReplaceAll, "camelize": google.Camelize, "underscore": google.Underscore, "plural": google.Plural, @@ -124,6 +125,14 @@ func (td *TemplateData) GenerateResourceFile(filePath string, resource api.Resou td.GenerateFile(filePath, templatePath, resource, true, templates...) 
} +func (td *TemplateData) GenerateOperationFile(filePath string, resource api.Resource) { + templatePath := "templates/terraform/operation.go.tmpl" + templates := []string{ + templatePath, + } + td.GenerateFile(filePath, templatePath, resource, true, templates...) +} + func (td *TemplateData) GenerateDocumentationFile(filePath string, resource api.Resource) { templatePath := "templates/terraform/resource.html.markdown.tmpl" templates := []string{ diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 358b4ad9e33a..fb20d03e2d7b 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -97,7 +97,6 @@ func (t *Terraform) GenerateObjects(outputFolder string, generateCode, generateD } func (t *Terraform) GenerateObject(object api.Resource, outputFolder, productPath string, generateCode, generateDocs bool) { - templateData := NewTemplateData(outputFolder, t.Version) if !object.ExcludeResource { @@ -178,25 +177,21 @@ func (t *Terraform) GenerateResourceSweeper(object api.Resource, templateData Te } func (t *Terraform) GenerateOperation(outputFolder string) { + asyncObjects := google.Select(t.Product.Objects, func(o *api.Resource) bool { + return o.AutogenAsync + }) + + if len(asyncObjects) == 0 { + return + } - // TODO Q2 - // def generate_operation(pwd, output_folder, _types) - // return if @api.objects.select(&:autogen_async).empty? 
- // - // product_name = @api.api_name - // product_name_underscore = @api.name.underscore - // data = build_object_data(pwd, @api.objects.first, output_folder, @target_version_name) - // - // data.object = @api.objects.select(&:autogen_async).first - // - // data.async = data.object.async - // target_folder = File.join(folder_name(data.version), 'services', product_name) - // FileUtils.mkpath target_folder - // data.generate(pwd, - // 'templates/terraform/operation.go.erb', - // "#{target_folder}/#{product_name_underscore}_operation.go", - // self) - // end + targetFolder := path.Join(outputFolder, t.FolderName(), "services", t.Product.ApiName) + if err := os.MkdirAll(targetFolder, os.ModePerm); err != nil { + log.Println(fmt.Errorf("error creating parent directory %v: %v", targetFolder, err)) + } + targetFilePath := path.Join(targetFolder, fmt.Sprintf("%s_operation.go", google.Underscore(t.Product.Name))) + templateData := NewTemplateData(outputFolder, t.Version) + templateData.GenerateOperationFile(targetFilePath, *asyncObjects[0]) } // Generate the IAM policy for this object. This is used to query and test diff --git a/mmv1/templates/terraform/operation.go.tmpl b/mmv1/templates/terraform/operation.go.tmpl new file mode 100644 index 000000000000..966c36f6762a --- /dev/null +++ b/mmv1/templates/terraform/operation.go.tmpl @@ -0,0 +1,124 @@ +{{/* TODO: if hc_downstream */ -}} +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package {{ lower $.ProductMetadata.Name }} + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "{{ $.ImportPath }}/tpgresource" + transport_tpg "{{ $.ImportPath }}/transport" +) + +type {{ $.ProductMetadata.Name }}OperationWaiter struct { + Config *transport_tpg.Config + UserAgent string +{{- if $.IncludeProjectForOperation }} + Project string +{{- end }} +{{- if $.ProductMetadata.OperationRetry }} + retryCount int +{{- end }} + tpgresource.CommonOperationWaiter +} + +func (w *{{ $.ProductMetadata.Name }}OperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + {{- if $.GetAsync.Operation.FullUrl }} + url := fmt.Sprintf("{{ replaceAll $.GetAsync.Operation.FullUrl "{{op_id}}" "%s" }}", w.CommonOperationWaiter.Op.Name) + {{- else }} + url := fmt.Sprintf("%s{{ replaceAll $.GetAsync.Operation.BaseUrl "{{op_id}}" "%s" }}", w.Config.{{ $.ProductMetadata.Name }}BasePath, w.CommonOperationWaiter.Op.Name) + {{- end }} + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + {{- if $.IncludeProjectForOperation }} + Project: w.Project, + {{- end }} + RawURL: url, + UserAgent: w.UserAgent, + {{- if $.ErrorRetryPredicates }} + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorRetryPredicates "," -}} }, + {{- end }} + {{- if $.ErrorAbortPredicates }} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorAbortPredicates "," -}} }, + {{- end }} + }) +} + +{{- if $.ProductMetadata.OperationRetry }} +func (w *{{ $.ProductMetadata.Name }}OperationWaiter) IsRetryable(err error) bool { + {{- $.CustomTemplate $.ProductMetadata.OperationRetry false }} +} +{{- end }} + + +func create{{ $.ProductMetadata.Name }}Waiter(config *transport_tpg.Config, op map[string]interface{}, 
{{- if $.IncludeProjectForOperation }} project, {{- end }} activity, userAgent string) (*{{ $.ProductMetadata.Name }}OperationWaiter, error) { + w := &{{ $.ProductMetadata.Name }}OperationWaiter{ + Config: config, + UserAgent: userAgent, +{{- if $.IncludeProjectForOperation }} + Project: project, +{{- end }} + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +{{/* Not all APIs will need a WithResponse operation, but it's hard to check whether + they will or not since it involves iterating over all resources. + Might as well just nolint it so we can pass the linter checks. +*/}} + +// nolint: deadcode,unused {{/* TODO: remove the comment */}} +func {{ camelize $.ProductMetadata.Name "upper" }}OperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{},{{- if $.IncludeProjectForOperation }} project,{{- end }} activity, userAgent string, timeout time.Duration) error { + w, err := create{{ $.ProductMetadata.Name }}Waiter(config, op, {{- if $.IncludeProjectForOperation }} project, {{ end }} activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + rawResponse := []byte(w.CommonOperationWaiter.Op.Response) + if len(rawResponse) == 0 { + return errors.New("`resource` not set in operation response") + } + return json.Unmarshal(rawResponse, response) +} + +func {{ camelize $.ProductMetadata.Name "upper" }}OperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, {{- if $.IncludeProjectForOperation }} project,{{- end }} activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. 
+ return nil + } + w, err := create{{ $.ProductMetadata.Name }}Waiter(config, op, {{- if $.IncludeProjectForOperation }} project, {{ end }} activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} From 8d4af997ff99e9e9c8cdd25909e3068c9fcdb492 Mon Sep 17 00:00:00 2001 From: dfdossantos Date: Tue, 4 Jun 2024 22:59:53 +0000 Subject: [PATCH 062/356] Add support for advanced_machine_features in `google_compute_instance` to TGC (#10848) --- mmv1/third_party/tgc/compute_instance.go.erb | 1 + .../example_compute_instance_iam_binding.json | 134 +++---- .../example_compute_instance_iam_member.json | 133 +++---- .../example_compute_instance_iam_policy.json | 133 +++---- .../tgc/tests/data/full_compute_instance.json | 333 +++++++++--------- mmv1/third_party/tgc/tests/data/instance.json | 142 ++++---- mmv1/third_party/tgc/tests/data/instance.tf | 11 +- 7 files changed, 449 insertions(+), 438 deletions(-) diff --git a/mmv1/third_party/tgc/compute_instance.go.erb b/mmv1/third_party/tgc/compute_instance.go.erb index 1bcd136acc8a..b82ee4841032 100644 --- a/mmv1/third_party/tgc/compute_instance.go.erb +++ b/mmv1/third_party/tgc/compute_instance.go.erb @@ -170,6 +170,7 @@ func expandComputeInstance(project string, d tpgresource.TerraformResourceData, ForceSendFields: []string{"CanIpForward", "DeletionProtection"}, ShieldedInstanceConfig: expandShieldedVmConfigs(d), DisplayDevice: expandDisplayDevice(d), + AdvancedMachineFeatures: expandAdvancedMachineFeatures(d), }, nil } diff --git a/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_binding.json b/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_binding.json index 9d20c2b308dc..f61b38050481 100644 --- a/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_binding.json +++ b/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_binding.json @@ -1,76 +1,76 @@ [ - { - "name": 
"//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test", - "asset_type": "compute.googleapis.com/Instance", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", - "discovery_name": "Instance", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "canIpForward": false, - "deletionProtection": false, - "disks": [ - { - "autoDelete": true, - "boot": true, - "initializeParams": { - "sourceImage": "projects/debian-cloud/global/images/debian-11" - }, - "mode": "READ_WRITE" - }, - { - "autoDelete": true, - "initializeParams": { - "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test", + "asset_type": "compute.googleapis.com/Instance", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "Instance", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "canIpForward": false, + "deletionProtection": false, + "disks": [ + { + "autoDelete": true, + "boot": true, + "initializeParams": { + "sourceImage": "projects/debian-cloud/global/images/debian-11" + }, + "mode": "READ_WRITE" }, - "interface": "SCSI", - "type": "SCRATCH" - } - ], - "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", - "metadata": { - "items": [ { - "key": "foo", - "value": "bar" + "autoDelete": true, + "initializeParams": { + "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + }, + "interface": "SCSI", + "type": "SCRATCH" } - ] - }, - "name": "test", - "networkInterfaces": [ - { 
- "accessConfigs": [ + ], + "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "items": [ { - "type": "ONE_TO_ONE_NAT" + "key": "foo", + "value": "bar" } - ], - "network": "projects/{{.Provider.project}}/global/networks/default" + ] + }, + "name": "test", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "type": "ONE_TO_ONE_NAT" + } + ], + "network": "projects/{{.Provider.project}}/global/networks/default" + } + ], + "scheduling": { + "automaticRestart": true + }, + "tags": { + "items": [ + "bar", + "foo" + ] + }, + "zone": "us-central1-a" + } + }, + "iam_policy": { + "bindings": [ + { + "role": "roles/compute.osLogin", + "members": [ + "user:example-b@google.com", + "user:example-a@google.com" + ] } - ], - "scheduling": { - "automaticRestart": true - }, - "tags": { - "items": [ - "bar", - "foo" - ] - }, - "zone": "us-central1-a" + ] } - }, - "iam_policy": { - "bindings": [ - { - "role": "roles/compute.osLogin", - "members": [ - "user:example-b@google.com", - "user:example-a@google.com" - ] - } - ] } - } -] + ] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_member.json b/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_member.json index ece286c7d39e..fb47cf37cfb5 100644 --- a/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_member.json +++ b/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_member.json @@ -1,75 +1,76 @@ [ - { - "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test", - "asset_type": "compute.googleapis.com/Instance", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", - "discovery_name": "Instance", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - 
"canIpForward": false, - "deletionProtection": false, - "disks": [ - { - "autoDelete": true, - "boot": true, - "initializeParams": { - "sourceImage": "projects/debian-cloud/global/images/debian-11" - }, - "mode": "READ_WRITE" - }, - { - "autoDelete": true, - "initializeParams": { - "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test", + "asset_type": "compute.googleapis.com/Instance", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "Instance", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "canIpForward": false, + "deletionProtection": false, + "disks": [ + { + "autoDelete": true, + "boot": true, + "initializeParams": { + "sourceImage": "projects/debian-cloud/global/images/debian-11" + }, + "mode": "READ_WRITE" }, - "interface": "SCSI", - "type": "SCRATCH" - } - ], - "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", - "metadata": { - "items": [ { - "key": "foo", - "value": "bar" + "autoDelete": true, + "initializeParams": { + "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + }, + "interface": "SCSI", + "type": "SCRATCH" } - ] - }, - "name": "test", - "networkInterfaces": [ - { - "accessConfigs": [ + ], + "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "items": [ { - "type": "ONE_TO_ONE_NAT" + "key": "foo", + "value": "bar" } - ], - "network": "projects/{{.Provider.project}}/global/networks/default" + ] + }, + "name": "test", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "type": "ONE_TO_ONE_NAT" + } + ], + "network": 
"projects/{{.Provider.project}}/global/networks/default" + } + ], + "scheduling": { + "automaticRestart": true + }, + "tags": { + "items": [ + "bar", + "foo" + ] + }, + "zone": "us-central1-a" + } + }, + "iam_policy": { + "bindings": [ + { + "role": "roles/compute.osLogin", + "members": [ + "user:example-a@google.com" + ] } - ], - "scheduling": { - "automaticRestart": true - }, - "tags": { - "items": [ - "bar", - "foo" - ] - }, - "zone": "us-central1-a" + ] } - }, - "iam_policy": { - "bindings": [ - { - "role": "roles/compute.osLogin", - "members": [ - "user:example-a@google.com" - ] - } - ] } - } -] + ] + \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_policy.json b/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_policy.json index 2c9c9e211037..dc0b6de60e4d 100644 --- a/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_policy.json +++ b/mmv1/third_party/tgc/tests/data/example_compute_instance_iam_policy.json @@ -1,75 +1,76 @@ [ - { - "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test", - "asset_type": "compute.googleapis.com/Instance", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", - "discovery_name": "Instance", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "canIpForward": false, - "deletionProtection": false, - "disks": [ - { - "autoDelete": true, - "boot": true, - "initializeParams": { - "sourceImage": "projects/debian-cloud/global/images/debian-11" - }, - "mode": "READ_WRITE" - }, - { - "autoDelete": true, - "initializeParams": { - "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test", + "asset_type": 
"compute.googleapis.com/Instance", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "Instance", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "canIpForward": false, + "deletionProtection": false, + "disks": [ + { + "autoDelete": true, + "boot": true, + "initializeParams": { + "sourceImage": "projects/debian-cloud/global/images/debian-11" + }, + "mode": "READ_WRITE" }, - "interface": "SCSI", - "type": "SCRATCH" - } - ], - "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", - "metadata": { - "items": [ { - "key": "foo", - "value": "bar" + "autoDelete": true, + "initializeParams": { + "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + }, + "interface": "SCSI", + "type": "SCRATCH" } - ] - }, - "name": "test", - "networkInterfaces": [ - { - "accessConfigs": [ + ], + "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "items": [ { - "type": "ONE_TO_ONE_NAT" + "key": "foo", + "value": "bar" } - ], - "network": "projects/{{.Provider.project}}/global/networks/default" + ] + }, + "name": "test", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "type": "ONE_TO_ONE_NAT" + } + ], + "network": "projects/{{.Provider.project}}/global/networks/default" + } + ], + "scheduling": { + "automaticRestart": true + }, + "tags": { + "items": [ + "bar", + "foo" + ] + }, + "zone": "us-central1-a" + } + }, + "iam_policy": { + "bindings": [ + { + "role": "roles/compute.osLogin", + "members": [ + "user:jane@example.com" + ] } - ], - "scheduling": { - "automaticRestart": true - }, - "tags": { - "items": [ - "bar", - "foo" - ] - }, - "zone": "us-central1-a" + ] } - }, - "iam_policy": { - "bindings": [ - { - "role": "roles/compute.osLogin", 
- "members": [ - "user:jane@example.com" - ] - } - ] } - } -] + ] + \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/full_compute_instance.json b/mmv1/third_party/tgc/tests/data/full_compute_instance.json index 689812cbc0d3..c85855a98e27 100644 --- a/mmv1/third_party/tgc/tests/data/full_compute_instance.json +++ b/mmv1/third_party/tgc/tests/data/full_compute_instance.json @@ -1,181 +1,182 @@ [ - { - "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test1", - "asset_type": "compute.googleapis.com/Instance", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", - "discovery_name": "Instance", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "canIpForward": true, - "shieldedInstanceConfig": { - "enableIntegrityMonitoring": true, - "enableSecureBoot": true, - "enableVtpm": true - }, - "deletionProtection": true, - "description": "test-description", - "disks": [ - { - "autoDelete": true, - "boot": true, - "deviceName": "test-device_name", - "diskEncryptionKey": { - "rawKey": "test-disk_encryption_key_raw" + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test1", + "asset_type": "compute.googleapis.com/Instance", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "Instance", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "canIpForward": true, + "shieldedInstanceConfig": { + "enableIntegrityMonitoring": true, + "enableSecureBoot": true, + "enableVtpm": true + }, + "deletionProtection": true, + "description": "test-description", + "disks": [ + { + "autoDelete": 
true, + "boot": true, + "deviceName": "test-device_name", + "diskEncryptionKey": { + "rawKey": "test-disk_encryption_key_raw" + }, + "initializeParams": { + "diskSizeGb": "42", + "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/pd-standard", + "sourceImage": "projects/debian-cloud/global/images/debian-11" + }, + "mode": "READ_WRITE", + "source": "projects/{{.Provider.project}}/zones/us-central1-a/disks/test-source" }, - "initializeParams": { - "diskSizeGb": "42", - "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/pd-standard", - "sourceImage": "projects/debian-cloud/global/images/debian-11" + { + "autoDelete": true, + "initializeParams": { + "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + }, + "interface": "SCSI", + "type": "SCRATCH" }, - "mode": "READ_WRITE", - "source": "projects/{{.Provider.project}}/zones/us-central1-a/disks/test-source" - }, - { - "autoDelete": true, - "initializeParams": { - "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + { + "autoDelete": true, + "initializeParams": { + "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + }, + "interface": "SCSI", + "type": "SCRATCH" }, - "interface": "SCSI", - "type": "SCRATCH" - }, - { - "autoDelete": true, - "initializeParams": { - "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + { + "deviceName": "test-device_name", + "diskEncryptionKey": { + "kmsKeyName": "test-kms_key_self_link" + }, + "mode": "READ_ONLY", + "source": "projects/{{.Provider.project}}/zones/us-central1-a/disks/test-source" }, - "interface": "SCSI", - "type": "SCRATCH" - }, - { - "deviceName": "test-device_name", - "diskEncryptionKey": { - "kmsKeyName": "test-kms_key_self_link" + { + "mode": "READ_WRITE", + "source": "projects/{{.Provider.project}}/zones/us-central1-a/disks/test-source2" + } + ], + "guestAccelerators": [ + { + 
"acceleratorCount": 42, + "acceleratorType": "projects/{{.Provider.project}}/zones/us-central1-a/acceleratorTypes/test-guest_accelerator-type1" }, - "mode": "READ_ONLY", - "source": "projects/{{.Provider.project}}/zones/us-central1-a/disks/test-source" - }, - { - "mode": "READ_WRITE", - "source": "projects/{{.Provider.project}}/zones/us-central1-a/disks/test-source2" - } - ], - "guestAccelerators": [ - { - "acceleratorCount": 42, - "acceleratorType": "projects/{{.Provider.project}}/zones/us-central1-a/acceleratorTypes/test-guest_accelerator-type1" - }, - { - "acceleratorCount": 42, - "acceleratorType": "projects/{{.Provider.project}}/zones/us-central1-a/acceleratorTypes/test-guest_accelerator-type2" - } - ], - "hostname": "test-hostname", - "labels": { - "label_foo1": "label-bar1" - }, - "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", - "metadata": { - "items": [ { - "key": "metadata_foo1", - "value": "metadata-bar1" + "acceleratorCount": 42, + "acceleratorType": "projects/{{.Provider.project}}/zones/us-central1-a/acceleratorTypes/test-guest_accelerator-type2" } - ] - }, - "minCpuPlatform": "test-min_cpu_platform", - "name": "test1", - "networkInterfaces": [ - { - "accessConfigs": [ - { - "natIP": "192.168.0.42", - "type": "ONE_TO_ONE_NAT" - }, - { - "networkTier": "STANDARD", - "type": "ONE_TO_ONE_NAT" - }, - { - "publicPtrDomainName": "test-public_ptr_domain_name", - "setPublicPtr": true, - "type": "ONE_TO_ONE_NAT" - } - ], - "aliasIpRanges": [ + ], + "hostname": "test-hostname", + "labels": { + "label_foo1": "label-bar1" + }, + "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "items": [ { - "ipCidrRange": "test-ip_cidr_range", - "subnetworkRangeName": "test-subnetwork_range_name" + "key": "metadata_foo1", + "value": "metadata-bar1" } - ], - "network": "projects/{{.Provider.project}}/global/networks/default", - "networkIP": "test-network_ip" + ] }, - { 
- "subnetwork": "projects/test-subnetwork_project/regions/us-central1/subnetworks/test-subnetwork" - } - ], - "scheduling": { - "automaticRestart": true, - "onHostMaintenance": "test-on_host_maintenance", - "preemptible": true - }, - "serviceAccounts": [ - { - "email": "test-email", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" + "minCpuPlatform": "test-min_cpu_platform", + "name": "test1", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "natIP": "192.168.0.42", + "type": "ONE_TO_ONE_NAT" + }, + { + "networkTier": "STANDARD", + "type": "ONE_TO_ONE_NAT" + }, + { + "publicPtrDomainName": "test-public_ptr_domain_name", + "setPublicPtr": true, + "type": "ONE_TO_ONE_NAT" + } + ], + "aliasIpRanges": [ + { + "ipCidrRange": "test-ip_cidr_range", + "subnetworkRangeName": "test-subnetwork_range_name" + } + ], + "network": "projects/{{.Provider.project}}/global/networks/default", + "networkIP": "test-network_ip" + }, + { + "subnetwork": "projects/test-subnetwork_project/regions/us-central1/subnetworks/test-subnetwork" + } + ], + "scheduling": { + "automaticRestart": true, + "onHostMaintenance": "test-on_host_maintenance", + "preemptible": true + }, + "serviceAccounts": [ + { + "email": "test-email", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + ], + "tags": { + "items": [ + "bar", + "foo" ] - } - ], - "tags": { - "items": [ - "bar", - "foo" - ] - }, - "zone": "us-central1-a" + }, + "zone": "us-central1-a" + } } - } - }, - { - "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test2", - "asset_type": "compute.googleapis.com/Instance", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", - "discovery_name": "Instance", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "canIpForward": false, - 
"deletionProtection": false, - "disks": [ - { - "autoDelete": true, - "boot": true, - "diskEncryptionKey": { - "kmsKeyName": "test-kms_key_self_link" - }, - "mode": "READ_WRITE" - } - ], - "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", - "metadata": {}, - "name": "test2", - "networkInterfaces": [ - { - "network": "projects/{{.Provider.project}}/global/networks/default" - } - ], - "scheduling": { - "automaticRestart": true - }, - "tags": {}, - "zone": "us-central1-a" + }, + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/test2", + "asset_type": "compute.googleapis.com/Instance", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "Instance", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "canIpForward": false, + "deletionProtection": false, + "disks": [ + { + "autoDelete": true, + "boot": true, + "diskEncryptionKey": { + "kmsKeyName": "test-kms_key_self_link" + }, + "mode": "READ_WRITE" + } + ], + "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": {}, + "name": "test2", + "networkInterfaces": [ + { + "network": "projects/{{.Provider.project}}/global/networks/default" + } + ], + "scheduling": { + "automaticRestart": true + }, + "tags": {}, + "zone": "us-central1-a" + } } } - } -] + ] + \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/instance.json b/mmv1/third_party/tgc/tests/data/instance.json index 516d64d85fbf..89a37ddcf007 100644 --- a/mmv1/third_party/tgc/tests/data/instance.json +++ b/mmv1/third_party/tgc/tests/data/instance.json @@ -1,73 +1,77 @@ [ - { - "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/my-instance", - 
"asset_type": "compute.googleapis.com/Instance", - "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", - "resource": { - "version": "v1", - "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", - "discovery_name": "Instance", - "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", - "data": { - "canIpForward": false, - "deletionProtection": false, - "disks": [ - { - "autoDelete": true, - "boot": true, - "initializeParams": { - "sourceImage": "projects/debian-cloud/global/images/debian-8-jessie-v20170523" - }, - "mode": "READ_WRITE" - }, - { - "autoDelete": true, - "initializeParams": { - "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" - }, - "interface": "SCSI", - "type": "SCRATCH" - } - ], - "machineType": "projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", - "metadata": { - "items": [ - { - "key": "startup-script", - "value": "echo hi > /test.txt" + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/zones/us-central1-a/instances/my-compute-instance", + "asset_type": "compute.googleapis.com/Instance", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "Instance", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "advancedMachineFeatures": { + "enableNestedVirtualization": true + }, + "canIpForward": false, + "deletionProtection": false, + "disks": [ + { + "autoDelete": true, + "boot": true, + "initializeParams": { + "sourceImage": "projects/debian-cloud/global/images/debian-8-jessie-v20170523" + }, + "mode": "READ_WRITE" + }, + { + "autoDelete": true, + "initializeParams": { + "diskType": "projects/{{.Provider.project}}/zones/us-central1-a/diskTypes/local-ssd" + }, + "interface": "SCSI", + "type": "SCRATCH" + } + ], + "machineType": 
"projects/{{.Provider.project}}/zones/us-central1-a/machineTypes/n1-standard-1", + "metadata": { + "items": [ + { + "key": "startup-script", + "value": "echo hi \u003e /test.txt" + } + ] + }, + "name": "my-compute-instance", + "networkInterfaces": [ + { + "accessConfigs": [ + { + "type": "ONE_TO_ONE_NAT" + } + ], + "network": "projects/{{.Provider.project}}/global/networks/default" + } + ], + "scheduling": { + "automaticRestart": true + }, + "serviceAccounts": [ + { + "email": "default", + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform" + ] + } + ], + "tags": { + "items": [ + "bar", + "foo" + ] + }, + "zone": "us-central1-a" } - ] }, - "name": "my-instance", - "networkInterfaces": [ - { - "accessConfigs": [ - { - "type": "ONE_TO_ONE_NAT" - } - ], - "network": "projects/{{.Provider.project}}/global/networks/default" - } - ], - "scheduling": { - "automaticRestart": true - }, - "serviceAccounts": [ - { - "email": "default", - "scopes": [ - "https://www.googleapis.com/auth/cloud-platform" - ] - } - ], - "tags": { - "items": [ - "bar", - "foo" - ] - }, - "zone": "us-central1-a" - } + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "ancestors": ["organizations/{{.OrgID}}"] } - } -] +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/instance.tf b/mmv1/third_party/tgc/tests/data/instance.tf index 924c97c1af71..b43c991c88a2 100644 --- a/mmv1/third_party/tgc/tests/data/instance.tf +++ b/mmv1/third_party/tgc/tests/data/instance.tf @@ -27,9 +27,8 @@ provider "google" { {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} } -resource "google_compute_instance" "my-test-instance" { - project = "{{.Provider.project}}" - name = "my-instance" +resource "google_compute_instance" "my-test-instance"{ + name = "my-compute-instance" machine_type = "n1-standard-1" zone = "us-central1-a" @@ -59,4 +58,8 @@ resource "google_compute_instance" "my-test-instance" { service_account { scopes = ["cloud-platform"] } 
-} + + advanced_machine_features { + enable_nested_virtualization = true + } +} \ No newline at end of file From 50ad40d6cf618247cab742c7d81e8b8dc8b8b118 Mon Sep 17 00:00:00 2001 From: Samir Ribeiro <42391123+Samir-Cit@users.noreply.github.com> Date: Tue, 4 Jun 2024 22:42:54 -0300 Subject: [PATCH 063/356] Add new resource "Service LB Policy" to the provider (#10730) --- mmv1/products/compute/BackendService.yaml | 5 + .../networkservices/ServiceLBPolicies.yaml | 116 ++++++++++++++++++ ...services_service_lb_policies_advanced.tmpl | 30 +++++ ...rk_services_service_lb_policies_basic.tmpl | 6 + ...rvices_service_lb_policies_advanced.tf.erb | 30 +++++ ..._services_service_lb_policies_basic.tf.erb | 6 + ...k_services_service_lb_policies_test.go.erb | 79 ++++++++++++ 7 files changed, 272 insertions(+) create mode 100644 mmv1/products/networkservices/ServiceLBPolicies.yaml create mode 100644 mmv1/templates/terraform/examples/go/network_services_service_lb_policies_advanced.tmpl create mode 100644 mmv1/templates/terraform/examples/go/network_services_service_lb_policies_basic.tmpl create mode 100644 mmv1/templates/terraform/examples/network_services_service_lb_policies_advanced.tf.erb create mode 100644 mmv1/templates/terraform/examples/network_services_service_lb_policies_basic.tf.erb create mode 100644 mmv1/third_party/terraform/services/networkservices/resource_network_services_service_lb_policies_test.go.erb diff --git a/mmv1/products/compute/BackendService.yaml b/mmv1/products/compute/BackendService.yaml index 042f90f8a2a7..d88fa9c1084c 100644 --- a/mmv1/products/compute/BackendService.yaml +++ b/mmv1/products/compute/BackendService.yaml @@ -1311,3 +1311,8 @@ properties: The default value is 1.0. default_value: 1.0 diff_suppress_func: 'suppressWhenDisabled' + - !ruby/object:Api::Type::String + name: 'serviceLbPolicy' + description: | + URL to networkservices.ServiceLbPolicy resource. 
+ Can only be set if load balancing scheme is EXTERNAL, EXTERNAL_MANAGED, INTERNAL_MANAGED or INTERNAL_SELF_MANAGED and the scope is global. diff --git a/mmv1/products/networkservices/ServiceLBPolicies.yaml b/mmv1/products/networkservices/ServiceLBPolicies.yaml new file mode 100644 index 000000000000..a26c0bb1cbf4 --- /dev/null +++ b/mmv1/products/networkservices/ServiceLBPolicies.yaml @@ -0,0 +1,116 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the 'License'); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an 'AS IS' BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'ServiceLbPolicies' +base_url: 'projects/{{project}}/locations/{{location}}/serviceLbPolicies' +min_version: beta +create_url: 'projects/{{project}}/locations/{{location}}/serviceLbPolicies?serviceLbPolicyId={{name}}' +self_link: 'projects/{{project}}/locations/{{location}}/serviceLbPolicies/{{name}}' +update_verb: :PATCH +update_mask: true +description: | + ServiceLbPolicy holds global load balancing and traffic distribution configuration that can be applied to a BackendService. 
+async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{{op_id}}' + wait_ms: 1000 + timeouts: !ruby/object:Api::Timeouts + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' +autogen_async: true +import_format: ['projects/{{project}}/locations/{{location}}/serviceLbPolicies/{{name}}'] +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'network_services_service_lb_policies_basic' + primary_resource_id: 'default' + vars: + resource_name: 'my-lb-policy' + - !ruby/object:Provider::Terraform::Examples + name: 'network_services_service_lb_policies_advanced' + primary_resource_id: 'default' + vars: + resource_name: 'my-lb-policy' + backend_name: 'my-lb-backend' +parameters: + - !ruby/object:Api::Type::String + name: 'name' + required: true + url_param_only: true + description: | + Name of the ServiceLbPolicy resource. It matches pattern `projects/{project}/locations/{location}/serviceLbPolicies/{service_lb_policy_name}`. + - !ruby/object:Api::Type::String + name: 'location' + required: true + url_param_only: true + description: | + The location of the service lb policy. +properties: + - !ruby/object:Api::Type::Time + name: 'createTime' + description: | + Time the ServiceLbPolicy was created in UTC. + output: true + - !ruby/object:Api::Type::Time + name: 'updateTime' + description: | + Time the ServiceLbPolicy was updated in UTC. + output: true + - !ruby/object:Api::Type::KeyValueLabels + name: 'labels' + description: 'Set of label tags associated with the ServiceLbPolicy resource.' + - !ruby/object:Api::Type::String + name: 'description' + description: | + A free-text description of the resource. Max length 1024 characters. 
+ - !ruby/object:Api::Type::Enum + name: 'loadBalancingAlgorithm' + description: | + The type of load balancing algorithm to be used. The default behavior is WATERFALL_BY_REGION. + values: + - :SPRAY_TO_REGION + - :SPRAY_TO_WORLD + - :WATERFALL_BY_REGION + - :WATERFALL_BY_ZONE + - !ruby/object:Api::Type::NestedObject + name: 'autoCapacityDrain' + description: | + Option to specify if an unhealthy MIG/NEG should be considered for global load balancing and traffic routing. + properties: + - !ruby/object:Api::Type::Boolean + name: 'enable' + description: + Optional. If set to 'True', an unhealthy MIG/NEG will be set as drained. - An MIG/NEG is considered unhealthy if less than 25% of the instances/endpoints in the MIG/NEG are healthy. - This option will never result in draining more than 50% of the configured IGs/NEGs for the Backend Service. + - !ruby/object:Api::Type::NestedObject + name: 'failoverConfig' + description: | + Option to specify health based failover behavior. This is not related to Network load balancer FailoverPolicy. + properties: + - !ruby/object:Api::Type::Integer + name: 'failoverHealthThreshold' + required: true + description: + Optional. The percentage threshold that a load balancer will begin to send traffic to failover backends. If the percentage of endpoints in a MIG/NEG is smaller than this value, traffic would be sent to failover backends if possible. This field should be set to a value between 1 and 99. The default value is 50 for Global external HTTP(S) load balancer (classic) and Proxyless service mesh, and 70 for others. 
diff --git a/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_advanced.tmpl b/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_advanced.tmpl new file mode 100644 index 000000000000..51680aa0e867 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_advanced.tmpl @@ -0,0 +1,30 @@ +resource "google_network_services_service_lb_policies" "{{$.PrimaryResourceId}}" { + provider = google-beta + + name = "{{index $.Vars "resource_name"}}" + location = "global" + description = "my description" + load_balancing_algorithm = "SPRAY_TO_REGION" + + auto_capacity_drain { + enable = true + } + + failover_config { + failover_health_threshold = 70 + } + + labels = { + foo = "bar" + } +} + +resource "google_compute_backend_service" "default" { + provider = google-beta + + name = "{{index $.Vars "backend_name"}}" + description = "my description" + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + protocol = "HTTP" + service_lb_policy = "//networkservices.googleapis.com/${google_network_services_service_lb_policies.{{$.PrimaryResourceId}}.id}" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_basic.tmpl b/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_basic.tmpl new file mode 100644 index 000000000000..db6ea6cd28e2 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_basic.tmpl @@ -0,0 +1,6 @@ +resource "google_network_services_service_lb_policies" "{{$.PrimaryResourceId}}" { + provider = google-beta + + name = "{{index $.Vars "resource_name"}}" + location = "global" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/network_services_service_lb_policies_advanced.tf.erb b/mmv1/templates/terraform/examples/network_services_service_lb_policies_advanced.tf.erb new file mode 100644 index 000000000000..6e91dd464af9 --- /dev/null +++ 
b/mmv1/templates/terraform/examples/network_services_service_lb_policies_advanced.tf.erb @@ -0,0 +1,30 @@ +resource "google_network_services_service_lb_policies" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + + name = "<%= ctx[:vars]['resource_name'] %>" + location = "global" + description = "my description" + load_balancing_algorithm = "SPRAY_TO_REGION" + + auto_capacity_drain { + enable = true + } + + failover_config { + failover_health_threshold = 70 + } + + labels = { + foo = "bar" + } +} + +resource "google_compute_backend_service" "default" { + provider = google-beta + + name = "<%= ctx[:vars]['backend_name'] %>" + description = "my description" + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + protocol = "HTTP" + service_lb_policy = "//networkservices.googleapis.com/${google_network_services_service_lb_policies.<%= ctx[:primary_resource_id] %>.id}" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/network_services_service_lb_policies_basic.tf.erb b/mmv1/templates/terraform/examples/network_services_service_lb_policies_basic.tf.erb new file mode 100644 index 000000000000..caa7e088e26c --- /dev/null +++ b/mmv1/templates/terraform/examples/network_services_service_lb_policies_basic.tf.erb @@ -0,0 +1,6 @@ +resource "google_network_services_service_lb_policies" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + + name = "<%= ctx[:vars]['resource_name'] %>" + location = "global" +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/networkservices/resource_network_services_service_lb_policies_test.go.erb b/mmv1/third_party/terraform/services/networkservices/resource_network_services_service_lb_policies_test.go.erb new file mode 100644 index 000000000000..939d627f99ed --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_service_lb_policies_test.go.erb @@ -0,0 +1,79 @@ +<% autogen_exception -%> +package networkservices_test +<% unless 
version == 'ga' -%> + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNetworkServicesLBPolicies_update(t *testing.T) { + t.Parallel() + + policyName := fmt.Sprintf("tf-test-lb-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesServiceLbPoliciesDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesLBPolicies_basic(policyName), + }, + { + ResourceName: "google_network_services_service_lb_policies.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesLBPolicies_update(policyName), + }, + { + ResourceName: "google_network_services_service_lb_policies.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesLBPolicies_basic(policyName string) string { + return fmt.Sprintf(` +resource "google_network_services_service_lb_policies" "foobar" { + name = "%s" + location = "global" + description = "my description" +} +`, policyName) +} + +func testAccNetworkServicesLBPolicies_update(policyName string) string { + return fmt.Sprintf(` +resource "google_network_services_service_lb_policies" "foobar" { + name = "%s" + location = "global" + description = "my description" + load_balancing_algorithm = "SPRAY_TO_REGION" + + auto_capacity_drain { + enable = true + } + + failover_config { + failover_health_threshold = 70 + } + + labels = { + foo = "bar" + } +} +`, policyName) +} + +<% end -%> From cc013404d5b6ccce53491e1d3e807df55855846b Mon Sep 17 00:00:00 2001 From: Rohit Ramkumar Date: Wed, 5 Jun 2024 11:45:29 
-0400 Subject: [PATCH 064/356] Add support for preventDrift field in ConfigManagement Fleet-level default config (#10809) Co-authored-by: Stephen Lewis (Burrows) --- mmv1/products/gkehub2/Feature.yaml | 3 +++ .../services/gkehub2/resource_gke_hub_feature_test.go.erb | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/mmv1/products/gkehub2/Feature.yaml b/mmv1/products/gkehub2/Feature.yaml index d4cca5e376cb..33d622d2ee0f 100644 --- a/mmv1/products/gkehub2/Feature.yaml +++ b/mmv1/products/gkehub2/Feature.yaml @@ -285,6 +285,9 @@ properties: - !ruby/object:Api::Type::String name: sourceFormat description: 'Specifies whether the Config Sync Repo is in hierarchical or unstructured mode' + - !ruby/object:Api::Type::Boolean + name: preventDrift + description: 'Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.' - !ruby/object:Api::Type::NestedObject name: git description: 'Git repo configuration for the cluster' diff --git a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb index 7c4dc908d2fa..faed586ab947 100644 --- a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb +++ b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb @@ -488,7 +488,8 @@ resource "google_gke_hub_feature" "feature" { fleet_default_member_config { configmanagement { version = "1.16.1" - config_sync { + config_sync { + prevent_drift = true source_format = "unstructured" oci { sync_repo = "us-central1-docker.pkg.dev/corp-gke-build-artifacts/acm/configs:latest" From c227f3fe6b2fc8c9bdae8b105f86fa82ef13399e Mon Sep 17 00:00:00 2001 From: Shruthi Rao <55725103+shruthi019@users.noreply.github.com> Date: Wed, 5 Jun 2024 21:47:25 +0530 Subject: [PATCH 065/356] Allow guest OS validation to be handled by GCE 
APIs (#10856) --- mmv1/products/compute/Disk.yaml | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) diff --git a/mmv1/products/compute/Disk.yaml b/mmv1/products/compute/Disk.yaml index c848552852f3..f8737babfc7d 100644 --- a/mmv1/products/compute/Disk.yaml +++ b/mmv1/products/compute/Disk.yaml @@ -484,24 +484,11 @@ properties: is_set: true item_type: !ruby/object:Api::Type::NestedObject properties: - - !ruby/object:Api::Type::Enum + - !ruby/object:Api::Type::String name: 'type' required: true description: | The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. - values: - - :MULTI_IP_SUBNET - - :SECURE_BOOT - - :SEV_CAPABLE - - :UEFI_COMPATIBLE - - :VIRTIO_SCSI_MULTIQUEUE - - :WINDOWS - - :GVNIC - - :SEV_LIVE_MIGRATABLE - - :SEV_SNP_CAPABLE - - :SUSPEND_RESUME_COMPATIBLE - - :TDX_CAPABLE - - :SEV_LIVE_MIGRATABLE_V2 - !ruby/object:Api::Type::Array name: 'licenses' description: Any applicable license URI. 
From d64895dd8b52ebb98b915297eecd80ef27617009 Mon Sep 17 00:00:00 2001 From: Mehul3217 <44620455+Mehul3217@users.noreply.github.com> Date: Wed, 5 Jun 2024 22:09:47 +0530 Subject: [PATCH 066/356] adding builtin administrators field in AD resource (#10842) --- mmv1/products/netapp/activeDirectory.yaml | 7 ++++++- .../terraform/examples/netapp_active_directory_full.tf.erb | 1 + .../netapp/resource_netapp_active_directory_test.go | 2 ++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/mmv1/products/netapp/activeDirectory.yaml b/mmv1/products/netapp/activeDirectory.yaml index ffe9a7042a42..5e43ef704a84 100644 --- a/mmv1/products/netapp/activeDirectory.yaml +++ b/mmv1/products/netapp/activeDirectory.yaml @@ -53,7 +53,6 @@ examples: - !ruby/object:Provider::Terraform::Examples name: 'netapp_active_directory_full' primary_resource_id: 'test_active_directory_full' - skip_test: true vars: active_directory_full_name: 'test-active-directory-full' properties: @@ -130,6 +129,12 @@ properties: Domain user/group accounts to be added to the Backup Operators group of the SMB service. The Backup Operators group allows members to backup and restore files regardless of whether they have read or write access to the files. Comma-separated list. required: false item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'administrators' + description: | + Domain user accounts to be added to the local Administrators group of the SMB service. Comma-separated list of domain users or groups. The Domain Admin group is automatically added when the service joins your domain as a hidden group. 
+ required: false + item_type: Api::Type::String - !ruby/object:Api::Type::Array name: 'securityOperators' description: | diff --git a/mmv1/templates/terraform/examples/netapp_active_directory_full.tf.erb b/mmv1/templates/terraform/examples/netapp_active_directory_full.tf.erb index 4c30d9811bcb..7c60647b53a9 100644 --- a/mmv1/templates/terraform/examples/netapp_active_directory_full.tf.erb +++ b/mmv1/templates/terraform/examples/netapp_active_directory_full.tf.erb @@ -8,6 +8,7 @@ resource "google_netapp_active_directory" "<%= ctx[:primary_resource_id] %>" { password = "pass" aes_encryption = false backup_operators = ["test1", "test2"] + administrators = ["test1", "test2"] description = "ActiveDirectory is the public representation of the active directory config." encrypt_dc_connections = false kdc_hostname = "hostname" diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_test.go index f1a5c99d974d..3bc163628ad5 100644 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_test.go +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_active_directory_test.go @@ -53,6 +53,7 @@ resource "google_netapp_active_directory" "test_active_directory_full" { password = "pass" aes_encryption = false backup_operators = ["test1", "test2"] + administrators = ["test1", "test2"] description = "ActiveDirectory is the public representation of the active directory config." encrypt_dc_connections = false kdc_hostname = "hostname" @@ -81,6 +82,7 @@ resource "google_netapp_active_directory" "test_active_directory_full" { password = "pass" aes_encryption = false backup_operators = ["test1", "test2"] + administrators = ["test1", "test2"] description = "ActiveDirectory is the public representation of the active directory config." 
encrypt_dc_connections = false kdc_hostname = "hostname" From 6888073642618b5c67a2d8045e7477a0304d5e3f Mon Sep 17 00:00:00 2001 From: DDDDarrenWB <46464119+DDDDarrenWB@users.noreply.github.com> Date: Wed, 5 Jun 2024 10:22:05 -0700 Subject: [PATCH 067/356] Add Enterprise to security_posture_config.mode (#10852) --- .../resource_container_cluster.go.erb | 4 +-- ...esource_container_cluster_migratev1.go.erb | 4 +-- .../resource_container_cluster_test.go.erb | 25 +++++++++++++++++++ .../docs/r/container_cluster.html.markdown | 4 ++- 4 files changed, 32 insertions(+), 5 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index 495a5e546d5e..dcc2d64cca07 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -1161,8 +1161,8 @@ func ResourceContainerCluster() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: validation.StringInSlice([]string{"DISABLED", "BASIC", "MODE_UNSPECIFIED"}, false), - Description: `Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include DISABLED and BASIC.`, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "BASIC", "ENTERPRISE", "MODE_UNSPECIFIED"}, false), + Description: `Sets the mode of the Kubernetes security posture API's off-cluster features. 
Available options include DISABLED, BASIC, and ENTERPRISE.`, DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("MODE_UNSPECIFIED"), }, "vulnerability_mode": { diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb index c243462ed640..f1e76dc1b3cd 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb @@ -931,8 +931,8 @@ func resourceContainerClusterResourceV1() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, - ValidateFunc: validation.StringInSlice([]string{"DISABLED", "BASIC", "MODE_UNSPECIFIED"}, false), - Description: `Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include DISABLED and BASIC.`, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "BASIC", "ENTERPRISE", "MODE_UNSPECIFIED"}, false), + Description: `Sets the mode of the Kubernetes security posture API's off-cluster features. 
Available options include DISABLED, BASIC, and ENTERPRISE.`, DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("MODE_UNSPECIFIED"), }, "vulnerability_mode": { diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb index e8cad2c7d0e0..d405dc7daf90 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb @@ -4617,6 +4617,15 @@ func TestAccContainerCluster_withSecurityPostureConfig(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"deletion_protection"}, }, + { + Config: testAccContainerCluster_SetSecurityPostureToEnterprise(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_security_posture_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, { Config: testAccContainerCluster_SetWorkloadVulnerabilityToStandard(clusterName, networkName, subnetworkName), }, @@ -4816,6 +4825,22 @@ resource "google_container_cluster" "with_security_posture_config" { `, resource_name, networkName, subnetworkName) } +func testAccContainerCluster_SetSecurityPostureToEnterprise(resource_name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_security_posture_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + security_posture_config { + mode = "ENTERPRISE" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, resource_name, networkName, subnetworkName) +} + func testAccContainerCluster_SetWorkloadVulnerabilityToStandard(resource_name, networkName, subnetworkName string) string { return fmt.Sprintf(` resource "google_container_cluster" "with_security_posture_config" { diff 
--git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 94443c5c1d4a..70dfa529be9d 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -1335,7 +1335,9 @@ linux_node_config { The `security_posture_config` block supports: -* `mode` - (Optional) Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include `DISABLED` and `BASIC`. +**Note:** `ENTERPRISE` and `VULNERABILITY_ENTERPRISE` are only available for [GKE Enterprise](http://cloud/kubernetes-engine/enterprise/docs/concepts/overview) projects. + +* `mode` - (Optional) Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include `DISABLED`, `BASIC`, and `ENTERPRISE`. * `vulnerability_mode` - (Optional) Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. Available options include `VULNERABILITY_DISABLED`, `VULNERABILITY_BASIC` and `VULNERABILITY_ENTERPRISE`. 
From 3f55f4031cc89504c577dabf4caf7bbc0ff75ec3 Mon Sep 17 00:00:00 2001 From: ma-g-22 <123424520+ma-g-22@users.noreply.github.com> Date: Wed, 5 Jun 2024 16:12:42 -0400 Subject: [PATCH 068/356] AB (#10674) Co-authored-by: Thomas Rodgers --- mmv1/third_party/terraform/go.mod | 34 ++-- mmv1/third_party/terraform/go.mod.erb | 34 ++-- mmv1/third_party/terraform/go.sum | 72 ++++---- .../bigtable/resource_bigtable_table.go | 114 +++++++++++++ .../bigtable/resource_bigtable_table_test.go | 161 ++++++++++++++++++ 5 files changed, 345 insertions(+), 70 deletions(-) diff --git a/mmv1/third_party/terraform/go.mod b/mmv1/third_party/terraform/go.mod index 8fa4bcaf42e6..491855828e38 100644 --- a/mmv1/third_party/terraform/go.mod +++ b/mmv1/third_party/terraform/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-google go 1.21 require ( - cloud.google.com/go/bigtable v1.23.0 + cloud.google.com/go/bigtable v1.24.0 github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 @@ -25,23 +25,24 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/hashstructure v1.1.0 github.com/sirupsen/logrus v1.8.1 + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 - golang.org/x/net v0.24.0 - golang.org/x/oauth2 v0.19.0 - google.golang.org/api v0.177.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 + golang.org/x/net v0.25.0 + golang.org/x/oauth2 v0.20.0 + google.golang.org/api v0.180.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 google.golang.org/grpc v1.63.2 - google.golang.org/protobuf v1.34.0 + google.golang.org/protobuf v1.34.1 ) require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.112.2 // indirect - cloud.google.com/go/auth v0.3.0 // indirect + cloud.google.com/go v0.113.0 // indirect + cloud.google.com/go/auth 
v0.4.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.7 // indirect - cloud.google.com/go/longrunning v0.5.6 // indirect + cloud.google.com/go/iam v1.1.8 // indirect + cloud.google.com/go/longrunning v0.5.7 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -64,7 +65,7 @@ require ( github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.3 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-plugin v1.6.0 // indirect @@ -97,15 +98,14 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect - golang.org/x/crypto v0.22.0 // indirect + golang.org/x/crypto v0.23.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be // indirect + google.golang.org/genproto v0.0.0-20240429193739-8cf5692501f6 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git 
a/mmv1/third_party/terraform/go.mod.erb b/mmv1/third_party/terraform/go.mod.erb index 0e4b61503874..1e6bde117210 100644 --- a/mmv1/third_party/terraform/go.mod.erb +++ b/mmv1/third_party/terraform/go.mod.erb @@ -4,7 +4,7 @@ module github.com/hashicorp/terraform-provider-google go 1.21 require ( - cloud.google.com/go/bigtable v1.23.0 + cloud.google.com/go/bigtable v1.24.0 github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 @@ -26,23 +26,24 @@ require ( github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/hashstructure v1.1.0 github.com/sirupsen/logrus v1.8.1 + go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 - golang.org/x/net v0.24.0 - golang.org/x/oauth2 v0.19.0 - google.golang.org/api v0.177.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 + golang.org/x/net v0.25.0 + golang.org/x/oauth2 v0.20.0 + google.golang.org/api v0.180.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 google.golang.org/grpc v1.63.2 - google.golang.org/protobuf v1.34.0 + google.golang.org/protobuf v1.34.1 ) require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.112.2 // indirect - cloud.google.com/go/auth v0.3.0 // indirect + cloud.google.com/go v0.113.0 // indirect + cloud.google.com/go/auth v0.4.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect - cloud.google.com/go/iam v1.1.7 // indirect - cloud.google.com/go/longrunning v0.5.6 // indirect + cloud.google.com/go/iam v1.1.8 // indirect + cloud.google.com/go/longrunning v0.5.7 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -65,7 +66,7 @@ require ( github.com/google/s2a-go v0.1.7 // 
indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.3 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-plugin v1.6.0 // indirect @@ -98,15 +99,14 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect - golang.org/x/crypto v0.22.0 // indirect + golang.org/x/crypto v0.23.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be // indirect + google.golang.org/genproto v0.0.0-20240429193739-8cf5692501f6 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/mmv1/third_party/terraform/go.sum b/mmv1/third_party/terraform/go.sum index 029cf013033d..cd2fce52ae52 100644 --- a/mmv1/third_party/terraform/go.sum +++ b/mmv1/third_party/terraform/go.sum @@ -1,23 +1,25 @@ bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M= bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= -cloud.google.com/go 
v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= -cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs= -cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w= +cloud.google.com/go v0.113.0 h1:g3C70mn3lWfckKBiCVsAshabrDg01pQ0pnX1MNtnMkA= +cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8= +cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg= +cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/bigtable v1.23.0 h1:ufk3XFeq5ZmFmkTZrWiCEMjn9kefKbHT8LVsrX+iqqc= -cloud.google.com/go/bigtable v1.23.0/go.mod h1:WWRrYMBZpmHUO76ccwN7lx681FdyUWbIK2B4DDly0P4= +cloud.google.com/go/bigtable v1.24.0 h1:RtBERIoZZsQm3LUExDGFWgOwMEHCO04O9/pDA0KoAZI= +cloud.google.com/go/bigtable v1.24.0/go.mod h1:NlsITD7sKXo97kKIfF83ROd6P1bw8J4zsAUUYqk167Q= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= -cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= -cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= -cloud.google.com/go/longrunning v0.5.6 h1:xAe8+0YaWoCKr9t1+aWe+OeQgN/iJK1fEgZSXmjuEaE= -cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn9GqyjaBT8/mA= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/longrunning v0.5.7/go.mod 
h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 h1:FBKsgWIOEdtpx2YuF+aBH33K0Ih25D3xuKyp9peH4jc= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= @@ -131,8 +133,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= @@ -291,8 +293,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 h1:ESSUROHIBHg7USnszlcdmjBEwdMj9VUvU+OPk4yl2mc= golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= @@ -315,11 +317,11 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.19.0 h1:9+E/EZBCbTLNrbN35fHv/a/d/mOBatymz1zbtQrXpIg= -golang.org/x/oauth2 v0.19.0/go.mod h1:vYi7skDa1x015PmRRYZ7+s1cWyPgrPiSYRe4rnsexc8= +golang.org/x/oauth2 v0.20.0 
h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -345,19 +347,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod 
h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -375,8 +377,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk= -google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw= +google.golang.org/api v0.180.0 h1:M2D87Yo0rGBPWpo1orwfCLehUUL6E7/TYe5gvMQWDh4= +google.golang.org/api v0.180.0/go.mod h1:51AiyoEg1MJPSZ9zvklA8VnRILPXxn1iVen9v25XHAE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -385,12 +387,12 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto 
v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= -google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be h1:Zz7rLWqp0ApfsR/l7+zSHhY3PMiH2xqgxlfYfAfNpoU= -google.golang.org/genproto/googleapis/api v0.0.0-20240415180920-8c6c420018be/go.mod h1:dvdCTIoAGbkWbcIKBniID56/7XHTt6WfxXNMxuziJ+w= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 h1:DujSIu+2tC9Ht0aPNA7jgj23Iq8Ewi5sgkQ++wdvonE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240429193739-8cf5692501f6 h1:MTmrc2F5TZKDKXigcZetYkH04YwqtOPEQJwh4PPOgfk= +google.golang.org/genproto v0.0.0-20240429193739-8cf5692501f6/go.mod h1:2ROWwqCIx97Y7CSyp11xB8fori0wzvD6+gbacaf5c8I= +google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= +google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -410,8 +412,8 @@ 
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= -google.golang.org/protobuf v1.34.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -428,5 +430,3 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 h1:FBKsgWIOEdtpx2YuF+aBH33K0Ih25D3xuKyp9peH4jc= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_table.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_table.go index 22dfdb88c53f..58cb6c0f5c31 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_table.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_table.go 
@@ -103,6 +103,31 @@ func ResourceBigtableTable() *schema.Resource { ValidateFunc: verify.ValidateDuration(), Description: `Duration to retain change stream data for the table. Set to 0 to disable. Must be between 1 and 7 days.`, }, + + "automated_backup_policy": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retention_period": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: verify.ValidateDuration(), + Description: `How long the automated backups should be retained.`, + }, + "frequency": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: verify.ValidateDuration(), + Description: `How frequently automated backups should occur.`, + }, + }, + }, + Description: `Defines an automated backup policy for a table, specified by Retention Period and Frequency. To disable, set both Retention Period and Frequency to 0.`, + }, }, UseJSONNumber: true, } @@ -152,6 +177,35 @@ func resourceBigtableTableCreate(d *schema.ResourceData, meta interface{}) error } } + if automatedBackupPolicyField, ok := d.GetOk("automated_backup_policy"); ok { + automatedBackupPolicyElements := automatedBackupPolicyField.(*schema.Set).List() + if len(automatedBackupPolicyElements) == 0 { + return fmt.Errorf("Incomplete automated_backup_policy") + } else { + automatedBackupPolicy := automatedBackupPolicyElements[0].(map[string]interface{}) + abpRetentionPeriodField, retentionPeriodExists := automatedBackupPolicy["retention_period"] + if !retentionPeriodExists { + return fmt.Errorf("Automated backup policy retention period must be specified") + } + abpFrequencyField, frequencyExists := automatedBackupPolicy["frequency"] + if !frequencyExists { + return fmt.Errorf("Automated backup policy frequency must be specified") + } + abpRetentionPeriod, err := ParseDuration(abpRetentionPeriodField.(string)) + if err != nil { + return fmt.Errorf("Error parsing automated backup 
policy retention period: %s", err) + } + abpFrequency, err := ParseDuration(abpFrequencyField.(string)) + if err != nil { + return fmt.Errorf("Error parsing automated backup policy frequency: %s", err) + } + tblConf.AutomatedBackupConfig = &bigtable.TableAutomatedBackupPolicy{ + RetentionPeriod: abpRetentionPeriod, + Frequency: abpFrequency, + } + } + } + // Set the split keys if given. if v, ok := d.GetOk("split_keys"); ok { tblConf.SplitKeys = tpgresource.ConvertStringArr(v.([]interface{})) @@ -251,6 +305,26 @@ func resourceBigtableTableRead(d *schema.ResourceData, meta interface{}) error { } } + if table.AutomatedBackupConfig != nil { + switch automatedBackupConfig := table.AutomatedBackupConfig.(type) { + case *bigtable.TableAutomatedBackupPolicy: + var tableAbp bigtable.TableAutomatedBackupPolicy = *automatedBackupConfig + abpRetentionPeriod := tableAbp.RetentionPeriod.(time.Duration).String() + abpFrequency := tableAbp.Frequency.(time.Duration).String() + abp := []interface{}{ + map[string]interface{}{ + "retention_period": abpRetentionPeriod, + "frequency": abpFrequency, + }, + } + if err := d.Set("automated_backup_policy", abp); err != nil { + return fmt.Errorf("Error setting automated_backup_policy: %s", err) + } + default: + return fmt.Errorf("error: Unknown type of automated backup configuration") + } + } + return nil } @@ -335,6 +409,46 @@ func resourceBigtableTableUpdate(d *schema.ResourceData, meta interface{}) error } } + if d.HasChange("automated_backup_policy") { + automatedBackupPolicyField := d.Get("automated_backup_policy").(*schema.Set) + automatedBackupPolicyElements := automatedBackupPolicyField.List() + if len(automatedBackupPolicyElements) == 0 { + return fmt.Errorf("Incomplete automated_backup_policy") + } + automatedBackupPolicy := automatedBackupPolicyElements[0].(map[string]interface{}) + abp := bigtable.TableAutomatedBackupPolicy{} + + abpRetentionPeriodField, retentionPeriodExists := automatedBackupPolicy["retention_period"] + if 
retentionPeriodExists && abpRetentionPeriodField != "" { + abpRetentionPeriod, err := ParseDuration(abpRetentionPeriodField.(string)) + if err != nil { + return fmt.Errorf("Error parsing automated backup policy retention period: %s", err) + } + abp.RetentionPeriod = abpRetentionPeriod + } + + abpFrequencyField, frequencyExists := automatedBackupPolicy["frequency"] + if frequencyExists && abpFrequencyField != "" { + abpFrequency, err := ParseDuration(abpFrequencyField.(string)) + if err != nil { + return fmt.Errorf("Error parsing automated backup policy frequency: %s", err) + } + abp.Frequency = abpFrequency + } + + if abp.RetentionPeriod != nil && abp.RetentionPeriod.(time.Duration) == 0 && abp.Frequency != nil && abp.Frequency.(time.Duration) == 0 { + // Disable Automated Backups + if err := c.UpdateTableDisableAutomatedBackupPolicy(ctxWithTimeout, name); err != nil { + return fmt.Errorf("Error disabling automated backup configuration on table %v: %s", name, err) + } + } else { + // Update Automated Backups config + if err := c.UpdateTableWithAutomatedBackupPolicy(ctxWithTimeout, name, abp); err != nil { + return fmt.Errorf("Error updating automated backup configuration on table %v: %s", name, err) + } + } + } + return resourceBigtableTableRead(d, meta) } diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_table_test.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_table_test.go index 6f462cd7cd40..f42fc570c1d3 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_table_test.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_table_test.go @@ -272,6 +272,84 @@ func TestAccBigtableTable_change_stream_enable(t *testing.T) { }) } +func TestAccBigtableTable_automated_backups_enable(t *testing.T) { + // bigtable instance does not use the shared HTTP client, this test creates an instance + acctest.SkipIfVcr(t) + t.Parallel() + + instanceName := fmt.Sprintf("tf-test-%s", 
acctest.RandString(t, 10)) + tableName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + family := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigtableTableDestroyProducer(t), + Steps: []resource.TestStep{ + // Creating a table with automated backups enabled + { + Config: testAccBigtableTable_automated_backups(instanceName, tableName, "72h0m0s", "24h0m0s", family), + }, + { + ResourceName: "google_bigtable_table.table", + ImportState: true, + ImportStateVerify: true, + }, + // Changing automated backup retention period value + { + Config: testAccBigtableTable_automated_backups(instanceName, tableName, "72h0m0s", "", family), + }, + { + ResourceName: "google_bigtable_table.table", + ImportState: true, + ImportStateVerify: true, + }, + // Changing automated backup frequency value + { + Config: testAccBigtableTable_automated_backups(instanceName, tableName, "", "24h0m0s", family), + }, + { + ResourceName: "google_bigtable_table.table", + ImportState: true, + ImportStateVerify: true, + }, + // Changing both automated backup retention period and frequency values + { + Config: testAccBigtableTable_automated_backups(instanceName, tableName, "72h0m0s", "24h0m0s", family), + }, + { + ResourceName: "google_bigtable_table.table", + ImportState: true, + ImportStateVerify: true, + }, + // Disable automated backups + { + Config: testAccBigtableTable_automated_backups(instanceName, tableName, "0", "0", family), + Check: resource.ComposeTestCheckFunc(verifyBigtableAutomatedBackupsDisabled(t)), + }, + // Renable automated backups + { + Config: testAccBigtableTable_automated_backups(instanceName, tableName, "72h0m0s", "24h0m0s", family), + }, + { + ResourceName: "google_bigtable_table.table", + ImportState: true, + ImportStateVerify: true, + }, + // it is possible to delete the table 
when automated backups is enabled + { + Config: testAccBigtableTable_destroyTable(instanceName), + }, + { + ResourceName: "google_bigtable_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "instance_type"}, + }, + }, + }) +} + func TestAccBigtableTable_familyMany(t *testing.T) { // bigtable instance does not use the shared HTTP client, this test creates an instance acctest.SkipIfVcr(t) @@ -418,6 +496,34 @@ func testAccBigtableChangeStreamDisabled(t *testing.T) resource.TestCheckFunc { } } +func verifyBigtableAutomatedBackupsDisabled(t *testing.T) resource.TestCheckFunc { + var ctx = context.Background() + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources["google_bigtable_table.table"] + if !ok { + return fmt.Errorf("Table not found: %s", "google_bigtable_table.table") + } + + config := acctest.GoogleProviderConfig(t) + c, err := config.BigTableClientFactory(config.UserAgent).NewAdminClient(config.Project, rs.Primary.Attributes["instance_name"]) + if err != nil { + return fmt.Errorf("Error starting admin client. %s", err) + } + + defer c.Close() + + table, err := c.TableInfo(ctx, rs.Primary.Attributes["name"]) + if err != nil { + return fmt.Errorf("Error retrieving table. 
Could not find %s in %s.", rs.Primary.Attributes["name"], rs.Primary.Attributes["instance_name"]) + } + if table.AutomatedBackupConfig != nil { + return fmt.Errorf("Automated Backups are expected to be disabled but they are not: %v", table) + } + + return nil + } +} + func testAccBigtableTable(instanceName, tableName string) string { return fmt.Sprintf(` resource "google_bigtable_instance" "instance" { @@ -536,6 +642,61 @@ resource "google_bigtable_table" "table" { `, instanceName, instanceName, tableName, changeStreamRetention, family) } +func testAccBigtableTable_automated_backups(instanceName, tableName, automatedBackupsRetentionPeriod, automatedBackupsFrequency, family string) string { + var retentionPeriod string + if automatedBackupsRetentionPeriod != "" { + retentionPeriod = fmt.Sprintf(`retention_period = "%s"`, automatedBackupsRetentionPeriod) + } + var frequency string + if automatedBackupsFrequency != "" { + frequency = fmt.Sprintf(`frequency = "%s"`, automatedBackupsFrequency) + } + config := fmt.Sprintf(` +resource "google_bigtable_instance" "instance" { + name = "%s" + cluster { + cluster_id = "%s" + zone = "us-central1-b" + } + instance_type = "DEVELOPMENT" + deletion_protection = false +} +resource "google_bigtable_table" "table" { + name = "%s" + instance_name = google_bigtable_instance.instance.name + automated_backup_policy { + %s + %s + } + column_family { + family = "%s" + } +} +`, instanceName, instanceName, tableName, retentionPeriod, frequency, family) + return config +} + +func testAccBigtableTable_disable_automated_backups(instanceName, tableName, family string) string { + return fmt.Sprintf(` +resource "google_bigtable_instance" "instance" { + name = "%s" + cluster { + cluster_id = "%s" + zone = "us-central1-b" + } + instance_type = "DEVELOPMENT" + deletion_protection = false +} +resource "google_bigtable_table" "table" { + name = "%s" + instance_name = google_bigtable_instance.instance.name + column_family { + family = "%s" + } +} +`, 
instanceName, instanceName, tableName, family) +} + func testAccBigtableTable_familyMany(instanceName, tableName, family string) string { return fmt.Sprintf(` resource "google_bigtable_instance" "instance" { From 575c4127def38687d60e6aa13a310ed971b3bf9c Mon Sep 17 00:00:00 2001 From: Iris Chen <10179943+iyabchen@users.noreply.github.com> Date: Wed, 5 Jun 2024 14:12:43 -0700 Subject: [PATCH 069/356] chore(ci): jsonify missing tests and move template into DIFF_COMMIT.md (#10816) --- .ci/magician/cmd/DIFF_COMMENT.md | 17 ++++++++++- .ci/magician/cmd/generate_comment.go | 19 ++++++++---- .ci/magician/cmd/generate_comment_test.go | 9 ++++-- .ci/magician/cmd/mock_runner_test.go | 2 +- .../cmd/detect_missing_tests.go | 29 ++++--------------- tools/diff-processor/missing_test_output.tmpl | 11 ------- 6 files changed, 44 insertions(+), 43 deletions(-) delete mode 100644 tools/diff-processor/missing_test_output.tmpl diff --git a/.ci/magician/cmd/DIFF_COMMENT.md b/.ci/magician/cmd/DIFF_COMMENT.md index 99095de696bd..02a145bc402c 100644 --- a/.ci/magician/cmd/DIFF_COMMENT.md +++ b/.ci/magician/cmd/DIFF_COMMENT.md @@ -24,7 +24,22 @@ If you believe this detection to be incorrect please raise the concern with your If you intend to make this change you will need to wait for a [major release](https://www.terraform.io/plugin/sdkv2/best-practices/versioning#example-major-number-increments) window. An `override-breaking-change` label can be added to allow merging. {{end}} -{{.MissingTests}} + +{{if gt (len .MissingTests) 0}} +## Missing test report +Your PR includes resource fields which are not covered by any test. +{{ range $resourceName, $missingTestInfo := .MissingTests }} +Resource: `{{ $resourceName }}` ({{ len $missingTestInfo.Tests }} total tests) +Please add an acceptance test which includes these fields. 
The test should include the following: + +```hcl +{{ $missingTestInfo.SuggestedTest }} + +``` + +{{- end }} +{{end}} + {{- $errorsLength := len .Errors}} {{- if gt $errorsLength 0}} ## Errors diff --git a/.ci/magician/cmd/generate_comment.go b/.ci/magician/cmd/generate_comment.go index a8dccbf2435a..1422e582d17f 100644 --- a/.ci/magician/cmd/generate_comment.go +++ b/.ci/magician/cmd/generate_comment.go @@ -55,6 +55,11 @@ type BreakingChange struct { DocumentationReference string } +type MissingTestInfo struct { + SuggestedTest string + Tests []string +} + type Errors struct { Title string Errors []string @@ -64,7 +69,7 @@ type diffCommentData struct { PrNumber int Diffs []Diff BreakingChanges []BreakingChange - MissingTests string + MissingTests map[string]*MissingTestInfo Errors []Errors } @@ -465,17 +470,21 @@ func changedSchemaResources(diffProcessorPath string, rnr ExecRunner) ([]string, // Run the missing test detector and return the results. // Returns an empty string unless there are missing tests. // Error will be nil unless an error occurs during setup. 
-func detectMissingTests(diffProcessorPath, tpgbLocalPath string, rnr ExecRunner) (string, error) { +func detectMissingTests(diffProcessorPath, tpgbLocalPath string, rnr ExecRunner) (map[string]*MissingTestInfo, error) { if err := rnr.PushDir(diffProcessorPath); err != nil { - return "", err + return nil, err } output, err := rnr.Run("bin/diff-processor", []string{"detect-missing-tests", fmt.Sprintf("%s/google-beta/services", tpgbLocalPath)}, nil) if err != nil { - return "", err + return nil, err } - return output, rnr.PopDir() + var missingTests map[string]*MissingTestInfo + if err = json.Unmarshal([]byte(output), &missingTests); err != nil { + return nil, err + } + return missingTests, rnr.PopDir() } func formatDiffComment(data diffCommentData) (string, error) { diff --git a/.ci/magician/cmd/generate_comment_test.go b/.ci/magician/cmd/generate_comment_test.go index 91180c26369b..a246dac83b17 100644 --- a/.ci/magician/cmd/generate_comment_test.go +++ b/.ci/magician/cmd/generate_comment_test.go @@ -105,7 +105,7 @@ func TestExecGenerateComment(t *testing.T) { for method, expectedCalls := range map[string][][]any{ "PostBuildStatus": {{"123456", "terraform-provider-breaking-change-test", "success", "https://console.cloud.google.com/cloud-build/builds;region=global/build1;step=17?project=project1", "sha1"}}, - "PostComment": {{"123456", "Hi there, I'm the Modular magician. 
I've detected the following information about your changes:\n\n## Diff report\n\nYour PR generated some diffs in downstreams - here they are.\n\n`google` provider: [Diff](https://github.com/modular-magician/terraform-provider-google/compare/auto-pr-123456-old..auto-pr-123456) ( 2 files changed, 40 insertions(+))\n`google-beta` provider: [Diff](https://github.com/modular-magician/terraform-provider-google-beta/compare/auto-pr-123456-old..auto-pr-123456) ( 2 files changed, 40 insertions(+))\n`terraform-google-conversion`: [Diff](https://github.com/modular-magician/terraform-google-conversion/compare/auto-pr-123456-old..auto-pr-123456) ( 1 file changed, 10 insertions(+))\n\n## Missing test report\nYour PR includes resource fields which are not covered by any test.\n\nResource: `google_folder_access_approval_settings` (3 total tests)\nPlease add an acceptance test which includes these fields. The test should include the following:\n\n```hcl\nresource \"google_folder_access_approval_settings\" \"primary\" {\n uncovered_field = # value needed\n}\n\n```\n"}}, + "PostComment": {{"123456", "Hi there, I'm the Modular magician. 
I've detected the following information about your changes:\n\n## Diff report\n\nYour PR generated some diffs in downstreams - here they are.\n\n`google` provider: [Diff](https://github.com/modular-magician/terraform-provider-google/compare/auto-pr-123456-old..auto-pr-123456) ( 2 files changed, 40 insertions(+))\n`google-beta` provider: [Diff](https://github.com/modular-magician/terraform-provider-google-beta/compare/auto-pr-123456-old..auto-pr-123456) ( 2 files changed, 40 insertions(+))\n`terraform-google-conversion`: [Diff](https://github.com/modular-magician/terraform-google-conversion/compare/auto-pr-123456-old..auto-pr-123456) ( 1 file changed, 10 insertions(+))\n\n\n\n## Missing test report\nYour PR includes resource fields which are not covered by any test.\n\nResource: `google_folder_access_approval_settings` (3 total tests)\nPlease add an acceptance test which includes these fields. The test should include the following:\n\n```hcl\nresource \"google_folder_access_approval_settings\" \"primary\" {\n uncovered_field = # value needed\n}\n\n```\n"}}, "AddLabels": {{"123456", []string{"service/alloydb"}}}, } { if actualCalls, ok := gh.calledMethods[method]; !ok { @@ -214,7 +214,12 @@ func TestFormatDiffComment(t *testing.T) { }, "missing tests are displayed": { data: diffCommentData{ - MissingTests: "## Missing test report", + MissingTests: map[string]*MissingTestInfo{ + "resource": { + Tests: []string{"test-a", "test-b"}, + SuggestedTest: "x", + }, + }, }, expectedStrings: []string{ "## Diff report", diff --git a/.ci/magician/cmd/mock_runner_test.go b/.ci/magician/cmd/mock_runner_test.go index dd06fd0f2c8a..c3a1abccbde3 100644 --- a/.ci/magician/cmd/mock_runner_test.go +++ b/.ci/magician/cmd/mock_runner_test.go @@ -71,7 +71,7 @@ func NewMockRunner() MockRunner { "/mock/dir/magic-modules/tools/diff-processor bin/diff-processor [breaking-changes] map[]": "", "/mock/dir/magic-modules/tools/diff-processor make [build] " + sortedEnvString(diffProcessorEnv): "", 
"/mock/dir/magic-modules/tools/diff-processor bin/diff-processor [changed-schema-resources] map[]": "[\"google_alloydb_instance\"]", - "/mock/dir/magic-modules/tools/diff-processor bin/diff-processor [detect-missing-tests /mock/dir/tpgb/google-beta/services] map[]": "## Missing test report\nYour PR includes resource fields which are not covered by any test.\n\nResource: `google_folder_access_approval_settings` (3 total tests)\nPlease add an acceptance test which includes these fields. The test should include the following:\n\n```hcl\nresource \"google_folder_access_approval_settings\" \"primary\" {\n uncovered_field = # value needed\n}\n\n```\n", + "/mock/dir/magic-modules/tools/diff-processor bin/diff-processor [detect-missing-tests /mock/dir/tpgb/google-beta/services] map[]": `{"google_folder_access_approval_settings":{"SuggestedTest":"resource \"google_folder_access_approval_settings\" \"primary\" {\n uncovered_field = # value needed\n}","Tests":["a","b","c"]}}`, "/mock/dir/tgc git [diff origin/auto-pr-123456-old origin/auto-pr-123456 --shortstat] map[]": " 1 file changed, 10 insertions(+)\n", "/mock/dir/tgc git [fetch origin auto-pr-123456-old] map[]": "", "/mock/dir/tfoics git [diff origin/auto-pr-123456-old origin/auto-pr-123456 --shortstat] map[]": "", diff --git a/tools/diff-processor/cmd/detect_missing_tests.go b/tools/diff-processor/cmd/detect_missing_tests.go index 9417f767567d..044a7943d372 100644 --- a/tools/diff-processor/cmd/detect_missing_tests.go +++ b/tools/diff-processor/cmd/detect_missing_tests.go @@ -1,13 +1,13 @@ package cmd import ( + "encoding/json" newProvider "google/provider/new/google/provider" oldProvider "google/provider/old/google/provider" + "io" "fmt" "os" - "strings" - "text/template" "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/detector" "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" @@ -20,11 +20,13 @@ const detectMissingTestsDesc = "Run the missing test detector using the given se 
type detectMissingTestsOptions struct { rootOptions *rootOptions + stdout io.Writer } func newDetectMissingTestsCmd(rootOptions *rootOptions) *cobra.Command { o := &detectMissingTestsOptions{ rootOptions: rootOptions, + stdout: os.Stdout, } return &cobra.Command{ Use: "detect-missing-tests SERVICES_DIR", @@ -49,27 +51,8 @@ func (o *detectMissingTestsOptions) run(args []string) error { if err != nil { return fmt.Errorf("error detecting missing tests: %v", err) } - if len(missingTests) > 0 { - funcs := template.FuncMap{ - "join": strings.Join, - "backTickAll": func(ss []string) []string { - rs := make([]string, len(ss)) - for i, s := range ss { - rs[i] = fmt.Sprintf("`%s`", s) - } - return rs - }, - } - outputTemplate, err := template.New("missing_test_output.tmpl").Funcs(funcs).ParseFiles("missing_test_output.tmpl") - if err != nil { - return fmt.Errorf("Error parsing missing test template file: %s", err) - } - if err := outputTemplate.Execute(os.Stdout, missingTests); err != nil { - return fmt.Errorf("Error executing missing test output template: %s", err) - } - for resourceName, missingTestInfo := range missingTests { - glog.Infof("%s tests parsed: %v", resourceName, missingTestInfo.Tests) - } + if err := json.NewEncoder(o.stdout).Encode(missingTests); err != nil { + return fmt.Errorf("error encoding json: %w", err) } return nil } diff --git a/tools/diff-processor/missing_test_output.tmpl b/tools/diff-processor/missing_test_output.tmpl deleted file mode 100644 index dda934cc1117..000000000000 --- a/tools/diff-processor/missing_test_output.tmpl +++ /dev/null @@ -1,11 +0,0 @@ -## Missing test report -Your PR includes resource fields which are not covered by any test. -{{ range $resourceName, $missingTestInfo := . }} -Resource: `{{ $resourceName }}` ({{ len $missingTestInfo.Tests }} total tests) -Please add an acceptance test which includes these fields. 
The test should include the following: - -```hcl -{{ $missingTestInfo.SuggestedTest }} -``` - -{{- end }} From 9d4ef4860a8d1988cd2207e276a020c6508238da Mon Sep 17 00:00:00 2001 From: Nancy Hong Date: Thu, 6 Jun 2024 09:25:47 -0700 Subject: [PATCH 070/356] Update AlloyDB e2e tests and remove custom code for Public-IP feature (#10885) --- mmv1/products/alloydb/Instance.yaml | 1 - .../post_create/alloydb_instance.go.erb | 44 ------------------- .../pre_create/alloydb_instance.go.erb | 15 ------- .../alloydb/resource_alloydb_instance_test.go | 12 +++++ 4 files changed, 12 insertions(+), 60 deletions(-) delete mode 100644 mmv1/templates/terraform/post_create/alloydb_instance.go.erb diff --git a/mmv1/products/alloydb/Instance.yaml b/mmv1/products/alloydb/Instance.yaml index e5c0adb30934..cd216533012a 100644 --- a/mmv1/products/alloydb/Instance.yaml +++ b/mmv1/products/alloydb/Instance.yaml @@ -56,7 +56,6 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode custom_import: templates/terraform/custom_import/alloydb_instance.go.erb pre_create: templates/terraform/pre_create/alloydb_instance.go.erb pre_delete: templates/terraform/pre_delete/alloydb_instance.go.erb - post_create: templates/terraform/post_create/alloydb_instance.go.erb examples: - !ruby/object:Provider::Terraform::Examples name: 'alloydb_instance_basic' diff --git a/mmv1/templates/terraform/post_create/alloydb_instance.go.erb b/mmv1/templates/terraform/post_create/alloydb_instance.go.erb deleted file mode 100644 index 5de3de716796..000000000000 --- a/mmv1/templates/terraform/post_create/alloydb_instance.go.erb +++ /dev/null @@ -1,44 +0,0 @@ -// If enablePublicIp is set to true, then we must create the instance first with -// it disabled then update to enable it. 
-networkConfigProp, err = expandAlloydbInstanceNetworkConfig(d.Get("network_config"), d, config) -if err != nil { - return err -} else if v, ok := d.GetOkExists("network_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(networkConfigProp)) && (ok || !reflect.DeepEqual(v, networkConfigProp)) { - nc := networkConfigProp.(map[string]interface{}) - if nc["enablePublicIp"] == true { - obj["networkConfig"] = networkConfigProp - - updateMask := []string{} - updateMask = append(updateMask, "networkConfig") - url, err := tpgresource.ReplaceVars(d, config, "{{AlloydbBasePath}}{{cluster}}/instances/{{instance_id}}") - if err != nil { - return err - } - url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) - if err != nil { - return err - } - - updateRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "PATCH", - Project: billingProject, - RawURL: url, - UserAgent: userAgent, - Body: obj, - Timeout: d.Timeout(schema.TimeoutUpdate), - }) - if err != nil { - return fmt.Errorf("Error updating the Instance to enable public ip: %s", err) - } else { - log.Printf("[DEBUG] Finished updating Instance to enable public ip %q: %#v", d.Id(), updateRes) - } - err = AlloydbOperationWaitTime( - config, updateRes, project, "Updating Instance", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - } -} diff --git a/mmv1/templates/terraform/pre_create/alloydb_instance.go.erb b/mmv1/templates/terraform/pre_create/alloydb_instance.go.erb index 75366c58bff8..ba3d97920cd4 100644 --- a/mmv1/templates/terraform/pre_create/alloydb_instance.go.erb +++ b/mmv1/templates/terraform/pre_create/alloydb_instance.go.erb @@ -1,18 +1,3 @@ -// Temporarily remove the enablePublicIp field if it is set to true since the -// API prohibits creating instances with public IP enabled. 
-var nc map[string]interface{} -if obj["networkConfig"] == nil { - nc = make(map[string]interface{}) -} else { - nc = obj["networkConfig"].(map[string]interface{}) -} -if nc["enablePublicIp"] == true { - delete(nc, "enablePublicIp") - delete(nc, "authorizedExternalNetworks") -} -obj["networkConfig"] = nc - - // Read the config and call createsecondary api if instance_type is SECONDARY if instanceType := d.Get("instance_type"); instanceType == "SECONDARY" { diff --git a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go index f6475ce10bc0..8924c4d234d9 100644 --- a/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go +++ b/mmv1/third_party/terraform/services/alloydb/resource_alloydb_instance_test.go @@ -655,6 +655,9 @@ resource "google_alloydb_instance" "default" { cluster = google_alloydb_cluster.default.name instance_id = "tf-test-alloydb-instance%{random_suffix}" instance_type = "PRIMARY" + database_flags = { + "password.enforce_complexity" = "on" + } network_config { enable_public_ip = %{enable_public_ip} @@ -666,6 +669,9 @@ resource "google_alloydb_cluster" "default" { cluster_id = "tf-test-alloydb-cluster%{random_suffix}" location = "us-central1" network = data.google_compute_network.default.id + initial_user { + password = "tf-test-alloydb-cluster%{random_suffix}" + } } data "google_project" "project" {} @@ -682,6 +688,9 @@ resource "google_alloydb_instance" "default" { cluster = google_alloydb_cluster.default.name instance_id = "tf-test-alloydb-instance%{random_suffix}" instance_type = "PRIMARY" + database_flags = { + "password.enforce_complexity" = "on" + } network_config { enable_public_ip = %{enable_public_ip} @@ -695,6 +704,9 @@ resource "google_alloydb_cluster" "default" { cluster_id = "tf-test-alloydb-cluster%{random_suffix}" location = "us-central1" network = data.google_compute_network.default.id + initial_user { + 
password = "tf-test-alloydb-cluster%{random_suffix}" + } } data "google_project" "project" {} From a4d43e6167451d6574c964d86cc889dab7c0bde6 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 6 Jun 2024 17:42:41 +0100 Subject: [PATCH 071/356] Make `google_compute_security_policy`'s `rule_visibility` optional + computed and remove default value (#10823) --- .../resource_compute_security_policy.go.erb | 2 +- ...source_compute_security_policy_test.go.erb | 63 +++++++++++++++++-- 2 files changed, 60 insertions(+), 5 deletions(-) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb index 34bc756a864c..3add4c9093d7 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb @@ -496,7 +496,7 @@ func ResourceComputeSecurityPolicy() *schema.Resource { "rule_visibility": { Type: schema.TypeString, Optional: true, - Default: "STANDARD", + Computed: true, ValidateFunc: validation.StringInSlice([]string{"STANDARD", "PREMIUM"}, false), Description: `Rule visibility. 
Supported values include: "STANDARD", "PREMIUM".`, }, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb index 8912053cfc53..0e9fdd517043 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb @@ -239,7 +239,7 @@ func TestAccComputeSecurityPolicy_withAdaptiveProtection(t *testing.T) { CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccComputeSecurityPolicy_withAdaptiveProtection(spName), + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_enabled(spName), }, { ResourceName: "google_compute_security_policy.policy", @@ -247,7 +247,7 @@ func TestAccComputeSecurityPolicy_withAdaptiveProtection(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccComputeSecurityPolicy_withAdaptiveProtectionUpdate(spName), + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_update(spName), }, { ResourceName: "google_compute_security_policy.policy", @@ -258,6 +258,37 @@ func TestAccComputeSecurityPolicy_withAdaptiveProtection(t *testing.T) { }) } +func TestAccComputeSecurityPolicy_withoutAdaptiveProtection(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Can create with layer 7 protection disabled + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_disabled(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + // Can 
update to layer 7 protection enabled + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_enabled(spName), + }, + { + // Can update to layer 7 protection disabled again + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_disabled(spName), + }, + }, + }) +} + <% unless version == 'ga' -%> func TestAccComputeSecurityPolicy_withAdaptiveProtectionAutoDeployConfig(t *testing.T) { t.Parallel() @@ -1192,7 +1223,31 @@ resource "google_compute_security_policy" "policy" { `, spName) } -func testAccComputeSecurityPolicy_withAdaptiveProtection(spName string) string { +func testAccComputeSecurityPolicy_withoutAdaptiveProtection(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdaptiveProtection_disabled(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" + + adaptive_protection_config { + layer_7_ddos_defense_config { + enable = false + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdaptiveProtection_enabled(spName string) string { return fmt.Sprintf(` resource "google_compute_security_policy" "policy" { name = "%s" @@ -1208,7 +1263,7 @@ resource "google_compute_security_policy" "policy" { `, spName) } -func testAccComputeSecurityPolicy_withAdaptiveProtectionUpdate(spName string) string { +func testAccComputeSecurityPolicy_withAdaptiveProtection_update(spName string) string { return fmt.Sprintf(` resource "google_compute_security_policy" "policy" { name = "%s" From 3b8006dd9cc03d295833cf83a0ebdb4a079ffb6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20=27Cherit=27=20Sz=C3=B3stak?= Date: Thu, 6 Jun 2024 19:32:53 +0200 Subject: [PATCH 072/356] Add the beta support for Port Mapping NEG and endpoint (#10867) --- .../compute/RegionNetworkEndpoint.yaml | 28 +++++ 
.../compute/RegionNetworkEndpointGroup.yaml | 12 +- ...gion_network_endpoint_group_portmap.tf.erb | 22 ++++ .../region_network_endpoint_portmap.tf.erb | 58 ++++++++++ .../compute_region_network_endpoint.go.erb | 22 +++- ...ompute_region_network_endpoint_test.go.erb | 104 ++++++++++++++++++ 6 files changed, 244 insertions(+), 2 deletions(-) create mode 100644 mmv1/templates/terraform/examples/region_network_endpoint_group_portmap.tf.erb create mode 100644 mmv1/templates/terraform/examples/region_network_endpoint_portmap.tf.erb diff --git a/mmv1/products/compute/RegionNetworkEndpoint.yaml b/mmv1/products/compute/RegionNetworkEndpoint.yaml index 81d31f7e1f6e..9350e1fa0437 100644 --- a/mmv1/products/compute/RegionNetworkEndpoint.yaml +++ b/mmv1/products/compute/RegionNetworkEndpoint.yaml @@ -73,6 +73,20 @@ examples: vars: neg_name: 'fqdn-port-neg' network_name: 'network' + - !ruby/object:Provider::Terraform::Examples + name: 'region_network_endpoint_portmap' + primary_resource_id: 'region_network_endpoint_portmap' + min_version: 'beta' + # Fine-grained resource need different autogenerated tests, as + # we need to check destroy during a test step where the parent resource + # still exists, rather than during CheckDestroy (when read returns + # nothing because the parent resource has then also been destroyed) + skip_test: true + vars: + network_name: 'network' + subnetwork_name: 'subnetwork' + instance_name: 'instance' + neg_name: 'portmap-neg' custom_code: !ruby/object:Provider::Terraform::CustomCode pre_delete: templates/terraform/pre_delete/compute_region_network_endpoint.go.erb custom_import: templates/terraform/custom_import/compute_region_network_endpoint.go.erb @@ -123,3 +137,17 @@ properties: Fully qualified domain name of network endpoint. This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT. 
+ - !ruby/object:Api::Type::Integer + name: 'clientDestinationPort' + custom_flatten: templates/terraform/custom_flatten/float64_to_int.go.erb + description: | + Client destination port for the `GCE_VM_IP_PORTMAP` NEG. + min_version: 'beta' + - !ruby/object:Api::Type::ResourceRef + name: 'instance' + resource: 'Instance' + imports: 'name' + min_version: 'beta' + description: | + The name for a specific VM instance that the IP address belongs to. + This is required for network endpoints of type GCE_VM_IP_PORTMAP. diff --git a/mmv1/products/compute/RegionNetworkEndpointGroup.yaml b/mmv1/products/compute/RegionNetworkEndpointGroup.yaml index 177ce9001bf1..7a9ac6a9376b 100644 --- a/mmv1/products/compute/RegionNetworkEndpointGroup.yaml +++ b/mmv1/products/compute/RegionNetworkEndpointGroup.yaml @@ -24,7 +24,8 @@ references: !ruby/object:Api::Resource::ReferenceLinks 'Internet NEGs Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/internet-neg-concepts' api: 'https://cloud.google.com/compute/docs/reference/rest/beta/regionNetworkEndpointGroups' description: | - A regional NEG that can support Serverless Products and proxying traffic to external backends. + A regional NEG that can support Serverless Products, proxying traffic to + external backends and providing traffic to the PSC port mapping endpoints. Recreating a region network endpoint group that's in use by another resource will give a `resourceInUseByAnotherResource` error. 
Use `lifecycle.create_before_destroy` @@ -102,6 +103,14 @@ examples: vars: neg_name: 'ip-port-neg' network_name: 'network' + - !ruby/object:Provider::Terraform::Examples + name: 'region_network_endpoint_group_portmap' + primary_resource_id: 'region_network_endpoint_group_portmap' + min_version: 'beta' + vars: + network_name: 'network' + subnetwork_name: 'subnetwork' + neg_name: 'portmap-neg' parameters: - !ruby/object:Api::Type::ResourceRef name: 'region' @@ -139,6 +148,7 @@ properties: - :PRIVATE_SERVICE_CONNECT - :INTERNET_IP_PORT - :INTERNET_FQDN_PORT + - :GCE_VM_IP_PORTMAP default_value: :SERVERLESS - !ruby/object:Api::Type::String name: 'pscTargetService' diff --git a/mmv1/templates/terraform/examples/region_network_endpoint_group_portmap.tf.erb b/mmv1/templates/terraform/examples/region_network_endpoint_group_portmap.tf.erb new file mode 100644 index 000000000000..f44bd8ee4c81 --- /dev/null +++ b/mmv1/templates/terraform/examples/region_network_endpoint_group_portmap.tf.erb @@ -0,0 +1,22 @@ +resource "google_compute_region_network_endpoint_group" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['neg_name'] %>" + region = "us-central1" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + + network_endpoint_type = "GCE_VM_IP_PORTMAP" + provider = google-beta +} + +resource "google_compute_network" "default" { + name = "<%= ctx[:vars]['network_name'] %>" + provider = google-beta +} + +resource "google_compute_subnetwork" "default" { + name = "<%= ctx[:vars]['subnetwork_name'] %>" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id + provider = google-beta +} diff --git a/mmv1/templates/terraform/examples/region_network_endpoint_portmap.tf.erb b/mmv1/templates/terraform/examples/region_network_endpoint_portmap.tf.erb new file mode 100644 index 000000000000..f39e45d604d4 --- /dev/null +++ 
b/mmv1/templates/terraform/examples/region_network_endpoint_portmap.tf.erb @@ -0,0 +1,58 @@ +resource "google_compute_network" "default" { + name = "<%= ctx[:vars]['network_name'] %>" + auto_create_subnetworks = false + provider = google-beta +} + +resource "google_compute_subnetwork" "default" { + name = "<%= ctx[:vars]['subnetwork_name'] %>" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id + provider = google-beta +} + +resource "google_compute_region_network_endpoint_group" default { + name = "<%= ctx[:vars]['neg_name'] %>" + region = "us-central1" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + + network_endpoint_type = "GCE_VM_IP_PORTMAP" + provider = google-beta +} + +resource "google_compute_region_network_endpoint" "<%= ctx[:primary_resource_id] %>" { + region_network_endpoint_group = google_compute_region_network_endpoint_group.default.name + region = "us-central1" + instance = google_compute_instance.default.self_link + port = 80 + ip_address = google_compute_instance.default.network_interface[0].network_ip + client_destination_port = 8080 + provider = google-beta +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + provider = google-beta +} + +resource "google_compute_instance" "default" { + name = "<%= ctx[:vars]['instance_name'] %>" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.default.id + access_config { + } + } + provider = google-beta +} diff --git a/mmv1/templates/terraform/pre_delete/compute_region_network_endpoint.go.erb b/mmv1/templates/terraform/pre_delete/compute_region_network_endpoint.go.erb index 732a63059365..3a873c959b80 100644 --- a/mmv1/templates/terraform/pre_delete/compute_region_network_endpoint.go.erb +++ 
b/mmv1/templates/terraform/pre_delete/compute_region_network_endpoint.go.erb @@ -27,6 +27,26 @@ if fqdnProp != "" { toDelete["fqdn"] = fqdnProp } +<% unless version == 'ga' -%> +// Instance +instanceProp, err := expandNestedComputeRegionNetworkEndpointInstance(d.Get("instance"), d, config) +if err != nil { + return err +} +if instanceProp != "" { + toDelete["instance"] = instanceProp +} + +// Client Destination Port +clientDestinationPortProp, err := expandNestedComputeRegionNetworkEndpointClientDestinationPort(d.Get("client_destination_port"), d, config) +if err != nil { + return err +} +if clientDestinationPortProp != "" && d.Get("client_destination_port").(int) > 0 { + toDelete["clientDestinationPort"] = clientDestinationPortProp +} +<% end -%> + obj = map[string]interface{}{ "networkEndpoints": []map[string]interface{}{toDelete}, -} \ No newline at end of file +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_network_endpoint_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_network_endpoint_test.go.erb index 3d853a6e6148..abbeaf7a2dc3 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_network_endpoint_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_network_endpoint_test.go.erb @@ -144,6 +144,110 @@ resource "google_compute_region_network_endpoint" "add2" { `, context) + testAccComputeRegionNetworkEndpoint_noRegionNetworkEndpoints(context) } +<% unless version == 'ga' -%> + +func TestAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + negId := fmt.Sprintf("projects/%s/regions/%s/networkEndpointGroups/tf-test-portmap-neg%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), context["random_suffix"]) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapExample(context), + }, + { + ResourceName: "google_compute_region_network_endpoint.region_network_endpoint_portmap", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance", "region", "region_network_endpoint_group"}, + }, + { + // Delete all endpoints + Config: testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapNoEndpointExample(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionNetworkEndpointWithPortsDestroyed(t, negId, "80"), + ), + }, + }, + }) +} + +func testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapNoEndpointExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "network%{random_suffix}" + auto_create_subnetworks = false + provider = google-beta +} + +resource "google_compute_subnetwork" "default" { + name = "subnetwork%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id + provider = google-beta +} + +resource "google_compute_region_network_endpoint_group" default { + name = "tf-test-portmap-neg%{random_suffix}" + region = "us-central1" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + + network_endpoint_type = "GCE_VM_IP_PORTMAP" + provider = google-beta +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + provider = google-beta +} + +resource "google_compute_instance" "default" { + name = "instance%{random_suffix}" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = 
google_compute_subnetwork.default.id + access_config { + } + } + provider = google-beta +} +`, context) +} + +func testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_network_endpoint" "region_network_endpoint_portmap" { + region_network_endpoint_group = google_compute_region_network_endpoint_group.default.name + region = "us-central1" + instance = google_compute_instance.default.self_link + port = 80 + ip_address = google_compute_instance.default.network_interface[0].network_ip + client_destination_port = 8080 + provider = google-beta +} +`, context) + testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapNoEndpointExample(context) +} +<% end -%> + func testAccComputeRegionNetworkEndpoint_noRegionNetworkEndpoints(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_region_network_endpoint_group" "neg" { From 897bb5f010d0911cb98406905b8734adbbe724c4 Mon Sep 17 00:00:00 2001 From: kkram01 Date: Fri, 7 Jun 2024 00:57:51 +0530 Subject: [PATCH 073/356] promote optimized field to GA (#10802) --- mmv1/products/vertexai/FeatureOnlineStore.yaml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mmv1/products/vertexai/FeatureOnlineStore.yaml b/mmv1/products/vertexai/FeatureOnlineStore.yaml index d74e81bcf129..702626b7a1ad 100644 --- a/mmv1/products/vertexai/FeatureOnlineStore.yaml +++ b/mmv1/products/vertexai/FeatureOnlineStore.yaml @@ -145,7 +145,6 @@ properties: exactly_one_of: - bigtable - optimized - min_version: beta properties: [] # Meant to be an empty object with no properties - see here : https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.featureOnlineStores#Optimized # The fields below are necessary to include the "Optimized" transformation in the payload @@ -187,10 +186,13 @@ properties: - !ruby/object:Api::Type::NestedObject name: 
embeddingManagement description: | - The settings for embedding management in FeatureOnlineStore. Embedding management can only be used with BigTable. + The settings for embedding management in FeatureOnlineStore. Embedding management can only be set for BigTable. It is enabled by default for optimized storagetype. conflicts: - optimized min_version: beta + default_from_api: true + deprecation_message: >- + `embedding_management` is deprecated. This field is no longer needed anymore and embedding management is automatically enabled when specifying Optimized storage type properties: - !ruby/object:Api::Type::Boolean name: enabled From 8243ff70fc450398c422eed70cc747cb44bb20ce Mon Sep 17 00:00:00 2001 From: Salome Papiashvili Date: Thu, 6 Jun 2024 23:32:36 +0200 Subject: [PATCH 074/356] Adding Datasource: google_composer_user_workloads_secret (#10615) Co-authored-by: Stephen Lewis (Burrows) --- .../provider/provider_mmv1_resources.go.erb | 3 + ...ogle_composer_user_workloads_secret.go.erb | 64 +++++++++++ ...composer_user_workloads_secret_test.go.erb | 103 ++++++++++++++++++ ...mposer_user_workloads_secret.html.markdown | 60 ++++++++++ 4 files changed, 230 insertions(+) create mode 100644 mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_secret.go.erb create mode 100644 mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_secret_test.go.erb create mode 100644 mmv1/third_party/terraform/website/docs/d/composer_user_workloads_secret.html.markdown diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index b915b63e8e73..548737cf28c1 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -58,6 +58,9 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_cloud_run_v2_job": 
cloudrunv2.DataSourceGoogleCloudRunV2Job(), "google_cloud_run_v2_service": cloudrunv2.DataSourceGoogleCloudRunV2Service(), "google_composer_environment": composer.DataSourceGoogleComposerEnvironment(), + <% unless version == 'ga' -%> + "google_composer_user_workloads_secret": composer.DataSourceGoogleComposerUserWorkloadsSecret(), + <% end -%> "google_composer_image_versions": composer.DataSourceGoogleComposerImageVersions(), "google_compute_address": compute.DataSourceGoogleComputeAddress(), "google_compute_addresses": compute.DataSourceGoogleComputeAddresses(), diff --git a/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_secret.go.erb b/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_secret.go.erb new file mode 100644 index 000000000000..99741363d83d --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_secret.go.erb @@ -0,0 +1,64 @@ +<% autogen_exception -%> +package composer + +<% unless version == 'ga' -%> +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComposerUserWorkloadsSecret() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComposerUserWorkloadsSecret().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "environment", "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "region") + + return &schema.Resource{ + Read: dataSourceGoogleComposerUserWorkloadsSecretRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComposerUserWorkloadsSecretRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, 
config, "projects/{{project}}/locations/{{region}}/environments/{{environment}}/userWorkloadsSecrets/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // retrieve "data" in advance, because Read function won't do it. + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.UserWorkloadsSecrets.Get(id).Do() + if err != nil { + return err + } + + if err := d.Set("data", res.Data); err != nil { + return fmt.Errorf("Error setting UserWorkloadsSecret Data: %s", err) + } + + err = resourceComposerUserWorkloadsSecretRead(d, meta) + if err != nil { + return err + } + + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} +<% end -%> diff --git a/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_secret_test.go.erb b/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_secret_test.go.erb new file mode 100644 index 000000000000..22535b32eb17 --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_secret_test.go.erb @@ -0,0 +1,103 @@ +<% autogen_exception -%> +package composer_test + +<% unless version == 'ga' -%> +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceComposerUserWorkloadsSecret_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "env_name": fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)), + "secret_name": fmt.Sprintf("%s-%d", testComposerUserWorkloadsSecretPrefix, acctest.RandInt(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceComposerUserWorkloadsSecret_basic(context), + Check: resource.ComposeTestCheckFunc( + checkSecretDataSourceMatchesResource(), + ), + }, + }, + }) +} + +func checkSecretDataSourceMatchesResource() resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources["data.google_composer_user_workloads_secret.test"] + if !ok { + return fmt.Errorf("can't find %s in state", "data.google_composer_user_workloads_secret.test") + } + rs, ok := s.RootModule().Resources["google_composer_user_workloads_secret.test"] + if !ok { + return fmt.Errorf("can't find %s in state", "google_composer_user_workloads_secret.test") + } + + dsAttr := ds.Primary.Attributes + rsAttr := rs.Primary.Attributes + errMsg := "" + + for k := range rsAttr { + if k == "%" || k == "data.%" { + continue + } + // ignore diff if it's due to secrets being masked. 
+ if strings.HasPrefix(k, "data.") { + if _, ok := dsAttr[k]; !ok{ + errMsg += fmt.Sprintf("%s is defined in resource and not in datasource\n", k) + } + if dsAttr[k] == "**********" { + continue + } + } + if dsAttr[k] != rsAttr[k] { + errMsg += fmt.Sprintf("%s is %s; want %s\n", k, dsAttr[k], rsAttr[k]) + } + } + + if errMsg != "" { + return errors.New(errMsg) + } + + return nil + } +} + +func testAccDataSourceComposerUserWorkloadsSecret_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "test" { + name = "%{env_name}" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} +resource "google_composer_user_workloads_secret" "test" { + environment = google_composer_environment.test.name + name = "%{secret_name}" + data = { + username: base64encode("username"), + password: base64encode("password"), + } +} +data "google_composer_user_workloads_secret" "test" { + name = google_composer_user_workloads_secret.test.name + environment = google_composer_environment.test.name +} +`, context) +} +<% end -%> diff --git a/mmv1/third_party/terraform/website/docs/d/composer_user_workloads_secret.html.markdown b/mmv1/third_party/terraform/website/docs/d/composer_user_workloads_secret.html.markdown new file mode 100644 index 000000000000..8eea28a0f52f --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/composer_user_workloads_secret.html.markdown @@ -0,0 +1,60 @@ +--- +subcategory: "Cloud Composer" +description: |- + User workloads Secret used by Airflow tasks that run with Kubernetes Executor or KubernetesPodOperator. +--- + +# google\_composer\_user\_workloads\_secret + +Provides access to Kubernetes Secret configuration for a given project, region and Composer Environment. + +~> **Warning:** This data source is in beta, and should be used with the terraform-provider-google-beta provider. 
+See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. + +## Example Usage + +```hcl +resource "google_composer_environment" "example" { + name = "example-environment" + config{ + software_config { + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_composer_user_workloads_secret" "example" { + environment = google_composer_environment.example.name + name = "example-secret" + data = { + username: base64encode("username"), + password: base64encode("password"), + } +} + +data "google_composer_user_workloads_secret" "example" { + environment = google_composer_environment.example.name + name = resource.google_composer_user_workloads_secret.example.name +} + +output "debug" { + value = data.google_composer_user_workloads_secret.example +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Name of the Secret. + +* `environment` - (Required) Environment where the Secret is stored. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + +* `region` - (Optional) The location or Compute Engine region of the environment. + +## Attributes Reference + +See [google_composer_user_workloads_secret](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/composer_user_workloads_secret) resource for details of the available attributes. 
From 04e7ef69f568eee1f5b8428fac8b171e1c447855 Mon Sep 17 00:00:00 2001 From: Dario Date: Thu, 6 Jun 2024 23:35:29 +0200 Subject: [PATCH 075/356] [discoveryengine] Add documentProcessingConfig field to DataStore resource (#10765) --- mmv1/products/discoveryengine/DataStore.yaml | 100 ++++++++++++++++++ ...atastore_document_processing_config.tf.erb | 20 ++++ ...tore_document_processing_config_ocr.tf.erb | 16 +++ 3 files changed, 136 insertions(+) create mode 100644 mmv1/templates/terraform/examples/discoveryengine_datastore_document_processing_config.tf.erb create mode 100644 mmv1/templates/terraform/examples/discoveryengine_datastore_document_processing_config_ocr.tf.erb diff --git a/mmv1/products/discoveryengine/DataStore.yaml b/mmv1/products/discoveryengine/DataStore.yaml index fad7d3e84d21..ecbc57dcbf89 100644 --- a/mmv1/products/discoveryengine/DataStore.yaml +++ b/mmv1/products/discoveryengine/DataStore.yaml @@ -63,6 +63,21 @@ examples: 'fmt.Sprintf("tf_test_data_store%s", context["random_suffix"])' vars: data_store_id: "data-store-id" + - !ruby/object:Provider::Terraform::Examples + name: "discoveryengine_datastore_document_processing_config" + primary_resource_id: 'document_processing_config' + primary_resource_name: + 'fmt.Sprintf("tf_test_data_store%s", context["random_suffix"])' + vars: + data_store_id: "data-store-id" + - !ruby/object:Provider::Terraform::Examples + name: "discoveryengine_datastore_document_processing_config_ocr" + skip_docs: true + primary_resource_id: 'document_processing_config_ocr' + primary_resource_name: + 'fmt.Sprintf("tf_test_data_store%s", context["random_suffix"])' + vars: + data_store_id: "data-store-id" parameters: - !ruby/object:Api::Type::String name: 'location' @@ -140,6 +155,91 @@ properties: - :PUBLIC_WEBSITE immutable: true required: true + - !ruby/object:Api::Type::NestedObject + name: 'documentProcessingConfig' + immutable: true + description: | + Configuration for Document understanding and enrichment. 
+ required: false + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The full resource name of the Document Processing Config. Format: + `projects/{project}/locations/{location}/collections/{collection_id}/dataStores/{data_store_id}/documentProcessingConfig`. + output: true + required: false + - !ruby/object:Api::Type::NestedObject + name: 'defaultParsingConfig' + description: | + Configurations for default Document parser. If not specified, this resource + will be configured to use a default DigitalParsingConfig, and the default parsing + config will be applied to all file types for Document parsing. + required: false + properties: + - !ruby/object:Api::Type::NestedObject + name: 'digitalParsingConfig' + allow_empty_object: true + send_empty_value: true + exactly_one_of: + - default_parsing_config.0.digital_parsing_config + - default_parsing_config.0.ocr_parsing_config + description: | + Configurations applied to digital parser. + required: false + properties: [] + - !ruby/object:Api::Type::NestedObject + name: 'ocrParsingConfig' + exactly_one_of: + - default_parsing_config.0.digital_parsing_config + - default_parsing_config.0.ocr_parsing_config + description: | + Configurations applied to OCR parser. Currently it only applies to PDFs. + required: false + properties: + - !ruby/object:Api::Type::Boolean + name: 'useNativeText' + required: false + description: | + If true, will use native text instead of OCR text on pages containing native text. + + - !ruby/object:Api::Type::Map + name: 'parsingConfigOverrides' + description: | + Map from file type to override the default parsing configuration based on the file type. Supported keys: + * `pdf`: Override parsing config for PDF files, either digital parsing, ocr parsing or layout parsing is supported. + * `html`: Override parsing config for HTML files, only digital parsing and or layout parsing are supported. 
+ * `docx`: Override parsing config for DOCX files, only digital parsing and or layout parsing are supported. + key_name: file_type + + value_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: 'digitalParsingConfig' + allow_empty_object: true + send_empty_value: true + exactly_one_of: + - default_parsing_config.0.digital_parsing_config + - default_parsing_config.0.ocr_parsing_config + description: | + Configurations applied to digital parser. + required: false + properties: [] + - !ruby/object:Api::Type::NestedObject + name: 'ocrParsingConfig' + exactly_one_of: + - default_parsing_config.0.digital_parsing_config + - default_parsing_config.0.ocr_parsing_config + description: | + Configurations applied to OCR parser. Currently it only applies to PDFs. + required: false + properties: + - !ruby/object:Api::Type::Boolean + name: 'useNativeText' + required: false + description: | + If true, will use native text instead of OCR text on pages containing native text. 
+ - !ruby/object:Api::Type::Time name: "createTime" description: | diff --git a/mmv1/templates/terraform/examples/discoveryengine_datastore_document_processing_config.tf.erb b/mmv1/templates/terraform/examples/discoveryengine_datastore_document_processing_config.tf.erb new file mode 100644 index 000000000000..a43c68fa3374 --- /dev/null +++ b/mmv1/templates/terraform/examples/discoveryengine_datastore_document_processing_config.tf.erb @@ -0,0 +1,20 @@ +resource "google_discovery_engine_data_store" "document_processing_config" { + location = "global" + data_store_id = "<%= ctx[:vars]['data_store_id'] %>" + display_name = "tf-test-structured-datastore" + industry_vertical = "GENERIC" + content_config = "NO_CONTENT" + solution_types = ["SOLUTION_TYPE_SEARCH"] + create_advanced_site_search = false + document_processing_config { + default_parsing_config { + digital_parsing_config {} + } + parsing_config_overrides { + file_type = "pdf" + ocr_parsing_config { + use_native_text = true + } + } + } +} diff --git a/mmv1/templates/terraform/examples/discoveryengine_datastore_document_processing_config_ocr.tf.erb b/mmv1/templates/terraform/examples/discoveryengine_datastore_document_processing_config_ocr.tf.erb new file mode 100644 index 000000000000..82c76e4ec2a8 --- /dev/null +++ b/mmv1/templates/terraform/examples/discoveryengine_datastore_document_processing_config_ocr.tf.erb @@ -0,0 +1,16 @@ +resource "google_discovery_engine_data_store" "document_processing_config_ocr" { + location = "global" + data_store_id = "<%= ctx[:vars]['data_store_id'] %>" + display_name = "tf-test-structured-datastore" + industry_vertical = "GENERIC" + content_config = "NO_CONTENT" + solution_types = ["SOLUTION_TYPE_SEARCH"] + create_advanced_site_search = false + document_processing_config { + default_parsing_config { + ocr_parsing_config { + use_native_text = true + } + } + } +} From 7a4b38409139d9806df7dfaa9f29012b17c461ca Mon Sep 17 00:00:00 2001 From: Iris Chen 
<10179943+iyabchen@users.noreply.github.com> Date: Thu, 6 Jun 2024 15:28:28 -0700 Subject: [PATCH 076/356] Make magician subcommands use RunE and return errors instead of calling os.Exit directly (#10891) --- .ci/magician/cmd/check_cassettes.go | 31 +++----- .ci/magician/cmd/community_checker.go | 14 ++-- .ci/magician/cmd/generate_comment.go | 25 +++---- .ci/magician/cmd/generate_downstream.go | 50 +++++-------- .ci/magician/cmd/membership_checker.go | 17 ++--- .ci/magician/cmd/request_reviewer.go | 25 +++---- .ci/magician/cmd/request_service_reviewers.go | 27 +++---- .ci/magician/cmd/test_terraform_vcr.go | 75 +++++++------------ .ci/magician/cmd/test_tgc.go | 13 ++-- .ci/magician/cmd/test_tgc_integration.go | 32 ++++---- .ci/magician/cmd/test_tpg.go | 16 ++-- 11 files changed, 128 insertions(+), 197 deletions(-) diff --git a/.ci/magician/cmd/check_cassettes.go b/.ci/magician/cmd/check_cassettes.go index e19f3f7c9ef3..87e8cf746765 100644 --- a/.ci/magician/cmd/check_cassettes.go +++ b/.ci/magician/cmd/check_cassettes.go @@ -43,37 +43,33 @@ var checkCassettesCmd = &cobra.Command{ ` + listCCEnvironmentVariables() + ` It prints a list of tests that failed in replaying mode along with all test output.`, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { env := make(map[string]string, len(ccEnvironmentVariables)) for _, ev := range ccEnvironmentVariables { val, ok := os.LookupEnv(ev) if !ok { - fmt.Printf("Did not provide %s environment variable\n", ev) - os.Exit(1) + return fmt.Errorf("did not provide %s environment variable", ev) } env[ev] = val } githubToken, ok := lookupGithubTokenOrFallback("GITHUB_TOKEN_DOWNSTREAMS") if !ok { - fmt.Println("Did not provide GITHUB_TOKEN_DOWNSTREAMS or GITHUB_TOKEN environment variables") - os.Exit(1) + return fmt.Errorf("did not provide GITHUB_TOKEN_DOWNSTREAMS or GITHUB_TOKEN environment variables") } rnr, err := exec.NewRunner() if err != nil { - fmt.Println("Error creating 
Runner: ", err) - os.Exit(1) + return fmt.Errorf("error creating Runner: %w", err) } ctlr := source.NewController(env["GOPATH"], "modular-magician", githubToken, rnr) vt, err := vcr.NewTester(env, rnr) if err != nil { - fmt.Println("Error creating VCR tester: ", err) - os.Exit(1) + return fmt.Errorf("error creating VCR tester: %w", err) } - execCheckCassettes(env["COMMIT_SHA"], vt, ctlr) + return execCheckCassettes(env["COMMIT_SHA"], vt, ctlr) }, } @@ -93,10 +89,9 @@ func listCCEnvironmentVariables() string { return result } -func execCheckCassettes(commit string, vt *vcr.Tester, ctlr *source.Controller) { +func execCheckCassettes(commit string, vt *vcr.Tester, ctlr *source.Controller) error { if err := vt.FetchCassettes(provider.Beta, "main", ""); err != nil { - fmt.Println("Error fetching cassettes: ", err) - os.Exit(1) + return fmt.Errorf("error fetching cassettes: %w", err) } providerRepo := &source.Repo{ @@ -105,8 +100,7 @@ func execCheckCassettes(commit string, vt *vcr.Tester, ctlr *source.Controller) } ctlr.SetPath(providerRepo) if err := ctlr.Clone(providerRepo); err != nil { - fmt.Println("Error cloning provider: ", err) - os.Exit(1) + return fmt.Errorf("error cloning provider: %w", err) } vt.SetRepoPath(provider.Beta, providerRepo.Path) @@ -115,8 +109,7 @@ func execCheckCassettes(commit string, vt *vcr.Tester, ctlr *source.Controller) fmt.Println("Error running VCR: ", err) } if err := vt.UploadLogs("vcr-check-cassettes", "", "", false, false, vcr.Replaying, provider.Beta); err != nil { - fmt.Println("Error uploading logs: ", err) - os.Exit(1) + return fmt.Errorf("error uploading logs: %w", err) } fmt.Println(len(result.FailedTests), " failed tests: ", result.FailedTests) // TODO(trodge) report these failures to bigquery @@ -124,9 +117,9 @@ func execCheckCassettes(commit string, vt *vcr.Tester, ctlr *source.Controller) fmt.Println(len(result.SkippedTests), " skipped tests: ", result.SkippedTests) if err := vt.Cleanup(); err != nil { - fmt.Println("Error 
cleaning up vcr tester: ", err) - os.Exit(1) + return fmt.Errorf("error cleaning up vcr tester: %w", err) } + return nil } func init() { diff --git a/.ci/magician/cmd/community_checker.go b/.ci/magician/cmd/community_checker.go index 2834009a5ebb..f86aabdfba58 100644 --- a/.ci/magician/cmd/community_checker.go +++ b/.ci/magician/cmd/community_checker.go @@ -19,7 +19,6 @@ import ( "fmt" "magician/cloudbuild" "magician/github" - "os" "github.com/spf13/cobra" ) @@ -42,7 +41,7 @@ var communityApprovalCmd = &cobra.Command{ 1. Trigger cloud presubmits with specific substitutions for the PR. 2. Remove the 'awaiting-approval' label from the PR. `, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { prNumber := args[0] fmt.Println("PR Number: ", prNumber) @@ -63,16 +62,15 @@ var communityApprovalCmd = &cobra.Command{ githubToken, ok := lookupGithubTokenOrFallback("GITHUB_TOKEN_MAGIC_MODULES") if !ok { - fmt.Println("Did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") - os.Exit(1) + return fmt.Errorf("did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") } gh := github.NewClient(githubToken) cb := cloudbuild.NewClient() - execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch, gh, cb) + return execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch, gh, cb) }, } -func execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch string, gh GithubClient, cb CloudbuildClient) { +func execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBranch, baseBranch string, gh GithubClient, cb CloudbuildClient) error { substitutions := map[string]string{ "BRANCH_NAME": branchName, "_PR_NUMBER": prNumber, @@ -85,13 +83,13 @@ func execCommunityChecker(prNumber, commitSha, branchName, headRepoUrl, headBran // (explicitly or via membership-checker) err := 
cb.TriggerMMPresubmitRuns(commitSha, substitutions) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } // in community-checker job: // remove awaiting-approval label from external contributor PRs gh.RemoveLabel(prNumber, "awaiting-approval") + return nil } func init() { diff --git a/.ci/magician/cmd/generate_comment.go b/.ci/magician/cmd/generate_comment.go index 1422e582d17f..68ef979a882e 100644 --- a/.ci/magician/cmd/generate_comment.go +++ b/.ci/magician/cmd/generate_comment.go @@ -102,13 +102,12 @@ var generateCommentCmd = &cobra.Command{ 5. Report the results in a PR comment. 6. Run unit tests for the missing test detector. `, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { env := make(map[string]string, len(gcEnvironmentVariables)) for _, ev := range gcEnvironmentVariables { val, ok := os.LookupEnv(ev) if !ok { - fmt.Printf("Did not provide %s environment variable\n", ev) - os.Exit(1) + return fmt.Errorf("did not provide %s environment variable", ev) } env[ev] = val } @@ -116,24 +115,21 @@ var generateCommentCmd = &cobra.Command{ for _, tokenName := range []string{"GITHUB_TOKEN_DOWNSTREAMS", "GITHUB_TOKEN_MAGIC_MODULES"} { val, ok := lookupGithubTokenOrFallback(tokenName) if !ok { - fmt.Printf("Did not provide %s or GITHUB_TOKEN environment variable\n", tokenName) - os.Exit(1) + return fmt.Errorf("did not provide %s or GITHUB_TOKEN environment variable", tokenName) } env[tokenName] = val } gh := github.NewClient(env["GITHUB_TOKEN_MAGIC_MODULES"]) rnr, err := exec.NewRunner() if err != nil { - fmt.Println("Error creating a runner: ", err) - os.Exit(1) + return fmt.Errorf("error creating a runner: %w", err) } ctlr := source.NewController(filepath.Join("workspace", "go"), "modular-magician", env["GITHUB_TOKEN_DOWNSTREAMS"], rnr) prNumber, err := strconv.Atoi(env["PR_NUMBER"]) if err != nil { - fmt.Println("Error parsing PR_NUMBER: ", err) - os.Exit(1) + return fmt.Errorf("error parsing 
PR_NUMBER: %w", err) } - execGenerateComment( + return execGenerateComment( prNumber, env["GITHUB_TOKEN_MAGIC_MODULES"], env["BUILD_ID"], @@ -155,7 +151,7 @@ func listGCEnvironmentVariables() string { return result } -func execGenerateComment(prNumber int, ghTokenMagicModules, buildId, buildStep, projectId, commitSha string, gh GithubClient, rnr ExecRunner, ctlr *source.Controller) { +func execGenerateComment(prNumber int, ghTokenMagicModules, buildId, buildStep, projectId, commitSha string, gh GithubClient, rnr ExecRunner, ctlr *source.Controller) error { errors := map[string][]string{"Other": []string{}} pullRequest, err := gh.GetPullRequest(strconv.Itoa(prNumber)) @@ -392,15 +388,14 @@ func execGenerateComment(prNumber int, ghTokenMagicModules, buildId, buildStep, // Post diff comment message, err := formatDiffComment(data) if err != nil { - fmt.Println("Error formatting message: ", err) fmt.Printf("Data: %v\n", data) - os.Exit(1) + return fmt.Errorf("error formatting message: %w", err) } if err := gh.PostComment(strconv.Itoa(prNumber), message); err != nil { - fmt.Printf("Error posting comment to PR %d: %v\n", prNumber, err) fmt.Println("Comment: ", message) - os.Exit(1) + return fmt.Errorf("error posting comment to PR %d: %w", prNumber, err) } + return nil } // Build the diff processor for tpg or tpgb diff --git a/.ci/magician/cmd/generate_downstream.go b/.ci/magician/cmd/generate_downstream.go index 79201986aba0..6d620ee6e3b4 100644 --- a/.ci/magician/cmd/generate_downstream.go +++ b/.ci/magician/cmd/generate_downstream.go @@ -41,13 +41,12 @@ var generateDownstreamCmd = &cobra.Command{ The following environment variables should be set: ` + listGDEnvironmentVariables(), - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { env := make(map[string]string, len(gdEnvironmentVariables)) for _, ev := range gdEnvironmentVariables { val, ok := os.LookupEnv(ev) if !ok { - fmt.Printf("Did not provide %s environment 
variable\n", ev) - os.Exit(1) + return fmt.Errorf("did not provide %s environment variable", ev) } env[ev] = val } @@ -65,28 +64,24 @@ var generateDownstreamCmd = &cobra.Command{ gh := github.NewClient(githubToken) rnr, err := exec.NewRunner() if err != nil { - fmt.Println("Error creating a runner: ", err) - os.Exit(1) + return fmt.Errorf("error creating a runner: %w", err) } ctlr := source.NewController(env["GOPATH"], "modular-magician", githubToken, rnr) oldToken := os.Getenv("GITHUB_TOKEN") if err := os.Setenv("GITHUB_TOKEN", githubToken); err != nil { - fmt.Println("Error setting GITHUB_TOKEN environment variable: ", err) - os.Exit(1) + return fmt.Errorf("error setting GITHUB_TOKEN environment variable: %w", err) } defer func() { if err := os.Setenv("GITHUB_TOKEN", oldToken); err != nil { fmt.Println("Error setting GITHUB_TOKEN environment variable: ", err) - os.Exit(1) } }() if len(args) != 4 { - fmt.Printf("Wrong number of arguments %d, expected 4\n", len(args)) - os.Exit(1) + return fmt.Errorf("wrong number of arguments %d, expected 4", len(args)) } - execGenerateDownstream(env["BASE_BRANCH"], args[0], args[1], args[2], args[3], gh, rnr, ctlr) + return execGenerateDownstream(env["BASE_BRANCH"], args[0], args[1], args[2], args[3], gh, rnr, ctlr) }, } @@ -98,7 +93,7 @@ func listGDEnvironmentVariables() string { return result } -func execGenerateDownstream(baseBranch, command, repo, version, ref string, gh GithubClient, rnr ExecRunner, ctlr *source.Controller) { +func execGenerateDownstream(baseBranch, command, repo, version, ref string, gh GithubClient, rnr ExecRunner, ctlr *source.Controller) error { if baseBranch == "" { baseBranch = "main" } @@ -106,8 +101,7 @@ func execGenerateDownstream(baseBranch, command, repo, version, ref string, gh G mmLocalPath := filepath.Join(rnr.GetCWD(), "..", "..") mmCopyPath := filepath.Join(mmLocalPath, "..", fmt.Sprintf("mm-%s-%s-%s", repo, version, command)) if _, err := rnr.Run("cp", []string{"-rp", mmLocalPath, 
mmCopyPath}, nil); err != nil { - fmt.Println("Error copying magic modules: ", err) - os.Exit(1) + return fmt.Errorf("error copying magic modules: %w", err) } mmRepo := &source.Repo{ Name: "magic-modules", @@ -116,36 +110,30 @@ func execGenerateDownstream(baseBranch, command, repo, version, ref string, gh G downstreamRepo, scratchRepo, commitMessage, err := cloneRepo(mmRepo, baseBranch, repo, version, command, ref, rnr, ctlr) if err != nil { - fmt.Println("Error cloning repo: ", err) - os.Exit(1) + return fmt.Errorf("error cloning repo: %w", err) } if err := rnr.PushDir(mmCopyPath); err != nil { - fmt.Println("Error changing directory to copied magic modules: ", err) - os.Exit(1) + return fmt.Errorf("error changing directory to copied magic modules: %w", err) } if err := setGitConfig(rnr); err != nil { - fmt.Println("Error setting config: ", err) - os.Exit(1) + return fmt.Errorf("error setting config: %w", err) } if err := runMake(downstreamRepo, command, rnr); err != nil { - fmt.Println("Error running make: ", err) - os.Exit(1) + return fmt.Errorf("error running make: %w", err) } var pullRequest *github.PullRequest if command == "downstream" { pullRequest, err = getPullRequest(baseBranch, ref, gh) if err != nil { - fmt.Println("Error getting pull request: ", err) - os.Exit(1) + return fmt.Errorf("error getting pull request: %w", err) } if repo == "terraform" { if err := addChangelogEntry(pullRequest, rnr); err != nil { - fmt.Println("Error adding changelog entry: ", err) - os.Exit(1) + return fmt.Errorf("error adding changelog entry: %w", err) } } } @@ -159,16 +147,15 @@ func execGenerateDownstream(baseBranch, command, repo, version, ref string, gh G } if _, err := rnr.Run("git", []string{"push", ctlr.URL(scratchRepo), scratchRepo.Branch, "-f"}, nil); err != nil { - fmt.Println("Error pushing commit: ", err) - os.Exit(1) + return fmt.Errorf("error pushing commit: %w", err) } if commitErr == nil && command == "downstream" { if err := 
mergePullRequest(downstreamRepo, scratchRepo, scratchCommitSha, pullRequest, rnr, gh); err != nil { - fmt.Println("Error merging pull request: ", err) - os.Exit(1) + return fmt.Errorf("error merging pull request: %w", err) } } + return nil } func cloneRepo(mmRepo *source.Repo, baseBranch, repo, version, command, ref string, rnr ExecRunner, ctlr *source.Controller) (*source.Repo, *source.Repo, string, error) { @@ -323,8 +310,7 @@ func createCommit(scratchRepo *source.Repo, commitMessage string, rnr ExecRunner commitSha, err := rnr.Run("git", []string{"rev-parse", "HEAD"}, nil) if err != nil { - fmt.Println("Error retrieving commit sha: ", err) - os.Exit(1) + return "", fmt.Errorf("error retrieving commit sha: %w", err) } commitSha = strings.TrimSpace(commitSha) diff --git a/.ci/magician/cmd/membership_checker.go b/.ci/magician/cmd/membership_checker.go index 12a7a020ff02..7e149a122dad 100644 --- a/.ci/magician/cmd/membership_checker.go +++ b/.ci/magician/cmd/membership_checker.go @@ -19,7 +19,6 @@ import ( "fmt" "magician/cloudbuild" "magician/github" - "os" "github.com/spf13/cobra" ) @@ -45,7 +44,7 @@ var membershipCheckerCmd = &cobra.Command{ `, // This can change to cobra.ExactArgs(2) after at least a 2-week soak Args: cobra.RangeArgs(2, 6), - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { prNumber := args[0] fmt.Println("PR Number: ", prNumber) @@ -54,20 +53,18 @@ var membershipCheckerCmd = &cobra.Command{ githubToken, ok := lookupGithubTokenOrFallback("GITHUB_TOKEN_MAGIC_MODULES") if !ok { - fmt.Println("Did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") - os.Exit(1) + return fmt.Errorf("did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") } gh := github.NewClient(githubToken) cb := cloudbuild.NewClient() - execMembershipChecker(prNumber, commitSha, gh, cb) + return execMembershipChecker(prNumber, commitSha, gh, cb) }, } -func 
execMembershipChecker(prNumber, commitSha string, gh GithubClient, cb CloudbuildClient) { +func execMembershipChecker(prNumber, commitSha string, gh GithubClient, cb CloudbuildClient) error { pullRequest, err := gh.GetPullRequest(prNumber) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } author := pullRequest.User.Login @@ -79,12 +76,12 @@ func execMembershipChecker(prNumber, commitSha string, gh GithubClient, cb Cloud if trusted { err = cb.ApproveCommunityChecker(prNumber, commitSha) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } } else { gh.AddLabels(prNumber, []string{"awaiting-approval"}) } + return nil } func init() { diff --git a/.ci/magician/cmd/request_reviewer.go b/.ci/magician/cmd/request_reviewer.go index f4d2122a6424..e3981467535c 100644 --- a/.ci/magician/cmd/request_reviewer.go +++ b/.ci/magician/cmd/request_reviewer.go @@ -45,24 +45,22 @@ var requestReviewerCmd = &cobra.Command{ c. As appropriate, posts a welcome comment on the PR. `, Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { prNumber := args[0] fmt.Println("PR Number: ", prNumber) githubToken, ok := os.LookupEnv("GITHUB_TOKEN") if !ok { - fmt.Println("Did not provide GITHUB_TOKEN environment variable") - os.Exit(1) + return fmt.Errorf("did not provide GITHUB_TOKEN environment variable") } gh := github.NewClient(githubToken) - execRequestReviewer(prNumber, gh) + return execRequestReviewer(prNumber, gh) }, } -func execRequestReviewer(prNumber string, gh GithubClient) { +func execRequestReviewer(prNumber string, gh GithubClient) error { pullRequest, err := gh.GetPullRequest(prNumber) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } author := pullRequest.User.Login @@ -71,14 +69,12 @@ func execRequestReviewer(prNumber string, gh GithubClient) { requestedReviewers, err := gh.GetPullRequestRequestedReviewers(prNumber) if err != nil { - fmt.Println(err) - os.Exit(1) + return 
err } previousReviewers, err := gh.GetPullRequestPreviousReviewers(prNumber) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } reviewersToRequest, newPrimaryReviewer := github.ChooseCoreReviewers(requestedReviewers, previousReviewers) @@ -86,8 +82,7 @@ func execRequestReviewer(prNumber string, gh GithubClient) { if len(reviewersToRequest) > 0 { err = gh.RequestPullRequestReviewers(prNumber, reviewersToRequest) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } } @@ -95,11 +90,11 @@ func execRequestReviewer(prNumber string, gh GithubClient) { comment := github.FormatReviewerComment(newPrimaryReviewer) err = gh.PostComment(prNumber, comment) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } } } + return nil } func init() { diff --git a/.ci/magician/cmd/request_service_reviewers.go b/.ci/magician/cmd/request_service_reviewers.go index ed7a713b296c..267d182debd5 100644 --- a/.ci/magician/cmd/request_service_reviewers.go +++ b/.ci/magician/cmd/request_service_reviewers.go @@ -19,7 +19,6 @@ import ( "fmt" "magician/github" "math/rand" - "os" "strings" "github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler/labeler" @@ -36,17 +35,16 @@ var requestServiceReviewersCmd = &cobra.Command{ If a PR has more than 3 service labels, the command will not do anything. 
`, Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { prNumber := args[0] fmt.Println("PR Number: ", prNumber) githubToken, ok := lookupGithubTokenOrFallback("GITHUB_TOKEN_MAGIC_MODULES") if !ok { - fmt.Println("Did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variable") - os.Exit(1) + return fmt.Errorf("did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variable") } gh := github.NewClient(githubToken) - execRequestServiceReviewers(prNumber, gh, labeler.EnrolledTeamsYaml) + return execRequestServiceReviewers(prNumber, gh, labeler.EnrolledTeamsYaml) }, } @@ -55,29 +53,25 @@ type LabelData struct { Team string `yaml:"team,omitempty"` } -func execRequestServiceReviewers(prNumber string, gh GithubClient, enrolledTeamsYaml []byte) { +func execRequestServiceReviewers(prNumber string, gh GithubClient, enrolledTeamsYaml []byte) error { pullRequest, err := gh.GetPullRequest(prNumber) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } enrolledTeams := make(map[string]LabelData) if err := yaml.Unmarshal(enrolledTeamsYaml, &enrolledTeams); err != nil { - fmt.Printf("Error unmarshalling enrolled teams yaml: %s", err) - os.Exit(1) + return fmt.Errorf("error unmarshalling enrolled teams yaml: %w", err) } requestedReviewers, err := gh.GetPullRequestRequestedReviewers(prNumber) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } previousReviewers, err := gh.GetPullRequestPreviousReviewers(prNumber) if err != nil { - fmt.Println(err) - os.Exit(1) + return err } // If more than three service labels are impacted, don't request reviews. 
@@ -96,7 +90,7 @@ func execRequestServiceReviewers(prNumber string, gh GithubClient, enrolledTeams if teamCount > 3 { fmt.Println("Provider-wide change (>3 services impacted); not requesting service team reviews") - return + return nil } // For each service team, check if one of the team members is already a reviewer. Rerequest @@ -150,8 +144,9 @@ func execRequestServiceReviewers(prNumber string, gh GithubClient, enrolledTeams exitCode = 1 } if exitCode != 0 { - os.Exit(1) + return fmt.Errorf("exit code = %d", exitCode) } + return nil } func init() { diff --git a/.ci/magician/cmd/test_terraform_vcr.go b/.ci/magician/cmd/test_terraform_vcr.go index 46fe634139f8..9bc4d850e996 100644 --- a/.ci/magician/cmd/test_terraform_vcr.go +++ b/.ci/magician/cmd/test_terraform_vcr.go @@ -44,13 +44,12 @@ var testTerraformVCRCmd = &cobra.Command{ Use: "test-terraform-vcr", Short: "Run vcr tests for affected packages", Long: `This command runs on new pull requests to replay VCR cassettes and re-record failing cassettes.`, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { env := make(map[string]string, len(ttvEnvironmentVariables)) for _, ev := range ttvEnvironmentVariables { val, ok := os.LookupEnv(ev) if !ok { - fmt.Printf("Did not provide %s environment variable\n", ev) - os.Exit(1) + return fmt.Errorf("did not provide %s environment variable", ev) } env[ev] = val } @@ -58,8 +57,7 @@ var testTerraformVCRCmd = &cobra.Command{ for _, tokenName := range []string{"GITHUB_TOKEN_DOWNSTREAMS", "GITHUB_TOKEN_MAGIC_MODULES"} { val, ok := lookupGithubTokenOrFallback(tokenName) if !ok { - fmt.Printf("Did not provide %s or GITHUB_TOKEN environment variable\n", tokenName) - os.Exit(1) + return fmt.Errorf("did not provide %s or GITHUB_TOKEN environment variable", tokenName) } env[tokenName] = val } @@ -72,26 +70,24 @@ var testTerraformVCRCmd = &cobra.Command{ gh := github.NewClient(env["GITHUB_TOKEN_MAGIC_MODULES"]) rnr, err := 
exec.NewRunner() if err != nil { - fmt.Println("Error creating a runner: ", err) - os.Exit(1) + return fmt.Errorf("error creating a runner: %w", err) } ctlr := source.NewController(env["GOPATH"], "modular-magician", env["GITHUB_TOKEN_DOWNSTREAMS"], rnr) vt, err := vcr.NewTester(env, rnr) if err != nil { - fmt.Println("Error creating VCR tester: ", err) + return fmt.Errorf("error creating VCR tester: %w", err) } if len(args) != 5 { - fmt.Printf("Wrong number of arguments %d, expected 5\n", len(args)) - os.Exit(1) + return fmt.Errorf("wrong number of arguments %d, expected 5", len(args)) } - execTestTerraformVCR(args[0], args[1], args[2], args[3], args[4], baseBranch, gh, rnr, ctlr, vt) + return execTestTerraformVCR(args[0], args[1], args[2], args[3], args[4], baseBranch, gh, rnr, ctlr, vt) }, } -func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, baseBranch string, gh GithubClient, rnr ExecRunner, ctlr *source.Controller, vt *vcr.Tester) { +func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, baseBranch string, gh GithubClient, rnr ExecRunner, ctlr *source.Controller, vt *vcr.Tester) error { newBranch := "auto-pr-" + prNumber oldBranch := newBranch + "-old" @@ -109,49 +105,42 @@ func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, for _, repo := range []*source.Repo{tpgRepo, tpgbRepo} { ctlr.SetPath(repo) if err := ctlr.Clone(repo); err != nil { - fmt.Println("Error cloning repo: ", err) - os.Exit(1) + return fmt.Errorf("error cloning repo: %w", err) } if err := ctlr.Fetch(repo, oldBranch); err != nil { - fmt.Println("Failed to fetch old branch: ", err) - os.Exit(1) + return fmt.Errorf("failed to fetch old branch: %w", err) } changedFiles, err := ctlr.DiffNameOnly(repo, oldBranch, newBranch) if err != nil { - fmt.Println("Failed to compute name-only diff: ", err) - os.Exit(1) + return fmt.Errorf("failed to compute name-only diff: %w", err) } repo.ChangedFiles = changedFiles 
repo.UnifiedZeroDiff, err = ctlr.DiffUnifiedZero(repo, oldBranch, newBranch) if err != nil { - fmt.Println("Failed to compute unified=0 diff: ", err) - os.Exit(1) + return fmt.Errorf("failed to compute unified=0 diff: %w", err) } } vt.SetRepoPath(provider.Beta, tpgbRepo.Path) if err := rnr.PushDir(tpgbRepo.Path); err != nil { - fmt.Println("Error changing to tpgbRepo dir: ", err) - os.Exit(1) + return fmt.Errorf("error changing to tpgbRepo dir: %w", err) } services, runFullVCR := modifiedPackages(tpgbRepo.ChangedFiles) if len(services) == 0 && !runFullVCR { fmt.Println("Skipping tests: No go files or test fixtures changed") - os.Exit(0) + return nil } fmt.Println("Running tests: Go files or test fixtures changed") if err := vt.FetchCassettes(provider.Beta, baseBranch, prNumber); err != nil { - fmt.Println("Error fetching cassettes: ", err) - os.Exit(1) + return fmt.Errorf("error fetching cassettes: %w", err) } buildStatusTargetURL := fmt.Sprintf("https://console.cloud.google.com/cloud-build/builds;region=global/%s;step=%s?project=%s", buildID, buildStep, projectID) if err := gh.PostBuildStatus(prNumber, "VCR-test", "pending", buildStatusTargetURL, mmCommitSha); err != nil { - fmt.Println("Error posting pending status: ", err) - os.Exit(1) + return fmt.Errorf("error posting pending status: %w", err) } replayingResult, affectedServicesComment, testDirs, replayingErr := runReplaying(runFullVCR, services, vt) @@ -161,15 +150,13 @@ func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, } if err := vt.UploadLogs("ci-vcr-logs", prNumber, buildID, false, false, vcr.Replaying, provider.Beta); err != nil { - fmt.Println("Error uploading replaying logs: ", err) - os.Exit(1) + return fmt.Errorf("error uploading replaying logs: %w", err) } if hasPanics, err := handlePanics(prNumber, buildID, buildStatusTargetURL, mmCommitSha, replayingResult, vcr.Replaying, gh); err != nil { - fmt.Println("Error handling panics: ", err) - os.Exit(1) + return 
fmt.Errorf("error handling panics: %w", err) } else if hasPanics { - os.Exit(0) + return nil } failedTestsPattern := strings.Join(replayingResult.FailedTests, "|") @@ -222,8 +209,7 @@ Tests were added that are GA-only additions and require manual runs: [Get to know how VCR tests work](https://googlecloudplatform.github.io/magic-modules/docs/getting-started/contributing/#general-contributing-steps)`, len(replayingResult.FailedTests), failedTestsPattern) if err := gh.PostComment(prNumber, comment); err != nil { - fmt.Println("Error posting comment: ", err) - os.Exit(1) + return fmt.Errorf("error posting comment: %w", err) } recordingResult, recordingErr := vt.RunParallel(vcr.Recording, provider.Beta, testDirs, replayingResult.FailedTests) @@ -234,20 +220,17 @@ Tests were added that are GA-only additions and require manual runs: } if err := vt.UploadCassettes("ci-vcr-cassettes", prNumber, provider.Beta); err != nil { - fmt.Println("Error uploading cassettes: ", err) - os.Exit(1) + return fmt.Errorf("error uploading cassettes: %w", err) } if err := vt.UploadLogs("ci-vcr-logs", prNumber, buildID, true, false, vcr.Recording, provider.Beta); err != nil { - fmt.Println("Error uploading recording logs: ", err) - os.Exit(1) + return fmt.Errorf("error uploading recording logs: %w", err) } if hasPanics, err := handlePanics(prNumber, buildID, buildStatusTargetURL, mmCommitSha, recordingResult, vcr.Recording, gh); err != nil { - fmt.Println("Error handling panics: ", err) - os.Exit(1) + return fmt.Errorf("error handling panics: %w", err) } else if hasPanics { - os.Exit(0) + return nil } comment = "" @@ -264,8 +247,7 @@ Tests were added that are GA-only additions and require manual runs: } if err := vt.UploadLogs("ci-vcr-logs", prNumber, buildID, true, true, vcr.Replaying, provider.Beta); err != nil { - fmt.Println("Error uploading recording logs: ", err) - os.Exit(1) + return fmt.Errorf("error uploading recording logs: %w", err) } if 
len(replayingAfterRecordingResult.FailedTests) > 0 { @@ -321,14 +303,13 @@ Please fix these to complete your PR. If you believe these test failures to be i comment += fmt.Sprintf("View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-%s/artifacts/%s/build-log/replaying_test.log)", prNumber, buildID) } if err := gh.PostComment(prNumber, comment); err != nil { - fmt.Println("Error posting comment: ", err) - os.Exit(1) + return fmt.Errorf("error posting comment: %w", err) } if err := gh.PostBuildStatus(prNumber, "VCR-test", testState, buildStatusTargetURL, mmCommitSha); err != nil { - fmt.Println("Error posting build status: ", err) - os.Exit(1) + return fmt.Errorf("error posting build status: %w", err) } + return nil } var addedTestsRegexp = regexp.MustCompile(`(?m)^\+func (Test\w+)\(t \*testing.T\) {`) diff --git a/.ci/magician/cmd/test_tgc.go b/.ci/magician/cmd/test_tgc.go index 5f000d731b15..bbf746a975a2 100644 --- a/.ci/magician/cmd/test_tgc.go +++ b/.ci/magician/cmd/test_tgc.go @@ -32,31 +32,30 @@ var testTGCCmd = &cobra.Command{ 1. COMMIT_SHA 2. 
PR_NUMBER `, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { commit := os.Getenv("COMMIT_SHA") pr := os.Getenv("PR_NUMBER") githubToken, ok := lookupGithubTokenOrFallback("GITHUB_TOKEN_MAGIC_MODULES") if !ok { - fmt.Println("Did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") - os.Exit(1) + return fmt.Errorf("did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") } gh := github.NewClient(githubToken) - execTestTGC(commit, pr, gh) + return execTestTGC(commit, pr, gh) }, } -func execTestTGC(commit, pr string, gh ttGithub) { +func execTestTGC(commit, pr string, gh ttGithub) error { if err := gh.CreateWorkflowDispatchEvent("test-tgc.yml", map[string]any{ "owner": "modular-magician", "repo": "terraform-google-conversion", "branch": "auto-pr-" + pr, "sha": commit, }); err != nil { - fmt.Printf("Error creating workflow dispatch event: %v\n", err) - os.Exit(1) + return fmt.Errorf("error creating workflow dispatch event: %w", err) } + return nil } func init() { diff --git a/.ci/magician/cmd/test_tgc_integration.go b/.ci/magician/cmd/test_tgc_integration.go index 04881ff62d23..f2d1cd334b34 100644 --- a/.ci/magician/cmd/test_tgc_integration.go +++ b/.ci/magician/cmd/test_tgc_integration.go @@ -20,22 +20,20 @@ var testTGCIntegrationCmd = &cobra.Command{ 1. GOPATH 2. 
GITHUB_TOKEN_MAGIC_MODULES `, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { goPath, ok := os.LookupEnv("GOPATH") if !ok { - fmt.Println("Did not provide GOPATH environment variable") - os.Exit(1) + return fmt.Errorf("did not provide GOPATH environment variable") } githubToken, ok := lookupGithubTokenOrFallback("GITHUB_TOKEN_MAGIC_MODULES") if !ok { - fmt.Println("Did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") - os.Exit(1) + return fmt.Errorf("did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") } rnr, err := exec.NewRunner() if err != nil { - fmt.Println("Error creating runner: ", err) + return fmt.Errorf("error creating runner: %w", err) os.Exit(1) } @@ -43,11 +41,11 @@ var testTGCIntegrationCmd = &cobra.Command{ gh := github.NewClient(githubToken) - execTestTGCIntegration(args[0], args[1], args[2], args[3], args[4], args[5], "modular-magician", rnr, ctlr, gh) + return execTestTGCIntegration(args[0], args[1], args[2], args[3], args[4], args[5], "modular-magician", rnr, ctlr, gh) }, } -func execTestTGCIntegration(prNumber, mmCommit, buildID, projectID, buildStep, ghRepo, githubUsername string, rnr ExecRunner, ctlr *source.Controller, gh GithubClient) { +func execTestTGCIntegration(prNumber, mmCommit, buildID, projectID, buildStep, ghRepo, githubUsername string, rnr ExecRunner, ctlr *source.Controller, gh GithubClient) error { newBranch := "auto-pr-" + prNumber repo := &source.Repo{ Name: ghRepo, @@ -55,17 +53,14 @@ func execTestTGCIntegration(prNumber, mmCommit, buildID, projectID, buildStep, g } ctlr.SetPath(repo) if err := ctlr.Clone(repo); err != nil { - fmt.Println("Error cloning repo: ", err) - os.Exit(1) + return fmt.Errorf("error cloning repo: %w", err) } if err := rnr.PushDir(repo.Path); err != nil { - fmt.Println("Error changing to repo dir: ", err) - os.Exit(1) + return fmt.Errorf("error changing to repo dir: %w", err) } diffs, err 
:= rnr.Run("git", []string{"diff", "--name-only", "HEAD~1"}, nil) if err != nil { - fmt.Println("Error diffing repo: ", err) - os.Exit(1) + return fmt.Errorf("error diffing repo: %w", err) } hasGoFiles := false for _, diff := range strings.Split(diffs, "\n") { @@ -76,15 +71,14 @@ func execTestTGCIntegration(prNumber, mmCommit, buildID, projectID, buildStep, g } if !hasGoFiles { fmt.Println("Skipping tests: No go files changed") - os.Exit(0) + return nil } fmt.Println("Running tests: Go files changed") targetURL := fmt.Sprintf("https://console.cloud.google.com/cloud-build/builds;region=global/%s;step=%s?project=%s", buildID, buildStep, projectID) if err := gh.PostBuildStatus(prNumber, ghRepo+"-test-integration", "pending", targetURL, mmCommit); err != nil { - fmt.Println("Error posting build status: ", err) - os.Exit(1) + return fmt.Errorf("error posting build status: %w", err) } if _, err := rnr.Run("go", []string{"mod", "edit", "-replace", fmt.Sprintf("github.com/hashicorp/terraform-provider-google-beta=github.com/%s/terraform-provider-google-beta@%s", githubUsername, newBranch)}, nil); err != nil { @@ -104,9 +98,9 @@ func execTestTGCIntegration(prNumber, mmCommit, buildID, projectID, buildStep, g } if err := gh.PostBuildStatus(prNumber, ghRepo+"-test-integration", state, targetURL, mmCommit); err != nil { - fmt.Println("Error posting build status: ", err) - os.Exit(1) + return fmt.Errorf("error posting build status: %w", err) } + return nil } func init() { diff --git a/.ci/magician/cmd/test_tpg.go b/.ci/magician/cmd/test_tpg.go index 260b5b2a7466..181644dbfcf9 100644 --- a/.ci/magician/cmd/test_tpg.go +++ b/.ci/magician/cmd/test_tpg.go @@ -37,31 +37,29 @@ var testTPGCmd = &cobra.Command{ 2. COMMIT_SHA 3. 
PR_NUMBER `, - Run: func(cmd *cobra.Command, args []string) { + RunE: func(cmd *cobra.Command, args []string) error { version := os.Getenv("VERSION") commit := os.Getenv("COMMIT_SHA") pr := os.Getenv("PR_NUMBER") githubToken, ok := lookupGithubTokenOrFallback("GITHUB_TOKEN_MAGIC_MODULES") if !ok { - fmt.Println("Did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") - os.Exit(1) + return fmt.Errorf("did not provide GITHUB_TOKEN_MAGIC_MODULES or GITHUB_TOKEN environment variables") } gh := github.NewClient(githubToken) - execTestTPG(version, commit, pr, gh) + return execTestTPG(version, commit, pr, gh) }, } -func execTestTPG(version, commit, pr string, gh ttGithub) { +func execTestTPG(version, commit, pr string, gh ttGithub) error { var repo string if version == "ga" { repo = "terraform-provider-google" } else if version == "beta" { repo = "terraform-provider-google-beta" } else { - fmt.Println("invalid version specified") - os.Exit(1) + return fmt.Errorf("invalid version specified") } if err := gh.CreateWorkflowDispatchEvent("test-tpg.yml", map[string]any{ @@ -70,9 +68,9 @@ func execTestTPG(version, commit, pr string, gh ttGithub) { "branch": "auto-pr-" + pr, "sha": commit, }); err != nil { - fmt.Printf("Error creating workflow dispatch event: %v\n", err) - os.Exit(1) + return fmt.Errorf("error creating workflow dispatch event: %w", err) } + return nil } func init() { From 2e07006ea70a2a397d0e24e8af7453dfe145328e Mon Sep 17 00:00:00 2001 From: Jbodeau Date: Thu, 6 Jun 2024 20:23:58 -0400 Subject: [PATCH 077/356] Add intercept_children property to log sink documentation. 
(#10875) --- .../terraform/website/docs/r/logging_folder_sink.html.markdown | 3 +++ .../website/docs/r/logging_organization_sink.html.markdown | 3 +++ 2 files changed, 6 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown index 88b4feb2e427..3741d6632d54 100644 --- a/mmv1/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/logging_folder_sink.html.markdown @@ -77,6 +77,9 @@ The following arguments are supported: * `include_children` - (Optional) Whether or not to include children folders in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided folder are included. +* `intercept_children` - (Optional) Whether or not to intercept logs from child projects. If true, matching logs will not + match with sinks in child resources, except _Required sinks. This sink will be visible to child resources when listing sinks. + * `bigquery_options` - (Optional) Options that affect sinks exporting data to BigQuery. Structure [documented below](#nested_bigquery_options). * `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both `filter` and one of `exclusions.filter`, it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions). 
diff --git a/mmv1/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown index 926a8a3c6cd2..a0a1fc869b0c 100644 --- a/mmv1/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/logging_organization_sink.html.markdown @@ -67,6 +67,9 @@ The following arguments are supported: * `include_children` - (Optional) Whether or not to include children organizations in the sink export. If true, logs associated with child projects are also exported; otherwise only logs relating to the provided organization are included. +* `intercept_children` - (Optional) Whether or not to intercept logs from child projects. If true, matching logs will not + match with sinks in child resources, except _Required sinks. This sink will be visible to child resources when listing sinks. + * `bigquery_options` - (Optional) Options that affect sinks exporting data to BigQuery. Structure [documented below](#nested_bigquery_options). * `exclusions` - (Optional) Log entries that match any of the exclusion filters will not be exported. If a log entry is matched by both `filter` and one of `exclusions.filter`, it will not be exported. Can be repeated multiple times for multiple exclusions. Structure is [documented below](#nested_exclusions). 
From 46fd4d36756b64e1b279f962a8b7b7f8a606d896 Mon Sep 17 00:00:00 2001 From: Jesse DeJong Date: Fri, 7 Jun 2024 09:59:54 -0400 Subject: [PATCH 078/356] Add Managed Kafka Cluster resource and tests (#10773) --- mmv1/products/managedkafka/Cluster.yaml | 151 ++++++++++++++++++ mmv1/products/managedkafka/product.yaml | 9 ++ .../managedkafka_cluster_basic.tf.erb | 27 ++++ .../examples/managedkafka_cluster_cmek.tf.erb | 54 +++++++ .../components/inputs/services_beta.kt | 5 + .../components/inputs/services_ga.kt | 5 + ...resource_managed_kafka_cluster_test.go.erb | 111 +++++++++++++ 7 files changed, 362 insertions(+) create mode 100644 mmv1/products/managedkafka/Cluster.yaml create mode 100644 mmv1/products/managedkafka/product.yaml create mode 100644 mmv1/templates/terraform/examples/managedkafka_cluster_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/managedkafka_cluster_cmek.tf.erb create mode 100644 mmv1/third_party/terraform/services/managedkafka/resource_managed_kafka_cluster_test.go.erb diff --git a/mmv1/products/managedkafka/Cluster.yaml b/mmv1/products/managedkafka/Cluster.yaml new file mode 100644 index 000000000000..65a10796105f --- /dev/null +++ b/mmv1/products/managedkafka/Cluster.yaml @@ -0,0 +1,151 @@ +--- !ruby/object:Api::Resource +base_url: projects/{{project}}/locations/{{location}}/clusters +create_url: projects/{{project}}/locations/{{location}}/clusters?clusterId={{cluster_id}} +self_link: projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}} +id_format: projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}} +import_format: + - projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}} +name: Cluster +description: An Apache Kafka for BigQuery cluster. 
+min_version: beta +update_verb: :PATCH +update_mask: true +autogen_async: true +timeouts: !ruby/object:Api::Timeouts + insert_minutes: 60 + update_minutes: 30 + delete_minutes: 30 +examples: + - !ruby/object:Provider::Terraform::Examples + name: "managedkafka_cluster_basic" + primary_resource_id: "example" + min_version: beta + vars: + cluster_id: 'my-cluster' + key_name: 'example-key' + keyring_name: 'example-keyring' + - !ruby/object:Provider::Terraform::Examples + name: 'managedkafka_cluster_cmek' + primary_resource_id: 'example' + min_version: beta + external_providers: ["time"] + vars: + cluster_id: 'my-cluster' + key_name: 'example-key' + key_ring_name: 'example-key-ring' +properties: + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: networkConfigs + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: subnet + description: "Name of the VPC subnet from which the cluster is accessible. Both broker and + bootstrap server IP addresses and DNS entries are automatically created + in the subnet. The subnet must be located in the same region as the + cluster. The project may differ. A minimum of 1 subnet is required. + A maximum of 10 subnets can be specified. The name of the subnet must be + in the format `projects/PROJECT_ID/regions/REGION/subnetworks/SUBNET`." + required: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + description: "Virtual Private Cloud (VPC) networks that must be granted + direct access to the Kafka cluster. Minimum of 1 network is required. Maximum + of 10 networks can be specified." + required: true + name: accessConfig + description: "The configuration of access to the Kafka cluster." + required: true + - !ruby/object:Api::Type::String + name: kmsKey + description: "The Cloud KMS Key name to use for encryption. 
+ The key must be located in the same region as the cluster and cannot be changed. + Must be in the format `projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`." + immutable: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + name: gcpConfig + description: "Configuration properties for a Kafka cluster deployed to Google Cloud Platform." + required: true + - !ruby/object:Api::Type::String + name: name + description: "The name of the cluster. Structured like: `projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID`." + output: true + - !ruby/object:Api::Type::String + name: createTime + description: "The time when the cluster was created." + output: true + - !ruby/object:Api::Type::String + name: updateTime + description: "The time when the cluster was last updated." + output: true + - !ruby/object:Api::Type::KeyValueLabels + name: labels + description: "List of label KEY=VALUE pairs to add. Keys must start with a lowercase + character and contain only hyphens (-), underscores (\_), lowercase + characters, and numbers. Values must contain only hyphens (-), + underscores (\_), lowercase characters, and numbers." + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: vcpuCount + description: "The number of vCPUs to provision for the cluster. The minimum is 3." + required: true + - !ruby/object:Api::Type::String + name: memoryBytes + description: "The memory to provision for the cluster in bytes. The value must be + between 1 GiB and 8 GiB per vCPU. Ex. 1024Mi, 4Gi." + required: true + name: capacityConfig + description: "A capacity configuration of a Kafka cluster." + required: true + - !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: mode + description: "The rebalance behavior for the cluster. When not specified, + defaults to `NO_REBALANCE`. Possible values: `MODE_UNSPECIFIED`, `NO_REBALANCE`, `AUTO_REBALANCE_ON_SCALE_UP`." 
+ name: rebalanceConfig + description: "Defines rebalancing behavior of a Kafka cluster." + - !ruby/object:Api::Type::String + name: state + description: "The current state of the cluster. Possible values: `STATE_UNSPECIFIED`, `CREATING`, `ACTIVE`, `DELETING`." + output: true +parameters: + - !ruby/object:Api::Type::String + name: location + description: "ID of the location of the Apache Kafka for BigQuery resource. See + https://cloud.google.com/managed-kafka/docs/locations for a list of + supported locations." + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: clusterId + description: "The ID to use for the cluster, which will become the final + component of the cluster's name. The ID must be 1-63 characters long, and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` to comply with RFC 1035. This + value is structured like: `my-cluster-id`." + url_param_only: true + required: true + immutable: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + path: name + base_url: "{{op_id}}" + wait_ms: 1000 + timeouts: + result: !ruby/object:Api::OpAsync::Result + path: response + resource_inside_response: true + status: !ruby/object:Api::OpAsync::Status + path: done + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: error + message: message diff --git a/mmv1/products/managedkafka/product.yaml b/mmv1/products/managedkafka/product.yaml new file mode 100644 index 000000000000..46d2c6d023e8 --- /dev/null +++ b/mmv1/products/managedkafka/product.yaml @@ -0,0 +1,9 @@ +--- !ruby/object:Api::Product +versions: + - !ruby/object:Api::Product::Version + base_url: https://managedkafka.googleapis.com/v1/ + name: beta +name: ManagedKafka +display_name: Managed Kafka +scopes: + - https://www.googleapis.com/auth/cloud-platform diff --git a/mmv1/templates/terraform/examples/managedkafka_cluster_basic.tf.erb 
b/mmv1/templates/terraform/examples/managedkafka_cluster_basic.tf.erb new file mode 100644 index 000000000000..98ca01826485 --- /dev/null +++ b/mmv1/templates/terraform/examples/managedkafka_cluster_basic.tf.erb @@ -0,0 +1,27 @@ +resource "google_managed_kafka_cluster" "<%= ctx[:primary_resource_id] %>" { + cluster_id = "<%= ctx[:vars]['cluster_id'] %>" + location = "us-central1" + capacity_config { + vcpu_count = 3 + memory_bytes = 3221225472 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + rebalance_config { + mode = "NO_REBALANCE" + } + labels = { + key = "value" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} diff --git a/mmv1/templates/terraform/examples/managedkafka_cluster_cmek.tf.erb b/mmv1/templates/terraform/examples/managedkafka_cluster_cmek.tf.erb new file mode 100644 index 000000000000..05d6d0829ba8 --- /dev/null +++ b/mmv1/templates/terraform/examples/managedkafka_cluster_cmek.tf.erb @@ -0,0 +1,54 @@ +resource "google_managed_kafka_cluster" "<%= ctx[:primary_resource_id] %>" { + cluster_id = "<%= ctx[:vars]['cluster_id'] %>" + location = "us-central1" + capacity_config { + vcpu_count = 3 + memory_bytes = 3221225472 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + kms_key = google_kms_crypto_key.key.id + } + + provider = google-beta +} + +resource "google_project_service_identity" "kafka_service_identity" { + project = data.google_project.project.project_id + service = "managedkafka.googleapis.com" + + provider = google-beta +} + +resource "google_kms_crypto_key" "key" { + name = "<%= ctx[:vars]['key_name'] %>" + key_ring = google_kms_key_ring.key_ring.id + + provider = google-beta +} + +resource "google_kms_key_ring" "key_ring" { + name = "<%= 
ctx[:vars]['key_ring_name'] %>" + location = "us-central1" + + provider = google-beta +} + +resource "google_kms_crypto_key_iam_binding" "crypto_key_binding" { + crypto_key_id = google_kms_crypto_key.key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + members = [ + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-managedkafka.iam.gserviceaccount.com", + ] + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt index 8360ae4c1224..7804a4a11ad7 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt @@ -486,6 +486,11 @@ var ServicesListBeta = mapOf( "displayName" to "Looker", "path" to "./google-beta/services/looker" ), + "managedkafka" to mapOf( + "name" to "managedkafka", + "displayName" to "Managedkafka", + "path" to "./google-beta/services/managedkafka" + ), "memcache" to mapOf( "name" to "memcache", "displayName" to "Memcache", diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt index b29b26b26855..76cf658afd9b 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt @@ -481,6 +481,11 @@ var ServicesListGa = mapOf( "displayName" to "Looker", "path" to "./google/services/looker" ), + "managedkafka" to mapOf( + "name" to "managedkafka", + "displayName" to "Managedkafka", + "path" to "./google/services/managedkafka" + ), "memcache" to mapOf( "name" to "memcache", "displayName" to "Memcache", diff --git a/mmv1/third_party/terraform/services/managedkafka/resource_managed_kafka_cluster_test.go.erb 
b/mmv1/third_party/terraform/services/managedkafka/resource_managed_kafka_cluster_test.go.erb new file mode 100644 index 000000000000..abc2cc96b6bb --- /dev/null +++ b/mmv1/third_party/terraform/services/managedkafka/resource_managed_kafka_cluster_test.go.erb @@ -0,0 +1,111 @@ +<% autogen_exception -%> +package managedkafka_test +<% unless version == 'ga' -%> + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccManagedKafkaCluster_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckManagedKafkaClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccManagedKafkaCluster_basic(context), + }, + { + ResourceName: "google_managed_kafka_cluster.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster_id", "labels", "location", "terraform_labels"}, + }, + { + Config: testAccManagedKafkaCluster_update(context), + }, + { + ResourceName: "google_managed_kafka_cluster.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster_id", "labels", "location", "terraform_labels"}, + }, + }, + }) +} + +func testAccManagedKafkaCluster_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_managed_kafka_cluster" "example" { + cluster_id = "tf-test-my-cluster%{random_suffix}" + location = "us-central1" + capacity_config { + vcpu_count = 3 + memory_bytes = 3221225472 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + rebalance_config { + mode = 
"NO_REBALANCE" + } + labels = { + key = "value" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} +`, context) +} + +func testAccManagedKafkaCluster_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_managed_kafka_cluster" "example" { + cluster_id = "tf-test-my-cluster%{random_suffix}" + location = "us-central1" + capacity_config { + vcpu_count = 4 + memory_bytes = 4512135122 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + rebalance_config { + mode = "AUTO_REBALANCE_ON_SCALE_UP" + } + labels = { + key = "new-value" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} +`, context) +} +<% else %> +// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. +<% end -%> From e893c265f82e009821c0b3a7517e77fa243f401f Mon Sep 17 00:00:00 2001 From: Damon Date: Fri, 7 Jun 2024 08:43:05 -0700 Subject: [PATCH 079/356] Use data google_kms(_key_ring | _crypto_key) instead of resource (#10510) Co-authored-by: Stephen Lewis (Burrows) --- ...rce_dataflow_flex_template_job_test.go.erb | 22 +++++-------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job_test.go.erb b/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job_test.go.erb index 507e76014492..5eba332b9e79 100644 --- a/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job_test.go.erb +++ b/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job_test.go.erb @@ -299,8 +299,9 @@ func TestAccDataflowFlexTemplateJob_withKmsKey(t *testing.T) { randStr := acctest.RandString(t, 10) job := "tf-test-dataflow-job-" + randStr - key_ring := "tf-test-dataflow-kms-ring-" + 
randStr - crypto_key := "tf-test-dataflow-kms-key-" + randStr + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + keyRing := kms.KeyRing.Name + cryptoKey := kms.CryptoKey.Name bucket := "tf-test-dataflow-bucket-" + randStr topic := "tf-test-topic" + randStr @@ -318,7 +319,7 @@ func TestAccDataflowFlexTemplateJob_withKmsKey(t *testing.T) { CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccDataflowFlexTemplateJob_kms(job, key_ring, crypto_key, bucket, topic), + Config: testAccDataflowFlexTemplateJob_kms(job, keyRing, cryptoKey, bucket, topic), Check: resource.ComposeTestCheckFunc( testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_kms", false), ), @@ -1331,17 +1332,6 @@ resource "google_storage_bucket_object" "schema" { EOF } -resource "google_kms_key_ring" "keyring" { - name = "%s" - location = "global" -} - -resource "google_kms_crypto_key" "crypto_key" { - name = "%s" - key_ring = google_kms_key_ring.keyring.id - rotation_period = "100000s" -} - data "google_storage_bucket_object" "flex_template" { name = "latest/flex/Streaming_Data_Generator" bucket = "dataflow-templates" @@ -1358,10 +1348,10 @@ resource "google_dataflow_flex_template_job" "flex_job_kms" { labels = { "my_labels" = "value" } - kms_key_name = google_kms_crypto_key.crypto_key.id + kms_key_name = "%s" } -`, topicName, bucket, key_ring, crypto_key, job) +`, topicName, bucket, crypto_key, job) } func testAccDataflowFlexTemplateJob_additionalExperiments(job, bucket, topicName string, experiments []string) string { From 853a43a3332bed12600621c8e5825e2231342ec5 Mon Sep 17 00:00:00 2001 From: Iris Chen <10179943+iyabchen@users.noreply.github.com> Date: Fri, 7 Jun 2024 09:01:23 -0700 Subject: [PATCH 080/356] remove os.Exit in magician subcommands (#10901) --- .ci/magician/cmd/generate_downstream.go | 2 +- .ci/magician/cmd/scheduled_pr_reminders.go | 3 +-- .ci/magician/cmd/test_tgc_integration.go | 1 - 
.ci/magician/cmd/wait_for_commit.go | 4 +--- 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/.ci/magician/cmd/generate_downstream.go b/.ci/magician/cmd/generate_downstream.go index 6d620ee6e3b4..0279bcd1335c 100644 --- a/.ci/magician/cmd/generate_downstream.go +++ b/.ci/magician/cmd/generate_downstream.go @@ -142,7 +142,7 @@ func execGenerateDownstream(baseBranch, command, repo, version, ref string, gh G if commitErr != nil { fmt.Println("Error creating commit: ", commitErr) if !strings.Contains(commitErr.Error(), "nothing to commit") { - os.Exit(1) + return fmt.Errorf("error creating commit: %w", commitErr) } } diff --git a/.ci/magician/cmd/scheduled_pr_reminders.go b/.ci/magician/cmd/scheduled_pr_reminders.go index 7721e6dff65a..0201f0da8807 100644 --- a/.ci/magician/cmd/scheduled_pr_reminders.go +++ b/.ci/magician/cmd/scheduled_pr_reminders.go @@ -63,8 +63,7 @@ var scheduledPrReminders = &cobra.Command{ RunE: func(cmd *cobra.Command, args []string) error { githubToken, ok := os.LookupEnv("GITHUB_TOKEN") if !ok { - fmt.Println("Did not provide GITHUB_TOKEN environment variable") - os.Exit(1) + return fmt.Errorf("did not provide GITHUB_TOKEN environment variable") } gh := github.NewClient(nil).WithAuthToken(githubToken) return execScheduledPrReminders(gh) diff --git a/.ci/magician/cmd/test_tgc_integration.go b/.ci/magician/cmd/test_tgc_integration.go index f2d1cd334b34..f5b6dfb68e0b 100644 --- a/.ci/magician/cmd/test_tgc_integration.go +++ b/.ci/magician/cmd/test_tgc_integration.go @@ -34,7 +34,6 @@ var testTGCIntegrationCmd = &cobra.Command{ rnr, err := exec.NewRunner() if err != nil { return fmt.Errorf("error creating runner: %w", err) - os.Exit(1) } ctlr := source.NewController(goPath, "modular-magician", githubToken, rnr) diff --git a/.ci/magician/cmd/wait_for_commit.go b/.ci/magician/cmd/wait_for_commit.go index 7aec749f2449..cea9a023032f 100644 --- a/.ci/magician/cmd/wait_for_commit.go +++ b/.ci/magician/cmd/wait_for_commit.go @@ -4,7 +4,6 @@ 
import ( "fmt" "magician/exec" "magician/source" - "os" "strings" "time" @@ -33,8 +32,7 @@ var waitForCommitCmd = &cobra.Command{ rnr, err := exec.NewRunner() if err != nil { - fmt.Println("Error creating Runner: ", err) - os.Exit(1) + return fmt.Errorf("error creating Runner: %w", err) } return execWaitForCommit(syncBranchPrefix, baseBranch, sha, rnr) From 0f4f6b59b3891369012a3fb864015417766ba4b7 Mon Sep 17 00:00:00 2001 From: roop2 <161707562+roop2@users.noreply.github.com> Date: Fri, 7 Jun 2024 21:33:59 +0530 Subject: [PATCH 081/356] Adding support for google_netapp_backup resource in netapp volumes (#10858) --- mmv1/products/netapp/Backup.yaml | 133 ++++++++++++++ .../terraform/examples/netapp_backup.tf.erb | 36 ++++ .../netapp/resource_netapp_backup_test.go | 162 ++++++++++++++++++ 3 files changed, 331 insertions(+) create mode 100644 mmv1/products/netapp/Backup.yaml create mode 100644 mmv1/templates/terraform/examples/netapp_backup.tf.erb create mode 100644 mmv1/third_party/terraform/services/netapp/resource_netapp_backup_test.go diff --git a/mmv1/products/netapp/Backup.yaml b/mmv1/products/netapp/Backup.yaml new file mode 100644 index 000000000000..3fe4d2693665 --- /dev/null +++ b/mmv1/products/netapp/Backup.yaml @@ -0,0 +1,133 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Resource +name: 'backup' +description: | + NetApp Volumes supports volume backups, which are copies of your volumes + stored independently from the volume. Backups are stored in backup vaults, + which are containers for backups. If a volume is lost or deleted, you can + use backups to restore your data to a new volume. + + When you create the first backup of a volume, all of the volume's used + data is sent to the backup vault. Subsequent backups of the same volume + only include data that has changed from the previous backup. This allows + for fast incremental-forever backups and reduces the required capacity + inside the backup vault. + + You can create manual and scheduled backups. Manual backups can be taken + from a volume or from an existing volume snapshot. Scheduled backups + require a backup policy. +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Documentation': 'https://cloud.google.com/netapp/volumes/docs/protect-data/about-volume-backups' + api: 'https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.backupVaults.backups' +base_url: projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups +self_link: projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}} +create_url: projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups?backupId={{name}} +create_verb: :POST +update_url: projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}} +update_verb: :PATCH +update_mask: true +delete_url: projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}} +delete_verb: :DELETE +autogen_async: true +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + base_url: '{{op_id}}' +id_format: 'projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}' +import_format: 
['projects/{{project}}/locations/{{location}}/backupVaults/{{vault_name}}/backups/{{name}}'] +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'netapp_backup' + primary_resource_id: 'test_backup' + vars: + pool_name: 'backup-pool' + volume_name: 'backup-volume' + backup_vault_name: 'backup-vault' + backup_name: 'test-backup' + test_vars_overrides: + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog"))' +parameters: + - !ruby/object:Api::Type::String + name: 'location' + required: true + immutable: true + url_param_only: true + description: | + Location of the backup. + - !ruby/object:Api::Type::String + name: 'vault_name' + required: true + immutable: true + url_param_only: true + description: | + Name of the backup vault to store the backup in. + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name of the backup. Needs to be unique per location. + required: true + immutable: true + url_param_only: true +properties: + - !ruby/object:Api::Type::String + name: 'state' + description: | + The state of the Backup Vault. Possible Values : [STATE_UNSPECIFIED, CREATING, UPLOADING, READY, DELETING, ERROR, UPDATING] + output: true + - !ruby/object:Api::Type::String + name: 'description' + description: | + A description of the backup with 2048 characters or less. Requests with longer descriptions will be rejected. + required: false + - !ruby/object:Api::Type::String + name: 'volumeUsageBytes' + description: | + Size of the file system when the backup was created. When creating a new volume from the backup, the volume capacity will have to be at least as big. + output: true + - !ruby/object:Api::Type::String + name: 'backupType' + description: | + Type of backup, manually created or created by a backup policy. 
Possible Values : [TYPE_UNSPECIFIED, MANUAL, SCHEDULED] + output: true + - !ruby/object:Api::Type::String + name: 'sourceVolume' + description: | + ID of volumes this backup belongs to. Format: `projects/{{projects_id}}/locations/{{location}}/volumes/{{name}}`` + immutable: true + diff_suppress_func: tpgresource.ProjectNumberDiffSuppress + - !ruby/object:Api::Type::String + name: 'createTime' + description: | + Create time of the backup. A timestamp in RFC3339 UTC "Zulu" format. Examples: "2023-06-22T09:13:01.617Z". + output: true + - !ruby/object:Api::Type::KeyValueLabels + name: labels + description: | + Labels as key value pairs. Example: `{ "owner": "Bob", "department": "finance", "purpose": "testing" }`. + required: false + - !ruby/object:Api::Type::String + name: 'chainStorageBytes' + description: | + Backups of a volume build incrementally on top of each other. They form a "backup chain". + Total size of all backups in a chain in bytes = baseline backup size + sum(incremental backup size) + output: true + - !ruby/object:Api::Type::String + name: 'sourceSnapshot' + description: | + If specified, backup will be created from the given snapshot. If not specified, + there will be a new snapshot taken to initiate the backup creation. 
+ Format: `projects/{{projectId}}/locations/{{location}}/volumes/{{volumename}}/snapshots/{{snapshotname}}`` + required: false + diff_suppress_func: tpgresource.ProjectNumberDiffSuppress diff --git a/mmv1/templates/terraform/examples/netapp_backup.tf.erb b/mmv1/templates/terraform/examples/netapp_backup.tf.erb new file mode 100644 index 000000000000..e772b10d703e --- /dev/null +++ b/mmv1/templates/terraform/examples/netapp_backup.tf.erb @@ -0,0 +1,36 @@ +data "google_compute_network" "default" { + name = "<%= ctx[:vars]['network_name'] %>" +} + +resource "google_netapp_storage_pool" "default" { + name = "<%= ctx[:vars]['pool_name'] %>" + location = "us-central1" + service_level = "PREMIUM" + capacity_gib = "2048" + network = data.google_compute_network.default.id +} + +resource "google_netapp_volume" "default" { + name = "<%= ctx[:vars]['volume_name'] %>" + location = google_netapp_storage_pool.default.location + capacity_gib = "100" + share_name = "<%= ctx[:vars]['volume_name'] %>" + storage_pool = google_netapp_storage_pool.default.name + protocols = ["NFSV3"] + deletion_policy = "FORCE" + backup_config { + backup_vault = google_netapp_backup_vault.default.id + } +} + +resource "google_netapp_backup_vault" "default" { + name = "<%= ctx[:vars]['backup_vault_name'] %>" + location = google_netapp_storage_pool.default.location +} + +resource "google_netapp_backup" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['backup_name'] %>" + location = google_netapp_backup_vault.default.location + vault_name = google_netapp_backup_vault.default.name + source_volume = google_netapp_volume.default.id +} diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_test.go new file mode 100644 index 000000000000..d12267b66a17 --- /dev/null +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_backup_test.go @@ -0,0 +1,162 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package netapp_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNetappbackup_netappBackupFull_update(t *testing.T) { + context := map[string]interface{}{ + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog")), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetappbackupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetappbackup_netappBackupFromVolumeSnapshot(context), + }, + { + ResourceName: "google_netapp_backup.test_backup", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "name", "terraform_labels", "vault_name"}, + }, + { + Config: testAccNetappbackup_netappBackupFromVolumeSnapshot_update(context), + }, + { + ResourceName: "google_netapp_backup.test_backup", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "name", "terraform_labels", "vault_name"}, + }, + }, + }) +} + +func testAccNetappbackup_netappBackupFromVolumeSnapshot(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_network" "default" { + name = "%{network_name}" +} + +resource "google_netapp_storage_pool" "default" { + name = "tf-test-backup-pool%{random_suffix}" + location = "us-central1" + service_level = "PREMIUM" + capacity_gib = "2048" + network = data.google_compute_network.default.id +} + +resource "google_netapp_volume" "default" { + name = "tf-test-backup-volume%{random_suffix}" + location = google_netapp_storage_pool.default.location + capacity_gib = 
"100" + share_name = "tf-test-backup-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default.name + protocols = ["NFSV3"] + deletion_policy = "FORCE" + backup_config { + backup_vault = google_netapp_backup_vault.default.id + } +} + +resource "google_netapp_backup_vault" "default" { + name = "tf-test-backup-vault%{random_suffix}" + location = google_netapp_storage_pool.default.location +} + +resource "google_netapp_volume_snapshot" "default" { + depends_on = [google_netapp_volume.default] + location = google_netapp_volume.default.location + volume_name = google_netapp_volume.default.name + description = "This is a test description" + name = "testvolumesnap%{random_suffix}" + labels = { + key= "test" + value= "snapshot" + } + } + +resource "google_netapp_backup" "test_backup" { + name = "tf-test-test-backup%{random_suffix}" + description = "This is a test backup" + source_volume = google_netapp_volume.default.id + location = google_netapp_backup_vault.default.location + vault_name = google_netapp_backup_vault.default.name + source_snapshot = google_netapp_volume_snapshot.default.id + labels = { + key= "test" + value= "backup" + } +} +`, context) +} + +func testAccNetappbackup_netappBackupFromVolumeSnapshot_update(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_network" "default" { + name = "%{network_name}" +} + +resource "google_netapp_storage_pool" "default" { + name = "tf-test-backup-pool%{random_suffix}" + location = "us-central1" + service_level = "PREMIUM" + capacity_gib = "2048" + network = data.google_compute_network.default.id +} + +resource "google_netapp_volume" "default" { + name = "tf-test-backup-volume%{random_suffix}" + location = google_netapp_storage_pool.default.location + capacity_gib = "100" + share_name = "tf-test-backup-volume%{random_suffix}" + storage_pool = google_netapp_storage_pool.default.name + protocols = ["NFSV3"] + deletion_policy = "FORCE" + backup_config { + backup_vault = 
google_netapp_backup_vault.default.id + } +} + +resource "google_netapp_backup_vault" "default" { + name = "tf-test-backup-vault%{random_suffix}" + location = google_netapp_storage_pool.default.location +} + +resource "google_netapp_volume_snapshot" "default" { + depends_on = [google_netapp_volume.default] + location = google_netapp_volume.default.location + volume_name = google_netapp_volume.default.name + description = "This is a test description" + name = "testvolumesnap%{random_suffix}" + labels = { + key= "test" + value= "snapshot" + } + } + +resource "google_netapp_backup" "test_backup" { + name = "tf-test-test-backup%{random_suffix}" + description = "This is a test backup" + source_volume = google_netapp_volume.default.id + location = google_netapp_backup_vault.default.location + vault_name = google_netapp_backup_vault.default.name + source_snapshot = google_netapp_volume_snapshot.default.id + labels = { + key= "test_update" + value= "backup_update" + } +} +`, context) +} From b6e006413abd217d5235fcb889c44b7b454cf1a0 Mon Sep 17 00:00:00 2001 From: Hamza Hassan <43001514+Hamzawy63@users.noreply.github.com> Date: Fri, 7 Jun 2024 18:40:37 +0200 Subject: [PATCH 082/356] Fix inaccurate documentation for TargetHttpsProxy resource (#10874) Co-authored-by: Hamza Hassan --- mmv1/products/compute/RegionTargetHttpsProxy.yaml | 3 +-- mmv1/products/compute/TargetHttpsProxy.yaml | 6 ++++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/mmv1/products/compute/RegionTargetHttpsProxy.yaml b/mmv1/products/compute/RegionTargetHttpsProxy.yaml index 94cc7c0aa635..5f7d590b6ebc 100644 --- a/mmv1/products/compute/RegionTargetHttpsProxy.yaml +++ b/mmv1/products/compute/RegionTargetHttpsProxy.yaml @@ -140,8 +140,7 @@ properties: name: 'certificateManagerCertificates' description: | URLs to certificate manager certificate resources that are used to authenticate connections between users and the load balancer. - Currently, you may specify up to 15 certificates. 
Certificate manager certificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. - sslCertificates and certificateManagerCertificates fields can not be defined together. + sslCertificates and certificateManagerCertificates can't be defined together. Accepted format is `//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificates/{resourceName}` or just the self_link `projects/{project}/locations/{location}/certificates/{resourceName}` update_verb: :POST update_url: 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setSslCertificates' diff --git a/mmv1/products/compute/TargetHttpsProxy.yaml b/mmv1/products/compute/TargetHttpsProxy.yaml index 5356091825d0..50d1983c027b 100644 --- a/mmv1/products/compute/TargetHttpsProxy.yaml +++ b/mmv1/products/compute/TargetHttpsProxy.yaml @@ -134,7 +134,8 @@ properties: name: 'certificateManagerCertificates' description: | URLs to certificate manager certificate resources that are used to authenticate connections between users and the load balancer. - Currently, you may specify up to 15 certificates. Certificate manager certificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. + Certificate manager certificates only apply when the load balancing scheme is set to INTERNAL_MANAGED. + For EXTERNAL and EXTERNAL_MANAGED, use certificate_map instead. sslCertificates and certificateManagerCertificates fields can not be defined together. Accepted format is `//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificates/{resourceName}` or just the self_link `projects/{project}/locations/{location}/certificates/{resourceName}` update_verb: :POST @@ -164,7 +165,8 @@ properties: name: 'certificateMap' description: | A reference to the CertificateMap resource uri that identifies a certificate map - associated with the given target proxy. This field can only be set for global target proxies. 
+ associated with the given target proxy. This field is only supported for EXTERNAL and EXTERNAL_MANAGED load balancing schemes. + For INTERNAL_MANAGED, use certificate_manager_certificates instead. Accepted format is `//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificateMaps/{resourceName}`. update_verb: :POST update_url: 'projects/{{project}}/global/targetHttpsProxies/{{name}}/setCertificateMap' From 33f138e105486e8eb73c57a9f6f26e6a32b01058 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 7 Jun 2024 17:52:58 +0100 Subject: [PATCH 083/356] Fix issue in TestAccBigtableInstance_forceDestroyBackups test (#10905) --- .../services/bigtable/resource_bigtable_instance_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_test.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_test.go index f1d0cc9323d0..65ae6dc5b2bf 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_test.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_instance_test.go @@ -890,7 +890,7 @@ EOT check "health_check_1" { assert { - condition = data.http.make_backups_1.status_code == 200 + condition = data.http.make_backup_1.status_code == 200 error_message = "HTTP request to create a backup returned a non-200 status code" } } From dd18002306d2f9d5fc4c062b3b1bc96e149d97a4 Mon Sep 17 00:00:00 2001 From: vijaykanthm Date: Fri, 7 Jun 2024 09:53:24 -0700 Subject: [PATCH 084/356] Add Resource Folder Security Health Analytics Custom Module (#10839) --- ...erSecurityHealthAnalyticsCustomModule.yaml | 228 +++++++++++++++++ ...ealth_analytics_custom_module_basic.tf.erb | 24 ++ ...health_analytics_custom_module_full.tf.erb | 38 +++ ...ity_health_analytics_custom_module_test.go | 234 ++++++++++++++++++ 4 files changed, 524 insertions(+) create mode 100644 
mmv1/products/securitycentermanagement/FolderSecurityHealthAnalyticsCustomModule.yaml create mode 100644 mmv1/templates/terraform/examples/scc_management_folder_security_health_analytics_custom_module_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/scc_management_folder_security_health_analytics_custom_module_full.tf.erb create mode 100644 mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_folder_security_health_analytics_custom_module_test.go diff --git a/mmv1/products/securitycentermanagement/FolderSecurityHealthAnalyticsCustomModule.yaml b/mmv1/products/securitycentermanagement/FolderSecurityHealthAnalyticsCustomModule.yaml new file mode 100644 index 000000000000..504a102779ad --- /dev/null +++ b/mmv1/products/securitycentermanagement/FolderSecurityHealthAnalyticsCustomModule.yaml @@ -0,0 +1,228 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'FolderSecurityHealthAnalyticsCustomModule' +description: | + Represents an instance of a Security Health Analytics custom module, including + its full module name, display name, enablement state, and last updated time. + You can create a custom module at the organization, folder, or project level. + Custom modules that you create at the organization or folder level are inherited + by the child folders and projects. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Overview of custom modules for Security Health Analytics': 'https://cloud.google.com/security-command-center/docs/custom-modules-sha-overview' + api: 'https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/folders.locations.securityHealthAnalyticsCustomModules' +base_url: 'folders/{{folder}}/locations/{{location}}/securityHealthAnalyticsCustomModules' +self_link: 'folders/{{folder}}/locations/{{location}}/securityHealthAnalyticsCustomModules/{{name}}' +mutex: 'folders/{{folder}}/locations/{{location}}/securityHealthAnalyticsCustomModules' +update_verb: :PATCH +update_mask: true +examples: + - !ruby/object:Provider::Terraform::Examples + name: "scc_management_folder_security_health_analytics_custom_module_basic" + primary_resource_id: "example" + external_providers: ["random", "time"] + skip_test: true + vars: + folder_display_name: "folder-name" + display_name: basic_custom_module + sleep: "" + test_env_vars: + org_id: :ORG_ID + test_vars_overrides: + sleep: "true" + - !ruby/object:Provider::Terraform::Examples + name: "scc_management_folder_security_health_analytics_custom_module_full" + primary_resource_id: "example" + external_providers: ["random", "time"] + skip_test: true + vars: + folder_display_name: "folder-name" + display_name: full_custom_module + sleep: "" + test_env_vars: + org_id: :ORG_ID + test_vars_overrides: + sleep: "true" + +parameters: + - !ruby/object:Api::Type::String + name: 'folder' + immutable: true + required: true + url_param_only: true + description: | + Numerical ID of the parent folder. + + - !ruby/object:Api::Type::String + name: 'location' + immutable: true + required: false + url_param_only: true + default_value: 'global' + description: | + Location ID of the parent organization. If not provided, 'global' will be used as the default location. 
+ +properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + custom_flatten: templates/terraform/custom_flatten/name_from_self_link.erb + description: | + The resource name of the custom module. Its format is "folders/{folder}/locations/{location}/securityHealthAnalyticsCustomModules/{securityHealthAnalyticsCustomModule}". + The id {securityHealthAnalyticsCustomModule} is server-generated and is not user settable. It will be a numeric id containing 1-20 digits. + - !ruby/object:Api::Type::String + name: 'displayName' + immutable: true + # API error for invalid display names is just "INVALID_ARGUMENT" with no details + validation: !ruby/object:Provider::Terraform::Validation + function: 'verify.ValidateRegexp(`^[a-z][\w_]{0,127}$`)' + description: | + The display name of the Security Health Analytics custom module. This + display name becomes the finding category for all findings that are + returned by this custom module. The display name must be between 1 and + 128 characters, start with a lowercase letter, and contain alphanumeric + characters or underscores only. + - !ruby/object:Api::Type::Enum + name: 'enablementState' + description: | + The enablement state of the custom module. + values: + - :ENABLED + - :DISABLED + - !ruby/object:Api::Type::String + name: 'updateTime' + output: true + description: | + The time at which the custom module was last updated. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and + up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::String + name: 'lastEditor' + output: true + description: | + The editor that last updated the custom module. + - !ruby/object:Api::Type::String + name: 'ancestorModule' + output: true + description: | + If empty, indicates that the custom module was created in the organization, folder, + or project in which you are viewing the custom module. 
Otherwise, ancestor_module + specifies the organization or folder from which the custom module is inherited. + - !ruby/object:Api::Type::NestedObject + name: 'customConfig' + description: | + The user specified custom configuration for the module. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'predicate' + description: | + The CEL expression to evaluate to produce findings. When the expression evaluates + to true against a resource, a finding is generated. + properties: + - !ruby/object:Api::Type::String + name: 'expression' + required: true + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + - !ruby/object:Api::Type::NestedObject + name: 'customOutput' + description: | + Custom output properties. + properties: + - !ruby/object:Api::Type::Array + name: 'properties' + description: | + A list of custom output properties to add to the finding. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the property for the custom output. + - !ruby/object:Api::Type::NestedObject + name: 'valueExpression' + description: | + The CEL expression for the custom output. A resource property can be specified + to return the value of the property or a text string enclosed in quotation marks. 
+ properties: + - !ruby/object:Api::Type::String + name: 'expression' + required: true + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + - !ruby/object:Api::Type::NestedObject + name: 'resourceSelector' + description: | + The resource types that the custom module operates on. Each custom module + can specify up to 5 resource types. + properties: + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + required: true + description: | + The resource types to run the detector on. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'severity' + description: | + The severity to assign to findings generated by the module. + values: + - :CRITICAL + - :HIGH + - :MEDIUM + - :LOW + - !ruby/object:Api::Type::String + name: 'description' + description: | + Text that describes the vulnerability or misconfiguration that the custom + module detects. This explanation is returned with each finding instance to + help investigators understand the detected issue. The text must be enclosed in quotation marks. + - !ruby/object:Api::Type::String + name: 'recommendation' + description: | + An explanation of the recommended steps that security teams can take to resolve + the detected issue. This explanation is returned with each finding generated by + this module in the nextSteps property of the finding JSON. 
diff --git a/mmv1/templates/terraform/examples/scc_management_folder_security_health_analytics_custom_module_basic.tf.erb b/mmv1/templates/terraform/examples/scc_management_folder_security_health_analytics_custom_module_basic.tf.erb new file mode 100644 index 000000000000..9b773df17903 --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_management_folder_security_health_analytics_custom_module_basic.tf.erb @@ -0,0 +1,24 @@ +resource "google_folder" "folder" { + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" + display_name = "<%= ctx[:vars]['folder_display_name'] %>" +} + +resource "google_scc_management_folder_security_health_analytics_custom_module" "<%= ctx[:primary_resource_id] %>" { + folder = google_folder.folder.folder_id + location = "global" + display_name = "<%= ctx[:vars]['display_name'] %>" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/scc_management_folder_security_health_analytics_custom_module_full.tf.erb b/mmv1/templates/terraform/examples/scc_management_folder_security_health_analytics_custom_module_full.tf.erb new file mode 100644 index 000000000000..0108ada4cd1a --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_management_folder_security_health_analytics_custom_module_full.tf.erb @@ -0,0 +1,38 @@ +resource "google_folder" "folder" { + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" + display_name = "<%= ctx[:vars]['folder_display_name'] %>" +} + +resource "google_scc_management_folder_security_health_analytics_custom_module" "<%= ctx[:primary_resource_id] %>" { + folder = google_folder.folder.folder_id + location = "global" + display_name = "<%= ctx[:vars]['display_name'] %>" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_folder_security_health_analytics_custom_module_test.go b/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_folder_security_health_analytics_custom_module_test.go new file mode 100644 index 000000000000..16c1d57ca527 --- 
/dev/null +++ b/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_folder_security_health_analytics_custom_module_test.go @@ -0,0 +1,234 @@ +package securitycentermanagement_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Custom Module tests cannot be run in parallel without running into 409 Conflict reponses. +// Run them as individual steps of an update test instead. +func TestAccSecurityCenterManagementFolderSecurityHealthAnalyticsCustomModule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "location": "global", + "sleep": true, + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + CheckDestroy: testAccCheckSecurityCenterManagementFolderSecurityHealthAnalyticsCustomModuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterManagementFolderSecurityHealthAnalyticsCustomModule_sccFolderCustomModuleBasicExample(context), + }, + { + ResourceName: "google_scc_management_folder_security_health_analytics_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"folder", "location"}, + }, + { + Config: testAccSecurityCenterManagementFolderSecurityHealthAnalyticsCustomModule_sccFolderCustomModuleFullExample(context), + }, + { + 
ResourceName: "google_scc_management_folder_security_health_analytics_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"folder", "location"}, + }, + { + Config: testAccSecurityCenterManagementFolderSecurityHealthAnalyticsCustomModule_sccFolderCustomModuleUpdate(context), + }, + { + ResourceName: "google_scc_management_folder_security_health_analytics_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"folder", "location"}, + }, + }, + }) +} + +func testAccSecurityCenterManagementFolderSecurityHealthAnalyticsCustomModule_sccFolderCustomModuleBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder-name%{random_suffix}" +} + +resource "time_sleep" "wait_1_minute" { + depends_on = [google_folder.folder] + + create_duration = "2m" +} + +resource "google_scc_management_folder_security_health_analytics_custom_module" "example" { + folder = google_folder.folder.folder_id + location = "%{location}" + display_name = "tf_test_basic_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } + + + depends_on = [time_sleep.wait_1_minute] +} +`, context) +} + +func testAccSecurityCenterManagementFolderSecurityHealthAnalyticsCustomModule_sccFolderCustomModuleFullExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder-name%{random_suffix}" +} + +resource "google_scc_management_folder_security_health_analytics_custom_module" "example" { + folder = google_folder.folder.folder_id + location = "%{location}" + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} +`, context) +} + +func testAccSecurityCenterManagementFolderSecurityHealthAnalyticsCustomModule_sccFolderCustomModuleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder-name%{random_suffix}" +} + +resource "google_scc_management_folder_security_health_analytics_custom_module" "example" { + folder = google_folder.folder.folder_id + location = "%{location}" + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "DISABLED" + custom_config { + predicate { + expression = 
"resource.name == \"updated-name\"" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + custom_output { + properties { + name = "violation" + value_expression { + expression = "resource.name" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + } + } + resource_selector { + resource_types = [ + "compute.googleapis.com/Instance", + ] + } + severity = "CRITICAL" + description = "Updated description of the custom module" + recommendation = "Updated steps to resolve violation" + } +} +`, context) +} + +func testAccCheckSecurityCenterManagementFolderSecurityHealthAnalyticsCustomModuleDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_scc_management_folder_security_health_analytics_custom_module" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + location := rs.Primary.Attributes["location"] + + url, err := tpgresource.ReplaceVarsForTest(config, rs, fmt.Sprintf( + "{{SecurityCenterBasePath}}folders/{{folder}}/locations/%s/securityHealthAnalyticsCustomModules/{{name}}", location)) + + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("SecurityCenterManagementFolderSecurityHealthAnalyticsCustomModule still exists at %s", url) + } + } + + return nil + } +} From 155307b2d0395f2996df98415b4bbdcdb370316b Mon Sep 17 00:00:00 2001 From: Matheus Guilherme Souza Aleixo 
<82680416+matheusaleixo-cit@users.noreply.github.com> Date: Fri, 7 Jun 2024 13:56:25 -0300 Subject: [PATCH 085/356] Added update support to sslPolicy field in region_target_https_proxy (#10877) --- .../compute/RegionTargetHttpsProxy.yaml | 10 +- ...pute_region_target_https_proxy_test.go.erb | 280 ++++++++++++++++++ 2 files changed, 285 insertions(+), 5 deletions(-) diff --git a/mmv1/products/compute/RegionTargetHttpsProxy.yaml b/mmv1/products/compute/RegionTargetHttpsProxy.yaml index 5f7d590b6ebc..00be3b0e6129 100644 --- a/mmv1/products/compute/RegionTargetHttpsProxy.yaml +++ b/mmv1/products/compute/RegionTargetHttpsProxy.yaml @@ -173,11 +173,11 @@ properties: A reference to the Region SslPolicy resource that will be associated with the TargetHttpsProxy resource. If not set, the TargetHttpsProxy resource will not have any SSL policy configured. - # 2022 May 28 - setSslPolicy method not yet listed - # https://cloud.google.com/compute/docs/reference/rest/beta/regionTargetHttpsProxies - # update_verb: :POST - # update_url: - # 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setSslPolicy' + update_id: 'sslPolicy' + fingerprint_name: 'fingerprint' + update_verb: :PATCH + update_url: + 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}' custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' - !ruby/object:Api::Type::ResourceRef name: 'urlMap' diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_target_https_proxy_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_target_https_proxy_test.go.erb index edfabc83778b..5bea2bf23d2e 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_target_https_proxy_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_target_https_proxy_test.go.erb @@ -5,6 +5,7 @@ import ( "fmt" "testing" "github.com/hashicorp/terraform-provider-google/google/acctest" + 
"github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -349,3 +350,282 @@ resource "google_compute_region_ssl_certificate" "foobar2" { } `, id, id, id, id, id, id, id, id, id, id) } + +func TestAccComputeRegionTargetHttpsProxy_addSslPolicy_withForwardingRule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "resource_suffix": acctest.RandString(t, 10), + "project_id": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withSslPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { 
+ project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # webscoket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] +} + +resource "google_compute_region_url_map" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + 
+resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + name = "tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule_withSslPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # webscoket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 
600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] + ssl_policy = google_compute_region_ssl_policy.default.id +} + +resource "google_compute_region_url_map" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_policy" "default" { + project = "%{project_id}" + region = "us-central1" + name = "ssl-policy-%{resource_suffix}" + + profile = "RESTRICTED" + min_tls_version = "TLS_1_2" +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} 
+ +resource "google_compute_subnetwork" "ilb_subnet2" { + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + name = "tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} From 24c28ff5e340bfd4a12d4106f7832f04f94fc435 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Fri, 7 Jun 2024 10:04:29 -0700 Subject: [PATCH 086/356] Made network edge security service docs not force Portuguese (#10907) --- mmv1/products/compute/NetworkEdgeSecurityService.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/products/compute/NetworkEdgeSecurityService.yaml b/mmv1/products/compute/NetworkEdgeSecurityService.yaml index a5008336f155..7241dfd28fdb 100644 --- a/mmv1/products/compute/NetworkEdgeSecurityService.yaml +++ b/mmv1/products/compute/NetworkEdgeSecurityService.yaml @@ -25,7 +25,7 @@ update_mask: true skip_sweeper: true references: !ruby/object:Api::Resource::ReferenceLinks guides: - 'Official Documentation': 'https://cloud.google.com/armor/docs/advanced-network-ddos?hl=pt-br#activate_advanced_network_ddos_protection' + 'Official Documentation': 'https://cloud.google.com/armor/docs/advanced-network-ddos' api: 'https://cloud.google.com/compute/docs/reference/rest/v1/networkEdgeSecurityServices' description: | Google Cloud Armor network edge security service resource. 
From fb690b39ad8536b4c4c103642e1ddb4e5b2824a0 Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Fri, 7 Jun 2024 10:26:45 -0700 Subject: [PATCH 087/356] Replace non-test usage of resource library with id and retry (#10830) --- mmv1/api/resource/examples.go | 2 +- .../compute/RegionSslCertificate.yaml | 6 ++-- mmv1/products/compute/SslCertificate.yaml | 6 ++-- mmv1/provider/terraform/examples.rb | 2 +- .../terraform/constants/agent_pool.go.erb | 8 ++--- .../clouddomains_registration.go.erb | 8 ++--- .../constants/datastream_stream.go.erb | 8 ++--- .../terraform/constants/go/agent_pool.go.tmpl | 8 ++--- .../go/clouddomains_registration.go.tmpl | 8 ++--- .../constants/go/datastream_stream.go.tmpl | 8 ++--- .../integration_connectors_connection.go.tmpl | 6 ++-- .../go/interconnect_attachment.go.tmpl | 6 ++-- .../constants/go/private_connection.go.tmpl | 10 +++--- .../integration_connectors_connection.go.erb | 6 ++-- .../constants/interconnect_attachment.go.erb | 6 ++-- .../terraform/constants/notebooks_instance.go | 6 ++-- .../constants/private_connection.go.erb | 10 +++--- .../terraform/constants/workbench_instance.go | 6 ++-- .../go/name_or_name_prefix.go.tmpl | 4 +-- .../custom_expand/name_or_name_prefix.go.erb | 4 +-- .../terraform/encoders/api_config.go.erb | 6 ++-- .../terraform/encoders/go/api_config.go.tmpl | 6 ++-- .../encoders/go/spanner_instance.go.tmpl | 2 +- .../terraform/encoders/go/workflow.go.tmpl | 6 ++-- .../encoders/spanner_instance.go.erb | 2 +- .../terraform/encoders/workflow.go.erb | 6 ++-- mmv1/templates/terraform/resource.erb | 3 +- mmv1/templates/tgc/resource_converter.go.erb | 3 +- ...urce_compute_disk_async_replication.go.erb | 20 ++++++------ .../compute/resource_compute_instance.go.erb | 4 +-- ...urce_compute_instance_group_manager.go.erb | 4 +-- .../resource_compute_instance_template.go.erb | 6 ++-- ...mpute_region_instance_group_manager.go.erb | 6 ++-- ...ce_compute_region_instance_template.go.erb | 6 ++-- 
.../resource_container_cluster.go.erb | 14 ++++----- .../resource_container_node_pool.go.erb | 31 ++++++++++--------- ...resource_dataflow_flex_template_job.go.erb | 24 +++++++------- .../dataflow/resource_dataflow_job.go.erb | 22 ++++++------- .../terraform/services/dns/dns_change.go | 8 ++--- .../resourcemanager/service_account_waiter.go | 6 ++-- .../sql/resource_sql_database_instance.go.erb | 4 +-- .../storage/resource_storage_bucket.go.erb | 8 ++--- .../resource_storage_transfer_job.go.erb | 6 ++-- .../terraform/tpgresource/common_operation.go | 6 ++-- .../terraform/transport/common_polling.go | 16 +++++----- .../terraform/transport/config.go.erb | 2 +- .../terraform/transport/retry_transport.go | 12 +++---- .../terraform/transport/retry_utils.go | 10 +++--- mmv1/third_party/tgc/sql_database_instance.go | 4 +-- 49 files changed, 192 insertions(+), 189 deletions(-) diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index 7c6c45530140..0c1159965942 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -148,7 +148,7 @@ type Examples struct { // If the example should be skipped during VCR testing. 
// This is the case when something about the resource or config causes VCR to fail for example - // a resource with a unique identifier generated within the resource via resource.UniqueId() + // a resource with a unique identifier generated within the resource via id.UniqueId() // Or a config with two fine grained resources that have a race condition during create SkipVcr bool `yaml:"skip_vcr"` diff --git a/mmv1/products/compute/RegionSslCertificate.yaml b/mmv1/products/compute/RegionSslCertificate.yaml index 0dd28ac101af..fc3e6c825658 100644 --- a/mmv1/products/compute/RegionSslCertificate.yaml +++ b/mmv1/products/compute/RegionSslCertificate.yaml @@ -53,7 +53,7 @@ examples: name: 'region_ssl_certificate_basic' primary_resource_id: 'default' - # Uses resource.UniqueId + # Uses id.UniqueId skip_vcr: true ignore_read_extra: - 'name_prefix' @@ -62,13 +62,13 @@ examples: external_providers: ["random", "time"] primary_resource_id: 'default' - # Uses resource.UniqueId + # Uses id.UniqueId skip_vcr: true - !ruby/object:Provider::Terraform::Examples name: 'region_ssl_certificate_target_https_proxies' primary_resource_id: 'default' - # Uses resource.UniqueId + # Uses id.UniqueId skip_vcr: true vars: region_target_https_proxy_name: 'test-proxy' diff --git a/mmv1/products/compute/SslCertificate.yaml b/mmv1/products/compute/SslCertificate.yaml index 423602b771b9..0329f36edfb4 100644 --- a/mmv1/products/compute/SslCertificate.yaml +++ b/mmv1/products/compute/SslCertificate.yaml @@ -53,7 +53,7 @@ examples: name: 'ssl_certificate_basic' primary_resource_id: 'default' - # Uses resource.UniqueId + # Uses id.UniqueId skip_vcr: true ignore_read_extra: - 'name_prefix' @@ -62,13 +62,13 @@ examples: external_providers: ["random", "time"] primary_resource_id: 'default' - # Uses resource.UniqueId + # Uses id.UniqueId skip_vcr: true - !ruby/object:Provider::Terraform::Examples name: 'ssl_certificate_target_https_proxies' primary_resource_id: 'default' - # Uses resource.UniqueId + # Uses 
id.UniqueId skip_vcr: true vars: target_https_proxy_name: 'test-proxy' diff --git a/mmv1/provider/terraform/examples.rb b/mmv1/provider/terraform/examples.rb index f044c4a8ba67..015eee763521 100644 --- a/mmv1/provider/terraform/examples.rb +++ b/mmv1/provider/terraform/examples.rb @@ -138,7 +138,7 @@ class Examples < Google::YamlValidator # If the example should be skipped during VCR testing. # This is the case when something about the resource or config causes VCR to fail for example - # a resource with a unique identifier generated within the resource via resource.UniqueId() + # a resource with a unique identifier generated within the resource via id.UniqueId() # Or a config with two fine grained resources that have a race condition during create attr_reader :skip_vcr diff --git a/mmv1/templates/terraform/constants/agent_pool.go.erb b/mmv1/templates/terraform/constants/agent_pool.go.erb index 3fc74c542878..c64ae88de421 100644 --- a/mmv1/templates/terraform/constants/agent_pool.go.erb +++ b/mmv1/templates/terraform/constants/agent_pool.go.erb @@ -2,20 +2,20 @@ // waitForAgentPoolReady waits for an agent pool to leave the // "CREATING" state and become "CREATED", to indicate that it's ready. 
func waitForAgentPoolReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceStorageTransferAgentPoolRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "CREATING" { - return resource.RetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) } else if state == "CREATED" { log.Printf("[DEBUG] AgentPool %q has state %q.", name, state) return nil } else { - return resource.NonRetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) + return retry.NonRetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) } }) } diff --git a/mmv1/templates/terraform/constants/clouddomains_registration.go.erb b/mmv1/templates/terraform/constants/clouddomains_registration.go.erb index 7b4537357a70..565f10b887a4 100644 --- a/mmv1/templates/terraform/constants/clouddomains_registration.go.erb +++ b/mmv1/templates/terraform/constants/clouddomains_registration.go.erb @@ -2,17 +2,17 @@ // waitForRegistrationActive waits for a registration to leave the // "REGISTRATION_PENDING" state and become "ACTIVE" or any other state. 
func waitForRegistrationActive(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceClouddomainsRegistrationRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "REGISTRATION_PENDING" { - return resource.RetryableError(fmt.Errorf("Registration %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("Registration %q has state %q.", name, state)) } else if state == "REGISTRATION_FAILED" { - return resource.NonRetryableError(fmt.Errorf("Registration %q has failed with state %q.", name, state)) + return retry.NonRetryableError(fmt.Errorf("Registration %q has failed with state %q.", name, state)) } else { log.Printf("[DEBUG] Registration %q has state %q.", name, state) return nil diff --git a/mmv1/templates/terraform/constants/datastream_stream.go.erb b/mmv1/templates/terraform/constants/datastream_stream.go.erb index 3470e6a67940..bf26549931bd 100644 --- a/mmv1/templates/terraform/constants/datastream_stream.go.erb +++ b/mmv1/templates/terraform/constants/datastream_stream.go.erb @@ -46,20 +46,20 @@ func resourceDatastreamStreamCustomDiff(_ context.Context, diff *schema.Resource <% unless compiler == "terraformgoogleconversion-codegen" -%> // waitForDatastreamStreamReady waits for an agent pool to reach a stable state to indicate that it's ready. 
func waitForDatastreamStreamReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceDatastreamStreamRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "STARTING" || state == "DRAINING" { - return resource.RetryableError(fmt.Errorf("Stream %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("Stream %q has state %q.", name, state)) } else if state == "NOT_STARTED" || state == "RUNNING" || state == "PAUSED" { log.Printf("[DEBUG] Stream %q has state %q.", name, state) return nil } else { - return resource.NonRetryableError(fmt.Errorf("Stream %q has state %q.", name, state)) + return retry.NonRetryableError(fmt.Errorf("Stream %q has state %q.", name, state)) } }) } diff --git a/mmv1/templates/terraform/constants/go/agent_pool.go.tmpl b/mmv1/templates/terraform/constants/go/agent_pool.go.tmpl index 4598ff353bb9..5f4bbdcfa820 100644 --- a/mmv1/templates/terraform/constants/go/agent_pool.go.tmpl +++ b/mmv1/templates/terraform/constants/go/agent_pool.go.tmpl @@ -2,20 +2,20 @@ // waitForAgentPoolReady waits for an agent pool to leave the // "CREATING" state and become "CREATED", to indicate that it's ready. 
func waitForAgentPoolReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceStorageTransferAgentPoolRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "CREATING" { - return resource.RetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) } else if state == "CREATED" { log.Printf("[DEBUG] AgentPool %q has state %q.", name, state) return nil } else { - return resource.NonRetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) + return retry.NonRetryableError(fmt.Errorf("AgentPool %q has state %q.", name, state)) } }) } diff --git a/mmv1/templates/terraform/constants/go/clouddomains_registration.go.tmpl b/mmv1/templates/terraform/constants/go/clouddomains_registration.go.tmpl index 6b467757db89..a544535a321e 100644 --- a/mmv1/templates/terraform/constants/go/clouddomains_registration.go.tmpl +++ b/mmv1/templates/terraform/constants/go/clouddomains_registration.go.tmpl @@ -2,17 +2,17 @@ // waitForRegistrationActive waits for a registration to leave the // "REGISTRATION_PENDING" state and become "ACTIVE" or any other state. 
func waitForRegistrationActive(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceClouddomainsRegistrationRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "REGISTRATION_PENDING" { - return resource.RetryableError(fmt.Errorf("Registration %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("Registration %q has state %q.", name, state)) } else if state == "REGISTRATION_FAILED" { - return resource.NonRetryableError(fmt.Errorf("Registration %q has failed with state %q.", name, state)) + return retry.NonRetryableError(fmt.Errorf("Registration %q has failed with state %q.", name, state)) } else { log.Printf("[DEBUG] Registration %q has state %q.", name, state) return nil diff --git a/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl b/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl index 4e1f9f699de5..3f409f9794a5 100644 --- a/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl +++ b/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl @@ -44,20 +44,20 @@ func resourceDatastreamStreamCustomDiff(_ context.Context, diff *schema.Resource {{- if ne $.Compiler "terraformgoogleconversion-codegen" }} // waitForDatastreamStreamReady waits for an agent pool to reach a stable state to indicate that it's ready. 
func waitForDatastreamStreamReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceDatastreamStreamRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "STARTING" || state == "DRAINING" { - return resource.RetryableError(fmt.Errorf("Stream %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("Stream %q has state %q.", name, state)) } else if state == "NOT_STARTED" || state == "RUNNING" || state == "PAUSED" { log.Printf("[DEBUG] Stream %q has state %q.", name, state) return nil } else { - return resource.NonRetryableError(fmt.Errorf("Stream %q has state %q.", name, state)) + return retry.NonRetryableError(fmt.Errorf("Stream %q has state %q.", name, state)) } }) } diff --git a/mmv1/templates/terraform/constants/go/integration_connectors_connection.go.tmpl b/mmv1/templates/terraform/constants/go/integration_connectors_connection.go.tmpl index f1b3919ca5fc..d84dd8c0ab1b 100644 --- a/mmv1/templates/terraform/constants/go/integration_connectors_connection.go.tmpl +++ b/mmv1/templates/terraform/constants/go/integration_connectors_connection.go.tmpl @@ -2,16 +2,16 @@ // waitforConnectionReady waits for an connecion to leave the // "CREATING" state, to indicate that it's ready. 
func waitforConnectionReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceIntegrationConnectorsConnectionRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) status := d.Get("status").([]interface{}) state := status[0].(map[string]interface{})["state"] log.Printf("[DEBUG] Connection %q has state %v.", name, state) if state == "CREATING" || state == "UPDATING" { - return resource.RetryableError(fmt.Errorf("Connection %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("Connection %q has state %q.", name, state)) } log.Printf("[DEBUG] Connection %q has state %q.", name, state) return nil diff --git a/mmv1/templates/terraform/constants/go/interconnect_attachment.go.tmpl b/mmv1/templates/terraform/constants/go/interconnect_attachment.go.tmpl index a4e9b98a54e7..e247612fbff7 100644 --- a/mmv1/templates/terraform/constants/go/interconnect_attachment.go.tmpl +++ b/mmv1/templates/terraform/constants/go/interconnect_attachment.go.tmpl @@ -3,15 +3,15 @@ // "UNPROVISIONED" state, to indicate that it's either ready or awaiting partner // activity. 
func waitForAttachmentToBeProvisioned(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceComputeInterconnectAttachmentRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "UNPROVISIONED" { - return resource.RetryableError(fmt.Errorf("InterconnectAttachment %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("InterconnectAttachment %q has state %q.", name, state)) } log.Printf("InterconnectAttachment %q has state %q.", name, state) return nil diff --git a/mmv1/templates/terraform/constants/go/private_connection.go.tmpl b/mmv1/templates/terraform/constants/go/private_connection.go.tmpl index 158c51f89705..3d0da390dd8d 100644 --- a/mmv1/templates/terraform/constants/go/private_connection.go.tmpl +++ b/mmv1/templates/terraform/constants/go/private_connection.go.tmpl @@ -12,22 +12,22 @@ func extractError(d *schema.ResourceData) error { // waitForPrivateConnectionReady waits for a private connection state to become // CREATED, if the state is FAILED propegate the error to the user. 
func waitForPrivateConnectionReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceDatastreamPrivateConnectionRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "CREATING" { - return resource.RetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) } else if state == "CREATED" { log.Printf("[DEBUG] PrivateConnection %q has state %q.", name, state) return nil } else if state == "FAILED" { - return resource.NonRetryableError(extractError(d)) + return retry.NonRetryableError(extractError(d)) } else { - return resource.NonRetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) + return retry.NonRetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) } }) } diff --git a/mmv1/templates/terraform/constants/integration_connectors_connection.go.erb b/mmv1/templates/terraform/constants/integration_connectors_connection.go.erb index 15c1772b27e7..d0f610704185 100644 --- a/mmv1/templates/terraform/constants/integration_connectors_connection.go.erb +++ b/mmv1/templates/terraform/constants/integration_connectors_connection.go.erb @@ -2,16 +2,16 @@ // waitforConnectionReady waits for an connecion to leave the // "CREATING" state, to indicate that it's ready. 
func waitforConnectionReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceIntegrationConnectorsConnectionRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) status := d.Get("status").([]interface{}) state := status[0].(map[string]interface{})["state"] log.Printf("[DEBUG] Connection %q has state %v.", name, state) if state == "CREATING" || state == "UPDATING" { - return resource.RetryableError(fmt.Errorf("Connection %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("Connection %q has state %q.", name, state)) } log.Printf("[DEBUG] Connection %q has state %q.", name, state) return nil diff --git a/mmv1/templates/terraform/constants/interconnect_attachment.go.erb b/mmv1/templates/terraform/constants/interconnect_attachment.go.erb index f8d4ad8baa46..cf52429784ce 100644 --- a/mmv1/templates/terraform/constants/interconnect_attachment.go.erb +++ b/mmv1/templates/terraform/constants/interconnect_attachment.go.erb @@ -3,15 +3,15 @@ // "UNPROVISIONED" state, to indicate that it's either ready or awaiting partner // activity. 
func waitForAttachmentToBeProvisioned(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceComputeInterconnectAttachmentRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "UNPROVISIONED" { - return resource.RetryableError(fmt.Errorf("InterconnectAttachment %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("InterconnectAttachment %q has state %q.", name, state)) } log.Printf("InterconnectAttachment %q has state %q.", name, state) return nil diff --git a/mmv1/templates/terraform/constants/notebooks_instance.go b/mmv1/templates/terraform/constants/notebooks_instance.go index b1c47a704ffa..6181d1ce386e 100644 --- a/mmv1/templates/terraform/constants/notebooks_instance.go +++ b/mmv1/templates/terraform/constants/notebooks_instance.go @@ -51,9 +51,9 @@ func NotebooksInstanceKmsDiffSuppress(_, old, new string, _ *schema.ResourceData <% unless compiler == "terraformgoogleconversion-codegen" -%> // waitForNotebooksInstanceActive waits for an Notebook instance to become "ACTIVE" func waitForNotebooksInstanceActive(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceNotebooksInstanceRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) @@ -62,7 +62,7 @@ func waitForNotebooksInstanceActive(d *schema.ResourceData, config *transport_tp log.Printf("[DEBUG] Notebook Instance %q has state %q.", name, state) return nil } else { - return resource.RetryableError(fmt.Errorf("Notebook Instance %q has state 
%q. Waiting for ACTIVE state", name, state)) + return retry.RetryableError(fmt.Errorf("Notebook Instance %q has state %q. Waiting for ACTIVE state", name, state)) } }) diff --git a/mmv1/templates/terraform/constants/private_connection.go.erb b/mmv1/templates/terraform/constants/private_connection.go.erb index 79812d25b564..f6fa4fe80290 100644 --- a/mmv1/templates/terraform/constants/private_connection.go.erb +++ b/mmv1/templates/terraform/constants/private_connection.go.erb @@ -12,22 +12,22 @@ func extractError(d *schema.ResourceData) error { // waitForPrivateConnectionReady waits for a private connection state to become // CREATED, if the state is FAILED propegate the error to the user. func waitForPrivateConnectionReady(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceDatastreamPrivateConnectionRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) state := d.Get("state").(string) if state == "CREATING" { - return resource.RetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) + return retry.RetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) } else if state == "CREATED" { log.Printf("[DEBUG] PrivateConnection %q has state %q.", name, state) return nil } else if state == "FAILED" { - return resource.NonRetryableError(extractError(d)) + return retry.NonRetryableError(extractError(d)) } else { - return resource.NonRetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) + return retry.NonRetryableError(fmt.Errorf("PrivateConnection %q has state %q.", name, state)) } }) } diff --git a/mmv1/templates/terraform/constants/workbench_instance.go b/mmv1/templates/terraform/constants/workbench_instance.go index 4462c0e5baa7..d5359c286b08 100644 --- 
a/mmv1/templates/terraform/constants/workbench_instance.go +++ b/mmv1/templates/terraform/constants/workbench_instance.go @@ -123,9 +123,9 @@ func WorkbenchInstanceTagsDiffSuppress(_, _, _ string, d *schema.ResourceData) b <% unless compiler == "terraformgoogleconversion-codegen" -%> // waitForWorkbenchInstanceActive waits for an workbench instance to become "ACTIVE" func waitForWorkbenchInstanceActive(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := resourceWorkbenchInstanceRead(d, config); err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } name := d.Get("name").(string) @@ -134,7 +134,7 @@ func waitForWorkbenchInstanceActive(d *schema.ResourceData, config *transport_tp log.Printf("[DEBUG] Workbench Instance %q has state %q.", name, state) return nil } else { - return resource.RetryableError(fmt.Errorf("Workbench Instance %q has state %q. Waiting for ACTIVE state", name, state)) + return retry.RetryableError(fmt.Errorf("Workbench Instance %q has state %q. 
Waiting for ACTIVE state", name, state)) } }) diff --git a/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl b/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl index 9cbc4ad9c32a..9b9f5f103c10 100644 --- a/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl @@ -15,9 +15,9 @@ func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.T if v, ok := d.GetOk("name"); ok { certName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - certName = resource.PrefixedUniqueId(v.(string)) + certName = id.PrefixedUniqueId(v.(string)) } else { - certName = resource.UniqueId() + certName = id.UniqueId() } // We need to get the {{"{{"}}name{{"}}"}} into schema to set the ID using tpgresource.ReplaceVars diff --git a/mmv1/templates/terraform/custom_expand/name_or_name_prefix.go.erb b/mmv1/templates/terraform/custom_expand/name_or_name_prefix.go.erb index 9a449569ff25..b3297fd187c2 100644 --- a/mmv1/templates/terraform/custom_expand/name_or_name_prefix.go.erb +++ b/mmv1/templates/terraform/custom_expand/name_or_name_prefix.go.erb @@ -17,9 +17,9 @@ func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d t if v, ok := d.GetOk("name"); ok { certName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - certName = resource.PrefixedUniqueId(v.(string)) + certName = id.PrefixedUniqueId(v.(string)) } else { - certName = resource.UniqueId() + certName = id.UniqueId() } // We need to get the {{name}} into schema to set the ID using tpgresource.ReplaceVars diff --git a/mmv1/templates/terraform/encoders/api_config.go.erb b/mmv1/templates/terraform/encoders/api_config.go.erb index 39d38b3d715c..c23df7b7a721 100644 --- a/mmv1/templates/terraform/encoders/api_config.go.erb +++ b/mmv1/templates/terraform/encoders/api_config.go.erb @@ -2,12 +2,12 @@ var apiConfigId string if v, ok := 
d.GetOk("api_config_id"); ok { apiConfigId = v.(string) } else if v, ok := d.GetOk("api_config_id_prefix"); ok { - apiConfigId = resource.PrefixedUniqueId(v.(string)) + apiConfigId = id.PrefixedUniqueId(v.(string)) } else { - apiConfigId = resource.UniqueId() + apiConfigId = id.UniqueId() } if err := d.Set("api_config_id", apiConfigId); err != nil { return nil, fmt.Errorf("Error setting api_config_id: %s", err) } -return obj, nil \ No newline at end of file +return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/api_config.go.tmpl b/mmv1/templates/terraform/encoders/go/api_config.go.tmpl index 39d38b3d715c..c23df7b7a721 100644 --- a/mmv1/templates/terraform/encoders/go/api_config.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/api_config.go.tmpl @@ -2,12 +2,12 @@ var apiConfigId string if v, ok := d.GetOk("api_config_id"); ok { apiConfigId = v.(string) } else if v, ok := d.GetOk("api_config_id_prefix"); ok { - apiConfigId = resource.PrefixedUniqueId(v.(string)) + apiConfigId = id.PrefixedUniqueId(v.(string)) } else { - apiConfigId = resource.UniqueId() + apiConfigId = id.UniqueId() } if err := d.Set("api_config_id", apiConfigId); err != nil { return nil, fmt.Errorf("Error setting api_config_id: %s", err) } -return obj, nil \ No newline at end of file +return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/spanner_instance.go.tmpl b/mmv1/templates/terraform/encoders/go/spanner_instance.go.tmpl index 7dde01e82fa7..fdda9c430a47 100644 --- a/mmv1/templates/terraform/encoders/go/spanner_instance.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/spanner_instance.go.tmpl @@ -5,7 +5,7 @@ if obj["processingUnits"] == nil && obj["nodeCount"] == nil && obj["autoscalingC newObj := make(map[string]interface{}) newObj["instance"] = obj if obj["name"] == nil { - if err := d.Set("name", resource.PrefixedUniqueId("tfgen-spanid-")[:30]); err != nil { + if err := d.Set("name", id.PrefixedUniqueId("tfgen-spanid-")[:30]); err != nil { return nil, 
fmt.Errorf("Error setting name: %s", err) } newObj["instanceId"] = d.Get("name").(string) diff --git a/mmv1/templates/terraform/encoders/go/workflow.go.tmpl b/mmv1/templates/terraform/encoders/go/workflow.go.tmpl index 8e87273991e9..4888f187bd15 100644 --- a/mmv1/templates/terraform/encoders/go/workflow.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/workflow.go.tmpl @@ -2,13 +2,13 @@ var ResName string if v, ok := d.GetOk("name"); ok { ResName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - ResName = resource.PrefixedUniqueId(v.(string)) + ResName = id.PrefixedUniqueId(v.(string)) } else { - ResName = resource.UniqueId() + ResName = id.UniqueId() } if err := d.Set("name", ResName); err != nil { return nil, fmt.Errorf("Error setting name: %s", err) } -return obj, nil \ No newline at end of file +return obj, nil diff --git a/mmv1/templates/terraform/encoders/spanner_instance.go.erb b/mmv1/templates/terraform/encoders/spanner_instance.go.erb index 7dde01e82fa7..fdda9c430a47 100644 --- a/mmv1/templates/terraform/encoders/spanner_instance.go.erb +++ b/mmv1/templates/terraform/encoders/spanner_instance.go.erb @@ -5,7 +5,7 @@ if obj["processingUnits"] == nil && obj["nodeCount"] == nil && obj["autoscalingC newObj := make(map[string]interface{}) newObj["instance"] = obj if obj["name"] == nil { - if err := d.Set("name", resource.PrefixedUniqueId("tfgen-spanid-")[:30]); err != nil { + if err := d.Set("name", id.PrefixedUniqueId("tfgen-spanid-")[:30]); err != nil { return nil, fmt.Errorf("Error setting name: %s", err) } newObj["instanceId"] = d.Get("name").(string) diff --git a/mmv1/templates/terraform/encoders/workflow.go.erb b/mmv1/templates/terraform/encoders/workflow.go.erb index 8e87273991e9..4888f187bd15 100644 --- a/mmv1/templates/terraform/encoders/workflow.go.erb +++ b/mmv1/templates/terraform/encoders/workflow.go.erb @@ -2,13 +2,13 @@ var ResName string if v, ok := d.GetOk("name"); ok { ResName = v.(string) } else if v, ok := 
d.GetOk("name_prefix"); ok { - ResName = resource.PrefixedUniqueId(v.(string)) + ResName = id.PrefixedUniqueId(v.(string)) } else { - ResName = resource.UniqueId() + ResName = id.UniqueId() } if err := d.Set("name", ResName); err != nil { return nil, fmt.Errorf("Error setting name: %s", err) } -return obj, nil \ No newline at end of file +return obj, nil diff --git a/mmv1/templates/terraform/resource.erb b/mmv1/templates/terraform/resource.erb index e3e557e1ac7e..c6840e3385ad 100644 --- a/mmv1/templates/terraform/resource.erb +++ b/mmv1/templates/terraform/resource.erb @@ -37,7 +37,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" diff --git a/mmv1/templates/tgc/resource_converter.go.erb b/mmv1/templates/tgc/resource_converter.go.erb index 20bd286427be..a7d64689a9d3 100644 --- a/mmv1/templates/tgc/resource_converter.go.erb +++ b/mmv1/templates/tgc/resource_converter.go.erb @@ -10,7 +10,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" diff --git 
a/mmv1/third_party/terraform/services/compute/resource_compute_disk_async_replication.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_disk_async_replication.go.erb index d769ac6c5d39..53e70fb840d6 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_disk_async_replication.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_disk_async_replication.go.erb @@ -11,7 +11,7 @@ import ( transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" <% if version == "ga" -%> "google.golang.org/api/compute/v1" <% else -%> @@ -150,23 +150,23 @@ func resourceDiskAsyncReplicationCreate(d *schema.ResourceData, meta interface{} return err } } - err = resource.Retry(time.Minute*time.Duration(5), func() *resource.RetryError { + err = retry.Retry(time.Minute*time.Duration(5), func() *retry.RetryError { diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) if err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } if diskStatus.ResourceStatus == nil { - return resource.NonRetryableError(fmt.Errorf("no resource status for disk: %s", resourceId)) + return retry.NonRetryableError(fmt.Errorf("no resource status for disk: %s", resourceId)) } if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[secondaryDisk]; ok { if secondaryState.State != "ACTIVE" { time.Sleep(5 * time.Second) - return resource.RetryableError(fmt.Errorf("secondary disk %s state (%s) is not: ACTIVE", secondaryDisk, secondaryState)) + return retry.RetryableError(fmt.Errorf("secondary disk %s state (%s) is not: ACTIVE", secondaryDisk, secondaryState)) } return nil } time.Sleep(5 * time.Second) - return resource.RetryableError(fmt.Errorf("secondary disk %s state not available", 
secondaryDisk)) + return retry.RetryableError(fmt.Errorf("secondary disk %s state not available", secondaryDisk)) }) if err != nil { return err @@ -274,19 +274,19 @@ func resourceDiskAsyncReplicationDelete(d *schema.ResourceData, meta interface{} return err } } - err = resource.Retry(time.Minute*time.Duration(5), func() *resource.RetryError { + err = retry.Retry(time.Minute*time.Duration(5), func() *retry.RetryError { diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) if err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[resourceName]; ok { if secondaryState.State != "STOPPED" { time.Sleep(5 * time.Second) - return resource.RetryableError(fmt.Errorf("secondary disk %s state (%s) is not STOPPED", secondaryDisk, secondaryState)) + return retry.RetryableError(fmt.Errorf("secondary disk %s state (%s) is not STOPPED", secondaryDisk, secondaryState)) } return nil } - return resource.NonRetryableError(fmt.Errorf("secondary disk %s state not available", secondaryDisk)) + return retry.NonRetryableError(fmt.Errorf("secondary disk %s state not available", secondaryDisk)) }) if err != nil { return err diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 97bf3c56f864..e9c1a0b5c3cb 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" 
"github.com/mitchellh/hashstructure" @@ -1389,7 +1389,7 @@ func waitUntilInstanceHasDesiredStatus(config *transport_tpg.Config, d *schema.R } return instance.Id, instance.Status, nil } - stateChangeConf := resource.StateChangeConf{ + stateChangeConf := retry.StateChangeConf{ Delay: 5 * time.Second, Pending: getAllStatusBut(desiredStatus), Refresh: stateRefreshFunc, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb index 043806126e5a..6aaa7cb5898a 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -1141,7 +1141,7 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte func computeIGMWaitForInstanceStatus(d *schema.ResourceData, meta interface{}) error { waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" - conf := resource.StateChangeConf{ + conf := retry.StateChangeConf{ Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, Target: []string{"created"}, Refresh: waitForInstancesRefreshFunc(getManager, waitForUpdates, d, meta), diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb index 6db07747edb0..a76fdd9f75aa 100644 --- 
a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -1442,9 +1442,9 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac if v, ok := d.GetOk("name"); ok { itName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - itName = resource.PrefixedUniqueId(v.(string)) + itName = id.PrefixedUniqueId(v.(string)) } else { - itName = resource.UniqueId() + itName = id.UniqueId() } instanceTemplate := &compute.InstanceTemplate{ Description: d.Get("description").(string), diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb index e6d363fd1fe8..7f441905f8c3 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_group_manager.go.erb @@ -7,7 +7,7 @@ import ( "strings" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -673,7 +673,7 @@ func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, met func computeRIGMWaitForInstanceStatus(d *schema.ResourceData, meta 
interface{}) error { waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" - conf := resource.StateChangeConf{ + conf := retry.StateChangeConf{ Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, Target: []string{"created"}, Refresh: waitForInstancesRefreshFunc(getRegionalManager, waitForUpdates, d, meta), @@ -715,7 +715,7 @@ func getRegionalManager(d *schema.ResourceData, meta interface{}) (*compute.Inst return manager, nil } -func waitForInstancesRefreshFunc(f getInstanceManagerFunc, waitForUpdates bool, d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { +func waitForInstancesRefreshFunc(f getInstanceManagerFunc, waitForUpdates bool, d *schema.ResourceData, meta interface{}) retry.StateRefreshFunc { return func() (interface{}, string, error) { m, err := f(d, meta) if err != nil { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb index 56b931cae358..46817b0944e8 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb @@ -9,7 +9,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -1139,9 +1139,9 @@ func resourceComputeRegionInstanceTemplateCreate(d *schema.ResourceData, meta in if v, ok := d.GetOk("name"); ok { itName = v.(string) } else if v, ok := d.GetOk("name_prefix"); ok { - itName = resource.PrefixedUniqueId(v.(string)) + itName = id.PrefixedUniqueId(v.(string)) } 
else { - itName = resource.UniqueId() + itName = id.UniqueId() } instanceTemplate := make(map[string]interface{}) diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index dcc2d64cca07..c68a9d196f63 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -4414,7 +4414,7 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er var op *container.Operation var count = 0 - err = resource.Retry(30*time.Second, func() *resource.RetryError { + err = retry.Retry(30*time.Second, func() *retry.RetryError { count++ name := containerClusterFullName(project, location, clusterName) @@ -4426,11 +4426,11 @@ func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) er if err != nil { log.Printf("[WARNING] Cluster is still not ready to delete, retrying %s", clusterName) - return resource.RetryableError(err) + return retry.RetryableError(err) } if count == 15 { - return resource.NonRetryableError(fmt.Errorf("Error retrying to delete cluster %s", clusterName)) + return retry.NonRetryableError(fmt.Errorf("Error retrying to delete cluster %s", clusterName)) } return nil }) @@ -4460,7 +4460,7 @@ var containerClusterRestingStates = RestingStates{ // returns a state with no error if the state is a resting state, and the last state with an error otherwise func 
containerClusterAwaitRestingState(config *transport_tpg.Config, project, location, clusterName, userAgent string, timeout time.Duration) (state string, err error) { - err = resource.Retry(timeout, func() *resource.RetryError { + err = retry.Retry(timeout, func() *retry.RetryError { name := containerClusterFullName(project, location, clusterName) clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(name) if config.UserProjectOverride { @@ -4468,7 +4468,7 @@ func containerClusterAwaitRestingState(config *transport_tpg.Config, project, lo } cluster, gErr := clusterGetCall.Do() if gErr != nil { - return resource.NonRetryableError(gErr) + return retry.NonRetryableError(gErr) } state = cluster.Status @@ -4481,7 +4481,7 @@ func containerClusterAwaitRestingState(config *transport_tpg.Config, project, lo log.Printf("[DEBUG] Cluster %q has error state %q with message %q.", clusterName, state, cluster.StatusMessage) return nil default: - return resource.RetryableError(fmt.Errorf("Cluster %q has state %q with message %q", clusterName, state, cluster.StatusMessage)) + return retry.RetryableError(fmt.Errorf("Cluster %q has state %q with message %q", clusterName, state, cluster.StatusMessage)) } }) diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb index 99126aceb3bf..370354cdbfce 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb @@ -9,7 +9,8 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -605,7 +606,7 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e } var operation *container.Operation - err = resource.Retry(timeout, func() *resource.RetryError { + err = retry.Retry(timeout, func() *retry.RetryError { clusterNodePoolsCreateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Create(nodePoolInfo.parent(), req) if config.UserProjectOverride { clusterNodePoolsCreateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) @@ -618,9 +619,9 @@ func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) e // while we try to add the node pool. // We get quota errors if there the number of running concurrent // operations reaches the quota. - return resource.RetryableError(err) + return retry.RetryableError(err) } - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } return nil }) @@ -812,7 +813,7 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e startTime := time.Now() var operation *container.Operation - err = resource.Retry(timeout, func() *resource.RetryError { + err = retry.Retry(timeout, func() *retry.RetryError { clusterNodePoolsDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Delete(nodePoolInfo.fullyQualifiedName(name)) if config.UserProjectOverride { clusterNodePoolsDeleteCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) @@ -825,9 +826,9 @@ func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) e // while we try to delete the node pool. // We get quota errors if there the number of running concurrent // operations reaches the quota. 
- return resource.RetryableError(err) + return retry.RetryableError(err) } - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } return nil @@ -931,9 +932,9 @@ func expandNodePool(d *schema.ResourceData, prefix string) (*container.NodePool, } name = v.(string) } else if v, ok := d.GetOk(prefix + "name_prefix"); ok { - name = resource.PrefixedUniqueId(v.(string)) + name = id.PrefixedUniqueId(v.(string)) } else { - name = resource.UniqueId() + name = id.UniqueId() } nodeCount := 0 @@ -2131,14 +2132,14 @@ var containerNodePoolRestingStates = RestingStates{ // takes in a config object, full node pool name, project name and the current CRUD action timeout // returns a state with no error if the state is a resting state, and the last state with an error otherwise func containerNodePoolAwaitRestingState(config *transport_tpg.Config, name, project, userAgent string, timeout time.Duration) (state string, err error) { - err = resource.Retry(timeout, func() *resource.RetryError { + err = retry.Retry(timeout, func() *retry.RetryError { clusterNodePoolsGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(name) if config.UserProjectOverride { clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", project) } nodePool, gErr := clusterNodePoolsGetCall.Do() if gErr != nil { - return resource.NonRetryableError(gErr) + return retry.NonRetryableError(gErr) } state = nodePool.Status @@ -2150,7 +2151,7 @@ func containerNodePoolAwaitRestingState(config *transport_tpg.Config, name, proj log.Printf("[DEBUG] NodePool %q has error state %q with message %q.", name, state, nodePool.StatusMessage) return nil default: - return resource.RetryableError(fmt.Errorf("NodePool %q has state %q with message %q", name, state, nodePool.StatusMessage)) + return retry.RetryableError(fmt.Errorf("NodePool %q has state %q with message %q", name, state, nodePool.StatusMessage)) } }) @@ -2164,12 +2165,12 @@ func 
containerNodePoolAwaitRestingState(config *transport_tpg.Config, name, proj // retried until the incompatible operation completes, and the newly // requested operation can begin. func retryWhileIncompatibleOperation(timeout time.Duration, lockKey string, f func() error) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { if err := transport_tpg.LockedCall(lockKey, f); err != nil { if tpgresource.IsFailedPreconditionError(err) || tpgresource.IsQuotaError(err) { - return resource.RetryableError(err) + return retry.RetryableError(err) } - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } return nil }) diff --git a/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job.go.erb b/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job.go.erb index b84f208a0fd1..ff096dcab551 100644 --- a/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job.go.erb +++ b/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job.go.erb @@ -11,7 +11,7 @@ import ( "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-google/google/tpgresource" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" "google.golang.org/api/googleapi" @@ -504,23 +504,23 @@ func resourceDataflowFlexTemplateJobRead(d *schema.ResourceData, meta interface{ } func waitForDataflowJobState(d *schema.ResourceData, config *transport_tpg.Config, jobID, userAgent string, timeout time.Duration, targetState string) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { project, err := tpgresource.GetProject(d, config) if err != nil { - return 
resource.NonRetryableError(err) + return retry.NonRetryableError(err) } region, err := tpgresource.GetRegion(d, config) if err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } job, err := resourceDataflowJobGetJob(config, project, region, userAgent, jobID) if err != nil { if transport_tpg.IsRetryableError(err, nil, nil) { - return resource.RetryableError(err) + return retry.RetryableError(err) } - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } state := job.CurrentState @@ -530,13 +530,13 @@ func waitForDataflowJobState(d *schema.ResourceData, config *transport_tpg.Confi } _, terminating := DataflowTerminatingStatesMap[state] if terminating && targetState == "JOB_STATE_RUNNING" { - return resource.NonRetryableError(fmt.Errorf("the job with ID %q is terminating with state %q and cannot reach expected state %q", jobID, state, targetState)) + return retry.NonRetryableError(fmt.Errorf("the job with ID %q is terminating with state %q and cannot reach expected state %q", jobID, state, targetState)) } if _, terminated := DataflowTerminalStatesMap[state]; terminated { - return resource.NonRetryableError(fmt.Errorf("the job with ID %q has terminated with state %q instead of expected state %q", jobID, state, targetState)) + return retry.NonRetryableError(fmt.Errorf("the job with ID %q has terminated with state %q instead of expected state %q", jobID, state, targetState)) } else { log.Printf("[DEBUG] the job with ID %q has state %q.", jobID, state) - return resource.RetryableError(fmt.Errorf("the job with ID %q has state %q, waiting for %q", jobID, state, targetState)) + return retry.RetryableError(fmt.Errorf("the job with ID %q has state %q, waiting for %q", jobID, state, targetState)) } }) } @@ -637,7 +637,7 @@ func resourceDataflowFlexTemplateJobDelete(d *schema.ResourceData, meta interfac } // Retry updating the state while the job is not ready to be canceled/drained. 
- err = resource.Retry(time.Minute*time.Duration(15), func() *resource.RetryError { + err = retry.Retry(time.Minute*time.Duration(15), func() *retry.RetryError { // To terminate a dataflow job, we update the job with a requested // terminal state. job := &dataflow.Job{ @@ -649,14 +649,14 @@ func resourceDataflowFlexTemplateJobDelete(d *schema.ResourceData, meta interfac gerr, isGoogleErr := updateErr.(*googleapi.Error) if !isGoogleErr { // If we have an error and it's not a google-specific error, we should go ahead and return. - return resource.NonRetryableError(updateErr) + return retry.NonRetryableError(updateErr) } if strings.Contains(gerr.Message, "not yet ready for canceling") { // Retry cancelling job if it's not ready. // Sleep to avoid hitting update quota with repeated attempts. time.Sleep(5 * time.Second) - return resource.RetryableError(updateErr) + return retry.RetryableError(updateErr) } if strings.Contains(gerr.Message, "Job has terminated") { diff --git a/mmv1/third_party/terraform/services/dataflow/resource_dataflow_job.go.erb b/mmv1/third_party/terraform/services/dataflow/resource_dataflow_job.go.erb index e2a0bb2f1b95..dee8fa97ca4c 100644 --- a/mmv1/third_party/terraform/services/dataflow/resource_dataflow_job.go.erb +++ b/mmv1/third_party/terraform/services/dataflow/resource_dataflow_job.go.erb @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" dataflow "google.golang.org/api/dataflow/v1b3" "google.golang.org/api/googleapi" ) @@ -498,7 +498,7 @@ func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { } // Retry updating the state while the job is not ready to be canceled/drained. 
- err = resource.Retry(time.Minute*time.Duration(15), func() *resource.RetryError { + err = retry.Retry(time.Minute*time.Duration(15), func() *retry.RetryError { // To terminate a dataflow job, we update the job with a requested // terminal state. job := &dataflow.Job{ @@ -510,14 +510,14 @@ func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { gerr, isGoogleErr := updateErr.(*googleapi.Error) if !isGoogleErr { // If we have an error and it's not a google-specific error, we should go ahead and return. - return resource.NonRetryableError(updateErr) + return retry.NonRetryableError(updateErr) } if strings.Contains(gerr.Message, "not yet ready for canceling") { // Retry cancelling job if it's not ready. // Sleep to avoid hitting update quota with repeated attempts. time.Sleep(5 * time.Second) - return resource.RetryableError(updateErr) + return retry.RetryableError(updateErr) } if strings.Contains(gerr.Message, "Job has terminated") { @@ -683,31 +683,31 @@ func jobHasUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Sche } func waitForDataflowJobToBeUpdated(d *schema.ResourceData, config *transport_tpg.Config, replacementJobID, userAgent string, timeout time.Duration) error { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { project, err := tpgresource.GetProject(d, config) if err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } region, err := tpgresource.GetRegion(d, config) if err != nil { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } replacementJob, err := resourceDataflowJobGetJob(config, project, region, userAgent, replacementJobID) if err != nil { if transport_tpg.IsRetryableError(err, nil, nil) { - return resource.RetryableError(err) + return retry.RetryableError(err) } - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } state := 
replacementJob.CurrentState switch state { case "", "JOB_STATE_PENDING": - return resource.RetryableError(fmt.Errorf("the replacement job with ID %q has pending state %q.", replacementJobID, state)) + return retry.RetryableError(fmt.Errorf("the replacement job with ID %q has pending state %q.", replacementJobID, state)) case "JOB_STATE_FAILED": - return resource.NonRetryableError(fmt.Errorf("the replacement job with ID %q failed with state %q.", replacementJobID, state)) + return retry.NonRetryableError(fmt.Errorf("the replacement job with ID %q failed with state %q.", replacementJobID, state)) default: log.Printf("[DEBUG] the replacement job with ID %q has state %q.", replacementJobID, state) return nil diff --git a/mmv1/third_party/terraform/services/dns/dns_change.go b/mmv1/third_party/terraform/services/dns/dns_change.go index 5dff00b3a8b4..0b9d3a0dd976 100644 --- a/mmv1/third_party/terraform/services/dns/dns_change.go +++ b/mmv1/third_party/terraform/services/dns/dns_change.go @@ -5,7 +5,7 @@ import ( "google.golang.org/api/dns/v1" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" ) type DnsChangeWaiter struct { @@ -15,7 +15,7 @@ type DnsChangeWaiter struct { ManagedZone string } -func (w *DnsChangeWaiter) RefreshFunc() resource.StateRefreshFunc { +func (w *DnsChangeWaiter) RefreshFunc() retry.StateRefreshFunc { return func() (interface{}, string, error) { var chg *dns.Change var err error @@ -31,8 +31,8 @@ func (w *DnsChangeWaiter) RefreshFunc() resource.StateRefreshFunc { } } -func (w *DnsChangeWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ +func (w *DnsChangeWaiter) Conf() *retry.StateChangeConf { + return &retry.StateChangeConf{ Pending: []string{"pending"}, Target: []string{"done"}, Refresh: w.RefreshFunc(), diff --git a/mmv1/third_party/terraform/services/resourcemanager/service_account_waiter.go 
b/mmv1/third_party/terraform/services/resourcemanager/service_account_waiter.go index 8f715b6e51ef..ce84ef8d40f2 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/service_account_waiter.go +++ b/mmv1/third_party/terraform/services/resourcemanager/service_account_waiter.go @@ -4,7 +4,7 @@ import ( "fmt" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "google.golang.org/api/googleapi" "google.golang.org/api/iam/v1" ) @@ -15,7 +15,7 @@ type ServiceAccountKeyWaiter struct { KeyName string } -func (w *ServiceAccountKeyWaiter) RefreshFunc() resource.StateRefreshFunc { +func (w *ServiceAccountKeyWaiter) RefreshFunc() retry.StateRefreshFunc { return func() (interface{}, string, error) { var err error var sak *iam.ServiceAccountKey @@ -40,7 +40,7 @@ func ServiceAccountKeyWaitTime(client *iam.ProjectsServiceAccountsKeysService, k KeyName: keyName, } - c := &resource.StateChangeConf{ + c := &retry.StateChangeConf{ Pending: []string{"PENDING"}, Target: []string{"DONE"}, Refresh: w.RefreshFunc(), diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb index 825f8c2c5adc..61772ba8567c 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/go-cty/cty" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -1054,7 +1054,7 @@ func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) if v, ok := d.GetOk("name"); ok { name = 
v.(string) } else { - name = resource.UniqueId() + name = id.UniqueId() } if err := d.Set("name", name); err != nil { diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb index e7bf3b051fad..ecab8783ddfc 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb @@ -20,7 +20,7 @@ import ( "github.com/gammazero/workerpool" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -1026,15 +1026,15 @@ func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error } // remove empty bucket - err = resource.Retry(1*time.Minute, func() *resource.RetryError { + err = retry.Retry(1*time.Minute, func() *retry.RetryError { err := config.NewStorageClient(userAgent).Buckets.Delete(bucket).Do() if err == nil { return nil } if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 { - return resource.RetryableError(gerr) + return retry.RetryableError(gerr) } - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) }) if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 && strings.Contains(gerr.Message, "not empty") && listError != nil { return fmt.Errorf("could not delete non-empty bucket due to error when listing contents: %v", listError) diff --git a/mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job.go.erb b/mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job.go.erb index d593658c30b5..cbe858f95d4a 100644 --- 
a/mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job.go.erb +++ b/mmv1/third_party/terraform/services/storagetransfer/resource_storage_transfer_job.go.erb @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/verify" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "google.golang.org/api/storagetransfer/v1" @@ -822,10 +822,10 @@ func resourceStorageTransferJobDelete(d *schema.ResourceData, meta interface{}) // Update transfer job with status set to DELETE log.Printf("[DEBUG] Setting status to DELETE for: %v\n\n", transferJobName) - err = resource.Retry(1*time.Minute, func() *resource.RetryError { + err = retry.Retry(1*time.Minute, func() *retry.RetryError { _, err := config.NewStorageTransferClient(userAgent).TransferJobs.Patch(transferJobName, updateRequest).Do() if err != nil { - return resource.RetryableError(err) + return retry.RetryableError(err) } return nil diff --git a/mmv1/third_party/terraform/tpgresource/common_operation.go b/mmv1/third_party/terraform/tpgresource/common_operation.go index cfe6880fde86..6eb38d164d07 100644 --- a/mmv1/third_party/terraform/tpgresource/common_operation.go +++ b/mmv1/third_party/terraform/tpgresource/common_operation.go @@ -5,7 +5,7 @@ import ( "log" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" ) @@ -107,7 +107,7 @@ func OperationDone(w Waiter) bool { return false } -func CommonRefreshFunc(w Waiter) resource.StateRefreshFunc { +func CommonRefreshFunc(w Waiter) 
retry.StateRefreshFunc { return func() (interface{}, string, error) { op, err := w.QueryOp() if err != nil { @@ -141,7 +141,7 @@ func OperationWait(w Waiter, activity string, timeout time.Duration, pollInterva return w.Error() } - c := &resource.StateChangeConf{ + c := &retry.StateChangeConf{ Pending: w.PendingStates(), Target: w.TargetStates(), Refresh: CommonRefreshFunc(w), diff --git a/mmv1/third_party/terraform/transport/common_polling.go b/mmv1/third_party/terraform/transport/common_polling.go index 763be67c8c39..f4957355b63a 100644 --- a/mmv1/third_party/terraform/transport/common_polling.go +++ b/mmv1/third_party/terraform/transport/common_polling.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" ) type ( @@ -16,16 +16,16 @@ type ( // Function to check the response from polling once PollCheckResponseFunc func(resp map[string]interface{}, respErr error) PollResult - PollResult *resource.RetryError + PollResult *retry.RetryError ) // Helper functions to construct result of single pollRead as return result for a PollCheckResponseFunc func ErrorPollResult(err error) PollResult { - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) } func PendingStatusPollResult(status string) PollResult { - return resource.RetryableError(fmt.Errorf("got pending status %q", status)) + return retry.RetryableError(fmt.Errorf("got pending status %q", status)) } func SuccessPollResult() PollResult { @@ -37,12 +37,12 @@ func PollingWaitTime(pollF PollReadFunc, checkResponse PollCheckResponseFunc, ac log.Printf("[DEBUG] %s: Polling until expected state is read", activity) log.Printf("[DEBUG] Target occurrences: %d", targetOccurrences) if targetOccurrences == 1 { - return resource.Retry(timeout, func() *resource.RetryError { + return retry.Retry(timeout, func() *retry.RetryError { readResp, readErr := pollF() return checkResponse(readResp, 
readErr) }) } - return RetryWithTargetOccurrences(timeout, targetOccurrences, func() *resource.RetryError { + return RetryWithTargetOccurrences(timeout, targetOccurrences, func() *retry.RetryError { readResp, readErr := pollF() return checkResponse(readResp, readErr) }) @@ -52,13 +52,13 @@ func PollingWaitTime(pollF PollReadFunc, checkResponse PollCheckResponseFunc, ac // a function until it returns the specified amount of target occurrences continuously. // Adapted from the Retry function in the go SDK. func RetryWithTargetOccurrences(timeout time.Duration, targetOccurrences int, - f resource.RetryFunc) error { + f retry.RetryFunc) error { // These are used to pull the error out of the function; need a mutex to // avoid a data race. var resultErr error var resultErrMu sync.Mutex - c := &resource.StateChangeConf{ + c := &retry.StateChangeConf{ Pending: []string{"retryableerror"}, Target: []string{"success"}, Timeout: timeout, diff --git a/mmv1/third_party/terraform/transport/config.go.erb b/mmv1/third_party/terraform/transport/config.go.erb index 7c9667635655..a548e2371120 100644 --- a/mmv1/third_party/terraform/transport/config.go.erb +++ b/mmv1/third_party/terraform/transport/config.go.erb @@ -194,7 +194,7 @@ type Config struct { DefaultLabels map[string]string AddTerraformAttributionLabel bool TerraformAttributionLabelAdditionStrategy string - // PollInterval is passed to resource.StateChangeConf in common_operation.go + // PollInterval is passed to retry.StateChangeConf in common_operation.go // It controls the interval at which we poll for successful operations PollInterval time.Duration diff --git a/mmv1/third_party/terraform/transport/retry_transport.go b/mmv1/third_party/terraform/transport/retry_transport.go index 1649a15d7582..2419357ec291 100644 --- a/mmv1/third_party/terraform/transport/retry_transport.go +++ b/mmv1/third_party/terraform/transport/retry_transport.go @@ -4,7 +4,7 @@ // Do not use for waiting on operations or polling of resource state, // 
especially if the expected state (operation done, resource ready, etc) // takes longer to reach than the default client Timeout. -// In those cases, Retry(...)/resource.Retry with appropriate timeout +// In those cases, Retry(...)/retry.Retry with appropriate timeout // and error predicates/handling should be used as a wrapper around the request // instead. // @@ -40,7 +40,7 @@ import ( "net/http/httputil" "time" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "google.golang.org/api/googleapi" ) @@ -179,7 +179,7 @@ func copyHttpRequest(req *http.Request) (*http.Request, error) { // checkForRetryableError uses the googleapi.CheckResponse util to check for // errors in the response, and determines whether there is a retryable error. // in response/response error. -func (t *retryTransport) checkForRetryableError(resp *http.Response, respErr error) *resource.RetryError { +func (t *retryTransport) checkForRetryableError(resp *http.Response, respErr error) *retry.RetryError { var errToCheck error if respErr != nil { @@ -194,7 +194,7 @@ func (t *retryTransport) checkForRetryableError(resp *http.Response, respErr err // error code and messages in the response body. 
dumpBytes, err := httputil.DumpResponse(resp, true) if err != nil { - return resource.NonRetryableError(fmt.Errorf("unable to check response for error: %v", err)) + return retry.NonRetryableError(fmt.Errorf("unable to check response for error: %v", err)) } respToCheck.Body = ioutil.NopCloser(bytes.NewReader(dumpBytes)) } @@ -205,7 +205,7 @@ func (t *retryTransport) checkForRetryableError(resp *http.Response, respErr err return nil } if IsRetryableError(errToCheck, t.retryPredicates, nil) { - return resource.RetryableError(errToCheck) + return retry.RetryableError(errToCheck) } - return resource.NonRetryableError(errToCheck) + return retry.NonRetryableError(errToCheck) } diff --git a/mmv1/third_party/terraform/transport/retry_utils.go b/mmv1/third_party/terraform/transport/retry_utils.go index fc386a56ebf5..881b9aacbfdb 100644 --- a/mmv1/third_party/terraform/transport/retry_utils.go +++ b/mmv1/third_party/terraform/transport/retry_utils.go @@ -5,7 +5,7 @@ import ( "time" "github.com/hashicorp/errwrap" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" ) type RetryOptions struct { @@ -36,7 +36,7 @@ func Retry(opt RetryOptions) error { // The error is not retryable. 
return "", "done", err } - stateChange := &resource.StateChangeConf{ + stateChange := &retry.StateChangeConf{ Pending: []string{ "retrying", }, @@ -52,15 +52,15 @@ func Retry(opt RetryOptions) error { return err } - return resource.Retry(opt.Timeout, func() *resource.RetryError { + return retry.Retry(opt.Timeout, func() *retry.RetryError { err := opt.RetryFunc() if err == nil { return nil } if IsRetryableError(err, opt.ErrorRetryPredicates, opt.ErrorAbortPredicates) { - return resource.RetryableError(err) + return retry.RetryableError(err) } - return resource.NonRetryableError(err) + return retry.NonRetryableError(err) }) } diff --git a/mmv1/third_party/tgc/sql_database_instance.go b/mmv1/third_party/tgc/sql_database_instance.go index cb844f1dc8b1..45cecb9a91a7 100644 --- a/mmv1/third_party/tgc/sql_database_instance.go +++ b/mmv1/third_party/tgc/sql_database_instance.go @@ -11,7 +11,7 @@ package google import ( "regexp" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" @@ -65,7 +65,7 @@ func GetSQLDatabaseInstanceApiObject(d tpgresource.TerraformResourceData, config if v, ok := d.GetOk("name"); ok { name = v.(string) } else { - name = resource.UniqueId() + name = id.UniqueId() } instance := &sqladmin.DatabaseInstance{ From 9c2a3d502e39f8bee63f117f879030e63334b2a4 Mon Sep 17 00:00:00 2001 From: dvfons <167889585+dvfons@users.noreply.github.com> Date: Fri, 7 Jun 2024 17:28:57 +0000 Subject: [PATCH 088/356] Add support for 'aiplatform.googleapis.com/Dataset' to TGC (#10740) Co-authored-by: Stephen Lewis (Burrows) --- mmv1/products/vertexai/product.yaml | 2 ++ mmv1/templates/tgc/resource_converters.go.erb | 1 + .../tests/data/example_vertex_ai_dataset.json | 21 +++++++++++++++++ .../tests/data/example_vertex_ai_dataset.tf | 23 
+++++++++++++++++++ 4 files changed, 47 insertions(+) create mode 100644 mmv1/third_party/tgc/tests/data/example_vertex_ai_dataset.json create mode 100644 mmv1/third_party/tgc/tests/data/example_vertex_ai_dataset.tf diff --git a/mmv1/products/vertexai/product.yaml b/mmv1/products/vertexai/product.yaml index 86d545efae39..ca466a53cde9 100644 --- a/mmv1/products/vertexai/product.yaml +++ b/mmv1/products/vertexai/product.yaml @@ -18,8 +18,10 @@ versions: - !ruby/object:Api::Product::Version name: ga base_url: https://{{region}}-aiplatform.googleapis.com/v1/ + cai_base_url: https://aiplatform.googleapis.com/v1/ - !ruby/object:Api::Product::Version name: beta base_url: https://{{region}}-aiplatform.googleapis.com/v1beta1/ + cai_base_url: https://aiplatform.googleapis.com/v1beta1/ scopes: - https://www.googleapis.com/auth/cloud-platform diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index fecfc286418c..aaed7c483d82 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -123,6 +123,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_cloudfunctions_function": {resourceConverterCloudFunctionsCloudFunction()}, "google_monitoring_notification_channel": {monitoring.ResourceConverterMonitoringNotificationChannel()}, "google_monitoring_alert_policy": {monitoring.ResourceConverterMonitoringAlertPolicy()}, + "google_vertex_ai_dataset": {vertexai.ResourceConverterVertexAIDataset()}, <% products.each do |product| -%> <% (product[:definitions].objects || []).each do |object| -%> <% if object.min_version.name == "ga" -%> diff --git a/mmv1/third_party/tgc/tests/data/example_vertex_ai_dataset.json b/mmv1/third_party/tgc/tests/data/example_vertex_ai_dataset.json new file mode 100644 index 000000000000..0dc9d1a8887d --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_vertex_ai_dataset.json @@ -0,0 +1,21 @@ +[ + { + "name": 
"//aiplatform.googleapis.com/placeholder-VSDAy5Fn", + "asset_type": "aiplatform.googleapis.com/Dataset", + "resource": { + "version": "v1beta1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/aiplatform/v1beta1/rest", + "discovery_name": "Dataset", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "displayName": "terraform", + "labels": { + "env": "test" + }, + "metadataSchemaUri": "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml" + } + }, + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "ancestors": ["organizations/{{.OrgID}}"] + } +] diff --git a/mmv1/third_party/tgc/tests/data/example_vertex_ai_dataset.tf b/mmv1/third_party/tgc/tests/data/example_vertex_ai_dataset.tf new file mode 100644 index 000000000000..092c52391a0b --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_vertex_ai_dataset.tf @@ -0,0 +1,23 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + + +resource "google_vertex_ai_dataset" "dataset" { + display_name = "terraform" + metadata_schema_uri = "gs://google-cloud-aiplatform/schema/dataset/metadata/image_1.0.0.yaml" + region = "us-central1" + + labels = { + env = "test" + } +} From 146bdbfaee2522710659e5c80f26aabf12d7d0a9 Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Fri, 7 Jun 2024 10:39:09 -0700 Subject: [PATCH 089/356] Update enrolled_teams.yml (#10912) --- tools/issue-labeler/labeler/enrolled_teams.yml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tools/issue-labeler/labeler/enrolled_teams.yml b/tools/issue-labeler/labeler/enrolled_teams.yml index 8c09d21f1de7..b22412b37b1c 100755 --- a/tools/issue-labeler/labeler/enrolled_teams.yml +++ 
b/tools/issue-labeler/labeler/enrolled_teams.yml @@ -239,7 +239,7 @@ service/compute-security-policy: resources: - google_compute_network_edge_security_service - google_compute_region_security_policy.* - - google_compute_security_policy + - google_compute_security_policy.* service/compute-sole-tenancy: resources: - google_compute_node_group @@ -507,6 +507,9 @@ service/osconfig: service/oslogin: resources: - google_os_login_.* +service/parallelstore: + resources: + - google_parallestore_.* service/policy-based-routing: resources: - google_network_connectivity_policy_based_route @@ -519,6 +522,9 @@ service/private-service-connect-published-service: service/privateca: resources: - google_privateca_.* +service/privilegedaccessmanager: + resources: + - google_privileged_access_manager_.* service/publicca: resources: - google_public_ca_external_account_key @@ -539,6 +545,9 @@ service/redis-cluster: service/redis-instance: resources: - google_redis_instance +service/regional-endpoints: + resources: + - google_network_connectivity_regional_endpoint service/run: team: cloud-run-control-plane resources: @@ -615,4 +624,4 @@ service/workflows: - google_workflows.* service/workstations: resources: - - google_workstations_.* \ No newline at end of file + - google_workstations_.* From 66ca55b3c0bfb8765a3c3a60adcd140c5d0e2e39 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Fri, 7 Jun 2024 11:42:50 -0700 Subject: [PATCH 090/356] Pr notifications fixes (#10913) --- .ci/magician/cmd/scheduled_pr_reminders.go | 2 +- .ci/magician/cmd/scheduled_pr_reminders_test.go | 8 ++++---- .github/workflows/scheduled-pr-reminders.yml | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.ci/magician/cmd/scheduled_pr_reminders.go b/.ci/magician/cmd/scheduled_pr_reminders.go index 0201f0da8807..96f1822c6ded 100644 --- a/.ci/magician/cmd/scheduled_pr_reminders.go +++ b/.ci/magician/cmd/scheduled_pr_reminders.go @@ -421,7 +421,7 @@ func shouldNotify(pr *github.PullRequest, 
state pullRequestReviewState, sinceDay if _, ok := labels["disable-review-reminders"]; ok { return false } - return sinceDays == 2 || (sinceDays > 0 && sinceDays%5 == 0) + return sinceDays == 3 || (sinceDays > 0 && sinceDays%5 == 0) } return false } diff --git a/.ci/magician/cmd/scheduled_pr_reminders_test.go b/.ci/magician/cmd/scheduled_pr_reminders_test.go index b23edb64fd45..f99087e6f012 100644 --- a/.ci/magician/cmd/scheduled_pr_reminders_test.go +++ b/.ci/magician/cmd/scheduled_pr_reminders_test.go @@ -627,10 +627,10 @@ func TestShouldNotify(t *testing.T) { sinceDays: 1, want: false, }, - "waitingForReview two days": { + "waitingForReview three days": { pullRequest: &github.PullRequest{}, state: waitingForReview, - sinceDays: 2, + sinceDays: 3, want: true, }, "waitingForReview first week": { @@ -784,10 +784,10 @@ func TestFormatReminderComment(t *testing.T) { state: waitingForReview, data: reminderCommentData{ PullRequest: &github.PullRequest{}, - SinceDays: 2, + SinceDays: 3, }, expectedStrings: []string{ - "waiting for review for 2 weekdays", + "waiting for review for 3 weekdays", "disable-review-reminders", }, notExpectedStrings: []string{ diff --git a/.github/workflows/scheduled-pr-reminders.yml b/.github/workflows/scheduled-pr-reminders.yml index 3652bfb0b217..b96ac2dcef0a 100644 --- a/.github/workflows/scheduled-pr-reminders.yml +++ b/.github/workflows/scheduled-pr-reminders.yml @@ -4,7 +4,7 @@ permissions: read-all on: schedule: - - cron: '37 9 * * *' + - cron: '37 9 * * 1-5' jobs: send-pr-reminders: From c1202ec9e4aabdc2211c33eb41b55ccb491da4cb Mon Sep 17 00:00:00 2001 From: bcreddy-gcp <123543489+bcreddy-gcp@users.noreply.github.com> Date: Fri, 7 Jun 2024 11:49:37 -0700 Subject: [PATCH 091/356] update protected metadata (#10900) --- .../terraform/constants/workbench_instance.go | 100 ++++++++++-------- 1 file changed, 53 insertions(+), 47 deletions(-) diff --git a/mmv1/templates/terraform/constants/workbench_instance.go 
b/mmv1/templates/terraform/constants/workbench_instance.go index d5359c286b08..b758fb35c2db 100644 --- a/mmv1/templates/terraform/constants/workbench_instance.go +++ b/mmv1/templates/terraform/constants/workbench_instance.go @@ -24,53 +24,59 @@ func WorkbenchInstanceLabelsDiffSuppress(k, old, new string, d *schema.ResourceD var WorkbenchInstanceProvidedMetadata = []string{ - "disable-swap-binaries", - "enable-guest-attributes", - "proxy-backend-id", - "proxy-registration-url", - "agent-health-check-interval-seconds", - "agent-health-check-path", - "container", - "data-disk-uri", - "dataproc-allow-custom-clusters", - "dataproc-cluster-name", - "dataproc-configs", - "dataproc-default-subnet", - "dataproc-locations-list", - "dataproc-machine-types-list", - "dataproc-notebooks-url", - "dataproc-region", - "dataproc-service-account", - "disable-check-xsrf", - "framework", - "gcs-data-bucket", - "generate-diagnostics-bucket", - "generate-diagnostics-file", - "generate-diagnostics-options", - "image-url", - "install-monitoring-agent", - "install-nvidia-driver", - "installed-extensions", - "notebooks-api", - "notebooks-api-version", - "notebooks-examples-location", - "notebooks-location", - "nvidia-driver-gcs-path", - "proxy-mode", - "proxy-status", - "proxy-url", - "proxy-user-mail", - "report-container-health", - "report-notebook-metrics", - "report-system-health", - "report-system-status", - "restriction", - "serial-port-logging-enable", - "shutdown-script", - "title", - "use-collaborative", - "version", - "enable-oslogin", + "agent-health-check-interval-seconds", + "agent-health-check-path", + "container", + "custom-container-image", + "custom-container-payload", + "data-disk-uri", + "dataproc-allow-custom-clusters", + "dataproc-cluster-name", + "dataproc-configs", + "dataproc-default-subnet", + "dataproc-locations-list", + "dataproc-machine-types-list", + "dataproc-notebooks-url", + "dataproc-region", + "dataproc-service-account", + "disable-check-xsrf", + 
"framework", + "gcs-data-bucket", + "generate-diagnostics-bucket", + "generate-diagnostics-file", + "generate-diagnostics-options", + "image-url", + "install-monitoring-agent", + "install-nvidia-driver", + "installed-extensions", + "last_updated_diagnostics", + "notebooks-api", + "notebooks-api-version", + "notebooks-examples-location", + "notebooks-location", + "proxy-backend-id", + "proxy-byoid-url", + "proxy-mode", + "proxy-status", + "proxy-url", + "proxy-user-mail", + "report-container-health", + "report-event-url", + "report-notebook-metrics", + "report-system-health", + "report-system-status", + "restriction", + "serial-port-logging-enable", + "shutdown-script", + "title", + "use-collaborative", + "user-data", + "version", + + "disable-swap-binaries", + "enable-guest-attributes", + "enable-oslogin", + "proxy-registration-url", } func WorkbenchInstanceMetadataDiffSuppress(k, old, new string, d *schema.ResourceData) bool { From be1bb1792416b950819b57fbbd9b0d57f3ec9d69 Mon Sep 17 00:00:00 2001 From: delimaneto <167232526+delimaneto@users.noreply.github.com> Date: Fri, 7 Jun 2024 18:52:10 +0000 Subject: [PATCH 092/356] Add support for `google_firebase_project` to TGC (#10847) --- mmv1/provider/terraform_tgc.rb | 4 +- mmv1/templates/tgc/resource_converters.go.erb | 1 + mmv1/third_party/tgc/firebase_project.go | 105 ++++++++++++++++++ .../data/example_google_firebase_project.json | 20 ++++ .../data/example_google_firebase_project.tf | 17 +++ 5 files changed, 146 insertions(+), 1 deletion(-) create mode 100644 mmv1/third_party/tgc/firebase_project.go create mode 100644 mmv1/third_party/tgc/tests/data/example_google_firebase_project.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_firebase_project.tf diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index 6d48122f5c41..3674fe99cd77 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -317,7 +317,9 @@ def copy_common_files(output_folder, 
generate_code, _generate_docs) ['converters/google/resources/dataproc_cluster.go', 'third_party/tgc/dataproc_cluster.go'], ['converters/google/resources/commitment.go', - 'third_party/tgc/commitment.go'] + 'third_party/tgc/commitment.go'], + ['converters/google/resources/firebase_project.go', + 'third_party/tgc/firebase_project.go'] ]) end diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index aaed7c483d82..5f1d0474b3d3 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -86,6 +86,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_datastream_connection_profile": {datastream.ResourceConverterDatastreamConnectionProfile()}, "google_datastream_private_connection": {datastream.ResourceConverterDatastreamPrivateConnection()}, "google_datastream_stream": {datastream.ResourceConverterDatastreamStream()}, + "google_firebase_project": {resourceConverterFirebaseProject()}, "google_org_policy_policy": {resourceConverterOrgPolicyPolicy()}, "google_redis_instance": {redis.ResourceConverterRedisInstance()}, "google_spanner_database": {spanner.ResourceConverterSpannerDatabase()}, diff --git a/mmv1/third_party/tgc/firebase_project.go b/mmv1/third_party/tgc/firebase_project.go new file mode 100644 index 000000000000..28b469278020 --- /dev/null +++ b/mmv1/third_party/tgc/firebase_project.go @@ -0,0 +1,105 @@ +package google + +import ( + "reflect" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const firebaseProjectAssetType string = "firebase.googleapis.com/FirebaseProject" + +func resourceConverterFirebaseProject() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: 
firebaseProjectAssetType, + Convert: GetFirebaseProjectCaiObject, + } +} + +func GetFirebaseProjectCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//firebase.googleapis.com/v1beta1/projects/{{project}}") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetFirebaseProjectApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: firebaseProjectAssetType, + Resource: &cai.AssetResource{ + Version: "v1beta1", + DiscoveryDocumentURI: "https://firebase.googleapis.com/$discovery/rest?version=v1beta1", + DiscoveryName: "FirebaseProject", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetFirebaseProjectApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := make(map[string]interface{}) + + nameProp, err := expandFirebaseProjectName(d.Get("name"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + projectProp, err := expandFirebaseProjectProjectId(d.Get("project"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("project"); !tpgresource.IsEmptyValue(reflect.ValueOf(projectProp)) && (ok || !reflect.DeepEqual(v, projectProp)) { + obj["projectId"] = projectProp + } + + idProp, err := expandFirebaseProjectId(d.Get("id"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("id"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + + projectNumberProp, err := expandFirebaseProjectProjectNumber(d.Get("project_number"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("project_number"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(projectNumberProp)) && (ok || !reflect.DeepEqual(v, projectNumberProp)) { + obj["projectNumber"] = projectNumberProp + } + + displayNameProp, err := expandFirebaseProjectDisplayName(d.Get("display_name"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + + return obj, nil +} + +func expandFirebaseProjectDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirebaseProjectProjectNumber(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirebaseProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}") + if err != nil { + return nil, err + } + + return v, nil +} + +func expandFirebaseProjectProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandFirebaseProjectName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_firebase_project.json b/mmv1/third_party/tgc/tests/data/example_google_firebase_project.json new file mode 100644 index 000000000000..afbbf22805ff --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_firebase_project.json @@ -0,0 +1,20 @@ +[ + { + "name": "//firebase.googleapis.com/v1beta1/projects/{{.Provider.project}}", + "asset_type": "firebase.googleapis.com/FirebaseProject", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + 
"resource": { + "version": "v1beta1", + "discovery_document_uri": "https://firebase.googleapis.com/$discovery/rest?version=v1beta1", + "discovery_name": "FirebaseProject", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "id": "projects/{{.Provider.project}}", + "projectId": "{{.Provider.project}}" + } + }, + "ancestors": [ + "organizations/{{.OrgID}}" + ] + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_firebase_project.tf b/mmv1/third_party/tgc/tests/data/example_google_firebase_project.tf new file mode 100644 index 000000000000..6fd5d9d40b12 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_firebase_project.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_firebase_project" "default" { + provider = google-beta + project = "{{.Provider.project}}" +} \ No newline at end of file From 50c6e9ac25cfcdb3d642fa60f8695c4f25212ddc Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Fri, 7 Jun 2024 14:40:39 -0700 Subject: [PATCH 093/356] Included PR reviewer handles in message for clarity (#10917) --- ...HEDULED_PR_WAITING_FOR_CONTRIBUTOR.md.tmpl | 4 +- .../SCHEDULED_PR_WAITING_FOR_MERGE.md.tmpl | 2 +- .../SCHEDULED_PR_WAITING_FOR_REVIEW.md.tmpl | 2 +- .ci/magician/cmd/scheduled_pr_reminders.go | 27 ++- .../cmd/scheduled_pr_reminders_test.go | 207 +++++++++++++----- 5 files changed, 178 insertions(+), 64 deletions(-) diff --git a/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_CONTRIBUTOR.md.tmpl b/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_CONTRIBUTOR.md.tmpl index beca1ba5bfd2..62b5ef2fba2e 100644 --- a/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_CONTRIBUTOR.md.tmpl +++ b/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_CONTRIBUTOR.md.tmpl 
@@ -1,5 +1,5 @@ {{ if lt .SinceDays 30 -}} -@{{.PullRequest.User.Login}}, this PR is waiting for action from you. Please address any comments or change requests, or [re-request review](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/requesting-a-pull-request-review) from a core reviewer if no action is required. +@{{.User}}, this PR is waiting for action from you. Please address any comments or change requests, or [re-request review](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/requesting-a-pull-request-review) from a core reviewer if no action is required. ![Image showing the re-request review button](https://docs.github.com/assets/cb-28785/mw-1440/images/help/pull_requests/request-re-review.webp) @@ -10,5 +10,5 @@ If no action is taken, this PR will be closed in This notification can be disabled with the `disable-automatic-closure` label. {{ else -}} -@{{.PullRequest.User.Login}}, this PR is being closed due to inactivity. +@{{.User}}, this PR is being closed due to inactivity. {{ end -}} \ No newline at end of file diff --git a/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_MERGE.md.tmpl b/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_MERGE.md.tmpl index c436f97cae76..bcbacfeb4394 100644 --- a/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_MERGE.md.tmpl +++ b/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_MERGE.md.tmpl @@ -1 +1 @@ -This PR is approved and has been waiting for merge for {{if eq .SinceDays 5}}1 week{{else}}{{weekdaysToWeeks .SinceDays}} weeks{{end}}. Is it ready to merge? Use the label `disable-review-reminders` to disable these notifications. \ No newline at end of file +{{range .CoreReviewers}}@{{ . }} {{end}}This PR is approved and has been waiting for merge for {{if eq .SinceDays 5}}1 week{{else}}{{weekdaysToWeeks .SinceDays}} weeks{{end}}. Is it ready to merge? 
Use the label `disable-review-reminders` to disable these notifications. \ No newline at end of file diff --git a/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_REVIEW.md.tmpl b/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_REVIEW.md.tmpl index 3d58a5984780..83f1fab90359 100644 --- a/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_REVIEW.md.tmpl +++ b/.ci/magician/cmd/SCHEDULED_PR_WAITING_FOR_REVIEW.md.tmpl @@ -1 +1 @@ -{{if ge .SinceDays 5}}@GoogleCloudPlatform/terraform-team {{end}}This PR has been waiting for review for {{if lt .SinceDays 5}}{{.SinceDays}} weekdays{{else if eq .SinceDays 5}}1 week{{else}}{{weekdaysToWeeks .SinceDays}} weeks{{end}}. Please take a look! Use the label `disable-review-reminders` to disable these notifications. \ No newline at end of file +{{if ge .SinceDays 5}}@GoogleCloudPlatform/terraform-team {{end}}{{range .CoreReviewers}}@{{ . }} {{end}}This PR has been waiting for review for {{if lt .SinceDays 5}}{{.SinceDays}} weekdays{{else if eq .SinceDays 5}}1 week{{else}}{{weekdaysToWeeks .SinceDays}} weeks{{end}}. Please take a look! Use the label `disable-review-reminders` to disable these notifications. 
\ No newline at end of file diff --git a/.ci/magician/cmd/scheduled_pr_reminders.go b/.ci/magician/cmd/scheduled_pr_reminders.go index 96f1822c6ded..73315835f596 100644 --- a/.ci/magician/cmd/scheduled_pr_reminders.go +++ b/.ci/magician/cmd/scheduled_pr_reminders.go @@ -49,9 +49,9 @@ var ( ) type reminderCommentData struct { - PullRequest *github.PullRequest - State pullRequestReviewState - SinceDays int + User string + SinceDays int + CoreReviewers []string } // scheduledPrReminders sends automated PR notifications and closes stale PRs @@ -165,10 +165,7 @@ func execScheduledPrReminders(gh *github.Client) error { ) sinceDays := businessDaysDiff(since, time.Now()) if shouldNotify(pr, state, sinceDays) { - comment, err := formatReminderComment(state, reminderCommentData{ - PullRequest: pr, - SinceDays: sinceDays, - }) + comment, err := formatReminderComment(pr, state, sinceDays) if err != nil { fmt.Printf( "%d/%d: PR %d: error rendering comment: %s\n", @@ -426,7 +423,7 @@ func shouldNotify(pr *github.PullRequest, state pullRequestReviewState, sinceDay return false } -func formatReminderComment(state pullRequestReviewState, data reminderCommentData) (string, error) { +func formatReminderComment(pullRequest *github.PullRequest, state pullRequestReviewState, sinceDays int) (string, error) { embeddedTemplate := "" switch state { case waitingForMerge: @@ -446,6 +443,20 @@ func formatReminderComment(state pullRequestReviewState, data reminderCommentDat if err != nil { panic(fmt.Sprintf("Unable to parse template for %s: %s", state.String(), err)) } + + coreReviewers := []string{} + for _, reviewer := range pullRequest.RequestedReviewers { + if membership.IsCoreReviewer(*reviewer.Login) { + coreReviewers = append(coreReviewers, *reviewer.Login) + } + } + + data := reminderCommentData{ + User: *pullRequest.User.Login, + SinceDays: sinceDays, + CoreReviewers: coreReviewers, + } + sb := new(strings.Builder) err = tmpl.Execute(sb, data) if err != nil { diff --git 
a/.ci/magician/cmd/scheduled_pr_reminders_test.go b/.ci/magician/cmd/scheduled_pr_reminders_test.go index f99087e6f012..96a617f830bb 100644 --- a/.ci/magician/cmd/scheduled_pr_reminders_test.go +++ b/.ci/magician/cmd/scheduled_pr_reminders_test.go @@ -738,156 +738,259 @@ func TestShouldNotify(t *testing.T) { } func TestFormatReminderComment(t *testing.T) { + firstCoreReviewer := membership.AvailableReviewers()[0] + secondCoreReviewer := membership.AvailableReviewers()[1] cases := map[string]struct { + pullRequest *github.PullRequest state pullRequestReviewState - data reminderCommentData + sinceDays int expectedStrings []string notExpectedStrings []string }{ // waitingForMerge "waitingForMerge one week": { - state: waitingForMerge, - data: reminderCommentData{ - PullRequest: &github.PullRequest{}, - SinceDays: 5, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, + }, }, + state: waitingForMerge, + sinceDays: 5, expectedStrings: []string{ "waiting for merge for 1 week", "disable-review-reminders", + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, + }, + notExpectedStrings: []string{ + "@pr-author", + "@other-reviewer", }, }, "waitingForMerge two weeks": { - state: waitingForMerge, - data: reminderCommentData{ - PullRequest: &github.PullRequest{}, - SinceDays: 5 * 2, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, + }, }, + state: waitingForMerge, + sinceDays: 5 * 2, expectedStrings: []string{ "waiting for merge for 2 weeks", "disable-review-reminders", + "@" + 
firstCoreReviewer, + "@" + secondCoreReviewer, + }, + notExpectedStrings: []string{ + "@pr-author", + "@other-reviewer", }, }, "waitingForMerge many weeks": { - state: waitingForMerge, - data: reminderCommentData{ - PullRequest: &github.PullRequest{}, - SinceDays: 5 * 57, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, + }, }, + state: waitingForMerge, + sinceDays: 5 * 57, expectedStrings: []string{ "waiting for merge for 57 weeks", "disable-review-reminders", + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, + }, + notExpectedStrings: []string{ + "@pr-author", + "@other-reviewer", }, }, // waitingForReview - "waitingForReview two days": { - state: waitingForReview, - data: reminderCommentData{ - PullRequest: &github.PullRequest{}, - SinceDays: 3, + "waitingForReview three days": { + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, + }, }, + state: waitingForReview, + sinceDays: 3, expectedStrings: []string{ "waiting for review for 3 weekdays", "disable-review-reminders", + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, }, notExpectedStrings: []string{ "@GoogleCloudPlatform/terraform-team", + "@pr-author", + "@other-reviewer", }, }, "waitingForReview one week": { - state: waitingForReview, - data: reminderCommentData{ - PullRequest: &github.PullRequest{}, - SinceDays: 5, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + 
&github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, + }, }, + state: waitingForReview, + sinceDays: 5, expectedStrings: []string{ "@GoogleCloudPlatform/terraform-team", "waiting for review for 1 week", "disable-review-reminders", + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, + }, + notExpectedStrings: []string{ + "@pr-author", + "@other-reviewer", }, }, "waitingForReview two weeks": { - state: waitingForReview, - data: reminderCommentData{ - PullRequest: &github.PullRequest{}, - SinceDays: 10, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, + }, }, + state: waitingForReview, + sinceDays: 10, expectedStrings: []string{ "@GoogleCloudPlatform/terraform-team", "waiting for review for 2 weeks", "disable-review-reminders", + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, + }, + notExpectedStrings: []string{ + "@pr-author", + "@other-reviewer", }, }, // waitingForContributor "waitingForContributor two weeks": { - state: waitingForContributor, - data: reminderCommentData{ - PullRequest: &github.PullRequest{ - User: &github.User{Login: github.String("pr-author")}, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, }, - SinceDays: 10, }, + state: waitingForContributor, + sinceDays: 10, expectedStrings: []string{ "@pr-author", "If no action is taken, this PR will be closed in 28 days", "disable-automatic-closure", }, + notExpectedStrings: []string{ + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, + 
"@other-reviewer", + }, }, "waitingForContributor four weeks": { - state: waitingForContributor, - data: reminderCommentData{ - PullRequest: &github.PullRequest{ - User: &github.User{Login: github.String("pr-author")}, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, }, - SinceDays: 20, }, + state: waitingForContributor, + sinceDays: 20, expectedStrings: []string{ "@pr-author", "If no action is taken, this PR will be closed in 14 days", "disable-automatic-closure", }, + notExpectedStrings: []string{ + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, + "@other-reviewer", + }, }, "waitingForContributor 28 days": { - state: waitingForContributor, - data: reminderCommentData{ - PullRequest: &github.PullRequest{ - User: &github.User{Login: github.String("pr-author")}, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, }, - SinceDays: 28, }, + state: waitingForContributor, + sinceDays: 28, expectedStrings: []string{ "@pr-author", "If no action is taken, this PR will be closed in 2 weekdays", "disable-automatic-closure", }, + notExpectedStrings: []string{ + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, + "@other-reviewer", + }, }, "waitingForContributor six weeks": { - state: waitingForContributor, - data: reminderCommentData{ - PullRequest: &github.PullRequest{ - User: &github.User{Login: github.String("pr-author")}, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + 
&github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, }, - SinceDays: 30, }, + state: waitingForContributor, + sinceDays: 30, expectedStrings: []string{"@pr-author", "PR is being closed due to inactivity"}, notExpectedStrings: []string{ "If no action is taken, this PR will be closed", "disable-automatic-closure", + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, + "@other-reviewer", }, }, "waitingForContributor seven weeks": { - state: waitingForContributor, - data: reminderCommentData{ - PullRequest: &github.PullRequest{ - User: &github.User{Login: github.String("pr-author")}, + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("pr-author")}, + RequestedReviewers: []*github.User{ + &github.User{Login: github.String(firstCoreReviewer)}, + &github.User{Login: github.String(secondCoreReviewer)}, + &github.User{Login: github.String("other-reviewer")}, }, - SinceDays: 35, }, + state: waitingForContributor, + sinceDays: 35, expectedStrings: []string{"@pr-author", "PR is being closed due to inactivity"}, notExpectedStrings: []string{ "If no action is taken, this PR will be closed", "disable-automatic-closure", + "@" + firstCoreReviewer, + "@" + secondCoreReviewer, + "@other-reviewer", }, }, } @@ -897,7 +1000,7 @@ func TestFormatReminderComment(t *testing.T) { t.Run(tn, func(t *testing.T) { t.Parallel() - comment, err := formatReminderComment(tc.state, tc.data) + comment, err := formatReminderComment(tc.pullRequest, tc.state, tc.sinceDays) assert.Nil(t, err) for _, s := range tc.expectedStrings { From 49e75a0f48d5b9baf0138cbc97cec6ad5f9651c3 Mon Sep 17 00:00:00 2001 From: vijaykanthm Date: Fri, 7 Jun 2024 15:31:31 -0700 Subject: [PATCH 094/356] Add Resource ProjectSecurityHealthAnalyticsCustomModule (#10863) --- ...ctSecurityHealthAnalyticsCustomModule.yaml | 213 ++++++++++++++++++ ...ealth_analytics_custom_module_basic.tf.erb | 
18 ++ ...health_analytics_custom_module_full.tf.erb | 32 +++ ...ity_health_analytics_custom_module_test.go | 196 ++++++++++++++++ 4 files changed, 459 insertions(+) create mode 100644 mmv1/products/securitycentermanagement/ProjectSecurityHealthAnalyticsCustomModule.yaml create mode 100644 mmv1/templates/terraform/examples/scc_management_organization_project_security_health_analytics_custom_module_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/scc_management_organization_project_security_health_analytics_custom_module_full.tf.erb create mode 100644 mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_project_security_health_analytics_custom_module_test.go diff --git a/mmv1/products/securitycentermanagement/ProjectSecurityHealthAnalyticsCustomModule.yaml b/mmv1/products/securitycentermanagement/ProjectSecurityHealthAnalyticsCustomModule.yaml new file mode 100644 index 000000000000..12963011fd63 --- /dev/null +++ b/mmv1/products/securitycentermanagement/ProjectSecurityHealthAnalyticsCustomModule.yaml @@ -0,0 +1,213 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'ProjectSecurityHealthAnalyticsCustomModule' +description: | + Represents an instance of a Security Health Analytics custom module, including + its full module name, display name, enablement state, and last updated time. 
+ You can create a custom module at the organization, folder, or project level. + Custom modules that you create at the organization or folder level are inherited + by the child folders and projects. +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Overview of custom modules for Security Health Analytics': 'https://cloud.google.com/security-command-center/docs/custom-modules-sha-overview' + api: 'https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/projects.locations.securityHealthAnalyticsCustomModules' +base_url: 'projects/{{project}}/locations/{{location}}/securityHealthAnalyticsCustomModules' +self_link: 'projects/{{project}}/locations/{{location}}/securityHealthAnalyticsCustomModules/{{name}}' +mutex: 'projects/{{project}}/locations/{{location}}/securityHealthAnalyticsCustomModules' +update_verb: :PATCH +update_mask: true +examples: + - !ruby/object:Provider::Terraform::Examples + name: "scc_management_organization_project_security_health_analytics_custom_module_basic" + primary_resource_id: "example" + skip_test: true + vars: + display_name: basic_custom_module + - !ruby/object:Provider::Terraform::Examples + name: "scc_management_organization_project_security_health_analytics_custom_module_full" + primary_resource_id: "example" + skip_test: true + vars: + display_name: full_custom_module + +parameters: + - !ruby/object:Api::Type::String + name: 'location' + immutable: true + required: false + url_param_only: true + default_value: 'global' + description: | + Location ID of the parent organization. If not provided, 'global' will be used as the default location. + +properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + custom_flatten: templates/terraform/custom_flatten/name_from_self_link.erb + description: | + The resource name of the custom module. 
Its format is "projects/{project}/locations/{location}/securityHealthAnalyticsCustomModules/{securityHealthAnalyticsCustomModule}". + The id {securityHealthAnalyticsCustomModule} is server-generated and is not user settable. It will be a numeric id containing 1-20 digits. + - !ruby/object:Api::Type::String + name: 'displayName' + immutable: true + required: false + # API error for invalid display names is just "INVALID_ARGUMENT" with no details + validation: !ruby/object:Provider::Terraform::Validation + function: 'verify.ValidateRegexp(`^[a-z][\w_]{0,127}$`)' + description: | + The display name of the Security Health Analytics custom module. This + display name becomes the finding category for all findings that are + returned by this custom module. The display name must be between 1 and + 128 characters, start with a lowercase letter, and contain alphanumeric + characters or underscores only. + - !ruby/object:Api::Type::Enum + name: 'enablementState' + required: false + description: | + The enablement state of the custom module. + values: + - :ENABLED + - :DISABLED + - !ruby/object:Api::Type::String + name: 'updateTime' + output: true + description: | + The time at which the custom module was last updated. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and + up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::String + name: 'lastEditor' + output: true + description: | + The editor that last updated the custom module. + - !ruby/object:Api::Type::String + name: 'ancestorModule' + output: true + description: | + If empty, indicates that the custom module was created in the organization,folder, + or project in which you are viewing the custom module. Otherwise, ancestor_module + specifies the organization or folder from which the custom module is inherited. 
+ - !ruby/object:Api::Type::NestedObject + name: 'customConfig' + required: false + description: | + The user specified custom configuration for the module. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'predicate' + required: true + description: | + The CEL expression to evaluate to produce findings. When the expression evaluates + to true against a resource, a finding is generated. + properties: + - !ruby/object:Api::Type::String + name: 'expression' + required: true + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + - !ruby/object:Api::Type::NestedObject + name: 'customOutput' + description: | + Custom output properties. + properties: + - !ruby/object:Api::Type::Array + name: 'properties' + description: | + A list of custom output properties to add to the finding. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the property for the custom output. + - !ruby/object:Api::Type::NestedObject + name: 'valueExpression' + description: | + The CEL expression for the custom output. A resource property can be specified + to return the value of the property or a text string enclosed in quotation marks. 
+ properties: + - !ruby/object:Api::Type::String + name: 'expression' + required: true + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + - !ruby/object:Api::Type::NestedObject + name: 'resourceSelector' + required: true + description: | + The resource types that the custom module operates on. Each custom module + can specify up to 5 resource types. + properties: + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + required: true + description: | + The resource types to run the detector on. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'severity' + required: true + description: | + The severity to assign to findings generated by the module. + values: + - :CRITICAL + - :HIGH + - :MEDIUM + - :LOW + - !ruby/object:Api::Type::String + name: 'description' + description: | + Text that describes the vulnerability or misconfiguration that the custom + module detects. This explanation is returned with each finding instance to + help investigators understand the detected issue. The text must be enclosed in quotation marks. + - !ruby/object:Api::Type::String + name: 'recommendation' + required: true + description: | + An explanation of the recommended steps that security teams can take to resolve + the detected issue. 
This explanation is returned with each finding generated by + this module in the nextSteps property of the finding JSON. diff --git a/mmv1/templates/terraform/examples/scc_management_organization_project_security_health_analytics_custom_module_basic.tf.erb b/mmv1/templates/terraform/examples/scc_management_organization_project_security_health_analytics_custom_module_basic.tf.erb new file mode 100644 index 000000000000..f023f5490fc3 --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_management_organization_project_security_health_analytics_custom_module_basic.tf.erb @@ -0,0 +1,18 @@ +resource "google_scc_management_project_security_health_analytics_custom_module" "<%= ctx[:primary_resource_id] %>" { + location = "global" + display_name = "<%= ctx[:vars]['display_name'] %>" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/scc_management_organization_project_security_health_analytics_custom_module_full.tf.erb b/mmv1/templates/terraform/examples/scc_management_organization_project_security_health_analytics_custom_module_full.tf.erb new file mode 100644 index 000000000000..78c99e2819b2 --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_management_organization_project_security_health_analytics_custom_module_full.tf.erb @@ -0,0 +1,32 @@ +resource "google_scc_management_project_security_health_analytics_custom_module" "<%= ctx[:primary_resource_id] %>" { + location = "global" + display_name = "<%= ctx[:vars]['display_name'] %>" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_project_security_health_analytics_custom_module_test.go b/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_project_security_health_analytics_custom_module_test.go new file mode 100644 index 000000000000..4df57339138c --- /dev/null +++ 
b/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_project_security_health_analytics_custom_module_test.go @@ -0,0 +1,196 @@ +package securitycentermanagement_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Custom Module tests cannot be run in parallel without running into 409 Conflict reponses. +// Run them as individual steps of an update test instead. +func TestAccSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "location": "global", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCheckSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModule_sccProjectCustomModuleBasicExample(context), + }, + { + ResourceName: "google_scc_management_project_security_health_analytics_custom_module.example", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccCheckSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModule_sccProjectCustomModuleFullExample(context), + }, + { + ResourceName: "google_scc_management_project_security_health_analytics_custom_module.example", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccCheckSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModule_sccProjectCustomModuleUpdate(context), + }, + { + ResourceName: "google_scc_management_project_security_health_analytics_custom_module.example", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModule_sccProjectCustomModuleBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_management_project_security_health_analytics_custom_module" "example" { + display_name = "tf_test_basic_custom_module%{random_suffix}" + enablement_state = "ENABLED" + location = "%{location}" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } +} +`, context) +} + +func testAccCheckSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModule_sccProjectCustomModuleFullExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_management_project_security_health_analytics_custom_module" "example" { + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "ENABLED" + location = "%{location}" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} +`, context) +} + +func testAccCheckSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModule_sccProjectCustomModuleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_management_project_security_health_analytics_custom_module" "example" { + location = "%{location}" + display_name = "full_custom_module" + enablement_state = "DISABLED" + custom_config { + predicate { + expression = "resource.name == \"updated-name\"" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + custom_output { + properties { + name = "violation" + value_expression { + expression = "resource.name" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location 
of the expression" + } + } + } + resource_selector { + resource_types = [ + "compute.googleapis.com/Instance", + ] + } + severity = "CRITICAL" + description = "Updated description of the custom module" + recommendation = "Updated steps to resolve violation" + } +} +`, context) +} + +func testAccCheckSecurityCenterManagementProjectSecurityHealthAnalyticsCustomModuleDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_scc_management_project_security_health_analytics_custom_module" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + location := rs.Primary.Attributes["location"] + + url, err := tpgresource.ReplaceVarsForTest(config, rs, fmt.Sprintf( + "{{SecurityCenterBasePath}}projects/{{project}}/locations/%s/securityHealthAnalyticsCustomModules/{{name}}", location)) + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("SecurityCenterManagementProjectSecurityHealthAnalyticsCustomModule still exists at %s", url) + } + } + + return nil + } +} From fe5261738873baeb94443099ccd7fdcc1e3ad1c9 Mon Sep 17 00:00:00 2001 From: James Jung Date: Fri, 7 Jun 2024 16:19:32 -0700 Subject: [PATCH 095/356] Add terraform support for edgecontainer maintenance cluster's exclusion windows (#10804) --- mmv1/products/edgecontainer/Cluster.yaml | 31 ++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/mmv1/products/edgecontainer/Cluster.yaml b/mmv1/products/edgecontainer/Cluster.yaml index 73db23b5f82c..6c83007c351e 100644 --- a/mmv1/products/edgecontainer/Cluster.yaml +++ 
b/mmv1/products/edgecontainer/Cluster.yaml @@ -256,6 +256,37 @@ properties: An RRULE (https://tools.ietf.org/html/rfc5545#section-3.8.5.3) for how this window recurs. They go on for the span of time between the start and end time. + - !ruby/object:Api::Type::Array + name: "maintenanceExclusions" + required: false + description: | + Exclusions to automatic maintenance. Non-emergency maintenance should not occur + in these windows. Each exclusion has a unique name and may be active or expired. + The max number of maintenance exclusions allowed at a given time is 3. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::NestedObject + name: "window" + default_from_api: true + description: | + Represents an arbitrary window of time. + properties: + - !ruby/object:Api::Type::Time + name: "startTime" + description: | + The time that the window first starts. + default_from_api: true + - !ruby/object:Api::Type::Time + name: "endTime" + description: | + The time that the window ends. The end time must take place after the + start time. + default_from_api: true + - !ruby/object:Api::Type::String + name: "id" + default_from_api: true + description: | + A unique (per cluster) id for the window. 
- !ruby/object:Api::Type::String name: "controlPlaneVersion" description: | From 9d4effd9df41201d01194c5a06595745878487c4 Mon Sep 17 00:00:00 2001 From: Luca Prete Date: Mon, 10 Jun 2024 15:11:20 +0200 Subject: [PATCH 096/356] Add field update tests for google_network_securtiy_tls_inspection_policy (#10729) Co-authored-by: Luca Prete --- ...security_tls_inspection_policy_test.go.erb | 340 +++++++++++++----- 1 file changed, 259 insertions(+), 81 deletions(-) diff --git a/mmv1/third_party/terraform/services/networksecurity/resource_network_security_tls_inspection_policy_test.go.erb b/mmv1/third_party/terraform/services/networksecurity/resource_network_security_tls_inspection_policy_test.go.erb index 9101b3124076..f59072ffa828 100644 --- a/mmv1/third_party/terraform/services/networksecurity/resource_network_security_tls_inspection_policy_test.go.erb +++ b/mmv1/third_party/terraform/services/networksecurity/resource_network_security_tls_inspection_policy_test.go.erb @@ -1,56 +1,61 @@ <% autogen_exception -%> package networksecurity_test +<% unless version == 'ga' -%> import ( - "fmt" - "testing" + "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" ) func TestAccNetworkSecurityTlsInspectionPolicy_update(t *testing.T){ t.Parallel() - tlsInspectionPolicyName := fmt.Sprintf("tf-test-tls-inspection-policy-%s", acctest.RandString(t, 10)) - caPoolName := fmt.Sprintf("tf-test-tls-ca-pool-%s", acctest.RandString(t, 10)) - certificateAuthorityName := fmt.Sprintf("tf-test-tls-certificate-authority-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "projectNumber": envvar.GetTestProjectNumberFromEnv(), + "randomSuffix": acctest.RandString(t, 10), + } acctest.VcrTest(t, 
resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: testAccCheckNetworkSecurityTlsInspectionPolicyDestroyProducer(t), - Steps: []resource.TestStep{ - { - Config: testAccNetworkSecurityTlsInspectionPolicy_basic(caPoolName, certificateAuthorityName, tlsInspectionPolicyName), - }, - { - ResourceName: "google_network_security_tls_inspection_policy.foobar", - ImportState: true, - ImportStateVerify: true, - }, - { - Config: testAccNetworkSecurityTlsInspectionPolicy_update(caPoolName, certificateAuthorityName, tlsInspectionPolicyName), - }, - { - ResourceName: "google_network_security_tls_inspection_policy.foobar", - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func testAccNetworkSecurityTlsInspectionPolicy_basic(caPoolName, certificateAuthorityName, tlsInspectionPolicyName string) string { - return fmt.Sprintf(` + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckNetworkSecurityTlsInspectionPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecurityTlsInspectionPolicy_basic(context), + }, + { + ResourceName: "google_network_security_tls_inspection_policy.default", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNetworkSecurityTlsInspectionPolicy_update(context), + }, + { + ResourceName: "google_network_security_tls_inspection_policy.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccNetworkSecurityTlsInspectionPolicy_basic(context map[string]interface{}) string { + return acctest.Nprintf(` resource "google_privateca_ca_pool" "default" { - name = "%s" + provider = google-beta + name = "tf-test-cap-%{randomSuffix}" location = "us-central1" tier = "DEVOPS" + publishing_options { publish_ca_cert = false - publish_crl = false + publish_crl = false } + 
issuance_policy { maximum_lifetime = "1209600s" baseline_values { @@ -67,21 +72,22 @@ resource "google_privateca_ca_pool" "default" { } } - resource "google_privateca_certificate_authority" "default" { - pool = google_privateca_ca_pool.default.name - certificate_authority_id = "%s" - location = "us-central1" - lifetime = "86400s" - type = "SELF_SIGNED" - deletion_protection = false - skip_grace_period = true + provider = google-beta + pool = google_privateca_ca_pool.default.name + certificate_authority_id = "tf-test-ca-%{randomSuffix}" + location = "us-central1" + lifetime = "86400s" + type = "SELF_SIGNED" + deletion_protection = false + skip_grace_period = true ignore_active_certificates_on_deletion = true + config { subject_config { subject { organization = "Test LLC" - common_name = "my-ca" + common_name = "my-ca" } } x509_config { @@ -91,7 +97,7 @@ resource "google_privateca_certificate_authority" "default" { key_usage { base_key_usage { cert_sign = true - crl_sign = true + crl_sign = true } extended_key_usage { server_auth = false @@ -99,38 +105,89 @@ resource "google_privateca_certificate_authority" "default" { } } } + key_spec { algorithm = "RSA_PKCS1_4096_SHA256" } } -data "google_project" "project" {} +resource "google_project_service_identity" "default" { + provider = google-beta + service = "networksecurity.googleapis.com" +} -resource "google_privateca_ca_pool_iam_member" "tls_inspection_permission" { - ca_pool = google_privateca_ca_pool.default.id - role = "roles/privateca.certificateManager" - member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-networksecurity.iam.gserviceaccount.com" +resource "google_privateca_ca_pool_iam_member" "default" { + provider = google-beta + ca_pool = google_privateca_ca_pool.default.id + role = "roles/privateca.certificateManager" + member = "serviceAccount:${google_project_service_identity.default.email}" } -resource "google_network_security_tls_inspection_policy" "foobar" { - name = "%s" - 
location = "us-central1" - ca_pool = google_privateca_ca_pool.default.id - depends_on = [google_privateca_ca_pool.default, google_privateca_certificate_authority.default, google_privateca_ca_pool_iam_member.tls_inspection_permission] +resource "google_certificate_manager_trust_config" "default" { + provider = google-beta + name = "tf-test-tc-%{randomSuffix}" + description = "sample trust config description" + location = "us-central1" + + trust_stores { + trust_anchors { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + } +} + +resource "google_network_security_tls_inspection_policy" "default" { + provider = google-beta + name = "tf-test-tip-%{randomSuffix}" + location = "us-central1" + ca_pool = google_privateca_ca_pool.default.id + exclude_public_ca_set = false + min_tls_version = "TLS_1_0" + trust_config = google_certificate_manager_trust_config.default.id + tls_feature_profile = "PROFILE_CUSTOM" + + custom_tls_features = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_AES_256_GCM_SHA384" + ] + + depends_on = [ + google_privateca_certificate_authority.default, + google_privateca_ca_pool_iam_member.default + ] } -`, caPoolName, certificateAuthorityName, tlsInspectionPolicyName) +`, context) } -func testAccNetworkSecurityTlsInspectionPolicy_update(caPoolName, certificateAuthorityName, tlsInspectionPolicyName string) string { - return 
fmt.Sprintf(` +func testAccNetworkSecurityTlsInspectionPolicy_update(context map[string]interface{}) string { + return acctest.Nprintf(` resource "google_privateca_ca_pool" "default" { - name = "%s" - location = "us-central1" + provider = google-beta + name = "tf-test-cap-%{randomSuffix}" + location = "us-central1" tier = "DEVOPS" + publishing_options { publish_ca_cert = false - publish_crl = false + publish_crl = false } + issuance_policy { maximum_lifetime = "1209600s" baseline_values { @@ -147,21 +204,49 @@ resource "google_privateca_ca_pool" "default" { } } +resource "google_privateca_ca_pool" "default_updated" { + provider = google-beta + name = "tf-test-cap-updated-%{randomSuffix}" + location = "us-central1" + tier = "DEVOPS" + + publishing_options { + publish_ca_cert = false + publish_crl = false + } + + issuance_policy { + maximum_lifetime = "1209600s" + baseline_values { + ca_options { + is_ca = false + } + key_usage { + base_key_usage {} + extended_key_usage { + server_auth = true + } + } + } + } +} resource "google_privateca_certificate_authority" "default" { - pool = google_privateca_ca_pool.default.name - certificate_authority_id = "%s" - location = "us-central1" - lifetime = "86400s" - type = "SELF_SIGNED" - deletion_protection = false - skip_grace_period = true + provider = google-beta + pool = google_privateca_ca_pool.default.name + certificate_authority_id = "tf-test-ca-%{randomSuffix}" + location = "us-central1" + lifetime = "86400s" + type = "SELF_SIGNED" + deletion_protection = false + skip_grace_period = true ignore_active_certificates_on_deletion = true + config { subject_config { subject { organization = "Test LLC" - common_name = "my-ca" + common_name = "my-ca" } } x509_config { @@ -171,7 +256,7 @@ resource "google_privateca_certificate_authority" "default" { key_usage { base_key_usage { cert_sign = true - crl_sign = true + crl_sign = true } extended_key_usage { server_auth = false @@ -179,25 +264,118 @@ resource 
"google_privateca_certificate_authority" "default" { } } } + key_spec { algorithm = "RSA_PKCS1_4096_SHA256" } } -data "google_project" "project" {} +resource "google_privateca_certificate_authority" "default_updated" { + provider = google-beta + pool = google_privateca_ca_pool.default_updated.name + certificate_authority_id = "tf-test-ca-%{randomSuffix}" + location = "us-central1" + lifetime = "86400s" + type = "SELF_SIGNED" + deletion_protection = false + skip_grace_period = true + ignore_active_certificates_on_deletion = true + + config { + subject_config { + subject { + organization = "Test LLC" + common_name = "my-ca" + } + } + x509_config { + ca_options { + is_ca = true + } + key_usage { + base_key_usage { + cert_sign = true + crl_sign = true + } + extended_key_usage { + server_auth = false + } + } + } + } -resource "google_privateca_ca_pool_iam_member" "tls_inspection_permission" { - ca_pool = google_privateca_ca_pool.default.id - role = "roles/privateca.certificateManager" - member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-networksecurity.iam.gserviceaccount.com" + key_spec { + algorithm = "RSA_PKCS1_4096_SHA256" + } +} + +resource "google_project_service_identity" "default" { + provider = google-beta + service = "networksecurity.googleapis.com" +} + +resource "google_privateca_ca_pool_iam_member" "default" { + provider = google-beta + ca_pool = google_privateca_ca_pool.default.id + role = "roles/privateca.certificateManager" + member = "serviceAccount:${google_project_service_identity.default.email}" } -resource "google_network_security_tls_inspection_policy" "foobar" { - name = "%s" +resource "google_privateca_ca_pool_iam_member" "default_updated" { + provider = google-beta + ca_pool = google_privateca_ca_pool.default_updated.id + role = "roles/privateca.certificateManager" + member = "serviceAccount:${google_project_service_identity.default.email}" +} + +resource "google_certificate_manager_trust_config" "default" { + provider 
= google-beta + name = "tf-test-tc-%{randomSuffix}" + description = "sample trust config description" location = "us-central1" - description = "my tls inspection policy updated" - ca_pool = google_privateca_ca_pool.default.id - depends_on = [google_privateca_ca_pool.default, google_privateca_certificate_authority.default, google_privateca_ca_pool_iam_member.tls_inspection_permission] + + trust_stores { + trust_anchors { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + } } -`, caPoolName, certificateAuthorityName, tlsInspectionPolicyName) + +resource "google_certificate_manager_trust_config" "default_updated" { + provider = google-beta + name = "tf-test-tc-updated-%{randomSuffix}" + description = "another sample trust config description" + location = "us-central1" + + trust_stores { + trust_anchors { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + } } + +resource "google_network_security_tls_inspection_policy" "default" { + provider = google-beta + name = "tf-test-tip-%{randomSuffix}" + location = "us-central1" + description = "my tls inspection policy updated" + ca_pool = google_privateca_ca_pool.default_updated.id + exclude_public_ca_set = true + min_tls_version = "TLS_1_2" + trust_config = google_certificate_manager_trust_config.default_updated.id + + depends_on = [ + google_privateca_certificate_authority.default_updated, + google_privateca_ca_pool_iam_member.default_updated + ] +} +`, context) +} + +<% end -%> From 412117bc2b059f7974f1392c9658c981266d34ee Mon Sep 17 00:00:00 2001 From: Charles Leon Date: Mon, 10 Jun 2024 07:38:07 -0700 Subject: [PATCH 097/356] Revert "Update Documentation for ACM Service Perimeter resources to reflect Granular Controls group support" (#10914) --- .../ServicePerimeter.yaml | 31 ++++------ .../ServicePerimeterEgressPolicy.yaml | 4 +- 
.../ServicePerimeterIngressPolicy.yaml | 6 +- .../ServicePerimeters.yaml | 28 ++++----- ...service_perimeter_granular_controls.tf.erb | 59 ------------------- 5 files changed, 29 insertions(+), 99 deletions(-) delete mode 100644 mmv1/templates/terraform/examples/access_context_manager_service_perimeter_granular_controls.tf.erb diff --git a/mmv1/products/accesscontextmanager/ServicePerimeter.yaml b/mmv1/products/accesscontextmanager/ServicePerimeter.yaml index d6b02a13d48d..b79c83c666cf 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeter.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeter.yaml @@ -71,9 +71,6 @@ examples: primary_resource_id: 'service-perimeter' vars: service_perimeter_name: 'restrict_bigquery_dryrun_storage' - - !ruby/object:Provider::Terraform::Examples - name: 'access_context_manager_service_perimeter_granular_controls' - skip_test: true custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/access_level_never_send_parent.go.erb custom_import: templates/terraform/custom_import/set_access_policy_parent_from_self_link.go.erb @@ -243,10 +240,9 @@ properties: item_type: Api::Type::String is_set: true description: | - 'A list of identities that are allowed access through this `IngressPolicy`. - To specify an identity or identity group, use the IAM v1 - format specified [here](https://cloud.google.com/iam/docs/principal-identifiers.md#v1). - The following prefixes are supprted: user, group, serviceAccount, principal, and principalSet.' + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. - !ruby/object:Api::Type::Array name: 'sources' description: | @@ -368,10 +364,9 @@ properties: - !ruby/object:Api::Type::Array name: 'identities' description: | - 'A list of identities that are allowed access through this `EgressPolicy`. 
- To specify an identity or identity group, use the IAM v1 - format specified [here](https://cloud.google.com/iam/docs/principal-identifiers.md#v1). - The following prefixes are supprted: user, group, serviceAccount, principal, and principalSet.' + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. The email address should + represent individual user or service account only. is_set: true item_type: Api::Type::String - !ruby/object:Api::Type::NestedObject @@ -533,10 +528,9 @@ properties: item_type: Api::Type::String is_set: true description: | - 'A list of identities that are allowed access through this `IngressPolicy`. - To specify an identity or identity group, use the IAM v1 - format specified [here](https://cloud.google.com/iam/docs/principal-identifiers.md#v1). - The following prefixes are supprted: user, group, serviceAccount, principal, and principalSet.' + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. - !ruby/object:Api::Type::Array name: 'sources' description: | @@ -658,10 +652,9 @@ properties: - !ruby/object:Api::Type::Array name: 'identities' description: | - 'A list of identities that are allowed access through this `EgressPolicy`. - To specify an identity or identity group, use the IAM v1 - format specified [here](https://cloud.google.com/iam/docs/principal-identifiers.md#v1). - The following prefixes are supprted: user, group, serviceAccount, principal, and principalSet.' + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. The email address should + represent individual user or service account only. 
item_type: Api::Type::String is_set: true - !ruby/object:Api::Type::NestedObject diff --git a/mmv1/products/accesscontextmanager/ServicePerimeterEgressPolicy.yaml b/mmv1/products/accesscontextmanager/ServicePerimeterEgressPolicy.yaml index 5c26b6c85bc8..2d2511ab8aec 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeterEgressPolicy.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeterEgressPolicy.yaml @@ -84,8 +84,8 @@ properties: name: 'identities' description: | A list of identities that are allowed access through this `EgressPolicy`. - Should be in the format of an email address. The email address should - represent an individual user, service account, or Google group. + Should be in the format of email address. The email address should + represent individual user or service account only. item_type: Api::Type::String - !ruby/object:Api::Type::Array name: 'sources' diff --git a/mmv1/products/accesscontextmanager/ServicePerimeterIngressPolicy.yaml b/mmv1/products/accesscontextmanager/ServicePerimeterIngressPolicy.yaml index c17f8a3e0471..a008c94e83e2 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeterIngressPolicy.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeterIngressPolicy.yaml @@ -86,9 +86,9 @@ properties: name: 'identities' item_type: Api::Type::String description: | - A list of identities that are allowed access through this `IngressPolicy`. - Should be in the format of an email address. The email address should represent - an individual user, service account, or Google group. + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. 
- !ruby/object:Api::Type::Array name: 'sources' description: | diff --git a/mmv1/products/accesscontextmanager/ServicePerimeters.yaml b/mmv1/products/accesscontextmanager/ServicePerimeters.yaml index 655c3545a2e6..2b941289d41a 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeters.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeters.yaml @@ -220,10 +220,9 @@ properties: is_set: true item_type: Api::Type::String description: | - 'A list of identities that are allowed access through this `IngressPolicy`. - To specify an identity or identity group, use the IAM v1 format - specified [here](https://cloud.google.com/iam/docs/principal-identifiers.md#v1). - The following prefixes are supprted: user, group, serviceAccount, principal, and principalSet.' + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. - !ruby/object:Api::Type::Array name: 'sources' description: | @@ -330,10 +329,9 @@ properties: - !ruby/object:Api::Type::Array name: 'identities' description: | - 'A list of identities that are allowed access through this `EgressPolicy`. - To specify an identity or identity group, use the IAM v1 format - specified [here](https://cloud.google.com/iam/docs/principal-identifiers.md#v1). - The following prefixes are supprted: user, group, serviceAccount, principal, and principalSet.' + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. The email address should + represent individual user or service account only. is_set: true item_type: Api::Type::String - !ruby/object:Api::Type::Array @@ -516,10 +514,9 @@ properties: is_set: true item_type: Api::Type::String description: | - 'A list of identities that are allowed access through this `IngressPolicy`. 
- To specify an identity or identity group, use the IAM v1 format - specified [here](https://cloud.google.com/iam/docs/principal-identifiers.md#v1). - The following prefixes are supprted: user, group, serviceAccount, principal, and principalSet.' + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. - !ruby/object:Api::Type::Array name: 'sources' description: | @@ -626,10 +623,9 @@ properties: - !ruby/object:Api::Type::Array name: 'identities' description: | - 'A list of identities that are allowed access through this `EgressPolicy`. - To specify an identity or identity group, use the IAM v1 format - specified [here](https://cloud.google.com/iam/docs/principal-identifiers.md#v1). - The following prefixes are supprted: user, group, serviceAccount, principal, and principalSet.' + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. The email address should + represent individual user or service account only. 
item_type: Api::Type::String is_set: true - !ruby/object:Api::Type::Array diff --git a/mmv1/templates/terraform/examples/access_context_manager_service_perimeter_granular_controls.tf.erb b/mmv1/templates/terraform/examples/access_context_manager_service_perimeter_granular_controls.tf.erb deleted file mode 100644 index 728b3a87ca83..000000000000 --- a/mmv1/templates/terraform/examples/access_context_manager_service_perimeter_granular_controls.tf.erb +++ /dev/null @@ -1,59 +0,0 @@ -resource "google_access_context_manager_access_policy" "access-policy" { - parent = "organizations/123456789" - title = "Policy with Granular Controls Group Support" -} - -resource "google_access_context_manager_service_perimeter" "test-access" { - parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" - name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/servicePerimeters/%s" - title = "%s" - perimeter_type = "PERIMETER_TYPE_REGULAR" - status { - restricted_services = ["bigquery.googleapis.com", "storage.googleapis.com"] - - vpc_accessible_services { - enable_restriction = true - allowed_services = ["bigquery.googleapis.com", "storage.googleapis.com"] - } - - ingress_policies { - ingress_from { - sources { - access_level = google_access_context_manager_access_level.test-access.name - } - identities = ["group:database-admins@google.com"] - identities = ["principal://iam.googleapis.com/locations/global/workforcePools/1234/subject/janedoe"] - identities = ["principalSet://iam.googleapis.com/locations/global/workforcePools/1234/*"] - } - - ingress_to { - resources = [ "*" ] - operations { - service_name = "storage.googleapis.com" - - method_selectors { - method = "google.storage.objects.create" - } - } - } - } - - egress_policies { - egress_from { - identities = ["group:database-admins@google.com"] - identities = ["principal://iam.googleapis.com/locations/global/workforcePools/1234/subject/janedoe"] - identities = 
["principalSet://iam.googleapis.com/locations/global/workforcePools/1234/*"] - } - egress_to { - resources = [ "*" ] - operations { - service_name = "storage.googleapis.com" - - method_selectors { - method = "google.storage.objects.create" - } - } - } - } - } -} From cc22b1c5cf452bcac3a404a4ea1c1eafd04f6724 Mon Sep 17 00:00:00 2001 From: Sam Levenick Date: Mon, 10 Jun 2024 10:52:17 -0400 Subject: [PATCH 098/356] Add copyright to fix rubocop (#10928) --- mmv1/products/managedkafka/Cluster.yaml | 13 +++++++++++++ mmv1/products/managedkafka/product.yaml | 13 +++++++++++++ 2 files changed, 26 insertions(+) diff --git a/mmv1/products/managedkafka/Cluster.yaml b/mmv1/products/managedkafka/Cluster.yaml index 65a10796105f..f193cd96c7c1 100644 --- a/mmv1/products/managedkafka/Cluster.yaml +++ b/mmv1/products/managedkafka/Cluster.yaml @@ -1,3 +1,16 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + --- !ruby/object:Api::Resource base_url: projects/{{project}}/locations/{{location}}/clusters create_url: projects/{{project}}/locations/{{location}}/clusters?clusterId={{cluster_id}} diff --git a/mmv1/products/managedkafka/product.yaml b/mmv1/products/managedkafka/product.yaml index 46d2c6d023e8..e52dfb9b9449 100644 --- a/mmv1/products/managedkafka/product.yaml +++ b/mmv1/products/managedkafka/product.yaml @@ -1,3 +1,16 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + --- !ruby/object:Api::Product versions: - !ruby/object:Api::Product::Version From 4b497e390392a5f05466b2348db676d6e0de3531 Mon Sep 17 00:00:00 2001 From: vijaykanthm Date: Mon, 10 Jun 2024 08:38:36 -0700 Subject: [PATCH 099/356] Add Resource SCC Management Organization Security Health Analytics Custom Module (#10815) --- ...onSecurityHealthAnalyticsCustomModule.yaml | 225 ++++++++++++++++++ ...ealth_analytics_custom_module_basic.tf.erb | 19 ++ ...health_analytics_custom_module_full.tf.erb | 33 +++ ...ity_health_analytics_custom_module_test.go | 205 ++++++++++++++++ 4 files changed, 482 insertions(+) create mode 100644 mmv1/products/securitycentermanagement/OrganizationSecurityHealthAnalyticsCustomModule.yaml create mode 100644 mmv1/templates/terraform/examples/scc_management_organization_security_health_analytics_custom_module_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/scc_management_organization_security_health_analytics_custom_module_full.tf.erb create mode 100644 mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_security_health_analytics_custom_module_test.go diff --git a/mmv1/products/securitycentermanagement/OrganizationSecurityHealthAnalyticsCustomModule.yaml b/mmv1/products/securitycentermanagement/OrganizationSecurityHealthAnalyticsCustomModule.yaml new file mode 100644 index 000000000000..efac21582cdd --- /dev/null +++ 
b/mmv1/products/securitycentermanagement/OrganizationSecurityHealthAnalyticsCustomModule.yaml @@ -0,0 +1,225 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'OrganizationSecurityHealthAnalyticsCustomModule' +description: | + Represents an instance of a Security Health Analytics custom module, including + its full module name, display name, enablement state, and last updated time. + You can create a custom module at the organization, folder, or project level. + Custom modules that you create at the organization or folder level are inherited + by the child folders and projects. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Overview of custom modules for Security Health Analytics': 'https://cloud.google.com/security-command-center/docs/custom-modules-sha-overview' + api: 'https://cloud.google.com/security-command-center/docs/reference/security-center-management/rest/v1/organizations.locations.securityHealthAnalyticsCustomModules' +base_url: 'organizations/{{organization}}/locations/{{location}}/securityHealthAnalyticsCustomModules' +self_link: 'organizations/{{organization}}/locations/{{location}}/securityHealthAnalyticsCustomModules/{{name}}' +mutex: 'organizations/{{organization}}/locations/{{location}}/securityHealthAnalyticsCustomModules' +update_verb: :PATCH +update_mask: true +examples: + - !ruby/object:Provider::Terraform::Examples + name: "scc_management_organization_security_health_analytics_custom_module_basic" + primary_resource_id: "example" + skip_test: true + vars: + display_name: basic_custom_module + test_env_vars: + org_id: :ORG_ID + test_vars_overrides: + sleep: "true" + - !ruby/object:Provider::Terraform::Examples + name: "scc_management_organization_security_health_analytics_custom_module_full" + primary_resource_id: "example" + skip_test: true + vars: + display_name: full_custom_module + test_env_vars: + org_id: :ORG_ID + test_vars_overrides: + sleep: "true" + +parameters: + - !ruby/object:Api::Type::String + name: 'organization' + immutable: true + required: true + url_param_only: true + description: | + Numerical ID of the parent organization. + - !ruby/object:Api::Type::String + name: 'location' + immutable: true + required: false + url_param_only: true + default_value: 'global' + description: | + Location ID of the parent organization. If not provided, 'global' will be used as the default location. 
+ +properties: + - !ruby/object:Api::Type::String + name: 'name' + output: true + custom_flatten: templates/terraform/custom_flatten/name_from_self_link.erb + description: | + The resource name of the custom module. Its format is "organizations/{organization}/locations/{location}/securityHealthAnalyticsCustomModules/{securityHealthAnalyticsCustomModule}". + The id {securityHealthAnalyticsCustomModule} is server-generated and is not user settable. It will be a numeric id containing 1-20 digits. + - !ruby/object:Api::Type::String + name: 'displayName' + immutable: true + # API error for invalid display names is just "INVALID_ARGUMENT" with no details + validation: !ruby/object:Provider::Terraform::Validation + function: 'verify.ValidateRegexp(`^[a-z][\w_]{0,127}$`)' + description: | + The display name of the Security Health Analytics custom module. This + display name becomes the finding category for all findings that are + returned by this custom module. The display name must be between 1 and + 128 characters, start with a lowercase letter, and contain alphanumeric + characters or underscores only. + - !ruby/object:Api::Type::Enum + name: 'enablementState' + description: | + The enablement state of the custom module. + values: + - :ENABLED + - :DISABLED + - !ruby/object:Api::Type::String + name: 'updateTime' + output: true + description: | + The time at which the custom module was last updated. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and + up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + - !ruby/object:Api::Type::String + name: 'lastEditor' + output: true + description: | + The editor that last updated the custom module. + - !ruby/object:Api::Type::String + name: 'ancestorModule' + output: true + description: | + If empty, indicates that the custom module was created in the organization, folder, + or project in which you are viewing the custom module. 
Otherwise, ancestor_module + specifies the organization or folder from which the custom module is inherited. + - !ruby/object:Api::Type::NestedObject + name: 'customConfig' + description: | + The user specified custom configuration for the module. + properties: + - !ruby/object:Api::Type::NestedObject + name: 'predicate' + required: true + description: | + The CEL expression to evaluate to produce findings. When the expression evaluates + to true against a resource, a finding is generated. + properties: + - !ruby/object:Api::Type::String + name: 'expression' + required: true + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + - !ruby/object:Api::Type::NestedObject + name: 'customOutput' + description: | + Custom output properties. + properties: + - !ruby/object:Api::Type::Array + name: 'properties' + description: | + A list of custom output properties to add to the finding. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the property for the custom output. + - !ruby/object:Api::Type::NestedObject + name: 'valueExpression' + description: | + The CEL expression for the custom output. A resource property can be specified + to return the value of the property or a text string enclosed in quotation marks. 
+ properties: + - !ruby/object:Api::Type::String + name: 'expression' + required: true + description: | + Textual representation of an expression in Common Expression Language syntax. + - !ruby/object:Api::Type::String + name: 'title' + description: | + Title for the expression, i.e. a short string describing its purpose. This can + be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the expression. This is a longer text which describes the + expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: 'location' + description: | + String indicating the location of the expression for error reporting, e.g. a + file name and a position in the file. + - !ruby/object:Api::Type::NestedObject + name: 'resourceSelector' + required: true + description: | + The resource types that the custom module operates on. Each custom module + can specify up to 5 resource types. + properties: + - !ruby/object:Api::Type::Array + name: 'resourceTypes' + required: true + description: | + The resource types to run the detector on. + item_type: Api::Type::String + - !ruby/object:Api::Type::Enum + name: 'severity' + required: true + description: | + The severity to assign to findings generated by the module. + values: + - :CRITICAL + - :HIGH + - :MEDIUM + - :LOW + - !ruby/object:Api::Type::String + name: 'description' + description: | + Text that describes the vulnerability or misconfiguration that the custom + module detects. This explanation is returned with each finding instance to + help investigators understand the detected issue. The text must be enclosed in quotation marks. + - !ruby/object:Api::Type::String + name: 'recommendation' + required: true + description: | + An explanation of the recommended steps that security teams can take to resolve + the detected issue. 
This explanation is returned with each finding generated by + this module in the nextSteps property of the finding JSON. diff --git a/mmv1/templates/terraform/examples/scc_management_organization_security_health_analytics_custom_module_basic.tf.erb b/mmv1/templates/terraform/examples/scc_management_organization_security_health_analytics_custom_module_basic.tf.erb new file mode 100644 index 000000000000..75e06a99b102 --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_management_organization_security_health_analytics_custom_module_basic.tf.erb @@ -0,0 +1,19 @@ +resource "google_scc_management_organization_security_health_analytics_custom_module" "<%= ctx[:primary_resource_id] %>" { + organization = "<%= ctx[:test_env_vars]['org_id'] %>" + display_name = "<%= ctx[:vars]['display_name'] %>" + location = "global" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/scc_management_organization_security_health_analytics_custom_module_full.tf.erb b/mmv1/templates/terraform/examples/scc_management_organization_security_health_analytics_custom_module_full.tf.erb new file mode 100644 index 000000000000..57511f76d211 --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_management_organization_security_health_analytics_custom_module_full.tf.erb @@ -0,0 +1,33 @@ +resource "google_scc_management_organization_security_health_analytics_custom_module" "<%= ctx[:primary_resource_id] %>" { + organization = "<%= ctx[:test_env_vars]['org_id'] %>" + display_name = "<%= ctx[:vars]['display_name'] %>" + location = "global" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_security_health_analytics_custom_module_test.go b/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_security_health_analytics_custom_module_test.go new file mode 100644 index 000000000000..98400bf41509 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/securitycentermanagement/resource_scc_management_organization_security_health_analytics_custom_module_test.go @@ -0,0 +1,205 @@ +package securitycentermanagement_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Custom Module tests cannot be run in parallel without running into 409 Conflict reponses. +// Run them as individual steps of an update test instead. +func TestAccSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "location": "global", + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModule_sccOrganizationCustomModuleBasicExample(context), + }, + { + ResourceName: "google_scc_management_organization_security_health_analytics_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"organization", "location"}, + }, + { + Config: testAccSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModule_sccOrganizationCustomModuleFullExample(context), + }, + { + ResourceName: 
"google_scc_management_organization_security_health_analytics_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"organization", "location"}, + }, + { + Config: testAccSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModule_sccOrganizationCustomModuleUpdate(context), + }, + { + ResourceName: "google_scc_management_organization_security_health_analytics_custom_module.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"organization", "location"}, + }, + }, + }) +} + +func testAccSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModule_sccOrganizationCustomModuleBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_management_organization_security_health_analytics_custom_module" "example" { + organization = "%{org_id}" + location = "%{location}" + display_name = "tf_test_basic_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } +} +`, context) +} + +func testAccSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModule_sccOrganizationCustomModuleFullExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_management_organization_security_health_analytics_custom_module" "example" { + organization = "%{org_id}" + location = "%{location}" + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} +`, context) +} + +func testAccSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModule_sccOrganizationCustomModuleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_scc_management_organization_security_health_analytics_custom_module" "example" { + organization = "%{org_id}" + location = "%{location}" + display_name = "tf_test_full_custom_module%{random_suffix}" + enablement_state = "DISABLED" + custom_config { + predicate { + expression = "resource.name == \"updated-name\"" + title = "Updated expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + custom_output { + properties { + name = "violation" + value_expression { + expression = "resource.name" + title = "Updated 
expression title" + description = "Updated description of the expression" + location = "Updated location of the expression" + } + } + } + resource_selector { + resource_types = [ + "compute.googleapis.com/Instance", + ] + } + severity = "CRITICAL" + description = "Updated description of the custom module" + recommendation = "Updated steps to resolve violation" + } +} +`, context) +} + +func testAccCheckSecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModuleDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_scc_management_organization_security_health_analytics_custom_module" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + location := rs.Primary.Attributes["location"] + + url, err := tpgresource.ReplaceVarsForTest(config, rs, fmt.Sprintf( + "{{SecurityCenterBasePath}}organizations/{{organization}}/locations/%s/securityHealthAnalyticsCustomModules/{{name}}", location)) + + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("SecurityCenterManagementOrganizationSecurityHealthAnalyticsCustomModule still exists at %s", url) + } + } + + return nil + } +} From 909afce26d0f421d18d71367e7ec734d63e43e8b Mon Sep 17 00:00:00 2001 From: Jimmy xu Date: Mon, 10 Jun 2024 11:49:33 -0400 Subject: [PATCH 100/356] Bitbucket support for cloudbuild repos (#10789) Co-authored-by: Riley Karson --- mmv1/products/cloudbuildv2/Connection.yaml | 118 +++++++++++- .../resource_cloudbuildv2_connection_test.go | 174 ++++++++++++++++++ 2 files changed, 291 insertions(+), 1 
deletion(-) diff --git a/mmv1/products/cloudbuildv2/Connection.yaml b/mmv1/products/cloudbuildv2/Connection.yaml index 0678336e62b4..d4098206e000 100644 --- a/mmv1/products/cloudbuildv2/Connection.yaml +++ b/mmv1/products/cloudbuildv2/Connection.yaml @@ -26,7 +26,7 @@ async: !ruby/object:Api::OpAsync base_url: '{{op_id}}' update_verb: :PATCH description: | - A connection to a SCM like GitHub, GitHub Enterprise, Bitbucket Data Center or GitLab. + A connection to a SCM like GitHub, GitHub Enterprise, Bitbucket Data Center/Cloud or GitLab. exclude_tgc: true legacy_long_form_project: true iam_policy: !ruby/object:Api::Resource::IamPolicy @@ -84,6 +84,8 @@ properties: conflicts: - 'github_enterprise_config' - 'gitlab_config' + - 'bitbucket_cloud_config' + - 'bitbucket_data_center_config' properties: - !ruby/object:Api::Type::NestedObject name: authorizerCredential @@ -105,6 +107,8 @@ properties: conflicts: - 'github_config' - 'gitlab_config' + - 'bitbucket_cloud_config' + - 'bitbucket_data_center_config' description: Configuration for connections to an instance of GitHub Enterprise. properties: - !ruby/object:Api::Type::String @@ -145,6 +149,8 @@ properties: conflicts: - 'github_config' - 'github_enterprise_config' + - 'bitbucket_cloud_config' + - 'bitbucket_data_center_config' description: Configuration for connections to gitlab.com or an instance of GitLab Enterprise. properties: - !ruby/object:Api::Type::String @@ -201,6 +207,116 @@ properties: name: serverVersion description: Output only. Version of the GitLab Enterprise server running on the `host_uri`. output: true + - !ruby/object:Api::Type::NestedObject + name: bitbucketDataCenterConfig + conflicts: + - 'github_config' + - 'github_enterprise_config' + - 'bitbucket_cloud_config' + - 'gitlab_config' + description: Configuration for connections to Bitbucket Data Center. + properties: + - !ruby/object:Api::Type::String + name: hostUri + description: The URI of the Bitbucket Data Center host this connection is for. 
+ required: true + - !ruby/object:Api::Type::String + name: webhookSecretSecretVersion + description: Required. Immutable. SecretManager resource containing the webhook secret used to verify webhook events, formatted as `projects/*/secrets/*/versions/*`. + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - !ruby/object:Api::Type::NestedObject + name: readAuthorizerCredential + description: Required. A http access token with the `REPO_READ` access. + required: true + properties: + - !ruby/object:Api::Type::String + name: userTokenSecretVersion + description: 'Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.' + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - !ruby/object:Api::Type::String + name: username + description: Output only. The username associated to this token. + output: true + - !ruby/object:Api::Type::NestedObject + name: authorizerCredential + description: Required. A http access token with the `REPO_ADMIN` scope access. + required: true + properties: + - !ruby/object:Api::Type::String + name: userTokenSecretVersion + description: 'Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.' + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - !ruby/object:Api::Type::String + name: username + description: Output only. The username associated to this token. + output: true + - !ruby/object:Api::Type::NestedObject + name: serviceDirectoryConfig + description: Configuration for using Service Directory to privately connect to a Bitbucket Data Center. This should only be set if the Bitbucket Data Center is hosted on-premises and not reachable by public internet. 
If this field is left empty, calls to the Bitbucket Data Center will be made over the public internet. + properties: + - !ruby/object:Api::Type::String + name: service + description: 'Required. The Service Directory service name. Format: projects/{project}/locations/{location}/namespaces/{namespace}/services/{service}.' + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - !ruby/object:Api::Type::String + name: sslCa + description: SSL certificate to use for requests to the Bitbucket Data Center. + - !ruby/object:Api::Type::String + name: serverVersion + description: Output only. Version of the Bitbucket Data Center running on the `host_uri`. + output: true + - !ruby/object:Api::Type::NestedObject + name: bitbucketCloudConfig + conflicts: + - 'github_config' + - 'github_enterprise_config' + - 'gitlab_config' + - 'bitbucket_data_center_config' + description: Configuration for connections to Bitbucket Cloud. + properties: + - !ruby/object:Api::Type::String + name: workspace + description: The Bitbucket Cloud Workspace ID to be connected to Google Cloud Platform. + required: true + - !ruby/object:Api::Type::String + name: webhookSecretSecretVersion + description: Required. Immutable. SecretManager resource containing the webhook secret used to verify webhook events, formatted as `projects/*/secrets/*/versions/*`. + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - !ruby/object:Api::Type::NestedObject + name: readAuthorizerCredential + description: Required. An access token with the `repository` access. It can be either a workspace, project or repository access token. It's recommended to use a system account to generate the credentials. + required: true + properties: + - !ruby/object:Api::Type::String + name: userTokenSecretVersion + description: 'Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. 
Format: `projects/*/secrets/*/versions/*`.' + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - !ruby/object:Api::Type::String + name: username + description: Output only. The username associated to this token. + output: true + - !ruby/object:Api::Type::NestedObject + name: authorizerCredential + description: Required. An access token with the `webhook`, `repository`, `repository:admin` and `pullrequest` scope access. It can be either a workspace, project or repository access token. It's recommended to use a system account to generate these credentials. + required: true + properties: + - !ruby/object:Api::Type::String + name: userTokenSecretVersion + description: 'Required. A SecretManager resource containing the user token that authorizes the Cloud Build connection. Format: `projects/*/secrets/*/versions/*`.' + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - !ruby/object:Api::Type::String + name: username + description: Output only. The username associated to this token. + output: true - !ruby/object:Api::Type::NestedObject name: installationState description: Output only. Installation state of the Connection. 
diff --git a/mmv1/third_party/terraform/services/cloudbuildv2/resource_cloudbuildv2_connection_test.go b/mmv1/third_party/terraform/services/cloudbuildv2/resource_cloudbuildv2_connection_test.go index bdf4b9fce0a4..b99f00836e9d 100644 --- a/mmv1/third_party/terraform/services/cloudbuildv2/resource_cloudbuildv2_connection_test.go +++ b/mmv1/third_party/terraform/services/cloudbuildv2/resource_cloudbuildv2_connection_test.go @@ -327,6 +327,93 @@ func TestAccCloudbuildv2Connection_GlePrivUpdateConnection(t *testing.T) { }) } +func TestAccCloudbuildv2Connection_BbdcPrivConnection(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudbuildv2ConnectionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudbuildv2Connection_BbdcPrivConnection(context), + }, + { + ResourceName: "google_cloudbuildv2_connection.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + }, + }) +} + +func TestAccCloudbuildv2Connection_BbdcPrivUpdateConnection(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudbuildv2ConnectionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudbuildv2Connection_BbdcConnection(context), + }, + { + ResourceName: "google_cloudbuildv2_connection.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + 
}, + { + Config: testAccCloudbuildv2Connection_BbdcPrivConnection(context), + }, + { + ResourceName: "google_cloudbuildv2_connection.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + }, + }) +} + +func TestAccCloudbuildv2Connection_BbcConnection(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudbuildv2ConnectionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudbuildv2Connection_BbcConnection(context), + }, + { + ResourceName: "google_cloudbuildv2_connection.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + }, + }) +} + func testAccCloudbuildv2Connection_GheCompleteConnection(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_cloudbuildv2_connection" "primary" { @@ -675,5 +762,92 @@ resource "google_cloudbuildv2_connection" "primary" { } +`, context) +} + +func testAccCloudbuildv2Connection_BbdcConnection(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloudbuildv2_connection" "primary" { + location = "us-west1" + name = "tf-test-connection%{random_suffix}" + + bitbucket_data_center_config { + authorizer_credential { + user_token_secret_version = "projects/407304063574/secrets/bbdc-api-token/versions/latest" + } + + read_authorizer_credential { + user_token_secret_version = "projects/407304063574/secrets/bbdc-read-token/versions/latest" + } + + webhook_secret_secret_version = "projects/407304063574/secrets/bbdc-webhook-secret/versions/latest" + host_uri = "https://bitbucket-us-central.gcb-test.com" + } + + project = "%{project_name}" + 
annotations = {} +} + + +`, context) +} + +func testAccCloudbuildv2Connection_BbdcPrivConnection(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloudbuildv2_connection" "primary" { + location = "us-west1" + name = "tf-test-connection%{random_suffix}" + + bitbucket_data_center_config { + authorizer_credential { + user_token_secret_version = "projects/407304063574/secrets/private-bbdc-api-token/versions/1" + } + + read_authorizer_credential { + user_token_secret_version = "projects/407304063574/secrets/private-bbdc-read-token/versions/1" + } + + webhook_secret_secret_version = "projects/407304063574/secrets/bbdc-webhook-secret/versions/latest" + host_uri = "https://private-bitbucket.proctor-test.com" + + service_directory_config { + service = "projects/407304063574/locations/us-west1/namespaces/private-conn/services/private-bitbucket" + } + + ssl_ca = "-----BEGIN CERTIFICATE-----\nMIIDjDCCAnSgAwIBAgIUBh5+3oeT1vmUSS5rSNaFfy6igSAwDQYJKoZIhvcNAQEL\nBQAwVzELMAkGA1UEBhMCVVMxGzAZBgNVBAoMEkdvb2dsZSBDbG91ZCBCdWlsZDEr\nMCkGA1UEAwwicHJpdmF0ZS1iaXRidWNrZXQucHJvY3Rvci10ZXN0LmNvbTAeFw0y\nMzEyMTIyMzI5NTlaFw0yNDEyMTEyMzI5NTlaMFcxCzAJBgNVBAYTAlVTMRswGQYD\nVQQKDBJHb29nbGUgQ2xvdWQgQnVpbGQxKzApBgNVBAMMInByaXZhdGUtYml0YnVj\na2V0LnByb2N0b3ItdGVzdC5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK\nAoIBAQCNfMx4ImGD4imZR64RbmRtpUNmypDokx2/S9kgobmyNvBWeSgRVhOGHGbU\nUgyvENcEg803K8unwF2jF6sdGrocRnIdPpr2tUoViOM2Ss6ds+TD8a2kqBA6+hmQ\nOMJiEIirpGT3Mw1pYTpuLisfIeeuuYssoS5k18kFLZ+Mk6MUSAHCgC8EowUZLGBZ\nagh9OhrjpMSXyidv+2d7FKTh/k3BWffVkDXehjvWjcr47hSvQwqW5m773ewCq0uD\nwxUgO6MAAAxLJz15cjhfvk4ishgSqcp49IZrx+xsNCLbHjPVyGkrL2OhgFaGsQS/\nq6GkXYfJ1sJYrf5Xm1EXbZlQZzJPAgMBAAGjUDBOMC0GA1UdEQQmMCSCInByaXZh\ndGUtYml0YnVja2V0LnByb2N0b3ItdGVzdC5jb20wHQYDVR0OBBYEFISmuuTpHKMB\n+m1h62gEqg1ovC86MA0GCSqGSIb3DQEBCwUAA4IBAQAwIwR6pIum9EZyLtC438Q1\nEgH3SKqbdyMFCkFSBvr4WfFU6ja1pn5ZxzJWt5TRFlI9GMy7BupQrxJGebOiFuUC\noNJpc4QDt9a0/GKh48DGF7uKo9XK33p0v1ahq3ewNT/CUnHewQNX7aXXP1/rL+br\nZPA20XWURUTvi
Mik7DdhaXKQv76K9coI3H74heeBUp+OHKgUkqA3D1QIGNRGOKos\n4z6MyBWVpMUIeJQGtIQBd9CY1hBN231iG1+hdOlOMwgyNVK2GS738r+HbngFo9v4\nh2I1HMUHVcHiPQLqwZ2/OTmTmF1aWCUbhnAvoisu20rHVcGnVIOqMrHYFzdGr3ZQ\n-----END CERTIFICATE-----\n" + } + + project = "%{project_name}" + annotations = {} +} + + +`, context) +} + +func testAccCloudbuildv2Connection_BbcConnection(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloudbuildv2_connection" "primary" { + location = "us-west1" + name = "tf-test-connection%{random_suffix}" + + bitbucket_cloud_config { + workspace = "proctor-test" + authorizer_credential { + user_token_secret_version = "projects/407304063574/secrets/bbc-api-token/versions/latest" + } + + read_authorizer_credential { + user_token_secret_version = "projects/407304063574/secrets/bbc-read-token/versions/latest" + } + + webhook_secret_secret_version = "projects/407304063574/secrets/bbdc-webhook-secret/versions/latest" + } + + project = "%{project_name}" + annotations = {} +} + + `, context) } From eec94a53c6bf2ff71eed7bad453e091ab8c5ed17 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 10 Jun 2024 11:06:04 -0500 Subject: [PATCH 101/356] fix testSweepComputeNetworkEdgeSecurityService (#10876) --- ...twork_edge_security_service_sweeper.go.erb | 98 +++---------------- .../resource_compute_target_pool_sweeper.go | 3 - 2 files changed, 16 insertions(+), 85 deletions(-) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_network_edge_security_service_sweeper.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_network_edge_security_service_sweeper.go.erb index e08cd1b0caab..956fd5d25eef 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_network_edge_security_service_sweeper.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_network_edge_security_service_sweeper.go.erb @@ -5,13 +5,9 @@ package compute import ( "context" "log" - "strings" - "testing" - 
"github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-provider-google/google/sweeper" "github.com/hashicorp/terraform-provider-google/google/tpgresource" - transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" ) func init() { @@ -35,95 +31,33 @@ func testSweepComputeNetworkEdgeSecurityService(region string) error { return err } - t := &testing.T{} - billingId := envvar.GetTestBillingAccountFromEnv(t) - - regions := []string{"us-central1", "us-west2", "us-south1", "southamerica-west1", "europe-west1"} - for _, r := range regions { - log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s in %s", resourceName, r) - - // Setup variables to replace in list template - d := &tpgresource.ResourceDataMock{ - FieldsInSchema: map[string]interface{}{ - "project": config.Project, - "region": r, - "location": r, - "zone": "-", - "billing_account": billingId, - }, - } - - listTemplate := strings.Split("https://compute.googleapis.com/compute/beta/projects/{{project}}/regions/{{region}}/networkEdgeSecurityServices", "?")[0] - listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) - return nil - } - - res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "GET", - Project: config.Project, - RawURL: listUrl, - UserAgent: config.UserAgent, - }) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) - return nil - } - - resourceList, ok := res["networkEdgeSecurityServices"] - if !ok { - log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") - return nil - } - - rl := resourceList.([]interface{}) - - log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s in %s list response.", len(rl), r, resourceName) - // Keep count of items that aren't sweepable for logging. 
- nonPrefixCount := 0 - for _, ri := range rl { - obj := ri.(map[string]interface{}) - if obj["name"] == nil { - log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) - return nil - } + found, err := config.NewComputeClient(config.UserAgent).NetworkEdgeSecurityServices.AggregatedList(config.Project).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request: %s", err) + return nil + } - name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) - // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for zone, itemList := range found.Items { + for _, tp := range itemList.NetworkEdgeSecurityServices { + if !sweeper.IsSweepableTestResource(tp.Name) { nonPrefixCount++ continue } - deleteTemplate := "https://compute.googleapis.com/compute/beta/projects/{{project}}/regions/{{region}}/networkEdgeSecurityServices/{{name}}" - deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) - if err != nil { - log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) - return nil - } - deleteUrl = deleteUrl + name - // Don't wait on operations as we may have a lot to delete - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ - Config: config, - Method: "DELETE", - Project: config.Project, - RawURL: deleteUrl, - UserAgent: config.UserAgent, - }) + _, err := config.NewComputeClient(config.UserAgent).NetworkEdgeSecurityServices.Delete(config.Project, tpgresource.GetResourceNameFromSelfLink(zone), tp.Name).Do() if err != nil { - log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + log.Printf("[INFO][SWEEPER_LOG] Error deleting %s resource %s : %s", resourceName, tp.Name, err) } else { - log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s 
resource: %s", resourceName, tp.Name) } } + } - if nonPrefixCount > 0 { - log.Printf("[INFO][SWEEPER_LOG] %d items in %s were non-sweepable and skipped.", nonPrefixCount, r) - } - + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) } return nil diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_target_pool_sweeper.go b/mmv1/third_party/terraform/services/compute/resource_compute_target_pool_sweeper.go index 6f14e5c8a43b..b41c2789d60f 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_target_pool_sweeper.go +++ b/mmv1/third_party/terraform/services/compute/resource_compute_target_pool_sweeper.go @@ -36,9 +36,6 @@ func testSweepTargetPool(region string) error { return nil } - // log.Printf("cam here") - // log.Printf("%+v", found) - // Keep count of items that aren't sweepable for logging. nonPrefixCount := 0 for zone, itemList := range found.Items { From 070e08060509e9c8e16495abfe7b29e8dee7b4ca Mon Sep 17 00:00:00 2001 From: Balanagu Harsha Vardhan Date: Mon, 10 Jun 2024 21:41:21 +0530 Subject: [PATCH 102/356] Adding "status.0.description" to ImportStateVerifyIgnore of Connection resource tests (#10902) --- ...e_integration_connectors_connection_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mmv1/third_party/terraform/services/integrationconnectors/resource_integration_connectors_connection_test.go b/mmv1/third_party/terraform/services/integrationconnectors/resource_integration_connectors_connection_test.go index dbe7e4f5bc9d..2551008583a2 100644 --- a/mmv1/third_party/terraform/services/integrationconnectors/resource_integration_connectors_connection_test.go +++ b/mmv1/third_party/terraform/services/integrationconnectors/resource_integration_connectors_connection_test.go @@ -57,7 +57,7 @@ func testAccIntegrationConnectorsConnection_integrationConnectorsConnectionBasic ResourceName: 
"google_integration_connectors_connection.pubsubconnection", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "status.0.description"}, }, }, }) @@ -104,7 +104,7 @@ func testAccIntegrationConnectorsConnection_integrationConnectorsConnectionAdvan ResourceName: "google_integration_connectors_connection.zendeskconnection", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "status.0.description"}, }, }, }) @@ -365,7 +365,7 @@ func testAccIntegrationConnectorsConnection_integrationConnectorsConnectionSaRes ResourceName: "google_integration_connectors_connection.zendeskconnection", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "status.0.description"}, }, }, }) @@ -624,7 +624,7 @@ func testAccIntegrationConnectorsConnection_integrationConnectorsConnectionOauth ResourceName: "google_integration_connectors_connection.boxconnection", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "status.0.description"}, }, }, }) @@ -711,7 +711,7 @@ func testAccIntegrationConnectorsConnection_integrationConnectorsConnectionOauth ResourceName: "google_integration_connectors_connection.boxconnection", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", 
"status.0.description"}, }, }, }) @@ -796,7 +796,7 @@ func testAccIntegrationConnectorsConnection_integrationConnectorsConnectionOauth ResourceName: "google_integration_connectors_connection.boxconnection", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "status.0.description"}, }, }, }) @@ -912,7 +912,7 @@ func testAccIntegrationConnectorsConnection_integrationConnectorsConnectionOauth ResourceName: "google_integration_connectors_connection.boxconnection", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "status.0.description"}, }, }, }) @@ -996,7 +996,7 @@ func testAccIntegrationConnectorsConnection_updateResource(t *testing.T) { ResourceName: "google_integration_connectors_connection.zendeskconnection", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "status.0.description"}, }, { Config: testAccIntegrationConnectorsConnection_update(context), @@ -1005,7 +1005,7 @@ func testAccIntegrationConnectorsConnection_updateResource(t *testing.T) { ResourceName: "google_integration_connectors_connection.zendeskconnection", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels", "status.0.description"}, }, }, }) From 7f78f530c63a52654dfcbc2719867a50ceb95cfa Mon Sep 17 00:00:00 2001 From: Salome Papiashvili Date: Mon, 10 Jun 2024 18:12:15 +0200 Subject: [PATCH 103/356] Adding mmv1 generated Resource: 
google_composer_user_workloads_config_map (#10865) Co-authored-by: Sarah French <15078782+SarahFrench@users.noreply.github.com> --- .../composer/UserWorkloadsConfigMap.yaml | 75 ++++++++ mmv1/products/composer/product.yaml | 25 +++ ...ser_user_workloads_config_map_basic.tf.erb | 20 ++ .../terraform/fwmodels/provider_model.go.erb | 1 - .../fwprovider/framework_provider.go.erb | 6 - .../provider/provider_mmv1_resources.go.erb | 2 + ...oser_user_workloads_config_map_test.go.erb | 171 ++++++++++++++++++ .../terraform/transport/config.go.erb | 7 - 8 files changed, 293 insertions(+), 14 deletions(-) create mode 100644 mmv1/products/composer/UserWorkloadsConfigMap.yaml create mode 100644 mmv1/products/composer/product.yaml create mode 100644 mmv1/templates/terraform/examples/composer_user_workloads_config_map_basic.tf.erb create mode 100644 mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_config_map_test.go.erb diff --git a/mmv1/products/composer/UserWorkloadsConfigMap.yaml b/mmv1/products/composer/UserWorkloadsConfigMap.yaml new file mode 100644 index 000000000000..2547f63aca1c --- /dev/null +++ b/mmv1/products/composer/UserWorkloadsConfigMap.yaml @@ -0,0 +1,75 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'UserWorkloadsConfigMap' +description: | + User workloads ConfigMap used by Airflow tasks that run with Kubernetes Executor or KubernetesPodOperator. 
+ Intended for Composer 3 Environments. +references: !ruby/object:Api::Resource::ReferenceLinks + # TODO: add v1 reference when this is moved to ga + api: https://cloud.google.com/composer/docs/reference/rest/v1beta1/projects.locations.environments.userWorkloadsConfigMaps + +min_version: beta + +base_url: projects/{{project}}/locations/{{region}}/environments/{{environment}}/userWorkloadsConfigMaps +self_link: projects/{{project}}/locations/{{region}}/environments/{{environment}}/userWorkloadsConfigMaps/{{name}} + +# Overrides one or more timeouts, in minutes. All timeouts default to 20. +timeouts: !ruby/object:Api::Timeouts + insert_minutes: 1 + update_minutes: 1 + delete_minutes: 1 + +examples: + - !ruby/object:Provider::Terraform::Examples + name: "composer_user_workloads_config_map_basic" + primary_resource_id: "config_map" + vars: + environment_name: "test-environment" + config_map_name: "test-config-map" + +parameters: + - !ruby/object:Api::Type::String + name: 'region' + immutable: true + default_from_api: true + url_param_only: true + description: | + The location or Compute Engine region for the environment. + - !ruby/object:Api::Type::String + name: 'environment' + required: true + immutable: true + url_param_only: true + validation: !ruby/object:Provider::Terraform::Validation + function: 'verify.ValidateGCEName' + description: | + Environment where the Kubernetes ConfigMap will be stored and used. +properties: + - !ruby/object:Api::Type::String + name: 'name' + required: true + immutable: true + custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' + custom_expand: 'templates/terraform/custom_expand/shortname_to_url.go.erb' + validation: !ruby/object:Provider::Terraform::Validation + function: 'verify.ValidateGCEName' + description: | + Name of the Kubernetes ConfigMap. 
+ - !ruby/object:Api::Type::KeyValuePairs + name: 'data' + immutable: false + description: | + The "data" field of Kubernetes ConfigMap, organized in key-value pairs. + For details see: https://kubernetes.io/docs/concepts/configuration/configmap/ diff --git a/mmv1/products/composer/product.yaml b/mmv1/products/composer/product.yaml new file mode 100644 index 000000000000..d8184e1643c5 --- /dev/null +++ b/mmv1/products/composer/product.yaml @@ -0,0 +1,25 @@ +# Copyright 2022 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Product +name: Composer +display_name: Cloud Composer +scopes: + - https://www.googleapis.com/auth/cloud-platform +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://composer.googleapis.com/v1/ + - !ruby/object:Api::Product::Version + name: beta + base_url: https://composer.googleapis.com/v1beta1/ diff --git a/mmv1/templates/terraform/examples/composer_user_workloads_config_map_basic.tf.erb b/mmv1/templates/terraform/examples/composer_user_workloads_config_map_basic.tf.erb new file mode 100644 index 000000000000..356119c4ed0e --- /dev/null +++ b/mmv1/templates/terraform/examples/composer_user_workloads_config_map_basic.tf.erb @@ -0,0 +1,20 @@ +resource "google_composer_environment" "environment" { + provider = google-beta + name = "<%= ctx[:vars]['environment_name'] %>" + region = "us-central1" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_composer_user_workloads_config_map" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + name = "<%= ctx[:vars]['config_map_name'] %>" + region = "us-central1" + environment = google_composer_environment.environment.name + data = { + api_host: "apihost:443", + } +} diff --git a/mmv1/third_party/terraform/fwmodels/provider_model.go.erb b/mmv1/third_party/terraform/fwmodels/provider_model.go.erb index a27f8ff84e92..90da65b6f40f 100644 --- a/mmv1/third_party/terraform/fwmodels/provider_model.go.erb +++ b/mmv1/third_party/terraform/fwmodels/provider_model.go.erb @@ -33,7 +33,6 @@ type ProviderModel struct { // Handwritten Products / Versioned / Atypical Entries CloudBillingCustomEndpoint types.String `tfsdk:"cloud_billing_custom_endpoint"` - ComposerCustomEndpoint types.String `tfsdk:"composer_custom_endpoint"` ContainerCustomEndpoint types.String `tfsdk:"container_custom_endpoint"` DataflowCustomEndpoint types.String `tfsdk:"dataflow_custom_endpoint"` IamCredentialsCustomEndpoint types.String 
`tfsdk:"iam_credentials_custom_endpoint"` diff --git a/mmv1/third_party/terraform/fwprovider/framework_provider.go.erb b/mmv1/third_party/terraform/fwprovider/framework_provider.go.erb index 75dd435df71c..bb982c8ae97c 100644 --- a/mmv1/third_party/terraform/fwprovider/framework_provider.go.erb +++ b/mmv1/third_party/terraform/fwprovider/framework_provider.go.erb @@ -162,12 +162,6 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.CustomEndpointValidator(), }, }, - "composer_custom_endpoint": &schema.StringAttribute{ - Optional: true, - Validators: []validator.String{ - transport_tpg.CustomEndpointValidator(), - }, - }, "container_custom_endpoint": &schema.StringAttribute{ Optional: true, Validators: []validator.String{ diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 548737cf28c1..19bd44541e4f 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -8,7 +8,9 @@ import ( "github.com/hashicorp/terraform-provider-google/google/services/<%= service -%>" <% end -%> + <% if version == 'ga' -%> "github.com/hashicorp/terraform-provider-google/google/services/composer" + <% end -%> "github.com/hashicorp/terraform-provider-google/google/services/container" "github.com/hashicorp/terraform-provider-google/google/services/containeraws" "github.com/hashicorp/terraform-provider-google/google/services/containerazure" diff --git a/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_config_map_test.go.erb b/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_config_map_test.go.erb new file mode 100644 index 000000000000..f5a2c3aaec00 --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/resource_composer_user_workloads_config_map_test.go.erb @@ -0,0 +1,171 @@ +<% 
autogen_exception -%> +package composer_test + +<% unless version == 'ga' -%> + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComposerUserWorkloadsConfigMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_basic(context), + }, + { + ResourceName: "google_composer_user_workloads_config_map.config_map", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_update(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_composer_user_workloads_config_map.config_map", "data.db_host", "dbhost:5432"), + resource.TestCheckNoResourceAttr("google_composer_user_workloads_config_map.config_map", "data.api_host"), + ), + }, + { + ResourceName: "google_composer_user_workloads_config_map.config_map", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_delete(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: 
testAccCheckComposerUserWorkloadsConfigMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_basic(context), + }, + { + ResourceName: "google_composer_user_workloads_config_map.config_map", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_delete(context), + Check: resource.ComposeTestCheckFunc( + testAccComposerUserWorkloadsConfigMapDestroyed(t), + ), + }, + }, + }) +} + +func testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "environment" { + provider = google-beta + name = "tf-test-test-environment%{random_suffix}" + region = "us-central1" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_composer_user_workloads_config_map" "config_map" { + provider = google-beta + name = "tf-test-test-config-map%{random_suffix}" + region = "us-central1" + environment = google_composer_environment.environment.name + data = { + api_host: "apihost:443", + } +} +`, context) +} + +func testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "environment" { + provider = google-beta + name = "tf-test-test-environment%{random_suffix}" + region = "us-central1" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_composer_user_workloads_config_map" "config_map" { + provider = google-beta + name = "tf-test-test-config-map%{random_suffix}" + region = "us-central1" + environment = google_composer_environment.environment.name + data = { + db_host: "dbhost:5432", + } +} +`, context) +} + +func 
testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_delete(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "environment" { + provider = google-beta + name = "tf-test-test-environment%{random_suffix}" + region = "us-central1" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} +`, context) +} + +func testAccComposerUserWorkloadsConfigMapDestroyed(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_composer_user_workloads_config_map" { + continue + } + + idTokens := strings.Split(rs.Primary.ID, "/") + if len(idTokens) != 8 { + return fmt.Errorf("Invalid ID %q, expected format projects/{project}/regions/{region}/environments/{environment}/userWorkloadsConfigMaps/{name}", rs.Primary.ID) + } + _, err := config.NewComposerClient(config.UserAgent).Projects.Locations.Environments.UserWorkloadsConfigMaps.Get(rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("config map %s still exists", rs.Primary.ID) + } + } + + return nil + } +} + +<% end -%> diff --git a/mmv1/third_party/terraform/transport/config.go.erb b/mmv1/third_party/terraform/transport/config.go.erb index a548e2371120..e20f818b5545 100644 --- a/mmv1/third_party/terraform/transport/config.go.erb +++ b/mmv1/third_party/terraform/transport/config.go.erb @@ -210,7 +210,6 @@ type Config struct { <% end -%> CloudBillingBasePath string - ComposerBasePath string ContainerBasePath string DataflowBasePath string IamCredentialsBasePath string @@ -233,7 +232,6 @@ type Config struct { const <%= product[:definitions].name -%>BasePathKey = "<%= product[:definitions].name -%>" <% end -%> const CloudBillingBasePathKey = "CloudBilling" -const ComposerBasePathKey = "Composer" const ContainerBasePathKey = "Container" const DataflowBasePathKey = "Dataflow" 
const IAMBasePathKey = "IAM" @@ -251,11 +249,6 @@ var DefaultBasePaths = map[string]string{ <%= product[:definitions].name -%>BasePathKey : "<%= product[:definitions].base_url -%>", <% end -%> CloudBillingBasePathKey : "https://cloudbilling.googleapis.com/v1/", -<% if version == "ga" -%> - ComposerBasePathKey : "https://composer.googleapis.com/v1/", -<% else -%> - ComposerBasePathKey : "https://composer.googleapis.com/v1beta1/", -<% end -%> <% if version == "ga" -%> ContainerBasePathKey : "https://container.googleapis.com/v1/", <% else -%> From 61066cfe9fa0da1023d45ab8dc704b3403b65a09 Mon Sep 17 00:00:00 2001 From: askubis Date: Mon, 10 Jun 2024 18:32:49 +0200 Subject: [PATCH 104/356] Unset operation when reading instance_group_manager resource (#10859) --- .../compute/resource_compute_instance_group_manager.go.erb | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb index 6aaa7cb5898a..e549fd33182b 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb @@ -803,8 +803,8 @@ func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interf Name: operation, Zone: zone, } - if err := d.Set("operation", op.Name); err != nil { - return fmt.Errorf("Error setting operation: %s", err) + if err := d.Set("operation", ""); err != nil { + return fmt.Errorf("Error unsetting operation: %s", err) } err = ComputeOperationWaitTime(config, op, project, "Creating InstanceGroupManager", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { From db315c5675fea4c2e35b237d9cedc2cabe381369 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Mon, 10 Jun 2024 09:40:17 -0700 Subject: [PATCH 105/356] Ignored review 
requests that were later removed (#10916) --- .ci/magician/cmd/scheduled_pr_reminders.go | 10 ++++ .../cmd/scheduled_pr_reminders_test.go | 48 +++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/.ci/magician/cmd/scheduled_pr_reminders.go b/.ci/magician/cmd/scheduled_pr_reminders.go index 73315835f596..a6806c577332 100644 --- a/.ci/magician/cmd/scheduled_pr_reminders.go +++ b/.ci/magician/cmd/scheduled_pr_reminders.go @@ -280,13 +280,23 @@ func notificationState(pr *github.PullRequest, issueEvents []*github.IssueEvent, }) var latestReviewRequest *github.IssueEvent + removedRequests := map[string]struct{}{} for _, event := range issueEvents { + if *event.Event == "review_request_removed" && event.RequestedReviewer != nil { + removedRequests[*event.RequestedReviewer.Login] = struct{}{} + continue + } if *event.Event != "review_requested" { continue } + // Ignore review requests for users who no longer exist. if event.RequestedReviewer == nil { continue } + // Ignore review requests that were later removed. 
+ if _, ok := removedRequests[*event.RequestedReviewer.Login]; ok { + continue + } if membership.IsCoreReviewer(*event.RequestedReviewer.Login) { latestReviewRequest = event break diff --git a/.ci/magician/cmd/scheduled_pr_reminders_test.go b/.ci/magician/cmd/scheduled_pr_reminders_test.go index 96a617f830bb..760cf6e9fb60 100644 --- a/.ci/magician/cmd/scheduled_pr_reminders_test.go +++ b/.ci/magician/cmd/scheduled_pr_reminders_test.go @@ -65,6 +65,29 @@ func TestNotificationState(t *testing.T) { expectState: waitingForReviewerAssignment, expectSince: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), }, + "request for team reviewer which was later removed, and no reviews": { + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("author")}, + CreatedAt: &github.Timestamp{time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)}, + RequestedTeams: []*github.Team{ + &github.Team{Name: github.String("terraform-team")}, + }, + }, + issueEvents: []*github.IssueEvent{ + &github.IssueEvent{ + Event: github.String("review_requested"), + CreatedAt: &github.Timestamp{time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC)}, + RequestedReviewer: &github.User{Login: github.String(firstCoreReviewer)}, + }, + &github.IssueEvent{ + Event: github.String("review_request_removed"), + CreatedAt: &github.Timestamp{time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC)}, + RequestedReviewer: &github.User{Login: github.String(firstCoreReviewer)}, + }, + }, + expectState: waitingForReviewerAssignment, + expectSince: time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC), + }, // expectState: waitingForReview "no reviews": { @@ -82,6 +105,31 @@ func TestNotificationState(t *testing.T) { expectState: waitingForReview, expectSince: time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC), }, + "review requested, removed, and rerequested, with no reviews": { + pullRequest: &github.PullRequest{ + User: &github.User{Login: github.String("author")}, + CreatedAt: &github.Timestamp{time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)}, + }, + 
issueEvents: []*github.IssueEvent{ + &github.IssueEvent{ + Event: github.String("review_requested"), + CreatedAt: &github.Timestamp{time.Date(2024, 1, 2, 0, 0, 0, 0, time.UTC)}, + RequestedReviewer: &github.User{Login: github.String(firstCoreReviewer)}, + }, + &github.IssueEvent{ + Event: github.String("review_request_removed"), + CreatedAt: &github.Timestamp{time.Date(2024, 1, 3, 0, 0, 0, 0, time.UTC)}, + RequestedReviewer: &github.User{Login: github.String(firstCoreReviewer)}, + }, + &github.IssueEvent{ + Event: github.String("review_requested"), + CreatedAt: &github.Timestamp{time.Date(2024, 1, 4, 0, 0, 0, 0, time.UTC)}, + RequestedReviewer: &github.User{Login: github.String(firstCoreReviewer)}, + }, + }, + expectState: waitingForReview, + expectSince: time.Date(2024, 1, 4, 0, 0, 0, 0, time.UTC), + }, "no reviews since latest review request": { pullRequest: &github.PullRequest{ User: &github.User{Login: github.String("author")}, From bc5bf5c486b04d5759ab7ffdbdd98eb3fe0a0d9a Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Mon, 10 Jun 2024 09:41:16 -0700 Subject: [PATCH 106/356] Add warning to google_access_context_manager_access_levels docs (#10898) --- mmv1/products/accesscontextmanager/AccessLevels.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mmv1/products/accesscontextmanager/AccessLevels.yaml b/mmv1/products/accesscontextmanager/AccessLevels.yaml index ab2c88492f72..3dc73e403baf 100644 --- a/mmv1/products/accesscontextmanager/AccessLevels.yaml +++ b/mmv1/products/accesscontextmanager/AccessLevels.yaml @@ -22,6 +22,13 @@ references: !ruby/object:Api::Resource::ReferenceLinks guides: 'Access Policy Quickstart': 'https://cloud.google.com/access-context-manager/docs/quickstart' api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.accessLevels' +docs: !ruby/object:Provider::Terraform::Docs + warning: | + This resource is authoritative over the access levels under an access policy. 
Due to a limitation in Terraform, + it will overwrite all preexisting access levels during a create opration without displaying the old values on + the left side of plan. To prevent this, we recommend importing the resource before applying it if overwriting + preexisting rules, as the plan will correctly display the complete changes to your access policy if the + resource is present in state. description: | Replace all existing Access Levels in an Access Policy with the Access Levels provided. This is done atomically. This is a bulk edit of all Access Levels and may override existing Access Levels created by `google_access_context_manager_access_level`, From 41aebe556b108ad5aadc18ab3439eb5a0a704e41 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Mon, 10 Jun 2024 09:48:06 -0700 Subject: [PATCH 107/356] Removed unused variable (#10908) --- .../dataflow/resource_dataflow_flex_template_job_test.go.erb | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job_test.go.erb b/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job_test.go.erb index 5eba332b9e79..0181f58eebc2 100644 --- a/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job_test.go.erb +++ b/mmv1/third_party/terraform/services/dataflow/resource_dataflow_flex_template_job_test.go.erb @@ -300,7 +300,6 @@ func TestAccDataflowFlexTemplateJob_withKmsKey(t *testing.T) { randStr := acctest.RandString(t, 10) job := "tf-test-dataflow-job-" + randStr kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") - keyRing := kms.KeyRing.Name cryptoKey := kms.CryptoKey.Name bucket := "tf-test-dataflow-bucket-" + randStr topic := "tf-test-topic" + randStr @@ -319,7 +318,7 @@ func TestAccDataflowFlexTemplateJob_withKmsKey(t *testing.T) { CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), Steps: []resource.TestStep{ { - Config: 
testAccDataflowFlexTemplateJob_kms(job, keyRing, cryptoKey, bucket, topic), + Config: testAccDataflowFlexTemplateJob_kms(job, cryptoKey, bucket, topic), Check: resource.ComposeTestCheckFunc( testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_kms", false), ), @@ -1299,7 +1298,7 @@ resource "google_dataflow_flex_template_job" "flex_job_ipconfig" { `, topicName, network, subnetwork, bucket, job) } -func testAccDataflowFlexTemplateJob_kms(job, key_ring, crypto_key, bucket, topicName string) string { +func testAccDataflowFlexTemplateJob_kms(job, crypto_key, bucket, topicName string) string { return fmt.Sprintf(` data "google_project" "project" {} From 89d7e451adf2dbc6ad39a69c99d5280b47f59ecf Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Mon, 10 Jun 2024 10:48:25 -0700 Subject: [PATCH 108/356] Converte nested_query template with Go (#10888) --- mmv1/api/resource.go | 26 +- mmv1/api/resource/examples.go | 2 +- mmv1/google/template_utils.go | 61 ++++ mmv1/provider/template_data.go | 47 +-- mmv1/templates/terraform/nested_query.go.tmpl | 287 ++++++++++++++++++ .../property_documentation.html.markdown.tmpl | 2 +- mmv1/templates/terraform/resource.go.tmpl | 48 +-- .../terraform/resource.html.markdown.tmpl | 2 +- .../terraform/schema_property.go.tmpl | 10 +- 9 files changed, 406 insertions(+), 79 deletions(-) create mode 100644 mmv1/google/template_utils.go create mode 100644 mmv1/templates/terraform/nested_query.go.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 09a4de18142a..97b3f7a7e4fa 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -445,6 +445,10 @@ func (r Resource) SettableProperties() []*Type { return props } +func (r Resource) IsSettableProperty(t *Type) bool { + return slices.Contains(r.SettableProperties(), t) +} + // Properties that will be returned in the API body // def gettable_properties @@ -505,7 +509,6 @@ func (r Resource) GetIdentity() []*Type { return google.Select(props, func(p *Type) bool { 
return p.Name == "name" }) - } // def add_labels_related_fields(props, parent) @@ -1266,7 +1269,7 @@ func (r Resource) IamImportQualifiersForTest() string { return strings.Join(importQualifiers, ", ") } -func OrderProperties(props []*Type) []*Type { +func (r Resource) OrderProperties(props []*Type) []*Type { req := google.Select(props, func(p *Type) bool { return p.Required }) @@ -1315,7 +1318,7 @@ func (r Resource) GetPropertyUpdateMasksGroups(properties []*Type, maskPrefix st } // Formats whitespace in the style of the old Ruby generator's descriptions in documentation -func FormatDocDescription(desc string) string { +func (r Resource) FormatDocDescription(desc string) string { returnString := strings.ReplaceAll(desc, "\n\n", "\n") returnString = strings.ReplaceAll(returnString, "\n", "\n ") @@ -1352,3 +1355,20 @@ func (r Resource) ListUrlTemplate() string { func (r Resource) DeleteUrlTemplate() string { return fmt.Sprintf("%s%s", r.ProductMetadata.BaseUrl, r.DeleteUri()) } + +func (r Resource) LastNestedQueryKey() string { + if r.NestedQuery == nil { + return "" + } + len := len(r.NestedQuery.Keys) + return r.NestedQuery.Keys[len-1] +} + +func (r Resource) FirstIdentityProp() *Type { + idProps := r.GetIdentity() + if len(idProps) == 0 { + return nil + } + + return idProps[0] +} diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index 0c1159965942..705a17fe484d 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -236,7 +236,7 @@ func ExecuteTemplate(e any, templatePath string, appendNewline bool) string { } templateFileName := filepath.Base(templatePath) - tmpl, err := template.New(templateFileName).ParseFiles(templates...) + tmpl, err := template.New(templateFileName).Funcs(google.TemplateFunctions).ParseFiles(templates...) 
if err != nil { glog.Exit(err) } diff --git a/mmv1/google/template_utils.go b/mmv1/google/template_utils.go new file mode 100644 index 000000000000..04fb5738e2e6 --- /dev/null +++ b/mmv1/google/template_utils.go @@ -0,0 +1,61 @@ +// Copyright 2024 Google Inc. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package google + +import ( + "errors" + "strings" + + "text/template" +) + +// Build a map(map[string]interface{}) from a list of paramerter +// The format of passed in parmeters are key1, value1, key2, value2 ... 
+func wrapMultipleParams(params ...interface{}) (map[string]interface{}, error) { + if len(params)%2 != 0 { + return nil, errors.New("invalid number of arguments") + } + m := make(map[string]interface{}, len(params)/2) + for i := 0; i < len(params); i += 2 { + key, ok := params[i].(string) + if !ok { + return nil, errors.New("keys must be strings") + } + m[key] = params[i+1] + } + return m, nil +} + +// subtract returns the difference between a and b +// and used in Go templates +func subtract(a, b int) int { + return a - b +} + +var TemplateFunctions = template.FuncMap{ + "title": SpaceSeparatedTitle, + "replace": strings.Replace, + "replaceAll": strings.ReplaceAll, + "camelize": Camelize, + "underscore": Underscore, + "plural": Plural, + "contains": strings.Contains, + "join": strings.Join, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "dict": wrapMultipleParams, + "format2regex": Format2Regex, + "hasPrefix": strings.HasPrefix, + "sub": subtract, +} diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index 8096d3450e1e..fcd04a8903f4 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -15,14 +15,12 @@ package provider import ( "bytes" - "errors" "fmt" "go/format" "log" "os" "os/exec" "path/filepath" - "strings" "text/template" @@ -47,48 +45,6 @@ type TemplateData struct { // attr_accessor :env } -// Build a map(map[string]interface{}) from a list of paramerter -// The format of passed in parmeters are key1, value1, key2, value2 ... 
-func wrapMultipleParams(params ...interface{}) (map[string]interface{}, error) { - if len(params)%2 != 0 { - return nil, errors.New("invalid number of arguments") - } - m := make(map[string]interface{}, len(params)/2) - for i := 0; i < len(params); i += 2 { - key, ok := params[i].(string) - if !ok { - return nil, errors.New("keys must be strings") - } - m[key] = params[i+1] - } - return m, nil -} - -// subtract returns the difference between a and b -// and used in Go templates -func subtract(a, b int) int { - return a - b -} - -var TemplateFunctions = template.FuncMap{ - "title": google.SpaceSeparatedTitle, - "replace": strings.Replace, - "replaceAll": strings.ReplaceAll, - "camelize": google.Camelize, - "underscore": google.Underscore, - "plural": google.Plural, - "contains": strings.Contains, - "join": strings.Join, - "lower": strings.ToLower, - "upper": strings.ToUpper, - "dict": wrapMultipleParams, - "format2regex": google.Format2Regex, - "orderProperties": api.OrderProperties, - "hasPrefix": strings.HasPrefix, - "sub": subtract, - "formatDocDescription": api.FormatDocDescription, -} - var GA_VERSION = "ga" var BETA_VERSION = "beta" var ALPHA_VERSION = "alpha" @@ -121,6 +77,7 @@ func (td *TemplateData) GenerateResourceFile(filePath string, resource api.Resou "templates/terraform/flatten_property_method.go.tmpl", "templates/terraform/expand_property_method.go.tmpl", "templates/terraform/update_mask.go.tmpl", + "templates/terraform/nested_query.go.tmpl", } td.GenerateFile(filePath, templatePath, resource, true, templates...) } @@ -217,7 +174,7 @@ func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, g templateFileName := filepath.Base(templatePath) - tmpl, err := template.New(templateFileName).Funcs(TemplateFunctions).ParseFiles(templates...) + tmpl, err := template.New(templateFileName).Funcs(google.TemplateFunctions).ParseFiles(templates...) 
if err != nil { glog.Exit(err) } diff --git a/mmv1/templates/terraform/nested_query.go.tmpl b/mmv1/templates/terraform/nested_query.go.tmpl new file mode 100644 index 000000000000..6a05bb981b8a --- /dev/null +++ b/mmv1/templates/terraform/nested_query.go.tmpl @@ -0,0 +1,287 @@ +{{- define "NestedQuery" }} +func flattenNested{{ $.ResourceName }}(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + +{{ range $i, $k := $.NestedQuery.Keys }} +{{- if ne $i (sub (len $.NestedQuery.Keys) 1) }} + v, ok = res["{{ $k }}"] + if !ok || v == nil { + return nil, nil + } + res = v.(map[string]interface{}) + +{{- end }} +{{- end }} + v, ok = res["{{ $.LastNestedQueryKey }}"] + if !ok || v == nil { + return nil,nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value {{ join $.NestedQuery.Keys "." }}. 
Actual value: %v", v) + } + + _, item, err := resource{{ $.ResourceName }}FindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resource{{ $.ResourceName }}FindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + {{- range $idProp := $.GetIdentity }} + {{- if $.IsSettableProperty $idProp }} + expected{{ $idProp.TitlelizeProperty }}, err := expandNested{{ $.ResourceName }}{{ $idProp.TitlelizeProperty }}(d.Get("{{ underscore $idProp.Name }}"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + {{- else }} + expected{{ $idProp.TitlelizeProperty }} := d.Get("{{ underscore $idProp.Name }}") + {{- end }}{{/* if $.IsSettableProperty $idProp */}} + expectedFlattened{{ $idProp.TitlelizeProperty }} := flattenNested{{ $.ResourceName }}{{ $idProp.TitlelizeProperty }}(expected{{ $idProp.TitlelizeProperty }}, d, meta.(*transport_tpg.Config)) + {{- end }}{{/* range $idProp := $.GetIdentity */}} + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + {{- if $.NestedQuery.IsListOfIds }} + // List response only contains the ID - construct a response object. + item := map[string]interface{}{ + "{{ $.FirstIdentity.ApiName }}": itemRaw, + } + {{- else }} + item := itemRaw.(map[string]interface{}) + {{- end }} + {{ if $.CustomCode.Decoder }} + // Decode list item before comparing. 
+ item, err := resource{{ $.ResourceName }}Decoder(d, meta, item) + if err != nil { + return -1, nil, err + } + {{- end }} + {{ range $prop := $.GetIdentity }} + item{{ $prop.TitlelizeProperty }} := flattenNested{{ $.ResourceName }}{{ $prop.TitlelizeProperty }}(item["{{ $prop.ApiName }}"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(item{{ $prop.TitlelizeProperty }})) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattened{{ $prop.TitlelizeProperty }}))) && !reflect.DeepEqual(item{{ $prop.TitlelizeProperty }}, expectedFlattened{{ $prop.TitlelizeProperty }}) { + log.Printf("[DEBUG] Skipping item with {{ $prop.ApiName }}= %#v, looking for %#v)", item{{ $prop.TitlelizeProperty }}, expectedFlattened{{ $prop.TitlelizeProperty }}) + continue + } + {{- end }} + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} +{{- if $.NestedQuery.ModifyByPatch }} +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. +func resource{{ $.ResourceName }}PatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resource{{ $.ResourceName }}ListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resource{{ $.ResourceName }}FindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + + // Return error if item already created. 
+ if found != nil { + return nil, fmt.Errorf("Unable to create {{ $.Name }}, existing object already found: %+v", found) + } + + // Return list with the resource to create appended + {{- if $.NestedQuery.IsListOfIds }} + res := map[string]interface{}{ + "{{ $.LastNestedQueryKey }}": append(currItems, obj["{{ $.FirstIdentity.ApiName }}"]), + } + {{- else }} + res := map[string]interface{}{ + "{{ $.LastNestedQueryKey }}": append(currItems, obj), + } +{{- end }} + {{/* + Reconstruct the full nested object. For example, if nested_query.keys is: + - nested_item + - more_nested_item + Then the code above will build: + { + "more_nested_item": [...] + } + Add back the other keys so we get: + { + "nested_item": { + "more_nested_item": [...] + } + } + Note that this assumes that we can safely have "more_nested_item" be the only element + in the "nested_item" map, which only works if the patch request takes an update mask + (or if the rest of the map would have been empty anyway). + */}} +{{- range $i, $k := $.NestedQuery.Keys }} + {{- if ne $i 0 }} + wrapped := map[string]interface{}{ + "{{ index $.NestedQuery.Keys (sub (sub (len $.NestedQuery.Keys) $i) 1) }}": res, + } + res = wrapped + {{- end }} +{{- end }} + + return res, nil +} + +{{- if $.Updatable }} +// PatchUpdateEncoder handles creating request data to PATCH parent resource +// with list including updated object. +func resource{{ $.ResourceName }}PatchUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + items, err := resource{{ $.ResourceName }}ListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resource{{ $.ResourceName }}FindNestedObjectInList(d, meta, items) + if err != nil { + return nil, err + } + + // Return error if item to update does not exist. + if item == nil { + return nil, fmt.Errorf("Unable to update {{ $.Name }} %q - not found in list", d.Id()) + } + + // Merge new object into old. 
+ for k, v := range obj { + item[k] = v + } + items[idx] = item + + // Return list with new item added + res := map[string]interface{}{ + "{{ $.LastNestedQueryKey }}": items, + } + {{/* see comments in PatchCreateEncoder for details */}} +{{- range $i, $k := $.NestedQuery.Keys }} + {{- if ne $i 0 }} + wrapped := map[string]interface{}{ + "{{ $k }}": res, + } + res = wrapped + {{- end }} +{{- end }} + + return res, nil +} +{{- end }} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. +func resource{{ $.ResourceName }}PatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, err := resource{{ $.ResourceName }}ListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resource{{ $.ResourceName }}FindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "{{ $.ResourceName }}") + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "{{ $.LastNestedQueryKey }}": updatedItems, + } + {{/* see comments in PatchCreateEncoder for details */}} + {{- range $i, $k := $.NestedQuery.Keys }} + {{- if ne $i 0 }} + wrapped := map[string]interface{}{ + "{{ $k }}": res, + } + res = wrapped + {{- end }} +{{- end }} + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. 
+{{/* This function is similar to flattenNested...() but + # 1) does an API request to read the parent resource from API (flatten + takes in list from top-level Read() method, whereas this method + is called in Create/Update/Delete) + # 2) returns the full list of other resources, rather than just the + # matching resource +*/}} +func resource{{ $.ResourceName }}ListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}{{"}}$.SelfLinkUri{{"}}"}}") + if err != nil { + return nil, err + } + {{- if $.HasProject }} + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + {{- end }} + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "{{ $.ReadVerb }}", + {{- if $.HasProject }} + Project: project, + {{- end }} + RawURL: url, + UserAgent: userAgent, + {{- if $.ErrorRetryPredicates }} + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorRetryPredicates "," -}} }, + {{- end }} + {{- if $.ErrorAbortPredicates }} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{ {{- join $.ErrorAbortPredicates "," -}} }, + {{- end }} + }) + if err != nil { + return nil, err + } + + var v interface{} + var ok bool +{{- range $k := $.NestedQuery.Keys }} + if v, ok = res["{{ $k }}"]; ok && v != nil { + res = v.(map[string]interface{}) + } else { + return nil, nil + } +{{- end }} + + v, ok = res["{{ $.LastNestedQueryKey }}"] + if ok && v != nil { + ls, lsOk := v.([]interface{}) + if !lsOk { + return nil, fmt.Errorf(`expected list for nested field "{{ $.LastNestedQueryKey }}"`) + } + return ls, nil + } + return nil, nil +} +{{- end }}{{/* if $.NestedQuery.ModifyByPatch */}} +{{- end }} \ No 
newline at end of file diff --git a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl index 60dc8df96638..70275d948917 100644 --- a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl @@ -21,7 +21,7 @@ (Deprecated) {{- end}} {{- end }} - {{ formatDocDescription $.Description -}} + {{ $.ResourceMetadata.FormatDocDescription $.Description -}} {{- if and (and ($.IsA "Array") ($.ItemType.IsA "Enum")) (and (not $.Output) (not $.ItemType.SkipDocsValues))}} {{- if $.ItemType.DefaultValue }} Default value is `{{ $.ItemType.DefaultValue }}`. diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 54a9854728c0..6d8ca5368bfa 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -132,7 +132,7 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { {{- end}} Schema: map[string]*schema.Schema{ - {{- range $prop := orderProperties $.AllUserProperties }} + {{- range $prop := $.OrderProperties $.AllUserProperties }} {{template "SchemaFields" $prop -}} {{- end }} {{- if $.VirtualFields -}} @@ -171,7 +171,7 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { } {{- range $prop := $.AllUserProperties }} -{{if and (eq $prop.Type "Array") ($prop.IsSet) (eq $prop.ItemType "NestedObject")}} +{{if and (eq $prop.Type "Array") ($prop.IsSet) (eq $prop.ItemType.Type "NestedObject")}} {{template "SchemaSubResource" $prop}} {{end}} {{- end}} @@ -282,7 +282,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{if $.ErrorRetryPredicates -}} ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, {{- end}} -{{if $.ErrorAbortPredicates -}} +{{- if $.ErrorAbortPredicates -}} ErrorAbortPredicates: 
[]transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}}, {{- end}} }) @@ -336,7 +336,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error decoding response from operation: %s", err) } if opRes == nil { - return fmt.Errorf("Error decoding response from operation, could not find $") + return fmt.Errorf("Error decoding response from operation, could not find object") } {{- end}} @@ -345,11 +345,11 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ if _, ok := opRes["{{ index $.NestedQuery.Keys 0 -}}"]; ok { opRes, err = flattenNested{{ $.ResourceName -}}(d, meta, opRes) if err != nil { - return fmt.Errorf("Error getting nested $ from operation response: %s", err) + return fmt.Errorf("Error getting nested object from operation response: %s", err) } if opRes == nil { // Object isn't there any more - remove it from the state. - return fmt.Errorf("Error decoding response from operation, could not find nested $") + return fmt.Errorf("Error decoding response from operation, could not find nested object") } } {{- end}} @@ -371,7 +371,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{ else -}} err = {{ $.ClientNamePascal -}}OperationWaitTime( - config, res, {{if or $.Project $.GetAsync.IncludeProject -}} {{if $.LegacyLongFormProject -}}tpgresource.GetResourceNameFromSelfLink(project){{ else }}project{{ end }}, {{ end -}} "Creating {{ $.Name -}}", userAgent, + config, res, {{if or $.HasProject $.GetAsync.IncludeProject -}} {{if $.LegacyLongFormProject -}}tpgresource.GetResourceNameFromSelfLink(project){{ else }}project{{ end }}, {{ end -}} "Creating {{ $.Name -}}", userAgent, d.Timeout(schema.TimeoutCreate)) if err != nil { @@ -467,8 +467,8 @@ func resource{{ $.ResourceName -}}PollRead(d *schema.ResourceData, meta interfac {{if $.ErrorRetryPredicates -}} ErrorRetryPredicates: 
[]transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, {{- end}} -{{if $.ErrorAbortPredicates -}} - ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, +{{- if $.ErrorAbortPredicates -}} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}}, {{- end}} }) if err != nil { @@ -554,8 +554,8 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) {{if $.ErrorRetryPredicates -}} ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, {{- end}} -{{if $.ErrorAbortPredicates -}} - ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, +{{- if $.ErrorAbortPredicates -}} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}}, {{- end}} }) if err != nil { @@ -574,7 +574,7 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) if res == nil { // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing {{ $.ResourceName -}} because it couldn't be matched.") + log.Printf("[DEBUG] Removing {{ $.ResourceName }} because it couldn't be matched.") d.SetId("") return nil } @@ -588,8 +588,8 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) } if res == nil { - // Decoding the $ has resulted in it being gone. It may be marked deleted - log.Printf("[DEBUG] Removing {{ $.ResourceName -}} because it no longer exists.") + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing {{ $.ResourceName }} because it no longer exists.") d.SetId("") return nil } @@ -785,7 +785,7 @@ if len(updateMask) > 0 { ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, {{- end}} {{- if $.ErrorAbortPredicates -}} - ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}}, {{- end}} }) @@ -862,8 +862,8 @@ if len(updateMask) > 0 { {{if $.ErrorRetryPredicates -}} ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, {{- end}} -{{if $.ErrorAbortPredicates -}} - ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, +{{- if $.ErrorAbortPredicates -}} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}}, {{- end}} }) if err != nil { @@ -954,8 +954,8 @@ if len(updateMask) > 0 { {{if $.ErrorRetryPredicates -}} ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, {{- end}} -{{if $.ErrorAbortPredicates -}} - ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, +{{- if $.ErrorAbortPredicates -}} + ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}}, {{- end}} Headers: headers, }) @@ -1187,15 +1187,17 @@ func resource{{ $.ResourceName -}}UpdateEncoder(d *schema.ResourceData, meta int {{- $.CustomTemplate $.CustomCode.UpdateEncoder true -}} } {{- end }} +{{- if $.NestedQuery }} + {{ template "NestedQuery" $ }} +{{- end }} {{- if $.CustomCode.Decoder }} -func resource{{ $.ResourceName -}}Decoder(d *schema.ResourceData, meta 
interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - {{ $.CustomTemplate $.CustomCode.Decoder true -}} +func resource{{ $.ResourceName -}}Decoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + {{- $.CustomTemplate $.CustomCode.Decoder true -}} } {{- end }} {{- if $.CustomCode.PostCreateFailure }} func resource{{ $.ResourceName -}}PostCreateFailure(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - {{ $.CustomTemplate $.CustomCode.PostCreateFailure true -}} + {{- $.CustomTemplate $.CustomCode.PostCreateFailure true -}} } {{- end }} -{{/* TODO nested query */}} {{/* TODO state upgraders */}} diff --git a/mmv1/templates/terraform/resource.html.markdown.tmpl b/mmv1/templates/terraform/resource.html.markdown.tmpl index ab8c9a1fe58b..d37f88b74341 100644 --- a/mmv1/templates/terraform/resource.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource.html.markdown.tmpl @@ -27,7 +27,7 @@ # ---------------------------------------------------------------------------- subcategory: "{{$.ProductMetadata.DisplayName}}" description: |- - {{ formatDocDescription $.Description }} + {{ $.FormatDocDescription $.Description }} --- # {{$.TerraformName}} diff --git a/mmv1/templates/terraform/schema_property.go.tmpl b/mmv1/templates/terraform/schema_property.go.tmpl index 8d4dc2537ac1..7673032aded5 100644 --- a/mmv1/templates/terraform/schema_property.go.tmpl +++ b/mmv1/templates/terraform/schema_property.go.tmpl @@ -14,7 +14,7 @@ */}} {{- define "SchemaFields"}} {{- if .FlattenObject -}} - {{- range $prop := orderProperties .Properties -}} + {{- range $prop := .ResourceMetadata.OrderProperties .Properties -}} {{- template "SchemaFields" $prop -}} {{- end -}} {{- else -}} @@ -79,7 +79,7 @@ Possible values: [{{- .EnumValuesToString "\"" false -}}] {{ end -}} Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - {{- range $prop := orderProperties 
$.Properties }} + {{- range $prop := .ResourceMetadata.OrderProperties $.Properties }} {{template "SchemaFields" $prop}} {{- end }} }, @@ -97,7 +97,7 @@ Possible values: [{{- .EnumValuesToString "\"" false -}}] {{ else -}} Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - {{- range $prop := orderProperties $.ItemType.Properties }} + {{- range $prop := .ResourceMetadata.OrderProperties $.ItemType.Properties }} {{template "SchemaFields" $prop}} {{- end }} }, @@ -123,7 +123,7 @@ Possible values: [{{- .EnumValuesToString "\"" false -}}] {{ if .IsSet -}} {{ if .SetHashFunc -}} Set: {{ .SetHashFunc -}}, - {{ else if or (eq .ItemType "String") (eq .ItemType.Type "Enum") -}} + {{ else if or (eq .ItemType.Type "String") (eq .ItemType.Type "Enum") -}} Set: schema.HashString, {{ else -}} // Default schema.HashSchema is used. @@ -144,7 +144,7 @@ Possible values: [{{- .EnumValuesToString "\"" false -}}] ForceNew: true, {{ end -}} }, - {{- range $prop := orderProperties $.ValueType.Properties }} + {{- range $prop := .ResourceMetadata.OrderProperties $.ValueType.Properties }} {{template "SchemaFields" $prop}} {{- end }} }, From e1f988ee30459540ae6abcc966b21313f0901d0d Mon Sep 17 00:00:00 2001 From: Jesse DeJong Date: Mon, 10 Jun 2024 16:14:16 -0400 Subject: [PATCH 109/356] Add Managed Kafka Topic resource and tests. 
(#10774) --- mmv1/products/managedkafka/Cluster.yaml | 2 +- mmv1/products/managedkafka/Topic.yaml | 77 +++++++++++ .../examples/managedkafka_topic_basic.tf.erb | 34 +++++ .../resource_managed_kafka_topic_test.go.erb | 125 ++++++++++++++++++ 4 files changed, 237 insertions(+), 1 deletion(-) create mode 100644 mmv1/products/managedkafka/Topic.yaml create mode 100644 mmv1/templates/terraform/examples/managedkafka_topic_basic.tf.erb create mode 100644 mmv1/third_party/terraform/services/managedkafka/resource_managed_kafka_topic_test.go.erb diff --git a/mmv1/products/managedkafka/Cluster.yaml b/mmv1/products/managedkafka/Cluster.yaml index f193cd96c7c1..fb711f9bd9ac 100644 --- a/mmv1/products/managedkafka/Cluster.yaml +++ b/mmv1/products/managedkafka/Cluster.yaml @@ -19,7 +19,7 @@ id_format: projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}} import_format: - projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}} name: Cluster -description: An Apache Kafka for BigQuery cluster. +description: An Apache Kafka for BigQuery cluster. Apache Kafka is a trademark owned by the Apache Software Foundation. min_version: beta update_verb: :PATCH update_mask: true diff --git a/mmv1/products/managedkafka/Topic.yaml b/mmv1/products/managedkafka/Topic.yaml new file mode 100644 index 000000000000..395d221c2038 --- /dev/null +++ b/mmv1/products/managedkafka/Topic.yaml @@ -0,0 +1,77 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Resource +base_url: projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/topics +create_url: projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/topics?topicId={{topic_id}} +self_link: projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/topics/{{topic_id}} +id_format: projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/topics/{{topic_id}} +import_format: + - projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/topics/{{topic_id}} +name: Topic +description: An Apache Kafka for BigQuery topic. Apache Kafka is a trademark owned by the Apache Software Foundation. +min_version: beta +update_verb: :PATCH +update_mask: true +examples: + - !ruby/object:Provider::Terraform::Examples + name: "managedkafka_topic_basic" + primary_resource_id: "example" + min_version: beta + vars: + cluster_id: "my-cluster" + topic_id: "my-topic" +properties: + - !ruby/object:Api::Type::String + name: name + description: "The name of the topic. The `topic` segment is used when + connecting directly to the cluster. Must be in the format `projects/PROJECT_ID/locations/LOCATION/clusters/CLUSTER_ID/topics/TOPIC_ID`." + output: true + - !ruby/object:Api::Type::Integer + name: partitionCount + description: "The number of partitions in a topic. You can increase the partition + count for a topic, but you cannot decrease it. Increasing partitions + for a topic that uses a key might change how messages are distributed." + - !ruby/object:Api::Type::Integer + name: replicationFactor + description: "The number of replicas of each partition. A replication factor of 3 is + recommended for high availability." + required: true + immutable: true + - !ruby/object:Api::Type::KeyValuePairs + name: configs + description: "Configuration for the topic that are overridden from the cluster + defaults. The key of the map is a Kafka topic property name, for + example: `cleanup.policy=compact`, `compression.type=producer`." 
+parameters: + - !ruby/object:Api::Type::String + name: location + description: "ID of the location of the Apache Kafka for BigQuery resource. See + https://cloud.google.com/managed-kafka/docs/locations for a list of + supported locations." + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: cluster + description: "The cluster name." + url_param_only: true + required: true + immutable: true + - !ruby/object:Api::Type::String + name: topicId + description: "The ID to use for the topic, which will become the final + component of the topic's name. This value is structured like: `my-topic-name`." + url_param_only: true + required: true + immutable: true diff --git a/mmv1/templates/terraform/examples/managedkafka_topic_basic.tf.erb b/mmv1/templates/terraform/examples/managedkafka_topic_basic.tf.erb new file mode 100644 index 000000000000..a0869e1e50c4 --- /dev/null +++ b/mmv1/templates/terraform/examples/managedkafka_topic_basic.tf.erb @@ -0,0 +1,34 @@ +resource "google_managed_kafka_cluster" "cluster" { + cluster_id = "<%= ctx[:vars]['cluster_id'] %>" + location = "us-central1" + capacity_config { + vcpu_count = 3 + memory_bytes = 3221225472 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + + provider = google-beta +} + +resource "google_managed_kafka_topic" "<%= ctx[:primary_resource_id] %>" { + topic_id = "<%= ctx[:vars]['topic_id'] %>" + cluster = google_managed_kafka_cluster.cluster.cluster_id + location = "us-central1" + partition_count = 2 + replication_factor = 3 + configs = { + "cleanup.policy" = "compact" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} diff --git a/mmv1/third_party/terraform/services/managedkafka/resource_managed_kafka_topic_test.go.erb b/mmv1/third_party/terraform/services/managedkafka/resource_managed_kafka_topic_test.go.erb 
new file mode 100644 index 000000000000..015e5e177c45 --- /dev/null +++ b/mmv1/third_party/terraform/services/managedkafka/resource_managed_kafka_topic_test.go.erb @@ -0,0 +1,125 @@ +<% autogen_exception -%> +package managedkafka_test +<% unless version == 'ga' -%> + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccManagedKafkaTopic_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckManagedKafkaTopicDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccManagedKafkaTopic_basic(context), + }, + { + ResourceName: "google_managed_kafka_topic.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "location", "topic_id"}, + }, + { + Config: testAccManagedKafkaTopic_update(context), + }, + { + ResourceName: "google_managed_kafka_topic.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "location", "topic_id"}, + }, + }, + }) +} + +func testAccManagedKafkaTopic_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_managed_kafka_cluster" "example" { + cluster_id = "tf-test-my-cluster%{random_suffix}" + location = "us-central1" + capacity_config { + vcpu_count = 3 + memory_bytes = 3221225472 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + + provider = google-beta +} + +resource "google_managed_kafka_topic" "example" { + cluster = google_managed_kafka_cluster.example.cluster_id + topic_id = 
"tf-test-my-topic%{random_suffix}" + location = "us-central1" + partition_count = 2 + replication_factor = 3 + configs = { + "cleanup.policy" = "compact" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} +`, context) +} + +func testAccManagedKafkaTopic_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_managed_kafka_cluster" "example" { + cluster_id = "tf-test-my-cluster%{random_suffix}" + location = "us-central1" + capacity_config { + vcpu_count = 3 + memory_bytes = 3221225472 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + + provider = google-beta +} + +resource "google_managed_kafka_topic" "example" { + cluster = google_managed_kafka_cluster.example.cluster_id + topic_id = "tf-test-my-topic%{random_suffix}" + location = "us-central1" + partition_count = 3 + replication_factor = 3 + configs = { + "cleanup.policy" = "compact" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} +`, context) +} +<% else %> +// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. 
+<% end -%> From f5f85cada95a58e57c75f439a34d3a259c299de8 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Mon, 10 Jun 2024 13:38:01 -0700 Subject: [PATCH 110/356] Add handwritten lookerinstance sweeper (#10918) --- mmv1/products/looker/Instance.yaml | 5 +- ...ooker_instance_enterprise_full_test.tf.erb | 2 + .../resource_looker_instance_sweeper.go | 122 ++++++++++++++++++ 3 files changed, 127 insertions(+), 2 deletions(-) create mode 100644 mmv1/third_party/terraform/services/looker/resource_looker_instance_sweeper.go diff --git a/mmv1/products/looker/Instance.yaml b/mmv1/products/looker/Instance.yaml index b2f5f09d47cd..8893fbd4e518 100644 --- a/mmv1/products/looker/Instance.yaml +++ b/mmv1/products/looker/Instance.yaml @@ -30,6 +30,7 @@ timeouts: !ruby/object:Api::Timeouts delete_minutes: 90 autogen_async: true error_abort_predicates: ['transport_tpg.Is429QuotaError'] +skip_sweeper: true examples: - !ruby/object:Provider::Terraform::Examples name: 'looker_instance_basic' @@ -69,9 +70,9 @@ examples: client_id: 'my-client-id' client_secret: 'my-client-secret' test_vars_overrides: - address_name: 'acctest.BootstrapSharedTestGlobalAddress(t, "looker-vpc-network-2")' + address_name: 'acctest.BootstrapSharedTestGlobalAddress(t, "looker-vpc-network-3", acctest.AddressWithPrefixLength(8))' kms_key_name: 'acctest.BootstrapKMSKeyInLocation(t, "us-central1").CryptoKey.Name' - network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "looker-vpc-network-2")' + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "looker-vpc-network-3", acctest.ServiceNetworkWithPrefixLength(8))' skip_docs: true - !ruby/object:Provider::Terraform::Examples name: 'looker_instance_custom_domain' diff --git a/mmv1/templates/terraform/examples/looker_instance_enterprise_full_test.tf.erb b/mmv1/templates/terraform/examples/looker_instance_enterprise_full_test.tf.erb index 20fade6649cc..dd697238cbce 100644 --- 
a/mmv1/templates/terraform/examples/looker_instance_enterprise_full_test.tf.erb +++ b/mmv1/templates/terraform/examples/looker_instance_enterprise_full_test.tf.erb @@ -43,6 +43,8 @@ resource "google_looker_instance" "<%= ctx[:primary_resource_id] %>" { client_id = "<%= ctx[:vars]["client_id"] %>" client_secret = "<%= ctx[:vars]["client_secret"] %>" } + + depends_on = [google_kms_crypto_key_iam_member.crypto_key] } data "google_compute_global_address" "looker_range" { diff --git a/mmv1/third_party/terraform/services/looker/resource_looker_instance_sweeper.go b/mmv1/third_party/terraform/services/looker/resource_looker_instance_sweeper.go new file mode 100644 index 000000000000..0d33378d9571 --- /dev/null +++ b/mmv1/third_party/terraform/services/looker/resource_looker_instance_sweeper.go @@ -0,0 +1,122 @@ +package looker + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("LookerInstance", testSweepLookerInstance) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepLookerInstance(region string) error { + resourceName := "LookerInstance" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + 
FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://looker.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["instances"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://looker.googleapis.com/v1/projects/{{project}}/locations/{{region}}/instances/{{name}}?force=true" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} From ff66f15fe1692229549d6431ac094ceee8163d41 Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Mon, 10 Jun 2024 14:46:51 -0700 Subject: [PATCH 111/356] Fix downstream changelog generation (#10890) Co-authored-by: Stephen Lewis (Burrows) --- .ci/magician/cmd/generate_downstream.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.ci/magician/cmd/generate_downstream.go b/.ci/magician/cmd/generate_downstream.go index 0279bcd1335c..0cdc8526c682 100644 --- a/.ci/magician/cmd/generate_downstream.go +++ b/.ci/magician/cmd/generate_downstream.go @@ -132,7 +132,7 @@ func 
execGenerateDownstream(baseBranch, command, repo, version, ref string, gh G return fmt.Errorf("error getting pull request: %w", err) } if repo == "terraform" { - if err := addChangelogEntry(pullRequest, rnr); err != nil { + if err := addChangelogEntry(scratchRepo, pullRequest, rnr); err != nil { return fmt.Errorf("error adding changelog entry: %w", err) } } @@ -319,12 +319,15 @@ func createCommit(scratchRepo *source.Repo, commitMessage string, rnr ExecRunner return commitSha, err } -func addChangelogEntry(pullRequest *github.PullRequest, rnr ExecRunner) error { +func addChangelogEntry(downstreamRepo *source.Repo, pullRequest *github.PullRequest, rnr ExecRunner) error { + if err := rnr.PushDir(downstreamRepo.Path); err != nil { + return err + } rnr.Mkdir(".changelog") if err := rnr.WriteFile(filepath.Join(".changelog", fmt.Sprintf("%d.txt", pullRequest.Number)), strings.Join(changelogExp.FindAllString(pullRequest.Body, -1), "\n")); err != nil { return err } - return nil + return rnr.PopDir() } func mergePullRequest(downstreamRepo, scratchRepo *source.Repo, scratchRepoSha string, pullRequest *github.PullRequest, rnr ExecRunner, gh GithubClient) error { From ea792f3b377bf3e14ab01a271697da1be9e65f67 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Mon, 10 Jun 2024 15:25:42 -0700 Subject: [PATCH 112/356] Added MMv1 resource reference page (#10920) --- docs/content/develop/custom-code.md | 2 +- docs/content/develop/resource-reference.md | 318 ++++++++++++++++++ docs/content/develop/resource.md | 50 +-- .../content/reference/iam-policy-reference.md | 6 - docs/content/reference/resource-reference.md | 6 - 5 files changed, 322 insertions(+), 60 deletions(-) create mode 100644 docs/content/develop/resource-reference.md delete mode 100644 docs/content/reference/iam-policy-reference.md delete mode 100644 docs/content/reference/resource-reference.md diff --git a/docs/content/develop/custom-code.md b/docs/content/develop/custom-code.md index 
4de4ca983a77..a6d608efa0d1 100644 --- a/docs/content/develop/custom-code.md +++ b/docs/content/develop/custom-code.md @@ -1,6 +1,6 @@ --- title: "Add custom resource code" -weight: 32 +weight: 39 --- # Add custom resource code diff --git a/docs/content/develop/resource-reference.md b/docs/content/develop/resource-reference.md new file mode 100644 index 000000000000..c4255242efbc --- /dev/null +++ b/docs/content/develop/resource-reference.md @@ -0,0 +1,318 @@ +--- +title: "MMv1 resource reference" +weight: 32 +aliases: + - /reference/resource-reference + - /reference/iam-policy-reference +--- + +# MMv1 resource reference + +This page documents commonly-used properties for resources. For a full list of +available properties, see [resource.rb ↗](https://github.com/GoogleCloudPlatform/magic-modules/blob/main/mmv1/api/resource.rb). + +## Basic + +### `name` + +API resource name. + +### `description` + +Resource description. Used in documentation. + +Example: + +```yaml +description: | + This is a multi-line description + of a resource. +``` + +### `references` + +Links to reference documentation for a resource. Contains two attributes: + +- `guides`: Link to quickstart in the API's Guides section +- `api`: Link to the REST API reference for the resource + +Example: + +```yaml +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Create and connect to a database': 'https://cloud.google.com/alloydb/docs/quickstart/create-and-connect' + api: 'https://cloud.google.com/alloydb/docs/reference/rest/v1/projects.locations.backups' +``` + +### `min_version: beta` +Marks the field (and any subfields) as beta-only. Ensure a beta version block +is present in provider.yaml. + +### `docs` +Inserts styled markdown into the header of the resource's page in the provider +documentation. Can contain two attributes: + +- `warning`: Warning text which will be displayed at the top of the resource docs on a yellow background. 
+- `note`: Note text which will be displayed at the top of the resource docs on a blue background. + +Example: + +```yaml +docs: !ruby/object:Provider::Terraform::Docs + warning: | + This is a multi-line warning and will be + displayed on a yellow background. + note: | + This is a multi-line note and will be + displayed on a blue background. +``` + + +## API interactions + +### `base_url` + +URL for the resource's [standard List method](https://google.aip.dev/132). +Terraform field names enclosed in double curly braces are replaced with +the field values from the resource at runtime. + +```yaml +base_url: 'projects/{{project}}/locations/{{location}}/resourcenames' +``` + +### `self_link` + +URL for the resource's [standard Get method](https://google.aip.dev/131). +Terraform field names enclosed in double curly braces are replaced with +the field values from the resource at runtime. + +```yaml +self_link: 'projects/{{project}}/locations/{{location}}/resourcenames/{{name}}' +``` + +### `immutable` + +If true, the resource and all its fields are considered immutable - that is, +only creatable, not updatable. Individual fields can override this if they +have a custom update method in the API. + +See [Best practices: ForceNew](https://googlecloudplatform.github.io/magic-modules/best-practices/#forcenew) for more information. + +Default: `false` + +Example: + +```yaml +immutable: true +``` + +### `timeouts` + +Overrides one or more timeouts, in minutes. All timeouts default to 20. + +Example: + +```yaml +timeouts: !ruby/object:Api::Timeouts + insert_minutes: 40 + update_minutes: 40 + delete_minutes: 40 +``` + +### `create_url` + +URL for the resource's [standard Create method](https://google.aip.dev/133), including query parameters. +Terraform field names enclosed in double curly braces are replaced with +the field values from the resource at runtime. 
+ +Example: + +```yaml +create_url: 'projects/{{project}}/locations/{{location}}/resourcenames?resourceId={{name}}' +``` + +### `create_verb` + +Overrides the HTTP verb used to create a new resource. +Allowed values: `:POST`, `:PUT`, `:PATCH`. + +Default: `:POST` + +```yaml +create_verb: :PATCH +``` + +### `update_url` +Overrides the URL for the resource's [standard Update method](https://google.aip.dev/134). +If unset, the [`self_link` URL](#self_link) is used by default. +Terraform field names enclosed in double curly braces are replaced with +the field values from the resource at runtime. + +```yaml +update_url: 'projects/{{project}}/locations/{{location}}/resourcenames/{{name}}' +``` + +### `update_verb` + +The HTTP verb used to update a resource. Allowed values: `:POST`, `:PUT`, `:PATCH`. + +Default: `:PUT`. + +Example: + +```yaml +update_verb: :PATCH +``` + +### `update_mask` + +If true, the resource sets an `updateMask` query parameter listing modified +fields when updating the resource. If false, it doesn't. + +Default: `false` + +Example: + +```yaml +update_mask: true +``` + +### `delete_url` + +Overrides the URL for the resource's [standard Delete method](https://google.aip.dev/135). +If unset, the [`self_link` URL](#self_link) is used by default. +Terraform field names enclosed in double curly braces are replaced with +the field values from the resource at runtime. + +Example: + +```yaml +delete_url: 'projects/{{project}}/locations/{{location}}/resourcenames/{{name}}' +``` + +### `delete_verb` +Overrides the HTTP verb used to delete a resource. +Allowed values: `:POST`, `:PUT`, `:PATCH`, `:DELETE`. + +Default: `:DELETE` + +Example: + +```yaml +delete_verb: :POST +``` + +### `autogen_async` + +If true, code for handling long-running operations is generated along with +the resource. If false, that code isn't generated and must be handwritten. 
+ +Default: `false` + +```yaml +autogen_async: true +``` + +### `async` + +Sets parameters for handling operations returned by the API. Can contain several attributes: + +- `actions`: Overrides which API calls return operations. Default: `['create', 'update', 'delete']` +- `operation.base_url`: This should always be set to `'{{op_id}}'` unless you know that's wrong. +- `result.resource_inside_response`: If true, the provider sets the resource's Terraform ID after + the resource is created, taking into account values that are set by the API at create time. This + is only possible when the completed operation's JSON includes the created resource in the + "response" field. If false, the provider sets the resource's Terraform ID before the resource is + created, based only on the resource configuration. Default: `false`. + +Example: + +```yaml +async: !ruby/object:Api::OpAsync + actions: ['create', 'update', 'delete'] + operation: !ruby/object:Api::OpAsync::Operation + base_url: '{{op_id}}' + result: !ruby/object:Api::OpAsync::Result + resource_inside_response: true +``` + +## IAM resources + +### `iam_policy` + +Allows configuration of generated IAM resources. Supports the following common +attributes – for a full reference, see +[iam_policy.rb ↗](https://github.com/GoogleCloudPlatform/magic-modules/blob/main/mmv1/api/resource/iam_policy.rb): + +- `parent_resource_attribute`: Name of the field on the terraform IAM resources + which references the parent resource. +- `method_name_separator`: Character preceding setIamPolicy in the full URL for + the API method. Usually `:`. +- `fetch_iam_policy_verb`: HTTP method for getIamPolicy. Usually `:POST`. + Allowed values: `:GET`, `:POST`. Default: `:GET` +- `set_iam_policy_verb`: HTTP method for getIamPolicy. Usually `:POST`. + Allowed values: :POST, :PUT. 
Default: :POST +- `import_format`: Must match the parent resource's `import_format` (or `self_link` if + `import_format` is unset), but with the `parent_resource_attribute` + value substituted for the final field. +- `allowed_iam_role`: Valid IAM role that can be set by generated tests. Default: `'roles/viewer'` +- `iam_conditions_request_type`: If IAM conditions are supported, set this attribute to indicate how the + conditions should be passed to the API. Allowed values: `:QUERY_PARAM`, + `:REQUEST_BODY`, `:QUERY_PARAM_NESTED`. Note: `:QUERY_PARAM_NESTED` should + only be used if the query param field contains a `.` +- `min_version: beta`: Marks IAM support as beta-only. + +Example: + +```yaml +iam_policy: !ruby/object:Api::Resource::IamPolicy + parent_resource_attribute: 'cloud_function' + method_name_separator: ':' + fetch_iam_policy_verb: :POST + import_format: [ + 'projects/{{project}}/locations/{{location}}/resourcenames/{{cloud_function}}', + '{{cloud_function}}' + ] + allowed_iam_role: 'roles/viewer' + iam_conditions_request_type: :REQUEST_BODY + min_version: beta +``` + +## Resource behavior + +### `custom_code` + +Injects arbitrary logic into a generated resource. For more information, see [Add custom resource code]({{< ref "/develop/custom-code" >}}). + +### `mutex` + +All resources (of all kinds) that share a mutex value will block rather than +executing concurrent API requests. Terraform field names enclosed in double +curly braces are replaced with the field values from the resource at runtime. + +Example: + +```yaml +mutex: alloydb/instance/{{name}} +``` + +## Fields + +### `parameters` + +Contains a list of [fields]({{< ref "/develop/field-reference" >}}). By convention, +these should be the fields that are part URL parameters such as `location` and `name`. + +### `properties` + +Contains a list of [fields]({{< ref "/develop/field-reference" >}}). By convention, +these should be fields that aren't part of the URL parameters. 
+ +Example: + +```yaml +properties: + - !ruby/object:Api::Type::String + name: 'fieldOne' +``` diff --git a/docs/content/develop/resource.md b/docs/content/develop/resource.md index 97a96e84fad7..7eb9d67b5929 100644 --- a/docs/content/develop/resource.md +++ b/docs/content/develop/resource.md @@ -80,14 +80,6 @@ For more information about types of resources and the generation process overall # provider.yaml. # min_version: beta - # Inserts styled markdown into the header of the resource's page in the - # provider documentation. - # docs: !ruby/object:Provider::Terraform::Docs - # warning: | - # MULTILINE_WARNING_MARKDOWN - # note: | - # MULTILINE_NOTE_MARKDOWN - # URL for the resource's standard List method. https://google.aip.dev/132 # Terraform field names enclosed in double curly braces are replaced with # the field values from the resource at runtime. @@ -102,20 +94,11 @@ For more information about types of resources and the generation process overall # have a custom update method in the API. # immutable: true - # Overrides one or more timeouts, in minutes. All timeouts default to 20. - # timeouts: !ruby/object:Api::Timeouts - # insert_minutes: 20 - # update_minutes: 20 - # delete_minutes: 20 - # URL for the resource's standard Create method, including query parameters. # https://google.aip.dev/133 # Terraform field names enclosed in double curly braces are replaced with # the field values from the resource at runtime. create_url: 'projects/{{project}}/locations/{{location}}/resourcenames?resourceId={{name}}' - # Overrides the HTTP verb used to create a new resource. - # Allowed values: :POST, :PUT, :PATCH. Default: :POST - # create_verb: :POST # Overrides the URL for the resource's standard Update method. (If unset, the # self_link URL is used by default.) https://google.aip.dev/134 @@ -128,15 +111,6 @@ For more information about types of resources and the generation process overall # fields when updating the resource. If false, it does not. 
update_mask: true - # Overrides the URL for the resource's standard Delete method. (If unset, the - # self_link URL is used by default.) https://google.aip.dev/135 - # Terraform field names enclosed in double curly braces are replaced with - # the field values from the resource at runtime. - # delete_url: 'projects/{{project}}/locations/{{location}}/resourcenames/{{name}}' - # Overrides the HTTP verb used to delete a resource. - # Allowed values: :POST, :PUT, :PATCH, :DELETE. Default: :DELETE - # delete_verb: :DELETE - # If true, code for handling long-running operations is generated along with # the resource. If false, that code is not generated. autogen_async: true @@ -148,20 +122,6 @@ For more information about types of resources and the generation process overall operation: !ruby/object:Api::OpAsync::Operation base_url: '{{op_id}}' - # If true, the provider sets the resource's Terraform ID after the resource is created, - # taking into account values that are set by the API at create time. This is only possible - # when the completed operation's JSON includes the created resource in the "response" field. - # If false (or unset), the provider sets the resource's Terraform ID before the resource is - # created, based only on the resource configuration. - # result: !ruby/object:Api::OpAsync::Result - # resource_inside_response: true - - # All resources (of all kinds) that share a mutex value block rather than - # executing concurrent API requests. - # Terraform field names enclosed in double curly braces are replaced with - # the field values from the resource at runtime. - # mutex: RESOURCE_NAME/{{name}} - parameters: - !ruby/object:Api::Type::String name: 'location' @@ -185,7 +145,7 @@ For more information about types of resources and the generation process overall 3. Modify the template as needed to match the API resource's documented behavior. 4. 
Delete all remaining comments in the resource configuration (including attribute descriptions) that were copied from the above template. -> **Note:** The template includes the most commonly-used fields. For a comprehensive reference, see [ResourceName.yaml reference ↗]({{}}). +> **Note:** The template includes the most commonly-used fields. For a comprehensive reference, see [MMv1 resource reference ↗]({{}}). {{< /tab >}} {{< tab "Handwritten" >}} > **Warning:** Handwritten resources are more difficult to develop and maintain. New handwritten resources will only be accepted if implementing the resource in MMv1 would require entirely overriding two or more CRUD methods. @@ -415,10 +375,8 @@ iam_policy: !ruby/object:Api::Resource::IamPolicy # Usually `:` method_name_separator: ':' # HTTP method for getIamPolicy. Usually :POST. - # Allowed values: :GET, :POST. Default: :GET fetch_iam_policy_verb: :POST - # Overrides the HTTP method for setIamPolicy. - # Allowed values: :POST, :PUT. Default: :POST + # Overrides the HTTP method for setIamPolicy. Default: :POST # set_iam_policy_verb: :POST # Must match the parent resource's `import_format` (or `self_link` if @@ -427,8 +385,6 @@ iam_policy: !ruby/object:Api::Resource::IamPolicy import_format: [ 'projects/{{project}}/locations/{{location}}/resourcenames/{{resource_name}}' ] - # Valid IAM role that can be set by generated tests. Default: 'roles/viewer' - # allowed_iam_role: 'roles/viewer' # If IAM conditions are supported, set this attribute to indicate how the # conditions should be passed to the API. Allowed values: :QUERY_PARAM, @@ -440,7 +396,7 @@ iam_policy: !ruby/object:Api::Resource::IamPolicy # min_version: beta ``` -2. Modify the template as needed to match the API resource's documented behavior. These are the most commonly-used fields. For a comprehensive reference, see [IAM policy YAML reference ↗]({{}}). +2. Modify the template as needed to match the API resource's documented behavior. 
These are the most commonly-used fields. For a comprehensive reference, see [MMv1 resource reference: `iam_policy` ↗]({{}}). 3. Delete all remaining comments in the IAM configuration (including attribute descriptions) that were copied from the above template. {{< /tab >}} {{< tab "Handwritten" >}} diff --git a/docs/content/reference/iam-policy-reference.md b/docs/content/reference/iam-policy-reference.md deleted file mode 100644 index 0b16f5a993f9..000000000000 --- a/docs/content/reference/iam-policy-reference.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "IAM policy YAML reference ↗" -weight: 30 -bookHref: "https://github.com/GoogleCloudPlatform/magic-modules/blob/main/mmv1/api/resource/iam_policy.rb" ---- -FORCE MENU RENDER \ No newline at end of file diff --git a/docs/content/reference/resource-reference.md b/docs/content/reference/resource-reference.md deleted file mode 100644 index e900ac89204d..000000000000 --- a/docs/content/reference/resource-reference.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: "Resource YAML reference ↗" -weight: 20 -bookHref: "https://github.com/GoogleCloudPlatform/magic-modules/blob/main/mmv1/api/resource.rb" ---- -FORCE MENU RENDER \ No newline at end of file From 2a90bc404313b9f09b12daed3a4fa83019ad1d4b Mon Sep 17 00:00:00 2001 From: "Bob \"Wombat\" Hogg" Date: Tue, 11 Jun 2024 11:55:26 -0400 Subject: [PATCH 113/356] Sweep up Firestore (default) databases to prevent errant ALREADY_EXISTS errors (#10930) --- .../firestore/resource_firestore_database_sweeper.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/mmv1/third_party/terraform/services/firestore/resource_firestore_database_sweeper.go b/mmv1/third_party/terraform/services/firestore/resource_firestore_database_sweeper.go index 6f097b680a81..f1b4fdd8aeea 100644 --- a/mmv1/third_party/terraform/services/firestore/resource_firestore_database_sweeper.go +++ b/mmv1/third_party/terraform/services/firestore/resource_firestore_database_sweeper.go @@ -17,7 
+17,9 @@ func init() { } // At the time of writing, the CI only passes us-central1 as the region +// But all Firestore examples use nam5, so we will force that instead func testSweepFirestoreDatabase(region string) error { + actualRegion := "nam5" resourceName := "FirestoreDatabase" log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) @@ -36,12 +38,12 @@ func testSweepFirestoreDatabase(region string) error { t := &testing.T{} billingId := envvar.GetTestBillingAccountFromEnv(t) - // Setup variables to replace in list template + // Set up variables to replace in list template d := &tpgresource.ResourceDataMock{ FieldsInSchema: map[string]interface{}{ "project": config.Project, - "region": region, - "location": region, + "region": actualRegion, + "location": actualRegion, "zone": "-", "billing_account": billingId, }, @@ -86,7 +88,7 @@ func testSweepFirestoreDatabase(region string) error { name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) // Skip resources that shouldn't be sweeped - if !sweeper.IsSweepableTestResource(name) { + if !sweeper.IsSweepableTestResource(name) && name != "(default)" { nonPrefixCount++ continue } From 524a2a827a32a2365f2dc708d8f3aef3fa7abe54 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Tue, 11 Jun 2024 09:31:58 -0700 Subject: [PATCH 114/356] Added a note that custom_code.constants funcs should be unit tested (#10943) --- docs/content/develop/custom-code.md | 5 ++++- docs/content/develop/test/test.md | 2 +- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/content/develop/custom-code.md b/docs/content/develop/custom-code.md index a6d608efa0d1..b9f96b02ad47 100644 --- a/docs/content/develop/custom-code.md +++ b/docs/content/develop/custom-code.md @@ -32,9 +32,12 @@ Use `custom_code.constants` to inject top-level code in a resource file. 
This is - Constants - Regexes compiled at build time -- Functions, such as diff suppress functions +- Functions, such as [diff suppress functions]({{}}), + [validation functions]({{}}), + CustomizeDiff functions, and so on. - Methods +Any custom functions added should have thorough [unit tests]({{< ref "/develop/test/test#add-unit-tests" >}}). ## Modify the API request or response diff --git a/docs/content/develop/test/test.md b/docs/content/develop/test/test.md index bbec189143b6..fe638ed52903 100644 --- a/docs/content/develop/test/test.md +++ b/docs/content/develop/test/test.md @@ -225,7 +225,7 @@ An update test is a test that creates the target resource and then makes updates ## Add unit tests A unit test verifies functionality that is not related to interactions with the API, such as -[diff suppress functions]({{}})), +[diff suppress functions]({{}}), [validation functions]({{}}), CustomizeDiff functions, and so on. From d2afb5fb6815f189adfd5e167c85e2146ccfed48 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Tue, 11 Jun 2024 10:02:59 -0700 Subject: [PATCH 115/356] Skip tpuv2 vm tests in VCR (#10911) --- mmv1/products/tpuv2/Vm.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mmv1/products/tpuv2/Vm.yaml b/mmv1/products/tpuv2/Vm.yaml index a04ab4a68394..2556129d4525 100644 --- a/mmv1/products/tpuv2/Vm.yaml +++ b/mmv1/products/tpuv2/Vm.yaml @@ -55,6 +55,7 @@ examples: primary_resource_id: 'tpu' vars: vm_name: 'test-tpu' + skip_vcr: true - !ruby/object:Provider::Terraform::Examples name: 'tpu_v2_vm_full' min_version: 'beta' @@ -66,6 +67,7 @@ examples: subnet_name: 'tpu-subnet' sa_id: 'tpu-sa' disk_name: 'tpu-disk' + skip_vcr: true parameters: - !ruby/object:Api::Type::String name: 'zone' From 4982932857a2423f3c8cbd684d8dcb31110404d9 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Tue, 11 Jun 2024 13:15:59 -0500 Subject: [PATCH 116/356] go rewrite - copy script and adjustments for compute (#10929) --- mmv1/api/compiler.go | 2 +- mmv1/api/resource.rb | 6 
+ mmv1/api/type.rb | 4 +- mmv1/compiler.rb | 7 +- mmv1/description-copy.go | 147 +++++++++++++++ mmv1/google/yaml_validator.go | 4 +- mmv1/main.go | 10 ++ mmv1/products/compute/ForwardingRule.yaml | 12 +- mmv1/products/compute/HaVpnGateway.yaml | 167 ++++++++++++++++++ mmv1/products/compute/InstanceGroup.yaml | 20 --- mmv1/products/compute/Interconnect.yaml | 2 +- ...r.yaml => RegionInstanceGroupManager.yaml} | 0 mmv1/products/compute/TargetVpnGateway.yaml | 106 ----------- mmv1/products/compute/VpnGateway.yaml | 117 +++--------- mmv1/provider/terraform.rb | 32 ++-- mmv1/provider/terraform_kcc.rb | 6 +- mmv1/provider/terraform_oics.rb | 7 +- mmv1/provider/terraform_tgc.rb | 7 +- mmv1/provider/terraform_tgc_cai2hcl.rb | 4 +- .../terraform/product_yaml_conversion.erb | 1 + mmv1/templates/terraform/yaml_conversion.erb | 47 ++--- .../terraform/yaml_conversion_field.erb | 82 +++++++-- 22 files changed, 498 insertions(+), 292 deletions(-) create mode 100644 mmv1/description-copy.go create mode 100644 mmv1/products/compute/HaVpnGateway.yaml rename mmv1/products/compute/{RegionGroupInstanceManager.yaml => RegionInstanceGroupManager.yaml} (100%) delete mode 100644 mmv1/products/compute/TargetVpnGateway.yaml diff --git a/mmv1/api/compiler.go b/mmv1/api/compiler.go index 00a2bd136d14..b62aab20f7e3 100644 --- a/mmv1/api/compiler.go +++ b/mmv1/api/compiler.go @@ -27,5 +27,5 @@ func Compile(yamlPath string, obj interface{}) { } yamlValidator := google.YamlValidator{} - yamlValidator.Parse(objYaml, obj) + yamlValidator.Parse(objYaml, obj, yamlPath) } diff --git a/mmv1/api/resource.rb b/mmv1/api/resource.rb index b4c6eea2515d..de04aafe0e83 100644 --- a/mmv1/api/resource.rb +++ b/mmv1/api/resource.rb @@ -393,6 +393,12 @@ def all_nested_properties(props) nested end + def convert_go_file(file) + dir, base = File.split(file) + base.slice! '.erb' + "#{dir}/go/#{base}.tmpl" + end + # All settable properties in the resource. 
# Fingerprints aren't *really" settable properties, but they behave like one. # At Create, they have no value but they can just be read in anyways, and after a Read diff --git a/mmv1/api/type.rb b/mmv1/api/type.rb index dd66b3d8a800..9e78817fcabc 100644 --- a/mmv1/api/type.rb +++ b/mmv1/api/type.rb @@ -583,7 +583,9 @@ def nested_properties def item_type_class return @item_type \ - if @item_type.instance_of?(Class) + if @item_type.instance_of?(Class) \ + || @item_type.is_a?(Api::Type::ResourceRef) \ + || @item_type.is_a?(Api::Type::Enum) Object.const_get(@item_type) end diff --git a/mmv1/compiler.rb b/mmv1/compiler.rb index 0f61a8eb4ccc..43211deb1a8e 100755 --- a/mmv1/compiler.rb +++ b/mmv1/compiler.rb @@ -36,6 +36,7 @@ products_to_generate = nil all_products = false yaml_dump = false +go_yaml = false generate_code = true generate_docs = true output_path = nil @@ -94,6 +95,9 @@ opt.on('--openapi-generate', 'Generate MMv1 YAML from openapi directory (Experimental)') do openapi_generate = true end + opt.on('--go-yaml', 'Generate MMv1 Go YAML from Ruby YAML') do + go_yaml = true + end end.parse! # rubocop:enable Metrics/BlockLength @@ -270,7 +274,8 @@ product_name, yaml_dump, generate_code, - generate_docs + generate_docs, + go_yaml ) # we need to preserve a single provider instance to use outside of this loop. 
diff --git a/mmv1/description-copy.go b/mmv1/description-copy.go new file mode 100644 index 000000000000..5e16c0758e4d --- /dev/null +++ b/mmv1/description-copy.go @@ -0,0 +1,147 @@ +package main + +import ( + "bufio" + "fmt" + "log" + "os" + "path/filepath" + "regexp" + "strings" +) + +// Used to copy/paste text from Ruby -> Go YAML files +func CopyText(identifier string) { + var allProductFiles []string = make([]string, 0) + files, err := filepath.Glob("products/**/go_product.yaml") + if err != nil { + return + } + for _, filePath := range files { + dir := filepath.Dir(filePath) + allProductFiles = append(allProductFiles, fmt.Sprintf("products/%s", filepath.Base(dir))) + } + + for _, productPath := range allProductFiles { + // Gather go and ruby file pairs + yamlMap := make(map[string][]string) + yamlPaths, err := filepath.Glob(fmt.Sprintf("%s/*", productPath)) + if err != nil { + log.Fatalf("Cannot get yaml files: %v", err) + } + for _, yamlPath := range yamlPaths { + if strings.HasSuffix(yamlPath, "_new") { + continue + } + fileName := filepath.Base(yamlPath) + baseName, found := strings.CutPrefix(fileName, "go_") + if yamlMap[baseName] == nil { + yamlMap[baseName] = make([]string, 2) + } + if found { + yamlMap[baseName][1] = yamlPath + } else { + yamlMap[baseName][0] = yamlPath + } + } + + for _, files := range yamlMap { + rubyPath := files[0] + goPath := files[1] + var text []string + currText := "" + recording := false + + if strings.Contains(rubyPath, "product.yaml") { + // log.Printf("skipping %s", rubyPath) + continue + } + + // Ready Ruby yaml + file, _ := os.Open(rubyPath) + defer file.Close() + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, identifier) && !strings.HasPrefix(strings.TrimSpace(line), "#") { + currText = strings.SplitAfter(line, identifier)[1] + recording = true + } else if recording { + if terminateText(line) { + text = append(text, currText) + currText = "" + recording = 
false + } else { + currText = fmt.Sprintf("%s\n%s", currText, line) + } + } + } + if recording { + text = append(text, currText) + } + + // Read Go yaml while writing to a temp file + index := 0 + firstLine := true + newFilePath := fmt.Sprintf("%s_new", goPath) + fo, _ := os.Create(newFilePath) + w := bufio.NewWriter(fo) + file, _ = os.Open(goPath) + defer file.Close() + scanner = bufio.NewScanner(file) + for scanner.Scan() { + line := scanner.Text() + if firstLine { + if line != "NOT CONVERTED - RUN YAML MODE" { + // log.Printf("skipping %s", goPath) + break + } else { + firstLine = false + continue + } + } + if strings.Contains(line, identifier) { + if index >= len(text) { + log.Printf("did not replace %s correctly! Is the file named correctly?", goPath) + w.Flush() + break + } + line = fmt.Sprintf("%s%s", line, text[index]) + index += 1 + } + w.WriteString(fmt.Sprintf("%s\n", line)) + } + + if !firstLine { + if index != len(text) { + log.Printf("potential issue with %s, only completed %d index out of %d replacements", goPath, index, len(text)) + } + if err = w.Flush(); err != nil { + panic(err) + } + + // Overwrite original file with temp + os.Rename(newFilePath, goPath) + } else { + os.Remove(newFilePath) + } + } + + } + +} + +// quick and dirty logic to determine if a description/note is terminated +func terminateText(line string) bool { + terminalStrings := []string{ + "!ruby/", + } + + for _, t := range terminalStrings { + if strings.Contains(line, t) { + return true + } + } + + return regexp.MustCompile(`^\s*[a-z_]+:[\s$]*`).MatchString(line) +} diff --git a/mmv1/google/yaml_validator.go b/mmv1/google/yaml_validator.go index 4986e21e8e94..485db94d1dd0 100644 --- a/mmv1/google/yaml_validator.go +++ b/mmv1/google/yaml_validator.go @@ -22,12 +22,12 @@ import ( // A helper class to validate contents coming from YAML files. 
type YamlValidator struct{} -func (v *YamlValidator) Parse(content []byte, obj interface{}) { +func (v *YamlValidator) Parse(content []byte, obj interface{}, yamlPath string) { // TODO(nelsonjr): Allow specifying which symbols to restrict it further. // But it requires inspecting all configuration files for symbol sources, // such as Enum values. Leaving it as a nice-to-have for the future. if err := yaml.Unmarshal(content, obj); err != nil { - log.Fatalf("Cannot unmarshal data: %v", err) + log.Fatalf("Cannot unmarshal data from file %s: %v", yamlPath, err) } } diff --git a/mmv1/main.go b/mmv1/main.go index 59088cf3bb2d..24795b8ddaf3 100644 --- a/mmv1/main.go +++ b/mmv1/main.go @@ -27,8 +27,18 @@ var version = flag.String("version", "", "optional version name. If specified, t var product = flag.String("product", "", "optional product name. If specified, the resources under the specific product will be generated. Otherwise, resources under all products will be generated.") +// Example usage: --yaml +var yamlMode = flag.Bool("yaml", false, "strictly copy text over from ruby yaml to go yaml") + func main() { flag.Parse() + + if *yamlMode { + CopyText("description:") + CopyText("note:") + return + } + var generateCode = true var generateDocs = true diff --git a/mmv1/products/compute/ForwardingRule.yaml b/mmv1/products/compute/ForwardingRule.yaml index d0258cfdf70a..e2cfde7e7d5d 100644 --- a/mmv1/products/compute/ForwardingRule.yaml +++ b/mmv1/products/compute/ForwardingRule.yaml @@ -220,6 +220,12 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode custom_diff: [ 'forwardingRuleCustomizeDiff', ] +virtual_fields: + - !ruby/object:Api::Type::Boolean + name: recreate_closed_psc + description: + This is used in PSC consumer ForwardingRule to make terraform recreate the ForwardingRule when the status is closed + default_value: false parameters: - !ruby/object:Api::Type::ResourceRef name: 'region' @@ -656,9 +662,3 @@ properties: - :IPV6 immutable: true 
default_from_api: true -virtual_fields: - - !ruby/object:Api::Type::Boolean - name: recreate_closed_psc - description: - This is used in PSC consumer ForwardingRule to make terraform recreate the ForwardingRule when the status is closed - default_value: false diff --git a/mmv1/products/compute/HaVpnGateway.yaml b/mmv1/products/compute/HaVpnGateway.yaml new file mode 100644 index 000000000000..a0435e33e39f --- /dev/null +++ b/mmv1/products/compute/HaVpnGateway.yaml @@ -0,0 +1,167 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'HaVpnGateway' +kind: 'compute#vpnGateway' +base_url: projects/{{project}}/regions/{{region}}/vpnGateways +collection_url_key: 'items' +immutable: true +has_self_link: true +description: | + Represents a VPN gateway running in GCP. This virtual device is managed + by Google, but used only by you. This type of VPN Gateway allows for the creation + of VPN solutions with higher availability than classic Target VPN Gateways. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Choosing a VPN': https://cloud.google.com/vpn/docs/how-to/choosing-a-vpn + 'Cloud VPN Overview': 'https://cloud.google.com/vpn/docs/concepts/overview' + api: https://cloud.google.com/compute/docs/reference/rest/v1/vpnGateways +async: !ruby/object:Api::OpAsync + operation: !ruby/object:Api::OpAsync::Operation + kind: 'compute#operation' + path: 'name' + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'targetLink' + status: !ruby/object:Api::OpAsync::Status + path: 'status' + complete: 'DONE' + allowed: + - 'PENDING' + - 'RUNNING' + - 'DONE' + error: !ruby/object:Api::OpAsync::Error + path: 'error/errors' + message: 'message' +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'ha_vpn_gateway_basic' + primary_resource_id: 'ha_gateway1' + vars: + ha_vpn_gateway1_name: 'ha-vpn-1' + network1_name: 'network1' + - !ruby/object:Provider::Terraform::Examples + name: 'ha_vpn_gateway_ipv6' + primary_resource_id: 'ha_gateway1' + vars: + ha_vpn_gateway1_name: 'ha-vpn-1' + network1_name: 'network1' + - !ruby/object:Provider::Terraform::Examples + name: 'ha_vpn_gateway_gcp_to_gcp' + primary_resource_id: + 'ha_gateway1' + # Multiple fine-grained resources + skip_vcr: true + skip_test: true + skip_docs: true + vars: + ha_vpn_gateway1_name: 'ha-vpn-1' + network1_name: 'network1' + router1_name: 'ha-vpn-router1' + ha_vpn_gateway2_name: 'ha-vpn-2' + network2_name: 'network2' + router2_name: 'ha-vpn-router2' + - !ruby/object:Provider::Terraform::Examples + name: 'compute_ha_vpn_gateway_encrypted_interconnect' + primary_resource_id: + 'vpn-gateway' + # TODO: https://github.com/hashicorp/terraform-provider-google/issues/11504 + skip_test: true + vars: + ha_vpn_gateway_name: 'test-ha-vpngw' + interconnect_attachment1_name: 'test-interconnect-attachment1' + interconnect_attachment2_name: 
'test-interconnect-attachment2' + address1_name: 'test-address1' + address2_name: 'test-address2' + router_name: 'test-router' + network_name: 'test-network' +parameters: + - !ruby/object:Api::Type::ResourceRef + name: 'region' + resource: 'Region' + imports: 'name' + description: | + The region this gateway should sit in. + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' + custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' +properties: + - !ruby/object:Api::Type::String + name: 'description' + description: 'An optional description of this resource.' + immutable: true + - !ruby/object:Api::Type::String + name: 'name' + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + validation: !ruby/object:Provider::Terraform::Validation + function: 'verify.ValidateGCEName' + - !ruby/object:Api::Type::ResourceRef + name: 'network' + resource: 'Network' + imports: 'selfLink' + description: | + The network this VPN gateway is accepting traffic for. + required: true + immutable: true + custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' + - !ruby/object:Api::Type::Enum + name: 'stackType' + description: | + The stack type for this VPN gateway to identify the IP protocols that are enabled. + If not specified, IPV4_ONLY will be used. 
+ default_value: :IPV4_ONLY + values: + - :IPV4_ONLY + - :IPV4_IPV6 + immutable: true + custom_flatten: 'templates/terraform/custom_flatten/default_if_empty.erb' + - !ruby/object:Api::Type::Array + name: 'vpnInterfaces' + description: | + A list of interfaces on this VPN gateway. + default_from_api: true + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Integer + name: 'id' + description: 'The numeric ID of this VPN gateway interface.' + - !ruby/object:Api::Type::String + name: 'ipAddress' + description: 'The external IP address for this VPN gateway interface.' + output: true + - !ruby/object:Api::Type::ResourceRef + name: 'interconnectAttachment' + resource: 'InterconnectAttachment' + imports: 'selfLink' + description: | + URL of the interconnect attachment resource. When the value + of this field is present, the VPN Gateway will be used for + IPsec-encrypted Cloud Interconnect; all Egress or Ingress + traffic for this VPN Gateway interface will go through the + specified interconnect attachment resource. + + Not currently available publicly. + custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' + immutable: true diff --git a/mmv1/products/compute/InstanceGroup.yaml b/mmv1/products/compute/InstanceGroup.yaml index 933fc5894f6d..e0f5cd7f1126 100644 --- a/mmv1/products/compute/InstanceGroup.yaml +++ b/mmv1/products/compute/InstanceGroup.yaml @@ -50,26 +50,6 @@ parameters: description: 'A reference to the zone where the instance group resides.' required: true custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' - - !ruby/object:Api::Type::Array - name: 'instances' - description: | - The list of instances associated with this InstanceGroup. - All instances must be created before being added to an InstanceGroup. - All instances not in this list will be removed from the InstanceGroup - and will not be deleted. 
- Only the full identifier of the instance will be returned. - - !> **WARNING** If a user will be recreating instances under the same name - (eg. via `terraform taint`), please consider adding instances to an instance - group via the `instance_group_membership` resource, along side the - `replace_triggered_by` lifecycle method with an instance's ID. - exclude: true - item_type: !ruby/object:Api::Type::ResourceRef - name: 'instance' - description: 'An instance being added to the InstanceGroup' - resource: 'Instance' - imports: 'selfLink' - custom_expand: 'templates/terraform/custom_expand/array_resourceref_with_validation.go.erb' properties: - !ruby/object:Api::Type::Time name: 'creationTimestamp' diff --git a/mmv1/products/compute/Interconnect.yaml b/mmv1/products/compute/Interconnect.yaml index 4ccbbf8062d3..1a242cbe5e4b 100644 --- a/mmv1/products/compute/Interconnect.yaml +++ b/mmv1/products/compute/Interconnect.yaml @@ -146,7 +146,7 @@ properties: - !ruby/object:Api::Type::Enum name: 'operationalStatus' description: | - The current status of this Interconnect's functionality, which can take one of the following values: + The current status of this Interconnect's functionality, which can take one of the following: - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may be provisioned on this Interconnect. - OS_UNPROVISIONED: An Interconnect that has not completed turnup. 
No attachments may be diff --git a/mmv1/products/compute/RegionGroupInstanceManager.yaml b/mmv1/products/compute/RegionInstanceGroupManager.yaml similarity index 100% rename from mmv1/products/compute/RegionGroupInstanceManager.yaml rename to mmv1/products/compute/RegionInstanceGroupManager.yaml diff --git a/mmv1/products/compute/TargetVpnGateway.yaml b/mmv1/products/compute/TargetVpnGateway.yaml deleted file mode 100644 index 7087bb6be486..000000000000 --- a/mmv1/products/compute/TargetVpnGateway.yaml +++ /dev/null @@ -1,106 +0,0 @@ -# Copyright 2023 Google Inc. -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - ---- !ruby/object:Api::Resource -name: 'VpnGateway' -kind: 'compute#targetVpnGateway' -base_url: projects/{{project}}/regions/{{region}}/targetVpnGateways -collection_url_key: 'items' -immutable: true -has_self_link: true -description: | - Represents a VPN gateway running in GCP. This virtual device is managed - by Google, but used only by you. 
-references: !ruby/object:Api::Resource::ReferenceLinks - api: https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways -async: !ruby/object:Api::OpAsync - operation: !ruby/object:Api::OpAsync::Operation - kind: 'compute#operation' - path: 'name' - base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' - wait_ms: 1000 - result: !ruby/object:Api::OpAsync::Result - path: 'targetLink' - status: !ruby/object:Api::OpAsync::Status - path: 'status' - complete: 'DONE' - allowed: - - 'PENDING' - - 'RUNNING' - - 'DONE' - error: !ruby/object:Api::OpAsync::Error - path: 'error/errors' - message: 'message' -docs: !ruby/object:Provider::Terraform::Docs - warning: | - Classic VPN is deprecating certain functionality on October 31, 2021. For more information, - see the [Classic VPN partial deprecation page](https://cloud.google.com/network-connectivity/docs/vpn/deprecations/classic-vpn-deprecation). -examples: - - !ruby/object:Provider::Terraform::Examples - name: 'target_vpn_gateway_basic' - primary_resource_id: 'target_gateway' - vars: - target_vpn_gateway_name: 'vpn-1' - network_name: 'network-1' - address_name: 'vpn-static-ip' - esp_forwarding_rule_name: 'fr-esp' - udp500_forwarding_rule_name: 'fr-udp500' - udp4500_forwarding_rule_name: 'fr-udp4500' - vpn_tunnel_name: 'tunnel1' - route_name: 'route1' -parameters: - - !ruby/object:Api::Type::ResourceRef - name: 'region' - resource: 'Region' - imports: 'name' - description: | - The region this gateway should sit in. - required: false - default_from_api: true - custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' - custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' -properties: - - !ruby/object:Api::Type::Time - name: 'creationTimestamp' - description: 'Creation timestamp in RFC3339 text format.' - output: true - - !ruby/object:Api::Type::String - name: 'description' - description: 'An optional description of this resource.' 
- immutable: true - - !ruby/object:Api::Type::String - name: 'name' - description: | - Name of the resource. Provided by the client when the resource is - created. The name must be 1-63 characters long, and comply with - RFC1035. Specifically, the name must be 1-63 characters long and - match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means - the first character must be a lowercase letter, and all following - characters must be a dash, lowercase letter, or digit, except the last - character, which cannot be a dash. - required: true - immutable: true - - !ruby/object:Api::Type::Integer - name: 'gateway_id' - api_name: 'id' - description: 'The unique identifier for the resource.' - output: true - - !ruby/object:Api::Type::ResourceRef - name: 'network' - resource: 'Network' - imports: 'selfLink' - description: | - The network this VPN gateway is accepting traffic for. - required: true - custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' - immutable: true diff --git a/mmv1/products/compute/VpnGateway.yaml b/mmv1/products/compute/VpnGateway.yaml index a0435e33e39f..7087bb6be486 100644 --- a/mmv1/products/compute/VpnGateway.yaml +++ b/mmv1/products/compute/VpnGateway.yaml @@ -12,21 +12,17 @@ # limitations under the License. --- !ruby/object:Api::Resource -name: 'HaVpnGateway' -kind: 'compute#vpnGateway' -base_url: projects/{{project}}/regions/{{region}}/vpnGateways +name: 'VpnGateway' +kind: 'compute#targetVpnGateway' +base_url: projects/{{project}}/regions/{{region}}/targetVpnGateways collection_url_key: 'items' immutable: true has_self_link: true description: | Represents a VPN gateway running in GCP. This virtual device is managed - by Google, but used only by you. This type of VPN Gateway allows for the creation - of VPN solutions with higher availability than classic Target VPN Gateways. + by Google, but used only by you. 
references: !ruby/object:Api::Resource::ReferenceLinks - guides: - 'Choosing a VPN': https://cloud.google.com/vpn/docs/how-to/choosing-a-vpn - 'Cloud VPN Overview': 'https://cloud.google.com/vpn/docs/concepts/overview' - api: https://cloud.google.com/compute/docs/reference/rest/v1/vpnGateways + api: https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways async: !ruby/object:Api::OpAsync operation: !ruby/object:Api::OpAsync::Operation kind: 'compute#operation' @@ -45,48 +41,23 @@ async: !ruby/object:Api::OpAsync error: !ruby/object:Api::OpAsync::Error path: 'error/errors' message: 'message' +docs: !ruby/object:Provider::Terraform::Docs + warning: | + Classic VPN is deprecating certain functionality on October 31, 2021. For more information, + see the [Classic VPN partial deprecation page](https://cloud.google.com/network-connectivity/docs/vpn/deprecations/classic-vpn-deprecation). examples: - !ruby/object:Provider::Terraform::Examples - name: 'ha_vpn_gateway_basic' - primary_resource_id: 'ha_gateway1' + name: 'target_vpn_gateway_basic' + primary_resource_id: 'target_gateway' vars: - ha_vpn_gateway1_name: 'ha-vpn-1' - network1_name: 'network1' - - !ruby/object:Provider::Terraform::Examples - name: 'ha_vpn_gateway_ipv6' - primary_resource_id: 'ha_gateway1' - vars: - ha_vpn_gateway1_name: 'ha-vpn-1' - network1_name: 'network1' - - !ruby/object:Provider::Terraform::Examples - name: 'ha_vpn_gateway_gcp_to_gcp' - primary_resource_id: - 'ha_gateway1' - # Multiple fine-grained resources - skip_vcr: true - skip_test: true - skip_docs: true - vars: - ha_vpn_gateway1_name: 'ha-vpn-1' - network1_name: 'network1' - router1_name: 'ha-vpn-router1' - ha_vpn_gateway2_name: 'ha-vpn-2' - network2_name: 'network2' - router2_name: 'ha-vpn-router2' - - !ruby/object:Provider::Terraform::Examples - name: 'compute_ha_vpn_gateway_encrypted_interconnect' - primary_resource_id: - 'vpn-gateway' - # TODO: https://github.com/hashicorp/terraform-provider-google/issues/11504 - 
skip_test: true - vars: - ha_vpn_gateway_name: 'test-ha-vpngw' - interconnect_attachment1_name: 'test-interconnect-attachment1' - interconnect_attachment2_name: 'test-interconnect-attachment2' - address1_name: 'test-address1' - address2_name: 'test-address2' - router_name: 'test-router' - network_name: 'test-network' + target_vpn_gateway_name: 'vpn-1' + network_name: 'network-1' + address_name: 'vpn-static-ip' + esp_forwarding_rule_name: 'fr-esp' + udp500_forwarding_rule_name: 'fr-udp500' + udp4500_forwarding_rule_name: 'fr-udp4500' + vpn_tunnel_name: 'tunnel1' + route_name: 'route1' parameters: - !ruby/object:Api::Type::ResourceRef name: 'region' @@ -99,6 +70,10 @@ parameters: custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' properties: + - !ruby/object:Api::Type::Time + name: 'creationTimestamp' + description: 'Creation timestamp in RFC3339 text format.' + output: true - !ruby/object:Api::Type::String name: 'description' description: 'An optional description of this resource.' @@ -115,8 +90,11 @@ properties: character, which cannot be a dash. required: true immutable: true - validation: !ruby/object:Provider::Terraform::Validation - function: 'verify.ValidateGCEName' + - !ruby/object:Api::Type::Integer + name: 'gateway_id' + api_name: 'id' + description: 'The unique identifier for the resource.' + output: true - !ruby/object:Api::Type::ResourceRef name: 'network' resource: 'Network' @@ -124,44 +102,5 @@ properties: description: | The network this VPN gateway is accepting traffic for. required: true - immutable: true custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' - - !ruby/object:Api::Type::Enum - name: 'stackType' - description: | - The stack type for this VPN gateway to identify the IP protocols that are enabled. - If not specified, IPV4_ONLY will be used. 
- default_value: :IPV4_ONLY - values: - - :IPV4_ONLY - - :IPV4_IPV6 immutable: true - custom_flatten: 'templates/terraform/custom_flatten/default_if_empty.erb' - - !ruby/object:Api::Type::Array - name: 'vpnInterfaces' - description: | - A list of interfaces on this VPN gateway. - default_from_api: true - item_type: !ruby/object:Api::Type::NestedObject - properties: - - !ruby/object:Api::Type::Integer - name: 'id' - description: 'The numeric ID of this VPN gateway interface.' - - !ruby/object:Api::Type::String - name: 'ipAddress' - description: 'The external IP address for this VPN gateway interface.' - output: true - - !ruby/object:Api::Type::ResourceRef - name: 'interconnectAttachment' - resource: 'InterconnectAttachment' - imports: 'selfLink' - description: | - URL of the interconnect attachment resource. When the value - of this field is present, the VPN Gateway will be used for - IPsec-encrypted Cloud Interconnect; all Egress or Ingress - traffic for this VPN Gateway interface will go through the - specified interconnect attachment resource. - - Not currently available publicly. - custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' - immutable: true diff --git a/mmv1/provider/terraform.rb b/mmv1/provider/terraform.rb index a7bc76a516af..89e7f21b099e 100644 --- a/mmv1/provider/terraform.rb +++ b/mmv1/provider/terraform.rb @@ -89,8 +89,9 @@ def check_goformat end # Main entry point for generation. 
- def generate(output_folder, types, product_path, dump_yaml, generate_code, generate_docs) - generate_objects(output_folder, types, generate_code, generate_docs) + def generate(output_folder, types, product_path, dump_yaml, generate_code, generate_docs, \ + go_yaml) + generate_objects(output_folder, types, generate_code, generate_docs, product_path, go_yaml) FileUtils.mkpath output_folder pwd = Dir.pwd @@ -358,7 +359,8 @@ def get_mmv1_services_in_version(products, version) services end - def generate_objects(output_folder, types, generate_code, generate_docs) + def generate_objects(output_folder, types, generate_code, generate_docs, product_path, \ + go_yaml) (@api.objects || []).each do |object| if !types.empty? && !types.include?(object.name) Google::LOGGER.info "Excluding #{object.name} per user request" @@ -378,8 +380,7 @@ def generate_objects(output_folder, types, generate_code, generate_docs) generate_object object, output_folder, @target_version_name, generate_code, generate_docs end - # Uncomment for go YAML - # generate_object_modified object, output_folder, @target_version_name + generate_object_modified object, product_path, @target_version_name if go_yaml end end @@ -411,28 +412,23 @@ def generate_object(object, output_folder, version_name, generate_code, generate def generate_object_modified(object, output_folder, version_name) pwd = Dir.pwd data = build_object_data(pwd, object, output_folder, version_name) - FileUtils.mkpath output_folder Dir.chdir output_folder - Google::LOGGER.debug "Generating #{object.name} rewrite yaml" + Google::LOGGER.info "Generating #{object.name} rewrite yaml" generate_newyaml(pwd, data.clone) Dir.chdir pwd end def generate_newyaml(pwd, data) - # @api.api_name is the service folder name - product_name = @api.api_name - target_folder = File.join(folder_name(data.version), 'services', product_name) - FileUtils.mkpath target_folder data.generate(pwd, '/templates/terraform/yaml_conversion.erb', - 
"#{target_folder}/go_#{data.object.name}.yaml", - self) - return if File.exist?("#{target_folder}/go_product.yaml") - - data.generate(pwd, - '/templates/terraform/product_yaml_conversion.erb', - "#{target_folder}/go_product.yaml", + "go_#{data.object.name}.yaml", self) + unless File.exist?('go_product.yaml') && File.mtime('go_product.yaml') > data.env[:start_time] + data.generate(pwd, + '/templates/terraform/product_yaml_conversion.erb', + 'go_product.yaml', + self) + end end def build_env diff --git a/mmv1/provider/terraform_kcc.rb b/mmv1/provider/terraform_kcc.rb index 682f9c5bc640..54a7c2552231 100644 --- a/mmv1/provider/terraform_kcc.rb +++ b/mmv1/provider/terraform_kcc.rb @@ -48,9 +48,11 @@ def generating_hashicorp_repo? false end - def generate(output_folder, types, _product_path, _dump_yaml, generate_code, generate_docs) + def generate(output_folder, types, product_path, _dump_yaml, generate_code, generate_docs, \ + go_yaml) @base_url = @version.base_url - generate_objects(output_folder, types, generate_code, generate_docs) + generate_objects(output_folder, types, generate_code, generate_docs, product_path, \ + go_yaml) compile_product_files(output_folder) end diff --git a/mmv1/provider/terraform_oics.rb b/mmv1/provider/terraform_oics.rb index a4e1a6c16d7d..d5d44a134176 100644 --- a/mmv1/provider/terraform_oics.rb +++ b/mmv1/provider/terraform_oics.rb @@ -24,12 +24,15 @@ def generating_hashicorp_repo? # We don't want *any* static generation, so we override generate to only # generate objects. 
- def generate(output_folder, types, _product_path, _dump_yaml, generate_code, generate_docs) + def generate(output_folder, types, product_path, _dump_yaml, generate_code, generate_docs, \ + go_yaml) generate_objects( output_folder, types, generate_code, - generate_docs + generate_docs, + product_path, + go_yaml ) end diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index 3674fe99cd77..2ea63de82a4a 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -23,7 +23,8 @@ def generating_hashicorp_repo? false end - def generate(output_folder, types, _product_path, _dump_yaml, generate_code, generate_docs) + def generate(output_folder, types, product_path, _dump_yaml, generate_code, generate_docs, \ + go_yaml) # Temporary shim to generate the missing resources directory. Can be removed # once the folder exists downstream. resources_folder = File.join(output_folder, 'converters/google/resources') @@ -34,7 +35,9 @@ def generate(output_folder, types, _product_path, _dump_yaml, generate_code, gen output_folder, types, generate_code, - generate_docs + generate_docs, + product_path, + go_yaml ) end diff --git a/mmv1/provider/terraform_tgc_cai2hcl.rb b/mmv1/provider/terraform_tgc_cai2hcl.rb index fb5a0c8d96e0..4daed0648ec6 100644 --- a/mmv1/provider/terraform_tgc_cai2hcl.rb +++ b/mmv1/provider/terraform_tgc_cai2hcl.rb @@ -22,7 +22,9 @@ def generating_hashicorp_repo? 
false end - def generate(output_folder, types, _product_path, _dump_yaml, generate_code, generate_docs) end + # rubocop:disable Layout/LineLength + def generate(output_folder, types, _product_path, _dump_yaml, generate_code, generate_docs, _go_yaml) end + # rubocop:enable Layout/LineLength def generate_resource(pwd, data, _generate_code, _generate_docs) end diff --git a/mmv1/templates/terraform/product_yaml_conversion.erb b/mmv1/templates/terraform/product_yaml_conversion.erb index cade809c4334..9888415c054f 100644 --- a/mmv1/templates/terraform/product_yaml_conversion.erb +++ b/mmv1/templates/terraform/product_yaml_conversion.erb @@ -55,6 +55,7 @@ scopes: -%> <% unless object.__product.async.nil? -%> async: + type: "OpAsync" <% if object.__product.async.is_a? Provider::Terraform::PollAsync -%> <% unless object.__product.async.check_response_func_existence.nil? -%> check_response_func_existence: '<%= object.__product.async.check_response_func_existence %>' diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 63497552b8b7..2dcd831c565f 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -1,3 +1,4 @@ +NOT CONVERTED - RUN YAML MODE # Copyright 2024 Google Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,8 +24,7 @@ kind: '<%= object.kind %>' <% unless object.legacy_name.nil? -%> legacy_name: '<%= object.legacy_name %>' <% end -%> -description: | - <%= object.description.gsub(/\n/, "\n ") %> +description: <% unless object.min_version.nil? -%> <% unless object.min_version.name == 'ga' -%> min_version: '<%= object.min_version.name %>' @@ -63,7 +63,7 @@ docs: warning: '<%= object.docs.warning %>' <% end -%> <% unless object.docs.note.nil? -%> - note: '<%= object.docs.note %>' + note: <% end -%> <% unless object.docs.required_properties.nil? 
-%> required_properties: '<%= object.docs.required_properties %>' @@ -184,6 +184,7 @@ async: <% unless object.async.target_occurrences.nil? -%> target_occurrences: <%= object.async.target_occurrences %> <% end -%> + actions: ['<%= object.async.actions.join('\',\'') %>'] <% end -%> <% if object.async.is_a? Api::OpAsync -%> type: 'OpAsync' @@ -251,8 +252,8 @@ collection_url_key: '<%= object.collection_url_key %>' nested_query: <% unless object.nested_query.keys.nil? -%> keys: -<% object.nested_query.keys.each do |key| %> - - <%= key -%> +<% object.nested_query.keys.each do |key| -%> + - '<%= key -%>' <% end -%> <% end -%> <% unless object.nested_query.is_list_of_ids.nil? -%> @@ -340,52 +341,52 @@ iam_policy: <% unless object.custom_code.nil? -%> custom_code: <% unless object.custom_code.extra_schema_entry.nil? -%> - extra_schema_entry: '<%= object.custom_code.extra_schema_entry %>' + extra_schema_entry: '<%= object.convert_go_file( object.custom_code.extra_schema_entry) %>' <% end -%> <% unless object.custom_code.constants.nil? -%> - constants: '<%= object.custom_code.constants %>' + constants: '<%= object.convert_go_file( object.custom_code.constants) %>' <% end -%> <% unless object.custom_code.encoder.nil? -%> - encoder: '<%= object.custom_code.encoder %>' + encoder: '<%= object.convert_go_file( object.custom_code.encoder )%>' <% end -%> <% unless object.custom_code.update_encoder.nil? -%> - update_encoder: '<%= object.custom_code.update_encoder %>' + update_encoder: '<%= object.convert_go_file( object.custom_code.update_encoder )%>' <% end -%> <% unless object.custom_code.decoder.nil? -%> - decoder: '<%= object.custom_code.decoder %>' + decoder: '<%= object.convert_go_file( object.custom_code.decoder )%>' <% end -%> <% unless object.custom_code.pre_create.nil? -%> - pre_create: '<%= object.custom_code.pre_create %>' + pre_create: '<%= object.convert_go_file( object.custom_code.pre_create )%>' <% end -%> <% unless object.custom_code.post_create.nil? 
-%> - post_create: '<%= object.custom_code.post_create %>' + post_create: '<%= object.convert_go_file( object.custom_code.post_create )%>' <% end -%> <% unless object.custom_code.custom_create.nil? -%> - custom_create: '<%= object.custom_code.custom_create %>' + custom_create: '<%= object.convert_go_file( object.custom_code.custom_create )%>' <% end -%> <% unless object.custom_code.pre_read.nil? -%> - pre_read: '<%= object.custom_code.pre_read %>' + pre_read: '<%= object.convert_go_file( object.custom_code.pre_read )%>' <% end -%> <% unless object.custom_code.pre_update.nil? -%> - pre_update: '<%= object.custom_code.pre_update %>' + pre_update: '<%= object.convert_go_file( object.custom_code.pre_update )%>' <% end -%> <% unless object.custom_code.post_update.nil? -%> - post_update: '<%= object.custom_code.post_update %>' + post_update: '<%= object.convert_go_file( object.custom_code.post_update )%>' <% end -%> <% unless object.custom_code.custom_update.nil? -%> - custom_update: '<%= object.custom_code.custom_update %>' + custom_update: '<%= object.convert_go_file( object.custom_code.custom_update )%>' <% end -%> <% unless object.custom_code.pre_delete.nil? -%> - pre_delete: '<%= object.custom_code.pre_delete %>' + pre_delete: '<%= object.convert_go_file( object.custom_code.pre_delete )%>' <% end -%> <% unless object.custom_code.custom_import.nil? -%> - custom_import: '<%= object.custom_code.custom_import %>' + custom_import: '<%= object.convert_go_file( object.custom_code.custom_import )%>' <% end -%> <% unless object.custom_code.post_import.nil? -%> - post_import: '<%= object.custom_code.post_import %>' + post_import: '<%= object.convert_go_file( object.custom_code.post_import )%>' <% end -%> <% unless object.custom_code.test_check_destroy.nil? 
-%> - test_check_destroy: '<%= object.custom_code.test_check_destroy %>' + test_check_destroy: '<%= object.convert_go_file( object.custom_code.test_check_destroy )%>' <% end -%> <% end -%> <% unless object.custom_diff.empty? || (object.custom_diff.size == 1 && object.custom_diff.include?("tpgresource.SetLabelsDiff")) -%> @@ -494,7 +495,7 @@ examples: <% unless example.ignore_read_extra.empty? -%> ignore_read_extra: <% example.ignore_read_extra.each do |irextra| -%> - '<%= irextra %>' + - '<%= irextra %>' <% end -%> <% end -%> <% unless example.external_providers.nil? -%> @@ -521,7 +522,7 @@ examples: virtual_fields: <% object.virtual_fields.each do |vfield| -%> - name: '<%= vfield.name %>' - description: '<%= vfield.description %>' + description: <% unless vfield.type.nil? -%> type: <%= tf_type(vfield.type) %> <% end -%> diff --git a/mmv1/templates/terraform/yaml_conversion_field.erb b/mmv1/templates/terraform/yaml_conversion_field.erb index 4432130ab1a7..3a56f8e8f0aa 100644 --- a/mmv1/templates/terraform/yaml_conversion_field.erb +++ b/mmv1/templates/terraform/yaml_conversion_field.erb @@ -2,15 +2,18 @@ <% unless property.class.to_s == 'Api::Type::KeyValueTerraformLabels' || property.class.to_s == 'Api::Type::KeyValueEffectiveLabels' -%> - name: '<%= property.name -%>' type: <%= property.class.to_s.gsub("Api::Type::", "") %> -<% unless property.description.nil? -%> +<% unless property.description.nil? 
|| property.description == "A nested object resource" -%> <% des = property.description.strip.gsub('"', '\'') -%> <% if property.is_a?(Api::Type::KeyValueLabels) || property.is_a?(Api::Type::KeyValueAnnotations) -%> <% index = des.index("\n\n**Note**: This field is non-authoritative") -%> - description: "<%= des[0, index] -%>" + description: <% else -%> - description: "<%= des -%>" + description: <% end -%> <% end -%> +<% unless property.api_name == property.name -%> + api_name: <%= property.api_name %> +<% end -%> <% unless !property.unordered_list -%> unordered_list: <%= property.unordered_list %> <% end -%> @@ -67,7 +70,7 @@ <% unless property.update_url.nil? -%> update_url: '<%= property.update_url %>' <% end -%> -<% unless property.update_verb == property.__resource&.update_verb -%> +<% unless property.update_verb == property.__resource&.update_verb || property.update_verb.to_s.strip.empty? -%> update_verb: '<%= property.update_verb.to_s %>' <% end -%> <% unless property.update_id.nil? -%> @@ -114,7 +117,7 @@ <% end -%> <% end -%> <% end -%> -<% unless property.key_expander == 'tpgresource.ExpandString' -%> +<% unless property.key_expander == 'tpgresource.ExpandString' || property.update_verb.to_s.strip.empty? -%> key_expander: '<%= property.key_expander %>' <% end -%> <% unless property.key_diff_suppress_func.nil? -%> @@ -130,13 +133,13 @@ set_hash_func: '<%= property.set_hash_func %>' <% end -%> <% unless property.custom_flatten.nil? -%> - custom_flatten: '<%= property.custom_flatten %>' + custom_flatten: '<%= object.convert_go_file( property.custom_flatten )%>' <% end -%> <% unless property.custom_expand.nil? -%> - custom_expand: '<%= property.custom_expand %>' + custom_expand: '<%= object.convert_go_file(property.custom_expand )%>' <% end -%> <% unless property.flatten_object.nil? -%> - flatten_object: '<%= property.flatten_object %>' + flatten_object: <%= property.flatten_object %> <% end -%> <% unless property.validation.nil? 
-%> validation: @@ -158,15 +161,46 @@ <% end -%> <% if property.is_a?(Api::Type::Array) -%> <% if property.item_type.is_a?(Api::Type::NestedObject) -%> - item_type: <%= property.item_type.type.to_s %> + item_type: +<% unless property.item_type.description.nil? || property.item_type.description == "A nested object resource" -%> + description: +<% end -%> + type: <%= property.item_type.type.to_s.gsub("Api::Type::", "") %> <% unless property.item_type.properties.nil? -%> - properties: + properties: <% property.item_type.properties.each do |prop| -%> -<%= lines(indent(build_newyaml_field(prop, object, pwd), 4)) -%> +<%= lines(indent(build_newyaml_field(prop, object, pwd), 6)) -%> <% end -%> <% end -%> +<% elsif property.item_type.is_a?(Api::Type::ResourceRef) -%> + item_type: + name: '<%= property.item_type.name -%>' + type: ResourceRef +<% unless property.item_type.description.nil? || property.item_type.description == "A nested object resource" -%> + description: +<% end -%> +<% unless property.item_type.resource.nil? -%> + resource: '<%= property.item_type.resource -%>' +<% end -%> +<% unless property.item_type.imports.nil? -%> + imports: '<%= property.item_type.imports.to_s -%>' +<% end -%> <% else -%> - item_type: <%= property.item_type.to_s %> + item_type: +<% if property.item_type.is_a?(Api::Type::Enum) -%> + type: Enum +<% unless property.item_type.description.nil? || property.item_type.description == "A nested object resource" -%> + description: +<% end -%> +<% unless property.item_type.values.nil? -%> + enum_values: +<% property.item_type.values.reject{|v| v == '' }.each do |enumval| -%> + - '<%= enumval %>' +<% end -%> +<% end -%> +<% else -%> + type: <%= property.item_type_class.to_s.gsub("Api::Type::", "") %> +<% end -%> <% end -%> <% unless property.min_size.nil? -%> min_size: <%= property.min_size %> @@ -186,7 +220,7 @@ <% if property.is_a?(Api::Type::Enum) -%> <% unless property.values.nil? 
-%> enum_values: -<% property.values.each do |enumval| -%> +<% property.values.reject{|v| v == '' }.each do |enumval| -%> - '<%= enumval %>' <% end -%> <% end -%> @@ -195,14 +229,28 @@ <% end -%> <% end -%> <% if property.is_a?(Api::Type::Map) -%> -<% unless property.value_type.nil? -%> - value_type: '<%= property.value_type.to_s %>' -<% end -%> <% unless property.key_name.nil? -%> key_name: '<%= property.key_name %>' <% end -%> <% unless property.key_description.nil? -%> - key_description: '<%= property.key_description %>' + key_description: +<% end -%> +<% unless property.value_type.nil? -%> +<% if property.value_type.is_a?(Api::Type::NestedObject) -%> + value_type: +<% unless property.value_type.description.nil? || property.value_type.description == "A nested object resource" -%> + description: +<% end -%> + type: <%= property.value_type.type.to_s.gsub("Api::Type::", "") %> +<% unless property.value_type.properties.nil? -%> + properties: +<% property.value_type.properties.each do |prop| -%> +<%= lines(indent(build_newyaml_field(prop, object, pwd), 6)) -%> +<% end -%> +<% end -%> +<% else -%> + value_type: '<%= property.value_type.to_s %>' +<% end -%> <% end -%> <% end -%> <% if property.is_a?(Api::Type::NestedObject) -%> From 767c38c1af2a0b555bdfb39b3a61406dc27da3b5 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Tue, 11 Jun 2024 12:00:50 -0700 Subject: [PATCH 117/356] Fixed review requests on issue comment from author (#10937) --- .github/workflows/request-reviewer.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/request-reviewer.yml b/.github/workflows/request-reviewer.yml index 79bec4dd6ece..78e2e4f9bdec 100644 --- a/.github/workflows/request-reviewer.yml +++ b/.github/workflows/request-reviewer.yml @@ -15,7 +15,7 @@ on: jobs: request-review: - if: github.event.pull_request && github.event.pull_request.draft == false && (github.event.sender.login == github.event.pull_request.user.login || 
github.event.action != 'created') + if: (github.event.action == 'created' && github.event.issue.draft == false && github.event.comment.user.login == github.event.issue.user.login) || (github.event.action != 'created' && github.event.pull_request.draft == false) runs-on: ubuntu-latest permissions: pull-requests: write From d1808d2885870f080ceb1cc58c567baedf07f94f Mon Sep 17 00:00:00 2001 From: Pawel Jasinski <56267784+pawelJas@users.noreply.github.com> Date: Tue, 11 Jun 2024 21:38:12 +0200 Subject: [PATCH 118/356] Updated description of connection_draining_timeout_sec, balancing_mode and outlier_detection in backend_service and regional_backend_service before changing defaults (#10927) --- mmv1/products/compute/BackendService.yaml | 5 +++++ mmv1/products/compute/RegionBackendService.yaml | 7 +++++++ 2 files changed, 12 insertions(+) diff --git a/mmv1/products/compute/BackendService.yaml b/mmv1/products/compute/BackendService.yaml index d88fa9c1084c..3b87ac0b4767 100644 --- a/mmv1/products/compute/BackendService.yaml +++ b/mmv1/products/compute/BackendService.yaml @@ -165,6 +165,8 @@ properties: See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) for an explanation of load balancing modes. + + From version 6.0.0 default value will be UTILIZATION to match default GCP value. - !ruby/object:Api::Type::Double name: 'capacityScaler' send_empty_value: true @@ -964,6 +966,9 @@ properties: Settings controlling eviction of unhealthy hosts from the load balancing pool. Applicable backend service types can be a global backend service with the loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. + + From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. + Default values are enforce by GCP without providing them. 
properties: - !ruby/object:Api::Type::NestedObject name: 'baseEjectionTime' diff --git a/mmv1/products/compute/RegionBackendService.yaml b/mmv1/products/compute/RegionBackendService.yaml index 681687d2cd76..797c5043cf73 100644 --- a/mmv1/products/compute/RegionBackendService.yaml +++ b/mmv1/products/compute/RegionBackendService.yaml @@ -164,6 +164,8 @@ properties: See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) for an explanation of load balancing modes. + + From version 6.0.0 default value will be UTILIZATION to match default GCP value. - !ruby/object:Api::Type::Double name: 'capacityScaler' description: | @@ -658,6 +660,8 @@ properties: description: | Time for which instance will be drained (not accept new connections, but still work to finish started). + + From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. - !ruby/object:Api::Type::Time name: 'creationTimestamp' description: | @@ -873,6 +877,9 @@ properties: Settings controlling eviction of unhealthy hosts from the load balancing pool. This field is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED and the `protocol` is set to HTTP, HTTPS, or HTTP2. + + From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. + Default values are enforce by GCP without providing them. 
properties: - !ruby/object:Api::Type::NestedObject name: 'baseEjectionTime' From ff9f57ef52d73d1d232a28fb4ffdeae0693d36e0 Mon Sep 17 00:00:00 2001 From: Pawel Jasinski <56267784+pawelJas@users.noreply.github.com> Date: Tue, 11 Jun 2024 21:41:24 +0200 Subject: [PATCH 119/356] update BackendService timeoutSec description (#10882) --- mmv1/products/compute/BackendService.yaml | 6 ++++-- mmv1/products/compute/RegionBackendService.yaml | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/mmv1/products/compute/BackendService.yaml b/mmv1/products/compute/BackendService.yaml index 3b87ac0b4767..150fa6c33361 100644 --- a/mmv1/products/compute/BackendService.yaml +++ b/mmv1/products/compute/BackendService.yaml @@ -1286,8 +1286,10 @@ properties: - !ruby/object:Api::Type::Integer name: 'timeoutSec' description: | - How many seconds to wait for the backend before considering it a - failed request. Default is 30 seconds. Valid range is [1, 86400]. + The backend service timeout has a different meaning depending on the type of load balancer. + For more information see, [Backend service settings](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + The default is 30 seconds. + The full range of timeout values allowed goes from 1 through 2,147,483,647 seconds. default_from_api: true - !ruby/object:Api::Type::NestedObject name: 'logConfig' diff --git a/mmv1/products/compute/RegionBackendService.yaml b/mmv1/products/compute/RegionBackendService.yaml index 797c5043cf73..4cb367ee521d 100644 --- a/mmv1/products/compute/RegionBackendService.yaml +++ b/mmv1/products/compute/RegionBackendService.yaml @@ -1232,8 +1232,10 @@ properties: - !ruby/object:Api::Type::Integer name: 'timeoutSec' description: | - How many seconds to wait for the backend before considering it a - failed request. Default is 30 seconds. Valid range is [1, 86400]. + The backend service timeout has a different meaning depending on the type of load balancer. 
+ For more information see, [Backend service settings](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + The default is 30 seconds. + The full range of timeout values allowed goes from 1 through 2,147,483,647 seconds. default_from_api: true - !ruby/object:Api::Type::NestedObject name: 'logConfig' From 7eda06641d85aeb31375df839c0b6d5ab487560a Mon Sep 17 00:00:00 2001 From: Ryan Oaks Date: Tue, 11 Jun 2024 16:02:01 -0400 Subject: [PATCH 120/356] Support absolute override dir for generator (#10942) --- mmv1/compiler.rb | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mmv1/compiler.rb b/mmv1/compiler.rb index 43211deb1a8e..4d0175a3f7d2 100755 --- a/mmv1/compiler.rb +++ b/mmv1/compiler.rb @@ -123,6 +123,14 @@ if override_dir Google::LOGGER.info "Using override directory '#{override_dir}'" + + # Normalize override dir to a path that is relative to the magic-modules directory + # This is needed for templates that concatenate pwd + override dir + path + if Pathname.new(override_dir).absolute? + override_dir = Pathname.new(override_dir).relative_path_from(__dir__).to_s + Google::LOGGER.info "Override directory normalized to relative path '#{override_dir}'" + end + Dir["#{override_dir}/products/**/product.yaml"].each do |file_path| product = File.dirname(Pathname.new(file_path).relative_path_from(override_dir)) all_product_files.push(product) unless all_product_files.include? 
product From 90d184014297ccf2c1d06fd6fc01c539832ce638 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Tue, 11 Jun 2024 13:16:52 -0700 Subject: [PATCH 121/356] Fixed review requests on PR comment (#10948) --- .github/workflows/request-reviewer.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/request-reviewer.yml b/.github/workflows/request-reviewer.yml index 78e2e4f9bdec..45c5ffacd4e6 100644 --- a/.github/workflows/request-reviewer.yml +++ b/.github/workflows/request-reviewer.yml @@ -35,5 +35,5 @@ jobs: cd .ci/magician go build . - name: Request reviewer - run: .ci/magician/magician request-reviewer ${{ github.event.pull_request.number }} + run: .ci/magician/magician request-reviewer ${{ github.event.pull_request.number || github.event.issue.number }} From 7486b728dc94ab0cb47b64e61e0414da4b65c47d Mon Sep 17 00:00:00 2001 From: Daniel Vega-Myhre <105610547+danielvegamyhre@users.noreply.github.com> Date: Tue, 11 Jun 2024 14:27:52 -0700 Subject: [PATCH 122/356] Bump min gke version to fix TestAccContainerNodePool_fastSocket (#10945) --- .../services/container/resource_container_node_pool_test.go.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index 735d39b34e4e..b89e0bae993d 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -1878,7 +1878,7 @@ resource "google_container_cluster" "cluster" { name = "%s" location = "us-central1-f" initial_node_count = 1 - min_master_version = "1.25" + min_master_version = "1.28" deletion_protection = false network = "%s" subnetwork = "%s" From 1a50dca365227e563f105288d6324700ed0f7dab Mon Sep 17 00:00:00 2001 From: bcreddy-gcp 
<123543489+bcreddy-gcp@users.noreply.github.com> Date: Tue, 11 Jun 2024 14:57:28 -0700 Subject: [PATCH 123/356] remove network url custom expand for workbench instance (#10931) --- mmv1/products/workbench/Instance.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/mmv1/products/workbench/Instance.yaml b/mmv1/products/workbench/Instance.yaml index 0134ea3eb8eb..fbac9a9a0e51 100644 --- a/mmv1/products/workbench/Instance.yaml +++ b/mmv1/products/workbench/Instance.yaml @@ -367,7 +367,6 @@ properties: name: network description: 'Optional. The name of the VPC that this VM instance is in.' immutable: true - custom_expand: templates/terraform/custom_expand/network_full_url.erb diff_suppress_func: tpgresource.CompareSelfLinkRelativePaths default_from_api: true - !ruby/object:Api::Type::String From 037390f1e6ea93e7b540dca24d376c03d415d2d0 Mon Sep 17 00:00:00 2001 From: Salome Papiashvili Date: Wed, 12 Jun 2024 02:06:29 +0200 Subject: [PATCH 124/356] Adding Datasource: google_composer_user_workloads_config_map (#10680) --- .../provider/provider_mmv1_resources.go.erb | 1 + ..._composer_user_workloads_config_map.go.erb | 52 ++++++++++++++++ ...oser_user_workloads_config_map_test.go.erb | 60 ++++++++++++++++++ ...er_user_workloads_config_map.html.markdown | 62 +++++++++++++++++++ 4 files changed, 175 insertions(+) create mode 100644 mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_config_map.go.erb create mode 100644 mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_config_map_test.go.erb create mode 100644 mmv1/third_party/terraform/website/docs/d/composer_user_workloads_config_map.html.markdown diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 19bd44541e4f..35ee6bdf1d40 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ 
b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -61,6 +61,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_cloud_run_v2_service": cloudrunv2.DataSourceGoogleCloudRunV2Service(), "google_composer_environment": composer.DataSourceGoogleComposerEnvironment(), <% unless version == 'ga' -%> + "google_composer_user_workloads_config_map": composer.DataSourceGoogleComposerUserWorkloadsConfigMap(), "google_composer_user_workloads_secret": composer.DataSourceGoogleComposerUserWorkloadsSecret(), <% end -%> "google_composer_image_versions": composer.DataSourceGoogleComposerImageVersions(), diff --git a/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_config_map.go.erb b/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_config_map.go.erb new file mode 100644 index 000000000000..685020132443 --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_config_map.go.erb @@ -0,0 +1,52 @@ +<% autogen_exception -%> +package composer + +<% unless version == 'ga' -%> +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComposerUserWorkloadsConfigMap() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComposerUserWorkloadsConfigMap().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "environment", "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "region") + + return &schema.Resource{ + Read: dataSourceGoogleComposerUserWorkloadsConfigMapRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComposerUserWorkloadsConfigMapRead(d *schema.ResourceData, meta interface{}) error 
{ + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{region}}/environments/{{environment}}/userWorkloadsConfigMaps/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = resourceComposerUserWorkloadsConfigMapRead(d, meta) + if err != nil { + return err + } + + if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} +<% end -%> diff --git a/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_config_map_test.go.erb b/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_config_map_test.go.erb new file mode 100644 index 000000000000..7609bb3b9c76 --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/data_source_google_composer_user_workloads_config_map_test.go.erb @@ -0,0 +1,60 @@ +<% autogen_exception -%> +package composer_test + +<% unless version == 'ga' -%> +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceComposerUserWorkloadsConfigMap_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "env_name": fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)), + "config_map_name": fmt.Sprintf("tf-test-composer-config-map-%d", acctest.RandInt(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceComposerUserWorkloadsConfigMap_basic(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_composer_user_workloads_config_map.test", + 
"google_composer_user_workloads_config_map.test"), + ), + }, + }, + }) +} + +func testAccDataSourceComposerUserWorkloadsConfigMap_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "test" { + name = "%{env_name}" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} +resource "google_composer_user_workloads_config_map" "test" { + environment = google_composer_environment.test.name + name = "%{config_map_name}" + data = { + db_host: "dbhost:5432", + api_host: "apihost:443", + } +} +data "google_composer_user_workloads_config_map" "test" { + name = google_composer_user_workloads_config_map.test.name + environment = google_composer_environment.test.name +} +`, context) +} +<% end -%> diff --git a/mmv1/third_party/terraform/website/docs/d/composer_user_workloads_config_map.html.markdown b/mmv1/third_party/terraform/website/docs/d/composer_user_workloads_config_map.html.markdown new file mode 100644 index 000000000000..6640e7fbecc3 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/composer_user_workloads_config_map.html.markdown @@ -0,0 +1,62 @@ +--- +subcategory: "Cloud Composer" +description: |- + User workloads ConfigMap used by Airflow tasks that run with Kubernetes Executor or KubernetesPodOperator. +--- + +# google\_composer\_user\_workloads\_config\_map + +Provides access to Kubernetes ConfigMap configuration for a given project, region and Composer Environment. + +> **Warning:** This data source is in beta, and should be used with the terraform-provider-google-beta provider. +See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
+ +## Example Usage + +```hcl +resource "google_composer_environment" "example" { + name = "example-environment" + config{ + software_config { + image_version = "composer-3-airflow-2" + } + } +} +resource "google_composer_user_workloads_config_map" "example" { + environment = google_composer_environment.example.name + name = "example-config-map" + data = { + db_host: "dbhost:5432", + api_host: "apihost:443", + } +} +data "google_composer_user_workloads_config_map" "example" { + environment = google_composer_environment.example.name + name = resource.google_composer_user_workloads_config_map.example.name +} +output "debug" { + value = data.google_composer_user_workloads_config_map.example +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) Name of the ConfigMap. + +* `environment` - (Required) Environment where the ConfigMap is stored. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + +* `region` - (Optional) The location or Compute Engine region of the environment. + +## Attributes Reference + +The following attributes are exported: + +* `id` - An identifier for the resource in format `projects/{{project}}/locations/{{region}}/environments/{{environment}}/userWorkloadsConfigMaps/{{name}}` + +* `data` - The "data" field of Kubernetes ConfigMap, organized in key-value pairs. 
+ For details see: https://kubernetes.io/docs/concepts/configuration/configmap/ From f7ca92e658da40c9e85caf6e10ec8ca2ea4c0798 Mon Sep 17 00:00:00 2001 From: Matheus Guilherme Souza Aleixo <82680416+matheusaleixo-cit@users.noreply.github.com> Date: Wed, 12 Jun 2024 14:02:30 -0300 Subject: [PATCH 125/356] Added "ActionTokenSiteKeys" and "SessionTokenSiteKeys" to "compute_security_policy" and "compute_security_policy_rule" (#10761) --- mmv1/products/compute/SecurityPolicyRule.yaml | 21 +++ .../resource_compute_security_policy.go.erb | 102 ++++++++++++ ...e_compute_security_policy_rule_test.go.erb | 132 ++++++++++++++++ ...source_compute_security_policy_test.go.erb | 147 ++++++++++++++++++ 4 files changed, 402 insertions(+) diff --git a/mmv1/products/compute/SecurityPolicyRule.yaml b/mmv1/products/compute/SecurityPolicyRule.yaml index 2ec41e82d35f..7a82d63cbaea 100644 --- a/mmv1/products/compute/SecurityPolicyRule.yaml +++ b/mmv1/products/compute/SecurityPolicyRule.yaml @@ -123,6 +123,27 @@ properties: # name: 'location' # description: | # Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::NestedObject + name: 'exprOptions' + description: | + The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr'). + properties: + - !ruby/object:Api::Type::NestedObject + name: 'recaptchaOptions' + required: true + description: | + reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field has no effect. + properties: + - !ruby/object:Api::Type::Array + name: 'actionTokenSiteKeys' + description: | + A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. 
+ item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: 'sessionTokenSiteKeys' + description: | + A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. + item_type: Api::Type::String - !ruby/object:Api::Type::NestedObject name: 'config' description: | diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb index 3add4c9093d7..472586c5785d 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy.go.erb @@ -155,6 +155,45 @@ func ResourceComputeSecurityPolicy() *schema.Resource { }, Description: `User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header.`, }, + + "expr_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr').`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "recaptcha_options": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: `reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field has no effect.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action_token_site_keys": { + Type: schema.TypeList, + Optional: true, + Description: `A list of site keys to be used during the validation of reCAPTCHA action-tokens. 
The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created`, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "session_token_site_keys": { + Type: schema.TypeList, + Optional: true, + Description: `A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.`, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, }, }, Description: `A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding action is enforced.`, @@ -1024,6 +1063,7 @@ func expandSecurityPolicyMatch(configured []interface{}) *compute.SecurityPolicy VersionedExpr: data["versioned_expr"].(string), Config: expandSecurityPolicyMatchConfig(data["config"].([]interface{})), Expr: expandSecurityPolicyMatchExpr(data["expr"].([]interface{})), + ExprOptions: expandSecurityPolicyMatchExprOptions(data["expr_options"].([]interface{})), } } @@ -1053,6 +1093,42 @@ func expandSecurityPolicyMatchExpr(expr []interface{}) *compute.Expr { } } +func expandSecurityPolicyMatchExprOptions(exprOptions []interface{}) *compute.SecurityPolicyRuleMatcherExprOptions { + if len(exprOptions) == 0 || exprOptions[0] == nil { + return nil + } + + data := exprOptions[0].(map[string]interface{}) + return &compute.SecurityPolicyRuleMatcherExprOptions{ + RecaptchaOptions: expandSecurityPolicyMatchExprOptionsRecaptchaOptions(data["recaptcha_options"].([]interface{})), + } +} + +func expandSecurityPolicyMatchExprOptionsRecaptchaOptions(recaptchaOptions []interface{}) *compute.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions { + if len(recaptchaOptions) == 0 || recaptchaOptions[0] == nil { + return nil + } + + data := recaptchaOptions[0].(map[string]interface{}) + + actionTokenKeysInterface := 
data["action_token_site_keys"].([]interface{}) + actionTokenKeys := make([]string, len(actionTokenKeysInterface)) + for i, v := range actionTokenKeysInterface { + actionTokenKeys[i] = v.(string) + } + + sessionTokenKeysInterface := data["session_token_site_keys"].([]interface{}) + sessionTokenKeys := make([]string, len(sessionTokenKeysInterface)) + for i, v := range sessionTokenKeysInterface { + sessionTokenKeys[i] = v.(string) + } + + return &compute.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions{ + ActionTokenSiteKeys: actionTokenKeys, + SessionTokenSiteKeys: sessionTokenKeys, + } +} + <% unless version == 'ga' -%> func expandSecurityPolicyPreconfiguredWafConfig(configured []interface{}) *compute.SecurityPolicyRulePreconfiguredWafConfig { if len(configured) == 0 || configured[0] == nil { @@ -1132,6 +1208,7 @@ func flattenMatch(match *compute.SecurityPolicyRuleMatcher) []map[string]interfa "versioned_expr": match.VersionedExpr, "config": flattenMatchConfig(match.Config), "expr": flattenMatchExpr(match), + "expr_options": flattenMatchExprOptions(match.ExprOptions), } return []map[string]interface{}{data} @@ -1149,6 +1226,31 @@ func flattenMatchConfig(conf *compute.SecurityPolicyRuleMatcherConfig) []map[str return []map[string]interface{}{data} } +func flattenMatchExprOptions(exprOptions *compute.SecurityPolicyRuleMatcherExprOptions) []map[string]interface{} { + if exprOptions == nil { + return nil + } + + data := map[string]interface{}{ + "recaptcha_options": flattenMatchExprOptionsRecaptchaOptions(exprOptions.RecaptchaOptions), + } + + return []map[string]interface{}{data} +} + +func flattenMatchExprOptionsRecaptchaOptions(recaptchaOptions *compute.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) []map[string]interface{} { + if recaptchaOptions == nil { + return nil + } + + data := map[string]interface{}{ + "action_token_site_keys": recaptchaOptions.ActionTokenSiteKeys, + "session_token_site_keys": recaptchaOptions.SessionTokenSiteKeys, + } + + return 
[]map[string]interface{}{data} +} + func flattenMatchExpr(match *compute.SecurityPolicyRuleMatcher) []map[string]interface{} { if match.Expr == nil { return nil diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_rule_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_rule_test.go.erb index c731b650bd6a..29578b86685b 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_rule_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_rule_test.go.erb @@ -297,6 +297,70 @@ func TestAccComputeSecurityPolicyRule_EnforceOnKeyUpdates(t *testing.T) { }) } +func TestAccComputeSecurityPolicyRule_withExprOptions(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withExprOptions(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicyRule_modifyExprOptions(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withRuleExpr(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + 
Config: testAccComputeSecurityPolicyRule_withExprOptions(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_modifyExprOptions(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccComputeSecurityPolicyRule_preBasicUpdate(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_compute_security_policy" "default" { @@ -919,3 +983,71 @@ resource "google_compute_security_policy_rule" "policy_rule" { } `, spName) } + +func testAccComputeSecurityPolicyRule_withExprOptions(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "reCAPTCHA rule" + action = "deny(403)" + priority = "2000" + preview = true + match { + expr { + expression = "request.path.endsWith('RegisterWithEmail') && token.recaptcha_action.score >= 0.8 && (token.recaptcha_action.valid)" + } + expr_options { + recaptcha_options { + action_token_site_keys = [ + "placeholder-recaptcha-action-site-key-01", + "placeholder-recaptcha-action-site-key-02" + ] + session_token_site_keys = [ + "placeholder-recaptcha-session-site-key-1", + "placeholder-recaptcha-session-site-key-2" + ] + } + } + } +} +`, context) +} + +func testAccComputeSecurityPolicyRule_modifyExprOptions(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + 
security_policy = google_compute_security_policy.default.name + description = "modified reCAPTCHA rule" + action = "deny(403)" + priority = "2000" + preview = true + match { + expr { + expression = "request.path.endsWith('RegisterWithEmail') && token.recaptcha_action.score >= 0.8 && (token.recaptcha_action.valid)" + } + expr_options { + recaptcha_options { + action_token_site_keys = [ + "placeholder-recaptcha-action-site-key-09", + "placeholder-recaptcha-action-site-key-08", + "placeholder-recaptcha-action-site-key-07" + ] + session_token_site_keys = [ + "placeholder-recaptcha-session-site-key-1" + ] + } + } + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb index 0e9fdd517043..3b2747637f15 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_security_policy_test.go.erb @@ -588,6 +588,67 @@ func TestAccComputeSecurityPolicy_withHeadAction(t *testing.T) { }, }) } + +func TestAccComputeSecurityPolicy_withExprOptions(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withExprOptions(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_modifyExprOptions(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRule(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withExprOptions(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_modifyExprOptions(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func testAccComputeSecurityPolicy_withRecaptchaOptionsConfig(project, spName string) string { return fmt.Sprintf(` resource "google_recaptcha_enterprise_key" "primary" { @@ -1759,3 +1820,89 @@ resource "google_compute_security_policy" "policy" { } `, spName) } + +func testAccComputeSecurityPolicy_withExprOptions(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + description = "default rule" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + } + + rule { + action = "deny(403)" + priority = "2000" + description = "reCAPTCHA rule" + match { + expr { + expression = "request.path.endsWith('RegisterWithEmail') && token.recaptcha_action.score >= 0.8 && (token.recaptcha_action.valid)" + } + expr_options { + recaptcha_options { + action_token_site_keys = [ + "placeholder-recaptcha-action-site-key-01", + "placeholder-recaptcha-action-site-key-02" + ] + session_token_site_keys = [ + "placeholder-recaptcha-session-site-key-1", + "placeholder-recaptcha-session-site-key-2" + ] + } + } + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_modifyExprOptions(spName 
string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + description = "default rule" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + } + + rule { + action = "deny(403)" + priority = "2000" + description = "reCAPTCHA rule" + match { + expr { + expression = "request.path.endsWith('RegisterWithEmail') && token.recaptcha_action.score >= 0.8 && (token.recaptcha_action.valid)" + } + expr_options { + recaptcha_options { + action_token_site_keys = [ + "placeholder-recaptcha-action-site-key-09", + "placeholder-recaptcha-action-site-key-08", + "placeholder-recaptcha-action-site-key-07" + ] + session_token_site_keys = [ + "placeholder-recaptcha-session-site-key-1" + ] + } + } + } + } +} +`, spName) +} From 716507e1ae25949b6d9835a07c9b69ecf88fbae2 Mon Sep 17 00:00:00 2001 From: SizzleHsu Date: Wed, 12 Jun 2024 18:07:33 +0000 Subject: [PATCH 126/356] Add handling of amd64 canonical ubuntu lts images on compute disk. 
(#10952) --- .../terraform/services/compute/image.go | 2 +- .../compute/resource_compute_disk_test.go.erb | 30 +++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/compute/image.go b/mmv1/third_party/terraform/services/compute/image.go index 063c7e4cd167..9b833bf5faff 100644 --- a/mmv1/third_party/terraform/services/compute/image.go +++ b/mmv1/third_party/terraform/services/compute/image.go @@ -30,7 +30,7 @@ var ( resolveImageLink = regexp.MustCompile(fmt.Sprintf("^https://www.googleapis.com/compute/[a-z0-9]+/projects/(%s)/global/images/(%s)", verify.ProjectRegex, resolveImageImageRegex)) windowsSqlImage = regexp.MustCompile("^sql-(?:server-)?([0-9]{4})-([a-z]+)-windows-(?:server-)?([0-9]{4})(?:-r([0-9]+))?-dc-v[0-9]+$") - canonicalUbuntuLtsImage = regexp.MustCompile("^ubuntu-(minimal-)?([0-9]+)(?:.*(arm64))?.*$") + canonicalUbuntuLtsImage = regexp.MustCompile("^ubuntu-(minimal-)?([0-9]+)(?:.*(arm64|amd64))?.*$") cosLtsImage = regexp.MustCompile("^cos-([0-9]+)-") ) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb index d116108ebd69..67e3f01f2730 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb @@ -268,6 +268,16 @@ func TestDiskImageDiffSuppress(t *testing.T) { New: "ubuntu-minimal-2210-amd64", ExpectDiffSuppress: true, }, + "matching image ubuntu amd64 canonical lts self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2404-noble-amd64-v20240423", + New: "ubuntu-2404-lts-amd64", + ExpectDiffSuppress: true, + }, + "matching image ubuntu minimal amd64 canonical lts self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2404-noble-amd64-v20240423", + 
New: "ubuntu-minimal-2404-lts-amd64", + ExpectDiffSuppress: true, + }, "different architecture image ubuntu amd64 self_link": { Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2210-kinetic-amd64-v20221022", New: "ubuntu-2210", @@ -288,6 +298,26 @@ func TestDiskImageDiffSuppress(t *testing.T) { New: "ubuntu-minimal-2210-amd64", ExpectDiffSuppress: false, }, + "different image ubuntu amd64 canonical lts self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2404-noble-amd64-v20240423", + New: "ubuntu-2404-lts", + ExpectDiffSuppress: false, + }, + "different image ubuntu minimal amd64 canonical lts self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2404-noble-amd64-v20240423", + New: "ubuntu-minimal-2404-lts", + ExpectDiffSuppress: false, + }, + "different image ubuntu amd64 canonical lts family": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2404-noble-v20240423", + New: "ubuntu-2404-lts-amd64", + ExpectDiffSuppress: false, + }, + "different image ubuntu minimal amd64 canonical lts family": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2404-noble-v20240423", + New: "ubuntu-minimal-2404-lts-amd64", + ExpectDiffSuppress: false, + }, } for tn, tc := range cases { From 2a013068adc20befed96b3b5eb0195c41966c37d Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Wed, 12 Jun 2024 11:13:01 -0700 Subject: [PATCH 127/356] Create activedirectory Go yaml files and add more fields to yaml_conversion.erb (#10896) --- mmv1/products/activedirectory/go_Domain.yaml | 116 ++++++++++++++++++ .../activedirectory/go_DomainTrust.yaml | 115 +++++++++++++++++ mmv1/products/activedirectory/go_Peering.yaml | 104 ++++++++++++++++ mmv1/products/activedirectory/go_product.yaml | 24 ++++ mmv1/templates/terraform/yaml_conversion.erb | 13 +- 5 
files changed, 370 insertions(+), 2 deletions(-) create mode 100644 mmv1/products/activedirectory/go_Domain.yaml create mode 100644 mmv1/products/activedirectory/go_DomainTrust.yaml create mode 100644 mmv1/products/activedirectory/go_Peering.yaml create mode 100644 mmv1/products/activedirectory/go_product.yaml diff --git a/mmv1/products/activedirectory/go_Domain.yaml b/mmv1/products/activedirectory/go_Domain.yaml new file mode 100644 index 000000000000..71935c55730c --- /dev/null +++ b/mmv1/products/activedirectory/go_Domain.yaml @@ -0,0 +1,116 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Domain' +kind: 'activedirectory#domain' +description: | + Creates a Microsoft AD domain +references: + guides: + 'Managed Microsoft Active Directory Quickstart': 'https://cloud.google.com/managed-microsoft-ad/docs/quickstarts' + api: 'https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains' +docs: +id_format: '{{name}}' +base_url: 'projects/{{project}}/locations/global/domains' +self_link: '{{name}}' +create_url: 'projects/{{project}}/locations/global/domains?domainName={{domain_name}}' +update_verb: 'PATCH' +update_mask: true +delete_url: 'projects/{{project}}/locations/global/domains/{{domain_name}}' +import_format: + - '{{name}}' +timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 60 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + custom_import: 'templates/terraform/custom_import/self_link_as_name.erb' +error_abort_predicates: + + - 'transport_tpg.Is429QuotaError' +examples: + - name: 'active_directory_domain_basic' + primary_resource_id: 'ad-domain' + vars: + name: 'myorg' + domain_name: 'tfgen' + skip_test: true +parameters: + - name: 'domainName' + type: String + description: "The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, +https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains." + url_param_only: true + required: true + immutable: true + validation: + function: 'verify.ValidateADDomainName()' +properties: + - name: 'name' + type: String + description: "The unique name of the domain using the format: `projects/{project}/locations/global/domains/{domainName}`." 
+ output: true + - name: 'labels' + type: KeyValueLabels + description: "Resource labels that can contain user-provided metadata" + immutable: false + - name: 'authorizedNetworks' + type: Array + description: "The full names of the Google Compute Engine networks the domain instance is connected to. The domain is only available on networks listed in authorizedNetworks. +If CIDR subnets overlap between networks, domain creation will fail." + is_set: true + item_type: + type: String + - name: 'reservedIpRange' + type: String + description: "The CIDR range of internal addresses that are reserved for this domain. Reserved networks must be /24 or larger. +Ranges must be unique and non-overlapping with existing subnets in authorizedNetworks" + required: true + immutable: true + - name: 'locations' + type: Array + description: "Locations where domain needs to be provisioned. [regions][compute/docs/regions-zones/] +e.g. us-west1 or us-east4 Service supports up to 4 locations at once. Each location will use a /26 block." + required: true + item_type: + type: String + - name: 'admin' + type: String + description: "The name of delegated administrator account used to perform Active Directory operations. +If not specified, setupadmin will be used." + immutable: true + default_value: setupadmin + - name: 'fqdn' + type: String + description: "The fully-qualified domain name of the exposed domain used by clients to connect to the service. +Similar to what would be chosen for an Active Directory set up on an internal network." + output: true diff --git a/mmv1/products/activedirectory/go_DomainTrust.yaml b/mmv1/products/activedirectory/go_DomainTrust.yaml new file mode 100644 index 000000000000..9b69dd0caa28 --- /dev/null +++ b/mmv1/products/activedirectory/go_DomainTrust.yaml @@ -0,0 +1,115 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'DomainTrust' +kind: 'activedirectory#trust' +description: | + Adds a trust between Active Directory domains +references: + guides: + 'Active Directory Trust': 'https://cloud.google.com/managed-microsoft-ad/docs/create-one-way-trust' + api: 'https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains/attachTrust' +docs: +id_format: 'projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}' +base_url: 'projects/{{project}}/locations/global/domains' +self_link: 'projects/{{project}}/locations/global/domains/{{domain}}' +create_url: 'projects/{{project}}/locations/global/domains/{{domain}}:attachTrust' +update_url: 'projects/{{project}}/locations/global/domains/{{domain}}:reconfigureTrust' +update_verb: 'POST' +delete_url: 'projects/{{project}}/locations/global/domains/{{domain}}:detachTrust' +delete_verb: 'POST' +import_format: + - 'projects/{{project}}/locations/global/domains/{{domain}}/{{target_domain_name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - targetDomainName +nested_query: + keys: + - trusts + is_list_of_ids: false + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/active_directory_domain_trust.go.tmpl' + 
update_encoder: 'templates/terraform/update_encoder/go/active_directory_domain_trust.go.tmpl' + decoder: 'templates/terraform/decoders/go/unwrap_resource.go.tmpl' + custom_delete: 'templates/terraform/custom_delete/go/active_directory_domain_trust.go.tmpl' +examples: + - name: 'active_directory_domain_trust_basic' + primary_resource_id: 'ad-domain-trust' + skip_test: true +parameters: + - name: 'domain' + type: String + description: "The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, +https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains." + url_param_only: true + required: true + immutable: true +properties: + - name: 'targetDomainName' + type: String + description: "The fully qualified target domain name which will be in trust with the current domain." + required: true + - name: 'trustType' + type: Enum + description: "The type of trust represented by the trust resource." + required: true + immutable: true + enum_values: + - 'FOREST' + - 'EXTERNAL' + - name: 'trustDirection' + type: Enum + description: "The trust direction, which decides if the current domain is trusted, trusting, or both." + required: true + immutable: true + enum_values: + - 'INBOUND' + - 'OUTBOUND' + - 'BIDIRECTIONAL' + - name: 'selectiveAuthentication' + type: Boolean + description: "Whether the trusted side has forest/domain wide access or selective access to an approved set of resources." + immutable: true + - name: 'targetDnsIpAddresses' + type: Array + description: "The target DNS server IP addresses which can resolve the remote domain involved in the trust." + is_set: true + required: true + item_type: + type: String + - name: 'trustHandshakeSecret' + type: String + description: "The trust secret used for the handshake with the target domain. This will not be stored." 
+ required: true + immutable: true + ignore_read: true + sensitive: true diff --git a/mmv1/products/activedirectory/go_Peering.yaml b/mmv1/products/activedirectory/go_Peering.yaml new file mode 100644 index 000000000000..bc798f5bf0f0 --- /dev/null +++ b/mmv1/products/activedirectory/go_Peering.yaml @@ -0,0 +1,104 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Peering' +kind: 'activedirectory#peering' +description: | + Creates a Peering for Managed AD instance. 
+min_version: 'beta' +references: + guides: + 'Active Directory Domain Peering': 'https://cloud.google.com/managed-microsoft-ad/docs/domain-peering' + api: 'https://cloud.google.com/managed-microsoft-ad/reference/rest/v1beta1/projects.locations.global.peerings' +docs: +id_format: 'projects/{{project}}/locations/global/domains/{{peering_id}}' +base_url: 'projects/{{project}}/locations/global/peerings' +self_link: '{{name}}' +create_url: 'projects/{{project}}/locations/global/peerings?peeringId={{peering_id}}' +update_verb: 'PATCH' +update_mask: false +delete_url: 'projects/{{project}}/locations/global/peerings/{{peering_id}}' +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: +examples: + - name: 'active_directory_peering_basic' + primary_resource_id: 'ad-domain-peering' + vars: + domain_name: 'ad.test.hashicorptest.com' + project_id: 'my-peered-project' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_test: true + skip_import_test: true +parameters: + - name: 'peeringId' + type: String + description: "" + min_version: 'beta' + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: "Unique name of the peering in this scope including projects and location using the form: projects/{projectId}/locations/global/peerings/{peeringId}." + min_version: 'beta' + output: true + - name: 'labels' + type: KeyValueLabels + description: "Resource labels that can contain user-provided metadata" + min_version: 'beta' + immutable: false + - name: 'authorizedNetwork' + type: String + description: "The full names of the Google Compute Engine networks to which the instance is connected. 
Caller needs to make sure that CIDR subnets do not overlap between networks, else peering creation will fail." + min_version: 'beta' + required: true + immutable: true + - name: 'domainResource' + type: String + description: "Full domain resource path for the Managed AD Domain involved in peering. The resource path should be in the form projects/{projectId}/locations/global/domains/{domainName}" + min_version: 'beta' + required: true + immutable: true + - name: 'status' + type: String + description: "The current state of this Peering." + min_version: 'beta' + url_param_only: true + ignore_read: true + - name: 'statusMessage' + type: String + description: "Additional information about the current status of this peering, if available." + min_version: 'beta' + ignore_read: true diff --git a/mmv1/products/activedirectory/go_product.yaml b/mmv1/products/activedirectory/go_product.yaml new file mode 100644 index 000000000000..7ed90e7f66b8 --- /dev/null +++ b/mmv1/products/activedirectory/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ActiveDirectory' +display_name: 'Managed Microsoft Active Directory' +versions: + - name: 'ga' + base_url: 'https://managedidentities.googleapis.com/v1/' + - name: 'beta' + base_url: 'https://managedidentities.googleapis.com/v1beta1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 2dcd831c565f..f7208713bbf9 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -138,7 +138,7 @@ mutex: <%= object.mutex %> <% unless object.import_format.empty? -%> import_format: <% object.import_format.each do |iformat| -%> - - '<%= iformat %>' + - '<%= iformat %>' <% end -%> <% end -%> <% unless !object.exclude_import -%> @@ -248,12 +248,18 @@ async: <% unless object.collection_url_key == object.name.plural.camelize(:lower) -%> collection_url_key: '<%= object.collection_url_key %>' <% end -%> +<% unless object.instance_variable_get("@identity").nil? -%> +identity: +<% object.instance_variable_get("@identity").each do |id| -%> + - <%= id %> +<% end -%> +<% end -%> <% unless object.nested_query.nil? -%> nested_query: <% unless object.nested_query.keys.nil? -%> keys: <% object.nested_query.keys.each do |key| -%> - - '<%= key -%>' + - <%= key %> <% end -%> <% end -%> <% unless object.nested_query.is_list_of_ids.nil? -%> @@ -379,6 +385,9 @@ custom_code: <% unless object.custom_code.pre_delete.nil? -%> pre_delete: '<%= object.convert_go_file( object.custom_code.pre_delete )%>' <% end -%> +<% unless object.custom_code.custom_delete.nil? -%> + custom_delete: '<%= object.convert_go_file( object.custom_code.custom_delete ) %>' +<% end -%> <% unless object.custom_code.custom_import.nil? 
-%> custom_import: '<%= object.convert_go_file( object.custom_code.custom_import )%>' <% end -%> From 3f3c75e1fd37c6f4f9d195b0ac06acf6bcea811f Mon Sep 17 00:00:00 2001 From: Simone Ruffilli Date: Wed, 12 Jun 2024 21:24:32 +0200 Subject: [PATCH 128/356] [#18217] Update schedulingHasChangeWithoutReboot to force reboot if min_node_cpus is updated (#10904) --- .../compute/compute_instance_helpers.go.erb | 74 +++++++++---------- 1 file changed, 35 insertions(+), 39 deletions(-) diff --git a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb index c4c3f624529a..fa5cea8fc74d 100644 --- a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb +++ b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb @@ -702,45 +702,41 @@ func schedulingHasChangeRequiringReboot(d *schema.ResourceData) bool { // Terraform doesn't correctly calculate changes on schema.Set, so we do it manually // https://github.com/hashicorp/terraform-plugin-sdk/issues/98 func schedulingHasChangeWithoutReboot(d *schema.ResourceData) bool { - if !d.HasChange("scheduling") { - // This doesn't work correctly, which is why this method exists - // But it is here for posterity - return false - } - o, n := d.GetChange("scheduling") - oScheduling := o.([]interface{})[0].(map[string]interface{}) - newScheduling := n.([]interface{})[0].(map[string]interface{}) - - if schedulingHasChangeRequiringReboot(d) { - return false - } - - if oScheduling["automatic_restart"] != newScheduling["automatic_restart"] { - return true - } - - if oScheduling["preemptible"] != newScheduling["preemptible"] { - return true - } - - if oScheduling["on_host_maintenance"] != newScheduling["on_host_maintenance"] { - return true - } - - if oScheduling["min_node_cpus"] != newScheduling["min_node_cpus"] { - return true - } - - if oScheduling["provisioning_model"] != newScheduling["provisioning_model"] { 
- return true - } - - if oScheduling["instance_termination_action"] != newScheduling["instance_termination_action"] { - return true - } - - return false -} + if !d.HasChange("scheduling") { + // This doesn't work correctly, which is why this method exists + // But it is here for posterity + return false + } + o, n := d.GetChange("scheduling") + oScheduling := o.([]interface{})[0].(map[string]interface{}) + newScheduling := n.([]interface{})[0].(map[string]interface{}) + + if schedulingHasChangeRequiringReboot(d) { + return false + } + + if oScheduling["automatic_restart"] != newScheduling["automatic_restart"] { + return true + } + + if oScheduling["preemptible"] != newScheduling["preemptible"] { + return true + } + + if oScheduling["on_host_maintenance"] != newScheduling["on_host_maintenance"] { + return true + } + + if oScheduling["provisioning_model"] != newScheduling["provisioning_model"] { + return true + } + + if oScheduling["instance_termination_action"] != newScheduling["instance_termination_action"] { + return true + } + + return false + } <% unless version == 'ga' -%> func hasMaxRunDurationChanged(oScheduling, nScheduling map[string]interface{}) bool { From 14ebe14a862cb94aa14d7ac76055597e3c8945cf Mon Sep 17 00:00:00 2001 From: Yanwei Guo Date: Wed, 12 Jun 2024 13:10:51 -0700 Subject: [PATCH 129/356] Add executing job support for `google_cloud_run_v2_job` resource (#10734) --- mmv1/products/cloudrunv2/Job.yaml | 23 ++++ .../examples/cloudrunv2_job_basic.tf.erb | 2 +- .../examples/cloudrunv2_job_emptydir.tf.erb | 2 +- .../examples/cloudrunv2_job_limits.tf.erb | 2 +- .../examples/cloudrunv2_job_run_job.tf.erb | 12 ++ .../examples/cloudrunv2_job_secret.tf.erb | 2 +- .../examples/cloudrunv2_job_sql.tf.erb | 2 +- .../examples/cloudrunv2_job_vpcaccess.tf.erb | 2 +- .../resource_cloud_run_v2_job_test.go.erb | 114 ++++++++++++++++++ 9 files changed, 155 insertions(+), 6 deletions(-) create mode 100644 mmv1/templates/terraform/examples/cloudrunv2_job_run_job.tf.erb 
diff --git a/mmv1/products/cloudrunv2/Job.yaml b/mmv1/products/cloudrunv2/Job.yaml index f28ed56409ce..024f32e0a788 100644 --- a/mmv1/products/cloudrunv2/Job.yaml +++ b/mmv1/products/cloudrunv2/Job.yaml @@ -114,6 +114,13 @@ examples: ])" vars: cloud_run_job_name: 'cloudrun-job' + - !ruby/object:Provider::Terraform::Examples + name: 'cloudrunv2_job_run_job' + primary_resource_id: 'default' + primary_resource_name: "fmt.Sprintf(\"tf-test-cloudrun-job%s\", context[\"random_suffix\"\ + ])" + vars: + cloud_run_job_name: 'cloudrun-job' parameters: - !ruby/object:Api::Type::String name: 'location' @@ -226,6 +233,22 @@ properties: name: 'useDefault' description: | If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. + - !ruby/object:Api::Type::String + name: 'startExecutionToken' + description: |- + A unique string used as a suffix creating a new execution upon job create or update. The Job will become ready when the execution is successfully started. + The sum of job name and token length must be fewer than 63 characters. + conflicts: + - run_execution_token + min_version: beta + - !ruby/object:Api::Type::String + name: 'runExecutionToken' + description: |- + A unique string used as a suffix creating a new execution upon job create or update. The Job will become ready when the execution is successfully completed. + The sum of job name and token length must be fewer than 63 characters. 
+ conflicts: + - start_execution_token + min_version: beta - !ruby/object:Api::Type::NestedObject name: 'template' required: true diff --git a/mmv1/templates/terraform/examples/cloudrunv2_job_basic.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_job_basic.tf.erb index c37ab87a9f3d..95f66200f790 100644 --- a/mmv1/templates/terraform/examples/cloudrunv2_job_basic.tf.erb +++ b/mmv1/templates/terraform/examples/cloudrunv2_job_basic.tf.erb @@ -5,7 +5,7 @@ resource "google_cloud_run_v2_job" "<%= ctx[:primary_resource_id] %>" { template { template { containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" } } } diff --git a/mmv1/templates/terraform/examples/cloudrunv2_job_emptydir.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_job_emptydir.tf.erb index 0c51bbcd36df..7095215262ce 100644 --- a/mmv1/templates/terraform/examples/cloudrunv2_job_emptydir.tf.erb +++ b/mmv1/templates/terraform/examples/cloudrunv2_job_emptydir.tf.erb @@ -6,7 +6,7 @@ resource "google_cloud_run_v2_job" "<%= ctx[:primary_resource_id] %>" { template { template { containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" volume_mounts { name = "empty-dir-volume" mount_path = "/mnt" diff --git a/mmv1/templates/terraform/examples/cloudrunv2_job_limits.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_job_limits.tf.erb index 52c1523ac7d7..d68c75898eeb 100644 --- a/mmv1/templates/terraform/examples/cloudrunv2_job_limits.tf.erb +++ b/mmv1/templates/terraform/examples/cloudrunv2_job_limits.tf.erb @@ -5,7 +5,7 @@ resource "google_cloud_run_v2_job" "<%= ctx[:primary_resource_id] %>" { template { template { containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" resources { limits = { cpu = "2" diff --git a/mmv1/templates/terraform/examples/cloudrunv2_job_run_job.tf.erb 
b/mmv1/templates/terraform/examples/cloudrunv2_job_run_job.tf.erb new file mode 100644 index 000000000000..8197e5833aeb --- /dev/null +++ b/mmv1/templates/terraform/examples/cloudrunv2_job_run_job.tf.erb @@ -0,0 +1,12 @@ +resource "google_cloud_run_v2_job" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['cloud_run_job_name'] %>" + location = "us-central1" + start_execution_token = "start-once-created" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + } + } + } +} diff --git a/mmv1/templates/terraform/examples/cloudrunv2_job_secret.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_job_secret.tf.erb index 137234fdcea2..102b4ee6dae0 100644 --- a/mmv1/templates/terraform/examples/cloudrunv2_job_secret.tf.erb +++ b/mmv1/templates/terraform/examples/cloudrunv2_job_secret.tf.erb @@ -17,7 +17,7 @@ resource "google_cloud_run_v2_job" "<%= ctx[:primary_resource_id] %>" { } } containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" volume_mounts { name = "a-volume" mount_path = "/secrets" diff --git a/mmv1/templates/terraform/examples/cloudrunv2_job_sql.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_job_sql.tf.erb index d94410960200..235d596612df 100644 --- a/mmv1/templates/terraform/examples/cloudrunv2_job_sql.tf.erb +++ b/mmv1/templates/terraform/examples/cloudrunv2_job_sql.tf.erb @@ -12,7 +12,7 @@ resource "google_cloud_run_v2_job" "<%= ctx[:primary_resource_id] %>" { } containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" env { name = "FOO" diff --git a/mmv1/templates/terraform/examples/cloudrunv2_job_vpcaccess.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_job_vpcaccess.tf.erb index 409c48dfef9a..dad5db7c6c9f 100644 --- a/mmv1/templates/terraform/examples/cloudrunv2_job_vpcaccess.tf.erb +++ b/mmv1/templates/terraform/examples/cloudrunv2_job_vpcaccess.tf.erb @@ -5,7 
+5,7 @@ resource "google_cloud_run_v2_job" "<%= ctx[:primary_resource_id] %>" { template { template{ containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" } vpc_access{ connector = google_vpc_access_connector.connector.id diff --git a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_job_test.go.erb b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_job_test.go.erb index c46a43fb3e51..b272ca36f435 100644 --- a/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_job_test.go.erb +++ b/mmv1/third_party/terraform/services/cloudrunv2/resource_cloud_run_v2_job_test.go.erb @@ -450,4 +450,118 @@ func testAccCloudRunV2Job_cloudrunv2JobWithNfsVolume(context map[string]interfac } `, context) } + +func TestAccCloudRunV2Job_cloudrunv2JobWithStartExecutionTokenUpdate(t *testing.T) { + t.Parallel() + + jobName := fmt.Sprintf("tf-test-cloudrun-service%s", acctest.RandString(t, 10)) + context1 := map[string]interface{}{ + "job_name": jobName, + "token": "token1", + } + context2 := map[string]interface{}{ + "job_name": jobName, + "token": "token2", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithStartExecutionToken(context1), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithStartExecutionToken(context2), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + }, + }) +} + +func 
testAccCloudRunV2Job_cloudrunv2JobWithStartExecutionToken(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "%{job_name}" + location = "us-central1" + start_execution_token = "%{token}" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + } + } + } + } +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobWithRunExecutionTokenUpdate(t *testing.T) { + t.Parallel() + + jobName := fmt.Sprintf("tf-test-cloudrun-service%s", acctest.RandString(t, 10)) + context1 := map[string]interface{}{ + "job_name": jobName, + "token": "token1", + } + context2 := map[string]interface{}{ + "job_name": jobName, + "token": "token2", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithRunExecutionToken(context1), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithRunExecutionToken(context2), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobWithRunExecutionToken(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "%{job_name}" + location = "us-central1" + run_execution_token = "%{token}" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + } + } + } + } +`, context) +} <% end -%> From 93513224f345c31d84b80e4d3d9670f0c5b23239 Mon Sep 17 00:00:00 2001 
From: Cameron Thornton Date: Wed, 12 Jun 2024 16:11:50 -0500 Subject: [PATCH 130/356] go rewrite - script to convert custom templates (#10951) Co-authored-by: Zhenhua Li --- GNUmakefile | 5 + mmv1/main.go | 20 ++- mmv1/template-converter.go | 344 +++++++++++++++++++++++++++++++++++++ 3 files changed, 363 insertions(+), 6 deletions(-) create mode 100644 mmv1/template-converter.go diff --git a/GNUmakefile b/GNUmakefile index 905ed552009c..68ee86de465d 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -133,3 +133,8 @@ doctor: ./scripts/doctor .PHONY: mmv1 tpgtools test + +refresh-go: + cd mmv1;\ + bundle exec compiler.rb -e terraform -o $(OUTPUT_PATH) -v $(VERSION) $(mmv1_compile) --go-yaml; \ + go run . --yaml --template \ No newline at end of file diff --git a/mmv1/main.go b/mmv1/main.go index 24795b8ddaf3..0ef3e24c2b30 100644 --- a/mmv1/main.go +++ b/mmv1/main.go @@ -28,28 +28,36 @@ var version = flag.String("version", "", "optional version name. If specified, t var product = flag.String("product", "", "optional product name. If specified, the resources under the specific product will be generated. 
Otherwise, resources under all products will be generated.") // Example usage: --yaml -var yamlMode = flag.Bool("yaml", false, "strictly copy text over from ruby yaml to go yaml") +var yamlMode = flag.Bool("yaml", false, "copy text over from ruby yaml to go yaml") + +// Example usage: --template +var templateMode = flag.Bool("template", false, "copy templates over from .erb to go .tmpl") func main() { + flag.Parse() if *yamlMode { CopyText("description:") CopyText("note:") - return } - var generateCode = true - var generateDocs = true + if *templateMode { + convertTemplates() + } if outputPath == nil || *outputPath == "" { - log.Fatalf("No output path specified") + log.Printf("No output path specified, exiting") + return } if version == nil || *version == "" { - log.Fatalf("No version specified") + log.Printf("No version specified, assuming ga") + *version = "ga" } + var generateCode = true + var generateDocs = true var productsToGenerate []string var allProducts = false if product == nil || *product == "" { diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go new file mode 100644 index 000000000000..02249d8fd782 --- /dev/null +++ b/mmv1/template-converter.go @@ -0,0 +1,344 @@ +package main + +import ( + "fmt" + "io/ioutil" + "log" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/golang/glog" +) + +func find(root, ext string) []string { + var a []string + + files, err := ioutil.ReadDir(root) + if err != nil { + log.Fatal(err) + } + + for _, file := range files { + if filepath.Ext(file.Name()) == ext { + a = append(a, file.Name()) + } + } + return a +} + +func convertTemplates() { + folders := []string{"examples", "constants", "custom_check_destroy", "custom_create", "custom_delete", "custom_import", "custom_update", "decoders", "encoders", "extra_schema_entry", "post_create", "post_create_failure", "post_delete", "post_import", "post_update", "pre_create", "pre_delete", "pre_read", "pre_update", "state_migrations", 
"update_encoder", "custom_expand", "custom_flatten", "iam", "iam/example_config_body"} + counts := 0 + for _, folder := range folders { + counts += convertTemplate(folder) + } + log.Printf("%d template files in %d subfolders total", counts, len(folders)) +} + +func convertTemplate(folder string) int { + rubyDir := fmt.Sprintf("templates/terraform/%s", folder) + goDir := fmt.Sprintf("templates/terraform/%s/go", folder) + + if err := os.MkdirAll(goDir, os.ModePerm); err != nil { + glog.Error(fmt.Errorf("error creating directory %v: %v", goDir, err)) + } + + templates := find(rubyDir, ".erb") + log.Printf("%d template files in folder %s", len(templates), folder) + + for _, file := range templates { + data, err := os.ReadFile(path.Join(rubyDir, file)) + if err != nil { + log.Fatalf("Cannot open the file: %v", file) + } + + // Replace {{}} + r, err := regexp.Compile(`{{(.*?)}}`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{"{{"}}$1{{"}}"}}`)) + + // Replace primary_resource_id + r, err = regexp.Compile(`<%=\s*ctx\[:primary_resource_id\]\s*-?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte("{{$.PrimaryResourceId}}")) + + // Replace vars + r, err = regexp.Compile(`<%=\s*ctx\[:vars\]\[('|")([a-zA-Z0-9_-]+)('|")\]\s*-?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{index $.Vars "$2"}}`)) + + // Replace test_env_vars + r, err = regexp.Compile(`<%=\s*ctx\[:test_env_vars\]\[('|")([a-zA-Z0-9_-]+)('|")\]\s*-?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{index $.TestEnvVars "$2"}}`)) + + // Replace <% unless compiler == "terraformgoogleconversion-codegen" -%> + r, err = regexp.Compile(`<% unless compiler == "terraformgoogleconversion-codegen" -%>`) + if err != nil { + 
log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if ne $.Compiler "terraformgoogleconversion-codegen" }}`)) + + // Replace <% unless version == 'ga' -%> + r, err = regexp.Compile(`<% unless version == ['|"]ga['|"] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if ne $.TargetVersionName "ga" }}`)) + + // Replace <% if version == 'ga' -%> + r, err = regexp.Compile(`<% if version == 'ga' -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if eq $.TargetVersionName "ga" }}`)) + + // Replace <% else -%> + r, err = regexp.Compile(`<% else -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- else }}`)) + + // Replace <%= object.name -%> + r, err = regexp.Compile(`<%= object.name -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.Name}}`)) + + // Replace <%= object.resource_name -%> + r, err = regexp.Compile(`<%= object.resource_name -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.ResourceName}}`)) + + // Replace <%=object.self_link_uri-%> + r, err = regexp.Compile(`<%=object.self_link_uri-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.SelfLinkUri}}`)) + + // Replace <%=object.create_uri-%> + r, err = regexp.Compile(`<%=object.create_uri-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.CreateUri}}`)) + + // Replace <%=object.base_url-%> + r, err = regexp.Compile(`<%=object.base_url-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", 
err) + } + data = r.ReplaceAll(data, []byte(`{{$.BaseUrl}}`)) + + // Replace <%=object.__product.name-%> + r, err = regexp.Compile(`<%=object.__product.name-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.ProductMetadata.Name}}`)) + + // Replace <% if object.name == 'Disk' -%> + r, err = regexp.Compile(`<% if object.name == 'Disk' -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if eq $.Name "Disk" }}`)) + + // Replace <% elsif object.name == 'RegionDisk' -%> + r, err = regexp.Compile(`<% elsif object.name == 'RegionDisk' -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- else if eq $.Name "RegionDisk" }}`)) + + // Replace <% if object.properties.any?{ |p| p.name == "labels" } -%> + r, err = regexp.Compile(`<% if object\.properties.any\?\{ \|p\| p\.name == "labels" \} -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $.HasLabelsField }}`)) + + // Replace <% if object.error_retry_predicates -%> + r, err = regexp.Compile(`<% if object.error_retry_predicates -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $.ErrorRetryPredicates }}`)) + + // Replace <% if object.error_abort_predicates -%> + r, err = regexp.Compile(`<% if object.error_abort_predicates -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $.ErrorAbortPredicates }}`)) + + // Replace <%= object.error_retry_predicates.join(',') -%> + r, err = regexp.Compile(`<%= object.error_retry_predicates.join\(','\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, 
[]byte(` {{- join $.ErrorRetryPredicates "," -}} `)) + + // Replace <%= object.error_abort_predicates.join(',') -%> + r, err = regexp.Compile(`<%= object.error_abort_predicates.join\(','\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(` {{- join $.ErrorAbortPredicates "," -}} `)) + + // Replace <%= object.name.camelize(:lower) -%> + r, err = regexp.Compile(`<%= object.name.camelize\(:lower\) -?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{camelize $.Name "lower"}}`)) + + // Replace <%= object.name.plural.camelize(:lower) -%> + r, err = regexp.Compile(`<%= object.name.plural.camelize\(:lower\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{camelize (plural $.Name) "lower"}}`)) + + // Replace <%= id_format(object) -%> + r, err = regexp.Compile(`<%= id_format\(object\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.GetIdFormat}}`)) + + // Replace <%= prefix -%> + r, err = regexp.Compile(`<%= prefix -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.GetPrefix}}`)) + + // Replace <%= titlelize_property(property) -%> + r, err = regexp.Compile(`<%= titlelize_property\(property\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.TitlelizeProperty}}`)) + + // Replace <%= prop_path -%> + r, err = regexp.Compile(`<%= prop_path -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.PropPath}}`)) + + // Replace <%= go_literal(property.default_value) -%> + r, err = regexp.Compile(`<%= go_literal\(property.default_value\) 
-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.DefaultValue}}`)) + + // Replace <%= build_expand_resource_ref('v.(string)', property, pwd) %> + r, err = regexp.Compile(`<%= build_expand_resource_ref\('v\.\(string\)', property, pwd\) %>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ template "expandResourceRef" dict "VarName" "v.(string)" "ResourceRef" $.ResourceRef "ResourceType" $.ResourceType}}`)) + + // Replace <%= build_expand_resource_ref('raw.(string)', property.item_type, pwd) %> + r, err = regexp.Compile(`<%= build_expand_resource_ref\('raw\.\(string\)', property\.item_type, pwd\) %>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ template "expandResourceRef" dict "VarName" "raw.(string)" "ResourceRef" $.ItemType.ResourceRef "ResourceType" $.ItemType.ResourceType}}`)) + + // Replace <%- if property.is_a?(Api::Type::Integer) -%> + r, err = regexp.Compile(`<%- if property.is_a\?\(Api::Type::Integer\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $.IsA "Integer" }}`)) + + // Replace <%= property.name.underscore -%> + r, err = regexp.Compile(`<%= property.name.underscore -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{underscore $.Name}}`)) + + // Replace <%= resource_type -%> + r, err = regexp.Compile(`<%= resource_type -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.ResourceType}}`)) + + // Replace <% if property.is_set -%> + r, err = regexp.Compile(`<% if property.is_set -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = 
r.ReplaceAll(data, []byte(`{{- if $.IsSet }}`)) + + // Replace <% end -%> + r, err = regexp.Compile(`<%[\s-]*end[\s-]*%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- end }}`)) + + copyRight := `{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}}` + // Replace copyright + r, err = regexp.Compile(`(?s)<%[-\s#]*[tT]he license inside this.*?limitations under the License..*?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(copyRight)) + + // Replace comments + r, err = regexp.Compile(`(?s)<%#-?\s?(.*?)\s?-?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- /* $1 */}}`)) + + // Replace .erb + r, err = regexp.Compile(`\.erb`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`.tmpl`)) + + goTemplate := strings.Replace(file, "erb", "tmpl", 1) + err = ioutil.WriteFile(path.Join(goDir, goTemplate), data, 0644) + if err != nil { + glog.Exit(err) + } + } + + return len(templates) +} From 2a923f68059613893df1a567df95fc002b119e91 Mon Sep 17 00:00:00 2001 From: Yang Du <107223786+yangspirit@users.noreply.github.com> Date: Wed, 12 Jun 2024 14:14:02 -0700 Subject: [PATCH 131/356] add redis self service update 
support (#10922) --- mmv1/products/redis/Instance.yaml | 5 ++ .../redis/resource_redis_instance_test.go | 54 +++++++++++++++++++ 2 files changed, 59 insertions(+) diff --git a/mmv1/products/redis/Instance.yaml b/mmv1/products/redis/Instance.yaml index 14c7b8e5ec85..a64e6a233bc9 100644 --- a/mmv1/products/redis/Instance.yaml +++ b/mmv1/products/redis/Instance.yaml @@ -399,6 +399,11 @@ properties: can not go beyond, including reschedule. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + - !ruby/object:Api::Type::String + name: maintenanceVersion + description: The self service update maintenance version. + required: false + default_from_api: true - !ruby/object:Api::Type::Integer name: memorySizeGb description: Redis memory size in GiB. diff --git a/mmv1/third_party/terraform/services/redis/resource_redis_instance_test.go b/mmv1/third_party/terraform/services/redis/resource_redis_instance_test.go index dd5579c54cf2..79daba4208f5 100644 --- a/mmv1/third_party/terraform/services/redis/resource_redis_instance_test.go +++ b/mmv1/third_party/terraform/services/redis/resource_redis_instance_test.go @@ -255,6 +255,40 @@ func TestAccRedisInstance_redisInstanceAuthEnabled(t *testing.T) { }) } +func TestAccRedisInstance_selfServiceUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRedisInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRedisInstance_selfServiceUpdate20240411_00_00(context), + }, + { + ResourceName: "google_redis_instance.cache", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + { + Config: testAccRedisInstance_selfServiceUpdate20240503_00_00(context), + }, + { + 
ResourceName: "google_redis_instance.cache", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} + func TestAccRedisInstance_downgradeRedisVersion(t *testing.T) { t.Parallel() @@ -374,6 +408,26 @@ resource "google_redis_instance" "cache" { `, context) } +func testAccRedisInstance_selfServiceUpdate20240411_00_00(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_redis_instance" "cache" { + name = "tf-test-memory-cache%{random_suffix}" + memory_size_gb = 1 + maintenance_version = "20240411_00_00" +} +`, context) +} + +func testAccRedisInstance_selfServiceUpdate20240503_00_00(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_redis_instance" "cache" { + name = "tf-test-memory-cache%{random_suffix}" + memory_size_gb = 1 + maintenance_version = "20240503_00_00" +} +`, context) +} + func testAccRedisInstance_redis5(name string) string { return fmt.Sprintf(` resource "google_redis_instance" "test" { From 6742c3a25b388ebbd567b462693c4855e5c8c179 Mon Sep 17 00:00:00 2001 From: brandon-m-hansen <103201169+brandon-m-hansen@users.noreply.github.com> Date: Wed, 12 Jun 2024 21:34:32 +0000 Subject: [PATCH 132/356] Add autoscaling to DPMS terraform (#10522) --- mmv1/products/metastore/Service.yaml | 48 +++++++++++++++++++ ...vice_autoscaling_max_scaling_factor.tf.erb | 21 ++++++++ ...oscaling_min_and_max_scaling_factor.tf.erb | 22 +++++++++ ...vice_autoscaling_min_scaling_factor.tf.erb | 21 ++++++++ ...service_autoscaling_no_limit_config.tf.erb | 18 +++++++ ...ice_autoscaling_max_scaling_factor.tf.tmpl | 21 ++++++++ ...scaling_min_and_max_scaling_factor.tf.tmpl | 22 +++++++++ ...ice_autoscaling_min_scaling_factor.tf.tmpl | 21 ++++++++ ...ervice_autoscaling_no_limit_config.tf.tmpl | 18 +++++++ 9 files changed, 212 insertions(+) create mode 100644 mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_max_scaling_factor.tf.erb create 
mode 100644 mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_min_and_max_scaling_factor.tf.erb create mode 100644 mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_min_scaling_factor.tf.erb create mode 100644 mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_no_limit_config.tf.erb create mode 100644 mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_max_scaling_factor.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_min_and_max_scaling_factor.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_min_scaling_factor.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_no_limit_config.tf.tmpl diff --git a/mmv1/products/metastore/Service.yaml b/mmv1/products/metastore/Service.yaml index e85d012ae75b..764d61d23cfe 100644 --- a/mmv1/products/metastore/Service.yaml +++ b/mmv1/products/metastore/Service.yaml @@ -143,6 +143,26 @@ examples: primary_resource_id: 'backup' vars: metastore_service_name: 'backup' + - !ruby/object:Provider::Terraform::Examples + name: 'dataproc_metastore_service_autoscaling_max_scaling_factor' + primary_resource_id: 'test_resource' + vars: + metastore_service_name: 'test-service' + - !ruby/object:Provider::Terraform::Examples + name: 'dataproc_metastore_service_autoscaling_min_and_max_scaling_factor' + primary_resource_id: 'test_resource' + vars: + metastore_service_name: 'test-service' + - !ruby/object:Provider::Terraform::Examples + name: 'dataproc_metastore_service_autoscaling_min_scaling_factor' + primary_resource_id: 'test_resource' + vars: + metastore_service_name: 'test-service' + - !ruby/object:Provider::Terraform::Examples + name: 'dataproc_metastore_service_autoscaling_no_limit_config' + primary_resource_id: 'test_resource' + vars: + metastore_service_name: 'test-service' parameters: - 
!ruby/object:Api::Type::String name: 'serviceId' @@ -228,6 +248,7 @@ properties: exactly_one_of: - scaling_config.0.instance_size - scaling_config.0.scaling_factor + - scaling_config.0.autoscaling_config conflicts: - tier values: @@ -241,6 +262,33 @@ properties: description: | Scaling factor, in increments of 0.1 for values less than 1.0, and increments of 1.0 for values greater than 1.0. required: false + - !ruby/object:Api::Type::NestedObject + name: 'autoscalingConfig' + min_version: beta + description: | + Represents the autoscaling configuration of a metastore service. + required: false + properties: + - !ruby/object:Api::Type::Boolean + name: 'autoscalingEnabled' + description: | + Defines whether autoscaling is enabled. The default value is false. + - !ruby/object:Api::Type::NestedObject + name: 'limitConfig' + default_from_api: true + description: | + Represents the limit configuration of a metastore service. + properties: + - !ruby/object:Api::Type::Double + name: 'minScalingFactor' + description: | + The minimum scaling factor that the service will autoscale to. The default value is 0.1. + default_from_api: true + - !ruby/object:Api::Type::Double + name: 'maxScalingFactor' + description: | + The maximum scaling factor that the service will autoscale to. The default value is 6.0. 
+ default_from_api: true - !ruby/object:Api::Type::NestedObject name: 'scheduledBackup' description: | diff --git a/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_max_scaling_factor.tf.erb b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_max_scaling_factor.tf.erb new file mode 100644 index 000000000000..92df03031c00 --- /dev/null +++ b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_max_scaling_factor.tf.erb @@ -0,0 +1,21 @@ +resource "google_dataproc_metastore_service" "<%= ctx[:primary_resource_id] %>" { + service_id = "<%= ctx[:vars]['metastore_service_name'] %>" + location = "us-central1" + + # DPMS 2 requires SPANNER database type, and does not require + # a maintenance window. + database_type = "SPANNER" + + hive_metastore_config { + version = "3.1.2" + } + + scaling_config { + autoscaling_config { + autoscaling_enabled = true + limit_config { + max_scaling_factor = 1.0 + } + } + } +} diff --git a/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_min_and_max_scaling_factor.tf.erb b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_min_and_max_scaling_factor.tf.erb new file mode 100644 index 000000000000..3601bf231716 --- /dev/null +++ b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_min_and_max_scaling_factor.tf.erb @@ -0,0 +1,22 @@ +resource "google_dataproc_metastore_service" "<%= ctx[:primary_resource_id] %>" { + service_id = "<%= ctx[:vars]['metastore_service_name'] %>" + location = "us-central1" + + # DPMS 2 requires SPANNER database type, and does not require + # a maintenance window. 
+ database_type = "SPANNER" + + hive_metastore_config { + version = "3.1.2" + } + + scaling_config { + autoscaling_config { + autoscaling_enabled = true + limit_config { + min_scaling_factor = 0.1 + max_scaling_factor = 1.0 + } + } + } +} diff --git a/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_min_scaling_factor.tf.erb b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_min_scaling_factor.tf.erb new file mode 100644 index 000000000000..27268dda15d0 --- /dev/null +++ b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_min_scaling_factor.tf.erb @@ -0,0 +1,21 @@ +resource "google_dataproc_metastore_service" "<%= ctx[:primary_resource_id] %>" { + service_id = "<%= ctx[:vars]['metastore_service_name'] %>" + location = "us-central1" + + # DPMS 2 requires SPANNER database type, and does not require + # a maintenance window. + database_type = "SPANNER" + + hive_metastore_config { + version = "3.1.2" + } + + scaling_config { + autoscaling_config { + autoscaling_enabled = true + limit_config { + min_scaling_factor = 0.1 + } + } + } +} diff --git a/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_no_limit_config.tf.erb b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_no_limit_config.tf.erb new file mode 100644 index 000000000000..069df1b6e767 --- /dev/null +++ b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_no_limit_config.tf.erb @@ -0,0 +1,18 @@ +resource "google_dataproc_metastore_service" "<%= ctx[:primary_resource_id] %>" { + service_id = "<%= ctx[:vars]['metastore_service_name'] %>" + location = "us-central1" + + # DPMS 2 requires SPANNER database type, and does not require + # a maintenance window. 
+ database_type = "SPANNER" + + hive_metastore_config { + version = "3.1.2" + } + + scaling_config { + autoscaling_config { + autoscaling_enabled = true + } + } +} diff --git a/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_max_scaling_factor.tf.tmpl b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_max_scaling_factor.tf.tmpl new file mode 100644 index 000000000000..be032f873ff1 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_max_scaling_factor.tf.tmpl @@ -0,0 +1,21 @@ +resource "google_dataproc_metastore_service" "{{$.PrimaryResourceId}}" { + service_id = "{{index $.Vars "metastore_service_name"}}" + location = "us-central1" + + # DPMS 2 requires SPANNER database type, and does not require + # a maintenance window. + database_type = "SPANNER" + + hive_metastore_config { + version = "3.1.2" + } + + scaling_config { + autoscaling_config { + autoscaling_enabled = true + limit_config { + max_scaling_factor = 1.0 + } + } + } +} diff --git a/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_min_and_max_scaling_factor.tf.tmpl b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_min_and_max_scaling_factor.tf.tmpl new file mode 100644 index 000000000000..9cc583eced99 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_min_and_max_scaling_factor.tf.tmpl @@ -0,0 +1,22 @@ +resource "google_dataproc_metastore_service" "{{$.PrimaryResourceId}}" { + service_id = "{{index $.Vars "metastore_service_name"}}" + location = "us-central1" + + # DPMS 2 requires SPANNER database type, and does not require + # a maintenance window. 
+ database_type = "SPANNER" + + hive_metastore_config { + version = "3.1.2" + } + + scaling_config { + autoscaling_config { + autoscaling_enabled = true + limit_config { + min_scaling_factor = 0.1 + max_scaling_factor = 1.0 + } + } + } +} diff --git a/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_min_scaling_factor.tf.tmpl b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_min_scaling_factor.tf.tmpl new file mode 100644 index 000000000000..60b281b9aaf9 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_min_scaling_factor.tf.tmpl @@ -0,0 +1,21 @@ +resource "google_dataproc_metastore_service" "{{$.PrimaryResourceId}}" { + service_id = "{{index $.Vars "metastore_service_name"}}" + location = "us-central1" + + # DPMS 2 requires SPANNER database type, and does not require + # a maintenance window. + database_type = "SPANNER" + + hive_metastore_config { + version = "3.1.2" + } + + scaling_config { + autoscaling_config { + autoscaling_enabled = true + limit_config { + min_scaling_factor = 0.1 + } + } + } +} diff --git a/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_no_limit_config.tf.tmpl b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_no_limit_config.tf.tmpl new file mode 100644 index 000000000000..cd2307db9938 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/dataproc_metastore_service_autoscaling_no_limit_config.tf.tmpl @@ -0,0 +1,18 @@ +resource "google_dataproc_metastore_service" "{{$.PrimaryResourceId}}" { + service_id = "{{index $.Vars "metastore_service_name"}}" + location = "us-central1" + + # DPMS 2 requires SPANNER database type, and does not require + # a maintenance window. 
+ database_type = "SPANNER" + + hive_metastore_config { + version = "3.1.2" + } + + scaling_config { + autoscaling_config { + autoscaling_enabled = true + } + } +} From 9f14ec08fbc1c3d2c14e359fbb9078bc5bbc4eb7 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Wed, 12 Jun 2024 15:10:38 -0700 Subject: [PATCH 133/356] Go rewrite remaining templates (#10953) --- mmv1/api/resource.go | 6 +++ mmv1/provider/template_data.go | 1 + mmv1/templates/terraform/resource.go.tmpl | 6 +++ mmv1/templates/terraform/self_link_query.erb | 37 ----------------- .../unordered_list_customize_diff.go.tmpl | 41 +++++++++++++++++++ 5 files changed, 54 insertions(+), 37 deletions(-) delete mode 100644 mmv1/templates/terraform/self_link_query.erb create mode 100644 mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 97b3f7a7e4fa..f5abc6b7800d 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -449,6 +449,12 @@ func (r Resource) IsSettableProperty(t *Type) bool { return slices.Contains(r.SettableProperties(), t) } +func (r Resource) UnorderedListProperties() []*Type { + return google.Select(r.SettableProperties(), func(t *Type) bool { + return t.UnorderedList + }) +} + // Properties that will be returned in the API body // def gettable_properties diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index fcd04a8903f4..dfbab55293d6 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -78,6 +78,7 @@ func (td *TemplateData) GenerateResourceFile(filePath string, resource api.Resou "templates/terraform/expand_property_method.go.tmpl", "templates/terraform/update_mask.go.tmpl", "templates/terraform/nested_query.go.tmpl", + "templates/terraform/unordered_list_customize_diff.go.tmpl", } td.GenerateFile(filePath, templatePath, resource, true, templates...) 
} diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 6d8ca5368bfa..f8a341a1c3fd 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -176,6 +176,12 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { {{end}} {{- end}} +{{- range $prop := $.UnorderedListProperties }} +func resource{{ $.ResourceName }}{{ camelize $prop.Name "upper" }}SetStyleDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { +{{template "UnorderedListCustomizeDiff" $prop}} +} +{{- end}} + func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{}) error { {{- if and ($.GetAsync.IsA "OpAsync") ($.GetAsync.IncludeProject) ($.GetAsync.Allow "Create") -}} var project string diff --git a/mmv1/templates/terraform/self_link_query.erb b/mmv1/templates/terraform/self_link_query.erb deleted file mode 100644 index c7ed618038d5..000000000000 --- a/mmv1/templates/terraform/self_link_query.erb +++ /dev/null @@ -1,37 +0,0 @@ - // Extract the object we're interested in from the list response. 
- itemsList_ := res["<%= object.self_link_query.items -%>"] - var itemsList []interface{} - if itemsList_ != nil { - itemsList = itemsList_.([]interface{}) - } - listObj := make([]map[string]interface{}, len(itemsList)) - for i, item := range itemsList { - listObj[i] = item.(map[string]interface{}) - } - res = nil - for _, item := range listObj { - <% object.identity.each do |prop| -%> - <% if settable_properties.include?(prop) -%> - this<%= titlelize_property(prop) -%>, err := expand<%= object.resource_name -%><%= titlelize_property(prop) -%>(d.Get("<%= prop.name.underscore -%>"), d, config) - if err != nil { - return err - } - <% else -%> - this<%= titlelize_property(prop) -%> := d.Get("<%= prop.name.underscore -%>") - <% end -%> - that<%= titlelize_property(prop) -%> := flatten<%= object.resource_name -%><%= titlelize_property(prop) -%>(item["<%= prop.api_name -%>"], d, config) - log.Printf("[DEBUG] Checking equality of %#v, %#v", that<%= titlelize_property(prop) -%>, this<%= titlelize_property(prop) -%>) - if !reflect.DeepEqual(that<%= titlelize_property(prop) -%>, this<%= titlelize_property(prop) -%>) { - continue - } - <% end -%> - res = item - break - } - if res == nil { - // Object isn't there any more - remove it from the state. - log.Printf("[DEBUG] Removing <%= object.resource_name -%> because it couldn't be matched.") - d.SetId("") - return nil - } - diff --git a/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl b/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl new file mode 100644 index 000000000000..eb42e9f0f556 --- /dev/null +++ b/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl @@ -0,0 +1,41 @@ +{{- define "UnorderedListCustomizeDiff" }} +keys := diff.GetChangedKeysPrefix({{ underscore $.Name }}) +if len(keys) == 0 { + return nil +} +oldCount, newCount := diff.GetChange("{{ underscore $.Name }}.#") +var count int +// There could be duplicates - worth continuing even if the counts are unequal. 
+if oldCount.(int) < newCount.(int) { + count = newCount.(int) +} else { + count = oldCount.(int) +} + +if count < 1 { + return nil +} +old := make([]interface{}, count) +new := make([]interface{}, count) +for i := 0; i < count; i++ { + o, n := diff.GetChange(fmt.Sprintf("{{ underscore $.Name }}.%d", i)) + + if o != nil { + old = append(old, o) + } + if n != nil { + new = append(new, n) + } +} + +oldSet := schema.NewSet(schema.HashResource(Resource{{ $.ResourceName }}().Schema[{{ underscore $.Name }}].Elem.(*schema.Resource)), old) +newSet := schema.NewSet(schema.HashResource(Resource{{ $.ResourceName }}().Schema[{{ underscore $.Name }}].Elem.(*schema.Resource)), new) + +if oldSet.Equal(newSet) { + if err := diff.Clear({{ underscore $.Name }}); err != nil { + return err + } +} + +return nil +{{- end }} \ No newline at end of file From 40a0f1d6b14b154c81215e2b2840ae242548f74c Mon Sep 17 00:00:00 2001 From: Ryan Oaks Date: Thu, 13 Jun 2024 09:44:51 -0400 Subject: [PATCH 134/356] Revert extraneous whitespace changes from #10904 (#10958) --- .../compute/compute_instance_helpers.go.erb | 70 +++++++++---------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb index fa5cea8fc74d..297a10389cda 100644 --- a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb +++ b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb @@ -702,41 +702,41 @@ func schedulingHasChangeRequiringReboot(d *schema.ResourceData) bool { // Terraform doesn't correctly calculate changes on schema.Set, so we do it manually // https://github.com/hashicorp/terraform-plugin-sdk/issues/98 func schedulingHasChangeWithoutReboot(d *schema.ResourceData) bool { - if !d.HasChange("scheduling") { - // This doesn't work correctly, which is why this method exists - // But it is here for posterity - return 
false - } - o, n := d.GetChange("scheduling") - oScheduling := o.([]interface{})[0].(map[string]interface{}) - newScheduling := n.([]interface{})[0].(map[string]interface{}) - - if schedulingHasChangeRequiringReboot(d) { - return false - } - - if oScheduling["automatic_restart"] != newScheduling["automatic_restart"] { - return true - } - - if oScheduling["preemptible"] != newScheduling["preemptible"] { - return true - } - - if oScheduling["on_host_maintenance"] != newScheduling["on_host_maintenance"] { - return true - } - - if oScheduling["provisioning_model"] != newScheduling["provisioning_model"] { - return true - } - - if oScheduling["instance_termination_action"] != newScheduling["instance_termination_action"] { - return true - } - - return false - } + if !d.HasChange("scheduling") { + // This doesn't work correctly, which is why this method exists + // But it is here for posterity + return false + } + o, n := d.GetChange("scheduling") + oScheduling := o.([]interface{})[0].(map[string]interface{}) + newScheduling := n.([]interface{})[0].(map[string]interface{}) + + if schedulingHasChangeRequiringReboot(d) { + return false + } + + if oScheduling["automatic_restart"] != newScheduling["automatic_restart"] { + return true + } + + if oScheduling["preemptible"] != newScheduling["preemptible"] { + return true + } + + if oScheduling["on_host_maintenance"] != newScheduling["on_host_maintenance"] { + return true + } + + if oScheduling["provisioning_model"] != newScheduling["provisioning_model"] { + return true + } + + if oScheduling["instance_termination_action"] != newScheduling["instance_termination_action"] { + return true + } + + return false +} <% unless version == 'ga' -%> func hasMaxRunDurationChanged(oScheduling, nScheduling map[string]interface{}) bool { From 6504cb3d8c85e1cc20b4bd2a913bf80a6032aa77 Mon Sep 17 00:00:00 2001 From: dvfons <167889585+dvfons@users.noreply.github.com> Date: Thu, 13 Jun 2024 15:00:15 +0000 Subject: [PATCH 135/356] add support for 
'apikeys.googleapis.com/Key' to TGC (#10788) --- mmv1/provider/terraform_tgc.rb | 4 +- mmv1/templates/tgc/resource_converters.go.erb | 1 + mmv1/third_party/tgc/apikeys_key.go | 244 ++++++++++++++++++ .../tgc/tests/data/example_apikeys_key.json | 30 +++ .../tgc/tests/data/example_apikeys_key.tf | 33 +++ 5 files changed, 311 insertions(+), 1 deletion(-) create mode 100644 mmv1/third_party/tgc/apikeys_key.go create mode 100644 mmv1/third_party/tgc/tests/data/example_apikeys_key.json create mode 100644 mmv1/third_party/tgc/tests/data/example_apikeys_key.tf diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index 2ea63de82a4a..ad518c969eac 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -322,7 +322,9 @@ def copy_common_files(output_folder, generate_code, _generate_docs) ['converters/google/resources/commitment.go', 'third_party/tgc/commitment.go'], ['converters/google/resources/firebase_project.go', - 'third_party/tgc/firebase_project.go'] + 'third_party/tgc/firebase_project.go'], + ['converters/google/resources/apikeys_key.go', + 'third_party/tgc/apikeys_key.go'] ]) end diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index 5f1d0474b3d3..78ee459ca3f0 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -38,6 +38,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_artifact_registry_repository": {artifactregistry.ResourceConverterArtifactRegistryRepository()}, "google_alloydb_cluster": {alloydb.ResourceConverterAlloydbCluster()}, "google_alloydb_instance": {alloydb.ResourceConverterAlloydbInstance()}, + "google_apikeys_key": {resourceConverterApikeysKey()}, "google_compute_address": {compute.ResourceConverterComputeAddress()}, "google_compute_autoscaler": {compute.ResourceConverterComputeAutoscaler()}, "google_compute_firewall": 
{compute.ResourceConverterComputeFirewall()}, diff --git a/mmv1/third_party/tgc/apikeys_key.go b/mmv1/third_party/tgc/apikeys_key.go new file mode 100644 index 000000000000..33fbb101640d --- /dev/null +++ b/mmv1/third_party/tgc/apikeys_key.go @@ -0,0 +1,244 @@ +package google + +import ( + "reflect" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const ApikeysKeyAssetType string = "apikeys.googleapis.com/Key" + +func resourceConverterApikeysKey() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: ApikeysKeyAssetType, + Convert: GetApikeysKeyCaiObject, + } +} + +func GetApikeysKeyCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//apikeys.googleapis.com/v2/projects/{{project}}/locations/global/keys/{{key}}") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetApikeysKeyApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: ApikeysKeyAssetType, + Resource: &cai.AssetResource{ + Version: "v2", + DiscoveryDocumentURI: "https://apikeys.googleapis.com/$discovery/rest?version=v2", + DiscoveryName: "Apikeyskey", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetApikeysKeyApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := make(map[string]interface{}) + + uidProp, err := expandApikeysKeyUid(d.Get("uid"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("uid"); !tpgresource.IsEmptyValue(reflect.ValueOf(uidProp)) && (ok || !reflect.DeepEqual(v, uidProp)) { + obj["uid"] = uidProp + } + + displayNameProp, err := 
expandApikeysKeyDisplayName(d.Get("displayName"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("displayName"); !tpgresource.IsEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + + keyStringProp, err := expandApikeysKeyKeyString(d.Get("keyString"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("keyString"); !tpgresource.IsEmptyValue(reflect.ValueOf(keyStringProp)) && (ok || !reflect.DeepEqual(v, keyStringProp)) { + obj["keyString"] = keyStringProp + } + + createTimeProp, err := expandApikeysKeyCreateTime(d.Get("createTime"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("createTime"); !tpgresource.IsEmptyValue(reflect.ValueOf(createTimeProp)) && (ok || !reflect.DeepEqual(v, createTimeProp)) { + obj["createTime"] = createTimeProp + } + + updateTimeProp, err := expandApikeysKeyUpdateTime(d.Get("updateTime"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("updateTime"); !tpgresource.IsEmptyValue(reflect.ValueOf(updateTimeProp)) && (ok || !reflect.DeepEqual(v, updateTimeProp)) { + obj["updateTime"] = updateTimeProp + } + + deleteTimeProp, err := expandApikeysKeyDeleteTime(d.Get("deleteTime"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("deleteTime"); !tpgresource.IsEmptyValue(reflect.ValueOf(deleteTimeProp)) && (ok || !reflect.DeepEqual(v, deleteTimeProp)) { + obj["deleteTime"] = deleteTimeProp + } + + restrictionsProp, err := expandApikeysKeyRestrictions(d.Get("restrictions"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("restrictions"); !tpgresource.IsEmptyValue(reflect.ValueOf(restrictionsProp)) && (ok || !reflect.DeepEqual(v, restrictionsProp)) { + obj["restrictions"] = restrictionsProp + } + + etagProp, err := expandApikeysKeyDEtag(d.Get("etag"), d, config) + if 
err != nil { + return nil, err + } else if v, ok := d.GetOkExists("etag"); !tpgresource.IsEmptyValue(reflect.ValueOf(etagProp)) && (ok || !reflect.DeepEqual(v, etagProp)) { + obj["etag"] = etagProp + } + + return obj, nil +} + +func expandApikeysKeyUid(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApikeysKeyDisplayName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApikeysKeyKeyString(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApikeysKeyCreateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApikeysKeyUpdateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApikeysKeyDeleteTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApikeysKeyRestrictions(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAndroidKeyRestrictions, err := expandApikeysKeyAndroidKeyRestriction(original["android_key_restrictions"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAndroidKeyRestrictions); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["android_key_restrictions"] = transformedAndroidKeyRestrictions + } + + transformedApiTargets, err := expandApikeysKeyApiTargets(original["api_targets"], d, config) + if err != nil { + return 
nil, err + } else if val := reflect.ValueOf(transformedApiTargets); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["api_targets"] = transformedApiTargets + } + + return transformed, nil +} +func expandApikeysKeyAndroidKeyRestriction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedAllowedServices, err := expandApikeysKeyAllowedApplications(original["allowed_applications"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAllowedServices); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["allowed_applications"] = transformedAllowedServices + } + + return transformed, nil +} + +func expandApikeysKeyAllowedApplications(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPackageName, err := expandApikeysKeyPackageName(original["package_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPackageName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["package_name"] = transformedPackageName + } + + transformedSha1Fingerprint, err := expandApikeysKeySha1Fingerprint(original["sha1_fingerprint"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha1Fingerprint); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha1_fingerprint"] = transformedSha1Fingerprint + } + + return transformed, nil +} + +func expandApikeysKeyPackageName(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApikeysKeySha1Fingerprint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + + +func expandApikeysKeyApiTargets(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedService, err := expandApikeysKeyService(original["service"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedService); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["service"] = transformedService + } + + transformedMethods, err := expandApikeysKeyMethods(original["methods"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMethods); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["methods"] = transformedMethods + } + + return transformed, nil +} + +func expandApikeysKeyService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandApikeysKeyMethods(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return convertInterfaceToStringArray(v.([]interface{})), nil +} + + +func expandApikeysKeyDEtag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/tgc/tests/data/example_apikeys_key.json b/mmv1/third_party/tgc/tests/data/example_apikeys_key.json new file mode 100644 index 000000000000..2cba56941844 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_apikeys_key.json @@ -0,0 +1,30 @@ +[ + { + "name": 
"//apikeys.googleapis.com/v2/projects/{{.Provider.project}}/locations/global/keys/placeholder-JHfWqOQi", + "asset_type": "apikeys.googleapis.com/Key", + "resource": { + "version": "v2", + "discovery_document_uri": "https://apikeys.googleapis.com/$discovery/rest?version=v2", + "discovery_name": "Apikeyskey", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "restrictions": { + "android_key_restrictions": { + "allowed_applications": { + "package_name": "com.example.app123", + "sha1_fingerprint": "1699466a142d4682a5f91b50fdf400f2358e2b0b" + } + }, + "api_targets": { + "methods": [ + "GET" + ], + "service": "translate.googleapis.com" + } + } + } + }, + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "ancestors": ["organizations/{{.OrgID}}"] + } +] diff --git a/mmv1/third_party/tgc/tests/data/example_apikeys_key.tf b/mmv1/third_party/tgc/tests/data/example_apikeys_key.tf new file mode 100644 index 000000000000..b1bc934f35ac --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_apikeys_key.tf @@ -0,0 +1,33 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + + +resource "google_apikeys_key" "primary" { + name = "key" + display_name = "sample-key" + project = "{{.Provider.project}}" + + restrictions { + android_key_restrictions { + allowed_applications { + package_name = "com.example.app123" + sha1_fingerprint = "1699466a142d4682a5f91b50fdf400f2358e2b0b" + } + } + + api_targets { + service = "translate.googleapis.com" + methods = ["GET"] + } + } +} From 5e23d34e0912807f0b7ba28cb8102e4fcd667222 Mon Sep 17 00:00:00 2001 From: dfdossantos Date: Thu, 13 Jun 2024 15:02:36 +0000 Subject: [PATCH 136/356] Add support for ```composer.googleapis.com/Environment``` to TGC (#10872) --- mmv1/provider/terraform_tgc.rb | 2 + 
mmv1/templates/tgc/resource_converters.go.erb | 1 + mmv1/third_party/tgc/composer_environment.go | 432 ++++++++++++++++++ .../example_google_composer_environment.json | 132 ++++++ .../example_google_composer_environment.tf | 82 ++++ 5 files changed, 649 insertions(+) create mode 100644 mmv1/third_party/tgc/composer_environment.go create mode 100644 mmv1/third_party/tgc/tests/data/example_google_composer_environment.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_composer_environment.tf diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index ad518c969eac..4e1c972b6f48 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -319,6 +319,8 @@ def copy_common_files(output_folder, generate_code, _generate_docs) 'third_party/tgc/compute_target_pool.go'], ['converters/google/resources/dataproc_cluster.go', 'third_party/tgc/dataproc_cluster.go'], + ['converters/google/resources/composer_environment.go', + 'third_party/tgc/composer_environment.go'], ['converters/google/resources/commitment.go', 'third_party/tgc/commitment.go'], ['converters/google/resources/firebase_project.go', diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index 78ee459ca3f0..d11367627c50 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -66,6 +66,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_compute_target_https_proxy": {compute.ResourceConverterComputeTargetHttpsProxy()}, "google_compute_target_ssl_proxy": {compute.ResourceConverterComputeTargetSslProxy()}, "google_compute_target_pool": {resourceConverterComputeTargetPool()}, + "google_composer_environment": {resourceConverterComposerEnvironment()}, "google_compute_region_commitment": {resourceConverterCommitment()}, "google_dataflow_job": {resourceDataflowJob()}, "google_dataproc_autoscaling_policy": 
{dataproc.ResourceConverterDataprocAutoscalingPolicy()}, diff --git a/mmv1/third_party/tgc/composer_environment.go b/mmv1/third_party/tgc/composer_environment.go new file mode 100644 index 000000000000..fff555854d56 --- /dev/null +++ b/mmv1/third_party/tgc/composer_environment.go @@ -0,0 +1,432 @@ +package google + +import ( + "reflect" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const ComposerEnvironmentAssetType string = "composer.googleapis.com/Environment" + +func resourceConverterComposerEnvironment() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: ComposerEnvironmentAssetType, + Convert: GetComposerEnvironmentCaiObject, + } +} + +func GetComposerEnvironmentCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//compute.googleapis.com/projects/{{project}}/locations/{{location}}/environments/{{name}}") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetComposerEnvironmentApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: ComposerEnvironmentAssetType, + Resource: &cai.AssetResource{ + Version: "v1", + DiscoveryDocumentURI: "https://composer.googleapis.com/$discovery/rest?version=v1", + DiscoveryName: "Environment", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetComposerEnvironmentApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := make(map[string]interface{}) + + nameProp, err := expandComputeEnvironmentName(d.Get("name"), d, config) + if err != nil { + return nil, err + } else if v, ok := 
d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + labelsProp, err := expandComputeEnvironmentLabels(d.Get("labels"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + regionProp, err := expandComputeEnvironmentRegion(d.Get("region"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("region"); !tpgresource.IsEmptyValue(reflect.ValueOf(regionProp)) && (ok || !reflect.DeepEqual(v, regionProp)) { + obj["region"] = regionProp + } + + configProp, err := expandComputeEnvironmentConfig(d.Get("config"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("config"); !tpgresource.IsEmptyValue(reflect.ValueOf(configProp)) && (ok || !reflect.DeepEqual(v, configProp)) { + obj["config"] = configProp + } + + storageConfigProp, err := expandComputeEnvironmentStorageConfig(d.Get("storage_config"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("storage_config"); !tpgresource.IsEmptyValue(reflect.ValueOf(storageConfigProp)) && (ok || !reflect.DeepEqual(v, storageConfigProp)) { + obj["storageConfig"] = storageConfigProp + } + + return obj, nil +} + +func expandComputeEnvironmentName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentLabels(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandComputeEnvironmentRegion(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentStorageConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedNodeCount, err := expandComputeEnvironmentConfigNodeCount(original["node_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeCount); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeCount"] = transformedNodeCount + } + + transformedNodeConfig, err := expandComputeEnvironmentConfigNodeConfig(original["node_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNodeConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["nodeConfig"] = transformedNodeConfig + } + + transformedRecoveryConfig, err := expandComputeEnvironmentConfigRecoveryConfig(original["recovery_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRecoveryConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["recoveryConfig"] = transformedRecoveryConfig + } + + transformedSoftwareConfig, err := expandComputeEnvironmentConfigSoftwareConfig(original["software_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSoftwareConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["softwareConfig"] = transformedSoftwareConfig + } + + transformedPrivateEnvironmentConfig, err := 
expandComputeEnvironmentConfigPrivateEnvironmentConfig(original["private_environment_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPrivateEnvironmentConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["privateEnvironmentConfig"] = transformedPrivateEnvironmentConfig + } + + transformedWebServerNetworkAccessControl, err := expandComputeEnvironmentConfigWebServerNetworkAccessControl(original["web_server_network_access_control"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWebServerNetworkAccessControl); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["webServerNetworkAccessControl"] = transformedWebServerNetworkAccessControl + } + + transformedDatabaseConfig, err := expandComputeEnvironmentConfigDatabaseConfig(original["database_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDatabaseConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["databaseConfig"] = transformedDatabaseConfig + } + + transformedWebServerConfig, err := expandComputeEnvironmentConfigWebServerConfig(original["web_server_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWebServerConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["webServerConfig"] = transformedWebServerConfig + } + + transformedEncryptionConfig, err := expandComputeEnvironmentConfigEncryptionConfig(original["encryption_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncryptionConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["encryptionConfig"] = transformedEncryptionConfig + } + + transformedMaintenanceWindow, err := expandComputeEnvironmentConfigMaintenanceWindow(original["maintenance_window"], d, config) + if err != nil { + return nil, err + } else if val := 
reflect.ValueOf(transformedMaintenanceWindow); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maintenanceWindow"] = transformedMaintenanceWindow + } + + transformedWorkloadsConfig, err := expandComputeEnvironmentConfigWorkloadsConfig(original["workloads_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWorkloadsConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["workloadsConfig"] = transformedWorkloadsConfig + } + + transformedDataRetentionConfig, err := expandComputeEnvironmentConfigDataRetentionConfig(original["data_retention_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDataRetentionConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["dataRetentionConfig"] = transformedDataRetentionConfig + } + + transformedMasterAuthorizedNetworksConfig, err := expandComputeEnvironmentConfigMasterAuthorizedNetworksConfig(original["master_authorized_networks_config"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMasterAuthorizedNetworksConfig); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["masterAuthorizedNetworksConfig"] = transformedMasterAuthorizedNetworksConfig + } + + transformedResilienceMode, err := expandComputeEnvironmentConfigResilienceMode(original["resilience_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResilienceMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["resilienceMode"] = transformedResilienceMode + } + + return transformed, nil +} + +func expandComputeEnvironmentConfigNodeCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedZone, err := expandComputeEnvironmentConfigNodeConfigZone(original["zone"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedZone); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["zone"] = transformedZone + } + + transformedMachineType, err := expandComputeEnvironmentConfigNodeConfigMachineType(original["machine_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMachineType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["machineType"] = transformedMachineType + } + + transformedNetwork, err := expandComputeEnvironmentConfigNodeConfigNetwork(original["network"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["network"] = transformedNetwork + } + + transformedSubnetwork, err := expandComputeEnvironmentConfigNodeConfigSubnetwork(original["subnetwork"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSubnetwork); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["subnetwork"] = transformedSubnetwork + } + + transformedDiskSizeGb, err := expandComputeEnvironmentConfigNodeConfigDiskSizeGb(original["disk_size_gb"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDiskSizeGb); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["diskSizeGb"] = transformedDiskSizeGb + } + + transformedServiceAccount, err := expandComputeEnvironmentConfigNodeConfigServiceAccount(original["service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() 
&& !tpgresource.IsEmptyValue(val) { + transformed["serviceAccount"] = transformedServiceAccount + } + + transformedIpAllocationPolicy, err := expandComputeEnvironmentConfigNodeConfigIpAllocationPolicy(original["ip_allocation_policy"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIpAllocationPolicy); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["ipAllocationPolicy"] = transformedIpAllocationPolicy + } + + transformedOauthScopes, err := expandComputeEnvironmentConfigNodeConfigOauthScopes(original["oauth_scopes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedOauthScopes); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["oauthScopes"] = transformedOauthScopes + } + + transformedMaxPodsPerNode, err := expandComputeEnvironmentConfigNodeConfigMaxPodsPerNode(original["max_pods_per_node"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxPodsPerNode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxPodsPerNode"] = transformedMaxPodsPerNode + } + + transformedEnableIpMasqAgent, err := expandComputeEnvironmentConfigNodeConfigEnableIpMasqAgent(original["enable_ip_masq_agent"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnableIpMasqAgent); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enableIpMasqAgent"] = transformedEnableIpMasqAgent + } + + transformedTags, err := expandComputeEnvironmentConfigNodeConfigTags(original["tags"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTags); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["tags"] = transformedTags + } + + return transformed, nil +} + +func expandComputeEnvironmentConfigNodeConfigOauthScopes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = 
v.(*schema.Set).List() + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigTags(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigZone(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigMachineType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigNetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigSubnetwork(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigIpAllocationPolicy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigEnableIpMasqAgent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigMaxPodsPerNode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigDiskSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigRecoveryConfig(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigSoftwareConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigPrivateEnvironmentConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigWebServerNetworkAccessControl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigDatabaseConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigWebServerConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigEncryptionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigMaintenanceWindow(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigWorkloadsConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigDataRetentionConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigMasterAuthorizedNetworksConfig(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw 
:= l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedEnabled, err := expandComputeEnvironmentConfigNodeConfigEnabled(original["enabled"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnabled); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["enabled"] = transformedEnabled + } + + transformedCidrBlocks, err := expandComputeEnvironmentConfigNodeConfigCidrBlocks(original["cidr_blocks"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCidrBlocks); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["cidrBlocks"] = transformedCidrBlocks + } + + return transformed, nil +} + +func expandComputeEnvironmentConfigNodeConfigEnabled(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandComputeEnvironmentConfigNodeConfigCidrBlocks(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandComputeEnvironmentConfigResilienceMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/tgc/tests/data/example_google_composer_environment.json b/mmv1/third_party/tgc/tests/data/example_google_composer_environment.json new file mode 100644 index 000000000000..0a5e6f71a28f --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_composer_environment.json @@ -0,0 +1,132 @@ +[ + { + "name": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "asset_type": "cloudresourcemanager.googleapis.com/Project", + "iam_policy": { + "bindings": [ + { + "role": "roles/composer.worker", + "members": [ + "" + ] + } + ] + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": 
"{{.Ancestry}}/project/{{.Provider.project}}" + }, + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/global/networks/composer-test-network3", + "asset_type": "compute.googleapis.com/Network", + "resource": { + "version": "beta", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/beta/rest", + "discovery_name": "Network", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "autoCreateSubnetworks": false, + "name": "composer-test-network3", + "networkFirewallPolicyEnforcementOrder": "AFTER_CLASSIC_FIREWALL" + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + }, + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/locations/placeholder-iZKnWdRs/environments/example-composer-env-tf-c2", + "asset_type": "composer.googleapis.com/Environment", + "resource": { + "version": "v1", + "discovery_document_uri": "https://composer.googleapis.com/$discovery/rest?version=v1", + "discovery_name": "Environment", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "config": { + "softwareConfig": [ + { + "airflow_config_overrides": {}, + "cloud_data_lineage_integration": [], + "env_variables": {}, + "image_version": "composer-2-airflow-2", + "pypi_packages": {}, + "python_version": "", + "scheduler_count": 0, + "web_server_plugins_mode": "" + } + ], + "workloadsConfig": [ + { + "dag_processor": [], + "scheduler": [ + { + "count": 1, + "cpu": 0.5, + "memory_gb": 1.875, + "storage_gb": 1 + } + ], + "triggerer": [], + "web_server": [ + { + "cpu": 0.5, + "memory_gb": 1.875, + "storage_gb": 1 + } + ], + "worker": [ + { + "cpu": 0.5, + "max_count": 3, + "memory_gb": 1.875, + "min_count": 1, + "storage_gb": 1 + } + ] + } + ] + }, + "name": "example-composer-env-tf-c2", + "region": "us-central1" + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": 
"{{.Ancestry}}/project/{{.Provider.project}}" + }, + { + "name": "//compute.googleapis.com/projects/{{.Provider.project}}/regions/us-central1/subnetworks/composer-new-subnetwork", + "asset_type": "compute.googleapis.com/Subnetwork", + "resource": { + "version": "beta", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/beta/rest", + "discovery_name": "Subnetwork", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "ipCidrRange": "10.2.0.0/16", + "logConfig": { + "enable": false + }, + "name": "composer-new-subnetwork", + "region": "projects/{{.Provider.project}}/global/regions/us-central1" + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + }, + { + "name": "//iam.googleapis.com/projects/{{.Provider.project}}/serviceAccounts/placeholder-CbHGiox0", + "asset_type": "iam.googleapis.com/ServiceAccount", + "resource": { + "version": "v1", + "discovery_document_uri": "https://iam.googleapis.com/$discovery/rest", + "discovery_name": "ServiceAccount", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "displayName": "Test Service Account for Composer Environment", + "email": "composer-new-account@{{.Provider.project}}.iam.gserviceaccount.com", + "projectId": "{{.Provider.project}}" + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_composer_environment.tf b/mmv1/third_party/tgc/tests/data/example_google_composer_environment.tf new file mode 100644 index 000000000000..f5b85b2f194d --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_composer_environment.tf @@ -0,0 +1,82 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + 
+provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_composer_environment" "test" { + name = "example-composer-env-tf-c2" + region = "us-central1" + config { + + software_config { + image_version = "composer-2-airflow-2" + } + + workloads_config { + scheduler { + cpu = 0.5 + memory_gb = 1.875 + storage_gb = 1 + count = 1 + } + web_server { + cpu = 0.5 + memory_gb = 1.875 + storage_gb = 1 + } + worker { + cpu = 0.5 + memory_gb = 1.875 + storage_gb = 1 + min_count = 1 + max_count = 3 + } + + + } + environment_size = "ENVIRONMENT_SIZE_SMALL" + + node_config { + network = google_compute_network.test.id + subnetwork = google_compute_subnetwork.test.id + service_account = google_service_account.test.name + } + } +} + +resource "google_compute_network" "test" { + name = "composer-test-network3" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "composer-new-subnetwork" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.id +} + +resource "google_service_account" "test" { + account_id = "composer-new-account" + display_name = "Test Service Account for Composer Environment" +} + +resource "random_string" "suffix" { + length = 4 + upper = false + special = false +} + +resource "google_project_iam_member" "composer-worker" { + project = "${random_string.suffix.result}" + role = "roles/composer.worker" + member = "serviceAccount:${google_service_account.test.email}" +} \ No newline at end of file From 2f8d7fea9e1d06cacd8fb8c8e2b680e2f21eddbb Mon Sep 17 00:00:00 2001 From: Salome Papiashvili Date: Thu, 13 Jun 2024 17:54:22 +0200 Subject: [PATCH 137/356] Reararngement of google_composer_environment resource documentation (#10941) --- .../docs/r/composer_environment.html.markdown | 40 ++++++++++++++----- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git 
a/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown b/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown index 27acf1c3bd4f..330f9e76b800 100644 --- a/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown @@ -270,10 +270,6 @@ The following arguments are supported: (Optional) The configuration used for the Kubernetes Engine cluster. Structure is [documented below](#nested_node_config_c1). -* `recovery_config` - - (Optional, Cloud Composer 2 only) - The configuration settings for recovery. Structure is [documented below](#nested_recovery_config_c1). - * `software_config` - (Optional) The configuration settings for software inside the environment. Structure is [documented below](#nested_software_config_c1). @@ -459,11 +455,6 @@ The following arguments are supported: See [documentation](https://cloud.google.com/composer/docs/how-to/managing/configuring-private-ip) for setting up private environments. The `private_environment_config` block supports: -* `connection_type` - - (Optional, Cloud Composer 2 only) - Mode of internal communication within the Composer environment. Must be one - of `"VPC_PEERING"` or `"PRIVATE_SERVICE_CONNECT"`. - * `enable_private_endpoint` - If true, access to the public endpoint of the GKE cluster is denied. If this field is set to true, the `ip_allocation_policy.use_ip_aliases` field must @@ -652,6 +643,10 @@ The following arguments are supported: (Optional) The configuration used for the Kubernetes Engine cluster. Structure is [documented below](#nested_node_config_c2). +* `recovery_config` - + (Optional, Cloud Composer 2 only) + The configuration settings for recovery. Structure is [documented below](#nested_recovery_config_c2). + * `software_config` - (Optional) The configuration settings for software (Airflow) inside the environment. 
Structure is @@ -695,7 +690,25 @@ The following arguments are supported: master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs. Structure is - [documented below](#nested_master_authorized_networks_config_c1). + [documented below](#nested_master_authorized_networks_config_c2). + +The `master_authorized_networks_config` block supports: +* `enabled` - + (Required) + Whether or not master authorized networks is enabled. + +* `cidr_blocks` - + `cidr_blocks `define up to 50 external networks that could access Kubernetes master through HTTPS. Structure is [documented below](#nested_cidr_blocks_c2). + +The `cidr_blocks` supports: + +* `display_name` - + (Optional) + `display_name` is a field for users to identify CIDR blocks. + +* `cidr_block` - + (Required) + `cidr_block` must be specified in CIDR notation. * `data_retention_config` - (Optional, Cloud Composer 2.0.23 or newer only) @@ -841,6 +854,11 @@ The following arguments are supported: See [documentation](https://cloud.google.com/composer/docs/how-to/managing/configuring-private-ip) for setting up private environments. The `private_environment_config` block supports: +* `connection_type` - + (Optional, Cloud Composer 2 only) + Mode of internal communication within the Composer environment. Must be one + of `"VPC_PEERING"` or `"PRIVATE_SERVICE_CONNECT"`. + * `enable_private_endpoint` - If true, access to the public endpoint of the GKE cluster is denied. @@ -930,7 +948,7 @@ The following arguments are supported: The only allowed values for 'FREQ' field are 'FREQ=DAILY' and 'FREQ=WEEKLY;BYDAY=...'. Example values: 'FREQ=WEEKLY;BYDAY=TU,WE', 'FREQ=DAILY'. 
-The `recovery_config` block supports: +The `recovery_config` block supports: * `scheduled_snapshots_config` - (Optional) From 6a8e040227d82cc5b001c06ae6bef1d333e21fa6 Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Thu, 13 Jun 2024 09:25:37 -0700 Subject: [PATCH 138/356] go rewrite custom update (#10955) --- mmv1/api/resource.go | 48 +++++ mmv1/templates/terraform/resource.go.tmpl | 217 ++++++++++------------ 2 files changed, 148 insertions(+), 117 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index f5abc6b7800d..3cfcdfc49508 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -1378,3 +1378,51 @@ func (r Resource) FirstIdentityProp() *Type { return idProps[0] } + +type UpdateGroup struct { + UpdateUrl string + UpdateVerb string + UpdateId string + FingerprintName string +} + +// def properties_without_custom_update(properties) +func (r Resource) propertiesWithCustomUpdate(properties []*Type) []*Type { + return google.Reject(properties, func(p *Type) bool { + return p.UpdateUrl == "" || p.UpdateVerb == "" || p.UpdateVerb == "NOOP" + }) +} + +func (r Resource) PropertiesByCustomUpdate() map[UpdateGroup][]*Type { + customUpdateProps := r.propertiesWithCustomUpdate(r.RootProperties()) + groupedCustomUpdateProps := map[UpdateGroup][]*Type{} + for _, prop := range customUpdateProps { + groupedProperty := UpdateGroup{ UpdateUrl: prop.UpdateUrl, + UpdateVerb: prop.UpdateVerb, + UpdateId: prop.UpdateId, + FingerprintName: prop.FingerprintName} + groupedCustomUpdateProps[groupedProperty] = append(groupedCustomUpdateProps[groupedProperty], prop) + } + return groupedCustomUpdateProps +} + +func (r Resource) FieldSpecificUpdateMethods() bool { + return (len(r.PropertiesByCustomUpdate()) > 0) +} + +func (r Resource) CustomUpdatePropertiesByKey(updateUrl string, updateId string, fingerprintName string, updateVerb string) []*Type { + groupedProperties := r.PropertiesByCustomUpdate() + groupedProperty := UpdateGroup{ UpdateUrl: updateUrl, + 
UpdateVerb: updateVerb, + UpdateId: updateId, + FingerprintName: fingerprintName} + return groupedProperties[groupedProperty] +} + +func (r Resource) PropertyNamesToStrings (properties []*Type) []string{ + var propertyNames []string + for _, prop := range properties { + propertyNames = append(propertyNames, google.Underscore(prop.Name)) + } + return propertyNames +} diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index f8a341a1c3fd..2d014e226cb5 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -674,13 +674,13 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) {{if $.Updatable -}} func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{}) error { -{{- if and ($.GetAsync.IsA "OpAsync") ($.GetAsync.IncludeProject) ($.GetAsync.Allow "update") -}} +{{- if and ($.GetAsync.IsA "OpAsync") ($.GetAsync.IncludeProject) ($.GetAsync.Allow "update") -}} var project string -{{- end}} +{{- end}} config := meta.(*transport_tpg.Config) -{{if $.CustomCode.CustomUpdate -}} +{{ if $.CustomCode.CustomUpdate -}} //TODO custom update -{{ else -}} +{{ else -}} userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err @@ -688,58 +688,58 @@ func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{ billingProject := "" -{{if $.HasProject -}} +{{ if $.HasProject -}} project, err := tpgresource.GetProject(d, config) if err != nil { return fmt.Errorf("Error fetching project for {{ $.Name -}}: %s", err) } -{{if $.LegacyLongFormProject -}} +{{ if $.LegacyLongFormProject -}} billingProject = strings.TrimPrefix(project, "projects/") -{{ else -}} +{{ else -}} billingProject = project -{{- end}} -{{- end}} +{{- end}} +{{- end}} -{{if not $.Immutable -}} +{{ if not $.Immutable -}} obj := make(map[string]interface{}) -{{- range $prop := $.UpdateBodyProperties }} +{{- range 
$prop := $.UpdateBodyProperties }} {{/* flattened $s won't have something stored in state so instead nil is passed to the next expander. */}} {{- $prop.ApiName -}}Prop, err := expand{{ if $.NestedQuery -}}Nested{{end}}{{ $.ResourceName -}}{{ camelize $prop.Name "upper" -}}({{ if $prop.FlattenObject }}nil{{else}}d.Get("{{underscore $prop.Name}}"){{ end }}, d, config) if err != nil { return err -{{- if $prop.SendEmptyValue -}} +{{- if $prop.SendEmptyValue -}} } else if v, ok := d.GetOkExists("{{ underscore $prop.Name -}}"); ok || !reflect.DeepEqual(v, {{ $prop.ApiName -}}Prop) { -{{- else if $prop.FlattenObject -}} +{{- else if $prop.FlattenObject -}} } else if !tpgresource.IsEmptyValue(reflect.ValueOf({{ $prop.ApiName -}}Prop)) { -{{- else -}} +{{- else -}} } else if v, ok := d.GetOkExists("{{ underscore $prop.Name -}}"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, {{ $prop.ApiName -}}Prop)) { -{{- end}} +{{- end}} obj["{{ $prop.ApiName -}}"] = {{ $prop.ApiName -}}Prop } -{{- end}} +{{- end}} {{/* We need to decide what encoder to use here - if there's an update encoder, use that! 
-*/}} -{{if $.CustomCode.UpdateEncoder -}} +{{ if $.CustomCode.UpdateEncoder -}} obj, err = resource{{ $.ResourceName -}}UpdateEncoder(d, meta, obj) if err != nil { return err } -{{ else if $.CustomCode.Encoder -}} +{{ else if $.CustomCode.Encoder -}} obj, err = resource{{ $.ResourceName -}}Encoder(d, meta, obj) if err != nil { return err } -{{- end}} +{{- end}} -{{if $.Mutex -}} +{{ if $.Mutex -}} lockName, err := tpgresource.ReplaceVars(d, config, "{{ $.Mutex -}}") if err != nil { return err } transport_tpg.MutexStore.Lock(lockName) defer transport_tpg.MutexStore.Unlock(lockName) -{{- end}} +{{- end}} url, err := tpgresource.ReplaceVars{{if $.LegacyLongFormProject -}}ForId{{ end -}}(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{ $.UpdateUri }}") if err != nil { @@ -748,36 +748,36 @@ func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Updating {{ $.Name }} %q: %#v", d.Id(), obj) headers := make(http.Header) -{{- if $.UpdateMask -}} +{{- if $.UpdateMask -}} {{template "UpdateMask" $ -}} -{{end}} -{{- if $.CustomCode.PreUpdate -}} +{{ end}} +{{- if $.CustomCode.PreUpdate -}} {{- $.CustomTemplate $.CustomCode.PreUpdate true -}} -{{end}} -{{if $.NestedQuery -}} -{{if $.NestedQuery.ModifyByPatch -}} +{{ end}} +{{ if $.NestedQuery -}} +{{ if $.NestedQuery.ModifyByPatch -}} {{/*# Keep this after mutex - patch request data relies on current resource state */}} obj, err = resource{{ $.ResourceName -}}PatchUpdateEncoder(d, meta, obj) if err != nil { return err } -{{- end}} -{{- end}} -{{if $.SupportsIndirectUserProjectOverride -}} +{{- end}} +{{- end}} +{{ if $.SupportsIndirectUserProjectOverride -}} if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { billingProject = parts[1] } -{{- end}} +{{- end}} // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { billingProject = bp } -{{if 
$.UpdateMask -}} +{{ if $.UpdateMask -}} // if updateMask is empty we are not updating anything so skip the post if len(updateMask) > 0 { -{{- end}} +{{- end}} res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "{{ $.UpdateVerb -}}", @@ -787,12 +787,12 @@ if len(updateMask) > 0 { Body: obj, Timeout: d.Timeout(schema.TimeoutUpdate), Headers: headers, -{{ if $.ErrorRetryPredicates -}} +{{ if $.ErrorRetryPredicates -}} ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, -{{- end}} -{{- if $.ErrorAbortPredicates -}} +{{- end}} +{{- if $.ErrorAbortPredicates -}} ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}}, -{{- end}} +{{- end}} }) if err != nil { @@ -801,8 +801,8 @@ if len(updateMask) > 0 { log.Printf("[DEBUG] Finished updating {{ $.Name }} %q: %#v", d.Id(), res) } -{{if $.GetAsync.Allow "update" -}} -{{if $.GetAsync.IsA "OpAsync" -}} +{{ if $.GetAsync.Allow "update" -}} +{{ if $.GetAsync.IsA "OpAsync" -}} err = {{ $.ClientNamePascal -}}OperationWaitTime( config, res, {{if or $.HasProject $.GetAsync.IncludeProject -}} {{if $.LegacyLongFormProject -}}tpgresource.GetResourceNameFromSelfLink(project){{ else }}project{{ end }}, {{ end -}} "Updating {{ $.Name -}}", userAgent, d.Timeout(schema.TimeoutUpdate)) @@ -810,49 +810,36 @@ if len(updateMask) > 0 { if err != nil { return err } -{{ else if $.GetAsync.IsA "PollAsync" -}} +{{ else if $.GetAsync.IsA "PollAsync" -}} err = transport_tpg.PollingWaitTime(resource{{ $.ResourceName -}}PollRead(d, meta), {{ $.GetAsync.CheckResponseFuncExistence -}}, "Updating {{ $.Name -}}", d.Timeout(schema.TimeoutUpdate), {{ $.GetAsync.TargetOccurrences -}}) if err != nil { -{{if $.GetAsync.SuppressError -}} +{{ if $.GetAsync.SuppressError -}} log.Printf("[ERROR] Unable to confirm eventually consistent {{ $.Name -}} %q finished updating: %q", d.Id(), err) -{{ else -}} 
+{{ else -}} return err -{{- end}} +{{- end}} } -{{- end}} -{{- end}} -{{if $.UpdateMask -}} +{{- end}} +{{- end}} +{{ if $.UpdateMask -}} } -{{- end}} -{{ end -}} -{{if eq 0 1 -}} -{{/* TODO THIS BLOCK NEEDS FUNCTIONS TO WORK -- LINE 982 - -* field_specific_update_methods -* properties_by_custom_update -* group_by / key[:] - -*/}} -//TODO field_specific_update_methods($.root_properties) +{{- end}} +{{- end}}{{/*if not immutable*/}} +{{ if $.FieldSpecificUpdateMethods }} d.Partial(true) - -{{/* properties_by_custom_update($.root_properties) - .sort_by {|k, _| k.nil? ? "" : k[:update_id].to_s} - .each do |key, props| --*/}} - if {{/* props.map { |prop| "d.HasChange(\"#{underscore $prop.Name }\")" }.join ' || ' -*/}} { +{{- range $index, $props := $.PropertiesByCustomUpdate }} +if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\""}}") { obj := make(map[string]interface{}) - -{{/*- TODO 878 if key[:fingerprint_name] -*/}} +{{ if $index.FingerprintName }} getUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.SelfLinkUri}}" -}}") if err != nil { return err } -{{if $.SupportsIndirectUserProjectOverride -}} +{{ if $.SupportsIndirectUserProjectOverride -}} if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { billingProject = parts[1] } -{{- end}} +{{- end}} // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { @@ -865,23 +852,22 @@ if len(updateMask) > 0 { Project: billingProject, RawURL: getUrl, UserAgent: userAgent, -{{if $.ErrorRetryPredicates -}} +{{ if $.ErrorRetryPredicates -}} ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, -{{- end}} -{{- if $.ErrorAbortPredicates -}} - ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}}, -{{- end}} +{{- 
end}}
+{{ if $.ErrorAbortPredicates -}}
+	ErrorAbortPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}},
+{{- end}}
 	})
 	if err != nil {
 		return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("{{ $.ResourceName -}} %q", d.Id()))
 	}

-	obj["{{/* key[:fingerprint_name] */}}"] = getRes["{{/* key[:fingerprint_name] */}}"]
+	obj["{{ $index.FingerprintName }}"] = getRes["{{ $index.FingerprintName }}"]

-{{/* end -*/}}
-{{/*- TODO range $prop := $.CustomUpdatePropertiesByKey $.AllUserProperties key */}}
-{{ range $prop := $.AllProperties }}
-	{{ $prop.ApiName -}}Prop, err := expand{{ if $.NestedQuery -}}Nested{{ end }}{{ $.ResourceName -}}{{ camelize $prop.Name "upper" -}}({{ if $prop.FlattenObject }}nil{{else}}d.Get("{{underscore $prop.Name}}"){{ end }}, d, config)
+{{ end }}{{/*if FingerprintName*/}}
+{{ range $propsByKey := $.CustomUpdatePropertiesByKey $index.UpdateUrl $index.UpdateId $index.FingerprintName $index.UpdateVerb }}
+	{{ $propsByKey.ApiName -}}Prop, err := expand{{ if $.NestedQuery -}}Nested{{ end }}{{ $.ResourceName -}}{{ camelize $propsByKey.Name "upper" -}}({{ if $propsByKey.FlattenObject }}nil{{else}}d.Get("{{underscore $propsByKey.Name}}"){{ end }}, d, config)
 	if err != nil {
 		return err
 {{/*
 	There is some nuance in when we choose to send a value to an update function.
@@ -901,48 +887,47 @@ if len(updateMask) > 0 {
 	`NullFields` is a special case of `send_empty_value` where the empty value in question is go's literal nil.
-*/}} -{{if $prop.SendEmptyValue -}} - } else if v, ok := d.GetOkExists("{{ underscore $prop.Name -}}"); ok || !reflect.DeepEqual(v, {{ $prop.ApiName -}}Prop) { -{{ else if $prop.FlattenObject -}} - } else if !tpgresource.IsEmptyValue(reflect.ValueOf({{ $prop.ApiName -}}Prop)) { -{{ else -}} - } else if v, ok := d.GetOkExists("{{ underscore $prop.Name -}}"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, {{ $prop.ApiName -}}Prop)) { -{{- end}} - obj["{{ $prop.ApiName -}}"] = {{ $prop.ApiName -}}Prop +{{ if $propsByKey.SendEmptyValue -}} + } else if v, ok := d.GetOkExists("{{ underscore $propsByKey.Name -}}"); ok || !reflect.DeepEqual(v, {{ $propsByKey.ApiName -}}Prop) { +{{ else if $propsByKey.FlattenObject -}} + } else if !tpgresource.IsEmptyValue(reflect.ValueOf({{ $propsByKey.ApiName -}}Prop)) { +{{ else -}} + } else if v, ok := d.GetOkExists("{{ underscore $propsByKey.Name -}}"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, {{ $propsByKey.ApiName -}}Prop)) { +{{- end}} + obj["{{ $propsByKey.ApiName -}}"] = {{ $propsByKey.ApiName -}}Prop } -{{/* end # props.each -*/}} - +{{ end -}}{{/*range propsByKey*/}} {{/* We need to decide what encoder to use here - if there's an update encoder, use that! 
-*/}} -{{if $.CustomCode.update_encoder -}} +{{ if $.CustomCode.UpdateEncoder -}} obj, err = resource{{ $.ResourceName -}}UpdateEncoder(d, meta, obj) if err != nil { return err } -{{- end}} +{{- end}} -{{if $.Mutex -}} +{{ if $.Mutex -}} lockName, err := tpgresource.ReplaceVars(d, config, "{{ $.Mutex -}}") if err != nil { return err } transport_tpg.MutexStore.Lock(lockName) defer transport_tpg.MutexStore.Unlock(lockName) -{{- end}} - - url, err := tpgresource.ReplaceVars{{if $.LegacyLongFormProject -}}ForId{{ end -}}(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{ $.UpdateUrl }}")-}}") +{{- end}} + url, err := tpgresource.ReplaceVars{{if $.LegacyLongFormProject -}}ForId{{ end -}}(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{ $index.UpdateUrl }}") if err != nil { return err } -{{ if $.CustomCode.PreUpdate -}} + headers := make(http.Header) +{{ if $.CustomCode.PreUpdate -}} //TODO Preupdate -{{ end}} -{{if $.SupportsIndirectUserProjectOverride -}} +{{ end}} +{{ if $.SupportsIndirectUserProjectOverride -}} if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { billingProject = parts[1] } -{{- end}} +{{- end}} // err == nil indicates that the billing_project value was found if bp, err := tpgresource.GetBillingProject(d, config); err == nil { @@ -951,18 +936,18 @@ if len(updateMask) > 0 { res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "{{/* key[:update_verb] -*/}}", + Method: "{{ $index.UpdateVerb }}", Project: billingProject, RawURL: url, UserAgent: userAgent, Body: obj, Timeout: d.Timeout(schema.TimeoutUpdate), -{{if $.ErrorRetryPredicates -}} +{{ if $.ErrorRetryPredicates -}} ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, -{{- end}} -{{- if $.ErrorAbortPredicates -}} +{{- end}} +{{- if $.ErrorAbortPredicates -}} ErrorAbortPredicates: 
[]transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorAbortPredicates "," -}}{{"}"}}, -{{- end}} +{{- end}} Headers: headers, }) if err != nil { @@ -971,8 +956,8 @@ if len(updateMask) > 0 { log.Printf("[DEBUG] Finished updating {{ $.Name -}} %q: %#v", d.Id(), res) } -{{if $.GetAsync.Allow "update" -}} -{{if $.GetAsync.IsA "OpAsync" -}} +{{ if $.GetAsync.Allow "update" -}} +{{ if $.GetAsync.IsA "OpAsync" -}} err = {{ $.ClientNamePascal -}}OperationWaitTime( config, res, {{if or $.HasProject $.GetAsync.IncludeProject -}} {{if $.LegacyLongFormProject -}}tpgresource.GetResourceNameFromSelfLink(project){{ else }}project{{ end }}, {{ end -}} "Updating {{ $.Name -}}", userAgent, d.Timeout(schema.TimeoutUpdate)) @@ -980,29 +965,27 @@ if len(updateMask) > 0 { if err != nil { return err } -{{ else if $.GetAsync.IsA "PollAsync" -}} +{{ else if $.GetAsync.IsA "PollAsync" -}} err = transport_tpg.PollingWaitTime(resource{{ $.ResourceName -}}PollRead(d, meta), {{ $.GetAsync.CheckResponseFuncExistence -}}, "Updating {{ $.Name -}}", d.Timeout(schema.TimeoutUpdate), {{ $.GetAsync.TargetOccurrences -}}) if err != nil { -{{if $.GetAsync.SuppressError -}} +{{ if $.GetAsync.SuppressError -}} log.Printf("[ERROR] Unable to confirm eventually consistent {{ $.Name -}} %q finished updating: %q", d.Id(), err) -{{ else -}} +{{ else -}} return err -{{- end}} +{{- end}} } -{{- end}} -{{- end}} +{{- end}} +{{- end}} } -{{/* TODO THIS BLOCK NEEDS FUNCTIONS TO WORK -- LINE 824 */}} -{{- end}} - +{{- end }}{{/*range PropertiesByCustomUpdate*/}} d.Partial(false) -{{- end}} +{{- end }}{{/*if FieldSpecificUpdateMethods*/}} -{{ if $.CustomCode.PostUpdate -}} //TODO POST UPDATE {{end}} +{{ if $.CustomCode.PostUpdate -}} //TODO POST UPDATE {{end}} return resource{{ $.ResourceName -}}Read(d, meta) -{{ end -}} +{{ end -}}{{/*if CustomUpdate*/}} } -{{ else if $.RootLabels -}} +{{ else if $.RootLabels -}}{{/*if not immutable*/}} func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta 
interface{}) error { // Only the root field "labels" and "terraform_labels" are mutable return resource{{ $.ResourceName -}}Read(d, meta) From 11fe43ce7ee36b576190a17f60129d1911b6b187 Mon Sep 17 00:00:00 2001 From: dvfons <167889585+dvfons@users.noreply.github.com> Date: Thu, 13 Jun 2024 17:09:24 +0000 Subject: [PATCH 139/356] add support for `google_app_engine_application` to TCG (#10944) --- mmv1/provider/terraform_tgc.rb | 2 + mmv1/templates/tgc/resource_converters.go.erb | 1 + mmv1/third_party/tgc/appengine_application.go | 69 +++++++++++++++++++ .../data/example_app_engine_application.json | 18 +++++ .../data/example_app_engine_application.tf | 17 +++++ 5 files changed, 107 insertions(+) create mode 100644 mmv1/third_party/tgc/appengine_application.go create mode 100644 mmv1/third_party/tgc/tests/data/example_app_engine_application.json create mode 100644 mmv1/third_party/tgc/tests/data/example_app_engine_application.tf diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index 4e1c972b6f48..02f693eaf220 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -325,6 +325,8 @@ def copy_common_files(output_folder, generate_code, _generate_docs) 'third_party/tgc/commitment.go'], ['converters/google/resources/firebase_project.go', 'third_party/tgc/firebase_project.go'], + ['converters/google/resources/appengine_application.go', + 'third_party/tgc/appengine_application.go'], ['converters/google/resources/apikeys_key.go', 'third_party/tgc/apikeys_key.go'] ]) diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index d11367627c50..bebbed41d9e5 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -36,6 +36,7 @@ import ( func ResourceConverters() map[string][]cai.ResourceConverter { return map[string][]cai.ResourceConverter{ "google_artifact_registry_repository": 
{artifactregistry.ResourceConverterArtifactRegistryRepository()}, + "google_app_engine_application": {resourceConverterAppEngineApplication()}, "google_alloydb_cluster": {alloydb.ResourceConverterAlloydbCluster()}, "google_alloydb_instance": {alloydb.ResourceConverterAlloydbInstance()}, "google_apikeys_key": {resourceConverterApikeysKey()}, diff --git a/mmv1/third_party/tgc/appengine_application.go b/mmv1/third_party/tgc/appengine_application.go new file mode 100644 index 000000000000..c64f8db8cc98 --- /dev/null +++ b/mmv1/third_party/tgc/appengine_application.go @@ -0,0 +1,69 @@ +package google + +import ( + "reflect" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const AppEngineApplicationAssetType string = "appengine.googleapis.com/Application" + +func resourceConverterAppEngineApplication() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: AppEngineApplicationAssetType, + Convert: GetAppEngineApplicationCaiObject, + } +} + +func GetAppEngineApplicationCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//appengine.googleapis.com/v1/{{name}}") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetAppEngineApplicationApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: AppEngineApplicationAssetType, + Resource: &cai.AssetResource{ + Version: "v1", + DiscoveryDocumentURI: "https://www.googleapis.com/discovery/v1/apis/appengine/v1beta/rest", + DiscoveryName: "Application", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetAppEngineApplicationApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, 
error) {
+	obj := make(map[string]interface{})
+
+	idProp, err := expandAppEngineApplicationId(d.Get("id"), d, config)
+	if err != nil {
+		return nil, err
+	} else if v, ok := d.GetOkExists("id"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) {
+		obj["id"] = idProp
+	}
+
+	locationIdProp, err := expandAppEngineApplicationLocationId(d.Get("location_id"), d, config)
+	if err != nil {
+		return nil, err
+	} else if v, ok := d.GetOkExists("location_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationIdProp)) && (ok || !reflect.DeepEqual(v, locationIdProp)) {
+		obj["location_id"] = locationIdProp
+	}
+
+
+	return obj, nil
+}
+
+func expandAppEngineApplicationId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
+
+
+func expandAppEngineApplicationLocationId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) {
+	return v, nil
+}
diff --git a/mmv1/third_party/tgc/tests/data/example_app_engine_application.json b/mmv1/third_party/tgc/tests/data/example_app_engine_application.json
new file mode 100644
index 000000000000..18558b6fb187
--- /dev/null
+++ b/mmv1/third_party/tgc/tests/data/example_app_engine_application.json
@@ -0,0 +1,18 @@
+[
+  {
+    "name": "//appengine.googleapis.com/v1/placeholder-GWJVsru5",
+    "asset_type": "appengine.googleapis.com/Application",
+    "resource": {
+      "version": "v1",
+      "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/appengine/v1beta/rest",
+      "discovery_name": "Application",
+      "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}",
+      "data": {
+        "location_id": "us-central"
+      }
+    },
+    "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}",
+    "ancestors": ["organizations/{{.OrgID}}"]
+
+  }
+]
diff --git a/mmv1/third_party/tgc/tests/data/example_app_engine_application.tf 
b/mmv1/third_party/tgc/tests/data/example_app_engine_application.tf new file mode 100644 index 000000000000..d219cf7c638a --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_app_engine_application.tf @@ -0,0 +1,17 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_app_engine_application" "app" { + project = "{{.Provider.project}}" + location_id = "us-central" +} From f105e6a3363e89404c608abd5a011ad74598609c Mon Sep 17 00:00:00 2001 From: annakuo <45879591+annakuo@users.noreply.github.com> Date: Thu, 13 Jun 2024 10:10:17 -0700 Subject: [PATCH 140/356] Fix ArtifactRegistry Yum test (#10956) --- mmv1/products/artifactregistry/Repository.yaml | 4 ++-- .../examples/artifact_registry_repository_remote_yum.tf.erb | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mmv1/products/artifactregistry/Repository.yaml b/mmv1/products/artifactregistry/Repository.yaml index 5823b5f4a1c2..1ffd201923a8 100644 --- a/mmv1/products/artifactregistry/Repository.yaml +++ b/mmv1/products/artifactregistry/Repository.yaml @@ -92,7 +92,7 @@ examples: name: 'artifact_registry_repository_remote_yum' primary_resource_id: 'my-repo' vars: - repository_id: 'centos-8' + repository_id: 'rocky-9' description: 'example remote yum repository' - !ruby/object:Provider::Terraform::Examples name: 'artifact_registry_repository_cleanup' @@ -622,7 +622,7 @@ properties: name: 'repositoryPath' required: true description: |- - Specific repository from the base, e.g. `"centos/8-stream/BaseOS/x86_64/os"` + Specific repository from the base, e.g. 
`"pub/rocky/9/BaseOS/x86_64/os"` immutable: true - !ruby/object:Api::Type::NestedObject name: 'upstreamCredentials' diff --git a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb index fd78f58d53f6..1e631e677059 100644 --- a/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb +++ b/mmv1/templates/terraform/examples/artifact_registry_repository_remote_yum.tf.erb @@ -5,11 +5,11 @@ resource "google_artifact_registry_repository" "<%= ctx[:primary_resource_id] %> format = "YUM" mode = "REMOTE_REPOSITORY" remote_repository_config { - description = "Centos 8 remote repository" + description = "Rocky 9 remote repository" yum_repository { public_repository { - repository_base = "CENTOS" - repository_path = "centos/8-stream/BaseOS/x86_64/os" + repository_base = "ROCKY" + repository_path = "pub/rocky/9/BaseOS/x86_64/os" } } } From 2410fcdfd6e435d7d2e5ebecfba418f1a8f71d46 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Thu, 13 Jun 2024 12:20:55 -0500 Subject: [PATCH 141/356] refresh templates (#10957) --- mmv1/api/resource/examples.go | 1 + .../compute/PublicAdvertisedPrefix.yaml | 2 +- .../compute/PublicDelegatedPrefix.yaml | 2 +- mmv1/template-converter.go | 27 +- .../go/monitoring_uptime_check_config.go.tmpl | 9 + ...vileged_access_manager_entitlement.go.tmpl | 23 ++ .../go/spanner_instance_config.go.tmpl | 55 +++ .../go/kms_autokey_config.go.tmpl | 22 ++ .../go/only_remove_from_state.go.tmpl | 3 + .../go/compute_router_range.go.tmpl | 42 +++ ..._oauth2_config_client_secret_value.go.tmpl | 29 ++ .../go/app_engine_domain_mapping.go.tmpl | 18 + .../go/spanner_instance_config.go.tmpl | 28 ++ .../go/spanner_instance_config.go.tmpl | 19 + .../examples/go/alloydb_backup_basic.tf.tmpl | 4 +- .../examples/go/alloydb_backup_full.tf.tmpl | 4 +- .../examples/go/alloydb_cluster_basic.tf.tmpl | 4 +- 
.../examples/go/alloydb_cluster_full.tf.tmpl | 8 +- .../go/alloydb_cluster_restore.tf.tmpl | 4 +- .../go/alloydb_instance_basic.tf.tmpl | 4 +- .../go/bigquery_connection_kms.tf.tmpl | 38 ++ ...ery_dataset_external_reference_aws.tf.tmpl | 5 +- ...ataset_external_reference_aws_test.tf.tmpl | 11 + .../examples/go/bigquery_job_copy.tf.tmpl | 8 +- .../bigquery_job_copy_table_reference.tf.tmpl | 10 +- .../cloudbuildv2_repository_ghe_doc.tf.tmpl | 2 +- ...get_type_gcb_repo_skaffold_modules.tf.tmpl | 17 + .../examples/go/cloudfunctions2_basic.tf.tmpl | 1 - .../examples/go/cloudrunv2_job_basic.tf.tmpl | 2 +- .../go/cloudrunv2_job_directvpc.tf.tmpl | 3 +- .../go/cloudrunv2_job_emptydir.tf.tmpl | 2 +- .../examples/go/cloudrunv2_job_limits.tf.tmpl | 2 +- .../go/cloudrunv2_job_run_job.tf.tmpl | 12 + .../examples/go/cloudrunv2_job_secret.tf.tmpl | 2 +- .../examples/go/cloudrunv2_job_sql.tf.tmpl | 2 +- .../go/cloudrunv2_job_vpcaccess.tf.tmpl | 2 +- .../go/cloudrunv2_service_directvpc.tf.tmpl | 1 - ...er_user_workloads_config_map_basic.tf.tmpl | 20 + .../go/compute_interconnect_basic.tf.tmpl | 10 + .../compute_interconnect_basic_test.tf.tmpl | 15 + ...ute_project_cloud_armor_tier_basic.tf.tmpl | 3 + ...oject_cloud_armor_tier_project_set.tf.tmpl | 17 + .../go/dataplex_aspect_type_basic.tf.tmpl | 32 ++ .../go/dataplex_aspect_type_full.tf.tmpl | 139 +++++++ .../go/dataplex_entry_group_basic.tf.tmpl | 5 + .../go/dataplex_entry_group_full.tf.tmpl | 9 + .../go/dataplex_entry_type_basic.tf.tmpl | 5 + .../go/dataplex_entry_type_full.tf.tmpl | 50 +++ ...ataproc_metastore_federation_basic.tf.tmpl | 2 - ...proc_metastore_federation_bigquery.tf.tmpl | 6 +- ...ream_connection_profile_sql_server.tf.tmpl | 6 +- ...tastore_document_processing_config.tf.tmpl | 20 + ...ore_document_processing_config_ocr.tf.tmpl | 16 + .../go/dlp_discovery_config_cloud_sql.tf.tmpl | 78 ++++ ...e_max_infotype_per_finding_default.tf.tmpl | 22 ++ ..._ai_warehouse_document_schema_text.tf.tmpl | 2 +- 
...p_check_play_integrity_config_full.tf.tmpl | 11 + ...heck_play_integrity_config_minimal.tf.tmpl | 11 + ..._recaptcha_enterprise_config_basic.tf.tmpl | 11 + .../gkebackup_backupplan_permissive.tf.tmpl | 44 +++ .../gkebackup_restoreplan_gitops_mode.tf.tmpl | 43 +++ ...kebackup_restoreplan_restore_order.tf.tmpl | 65 ++++ .../gkebackup_restoreplan_volume_res.tf.tmpl | 47 +++ ...ributes_oauth2_config_client_basic.tf.tmpl | 37 ++ ...tributes_oauth2_config_client_full.tf.tmpl | 40 ++ ...ntegration_connectors_managed_zone.tf.tmpl | 57 +++ .../integrations_auth_config_advance.tf.tmpl | 1 - ...ntegrations_auth_config_auth_token.tf.tmpl | 1 - .../go/integrations_auth_config_basic.tf.tmpl | 1 - ...uth_config_client_certificate_only.tf.tmpl | 1 - .../go/integrations_auth_config_jwt.tf.tmpl | 1 - ...h_config_oauth2_authorization_code.tf.tmpl | 1 - ...h_config_oauth2_client_credentials.tf.tmpl | 1 - ...ntegrations_auth_config_oidc_token.tf.tmpl | 1 - ...ations_auth_config_service_account.tf.tmpl | 1 - ..._auth_config_username_and_password.tf.tmpl | 1 - .../go/integrations_client_basic.tf.tmpl | 1 - ...egrations_client_deprecated_fields.tf.tmpl | 5 + .../go/integrations_client_full.tf.tmpl | 35 ++ .../interconnect_attachment_dedicated.tf.tmpl | 36 ++ .../go/kms_autokey_config_all.tf.tmpl | 68 ++++ .../examples/go/kms_key_handle_basic.tf.tmpl | 93 +++++ ...oker_instance_enterprise_full_test.tf.tmpl | 2 + .../go/managedkafka_cluster_basic.tf.tmpl | 27 ++ .../go/managedkafka_cluster_cmek.tf.tmpl | 54 +++ .../go/managedkafka_topic_basic.tf.tmpl | 34 ++ .../go/netapp_active_directory_full.tf.tmpl | 1 + .../examples/go/netapp_backup.tf.tmpl | 36 ++ ...ty_regional_endpoint_global_access.tf.tmpl | 21 ++ ..._regional_endpoint_regional_access.tf.tmpl | 23 ++ ...irewall_endpoint_association_basic.tf.tmpl | 2 - ...k_security_firewall_endpoint_basic.tf.tmpl | 1 - ...curity_policy_tls_inspection_basic.tf.tmpl | 4 +- ...rk_security_security_profile_basic.tf.tmpl | 1 - 
...urity_security_profile_group_basic.tf.tmpl | 2 - ...ecurity_security_profile_overrides.tf.tmpl | 1 - ...curity_tls_inspection_policy_basic.tf.tmpl | 39 +- ...urity_tls_inspection_policy_custom.tf.tmpl | 130 +++++++ ..._services_lb_route_extension_basic.tf.tmpl | 352 ++++++++++++++++++ ...ervices_lb_traffic_extension_basic.tf.tmpl | 337 +++++++++++++++++ ...vices_service_lb_policies_advanced.tf.tmpl | 30 ++ ...services_service_lb_policies_basic.tf.tmpl | 6 + ...a_certificate_authority_custom_ski.tf.tmpl | 53 +++ .../privateca_certificate_custom_ski.tf.tmpl | 93 +++++ .../go/privateca_template_basic.tf.tmpl | 56 +++ ...d_access_manager_entitlement_basic.tf.tmpl | 39 ++ .../public_advertised_prefixes_basic.tf.tmpl | 2 +- .../public_delegated_prefixes_basic.tf.tmpl | 2 +- ...sub_subscription_push_cloudstorage.tf.tmpl | 2 +- ...ubscription_push_cloudstorage_avro.tf.tmpl | 2 +- .../examples/go/redis_cluster_ha.tf.tmpl | 3 + .../go/redis_cluster_ha_single_zone.tf.tmpl | 42 +++ ...ion_network_endpoint_group_portmap.tf.tmpl | 22 ++ .../region_network_endpoint_portmap.tf.tmpl | 58 +++ ...rule_with_preconfigured_waf_config.tf.tmpl | 53 +++ .../go/region_target_https_proxy_mtls.tf.tmpl | 101 +++++ ...alth_analytics_custom_module_basic.tf.tmpl | 24 ++ ...ealth_analytics_custom_module_full.tf.tmpl | 38 ++ ...ent_threat_detection_custom_module.tf.tmpl | 19 + ...alth_analytics_custom_module_basic.tf.tmpl | 18 + ...ealth_analytics_custom_module_full.tf.tmpl | 32 ++ ...alth_analytics_custom_module_basic.tf.tmpl | 19 + ...ealth_analytics_custom_module_full.tf.tmpl | 33 ++ .../go/security_policy_rule_basic.tf.tmpl | 19 + .../security_policy_rule_default_rule.tf.tmpl | 37 ++ ...ecurity_policy_rule_multiple_rules.tf.tmpl | 33 ++ .../go/spanner_instance_config_basic.tf.tmpl | 4 +- .../go/uptime_check_config_https.tf.tmpl | 3 + .../go/workstation_config_boost.tf.tmpl | 7 +- .../public_advertised_prefixes_basic.tf.erb | 2 +- .../public_delegated_prefixes_basic.tf.erb | 2 +- 
.../go/iam_workforce_pool_provider.go.tmpl | 34 +- .../go/iam_workforce_pool_provider.go.tmpl | 36 +- .../pre_create/go/integrations_client.go.tmpl | 5 + .../compute_region_network_endpoint.go.tmpl | 22 +- .../pre_update/go/alloydb_cluster.go.tmpl | 5 - .../go/network_services_gateway.tmpl | 4 + ...vileged_access_manager_entitlement.go.tmpl | 13 + .../go/spanner_instance_config_update.go.tmpl | 8 + 139 files changed, 3428 insertions(+), 119 deletions(-) create mode 100644 mmv1/templates/terraform/constants/go/privileged_access_manager_entitlement.go.tmpl create mode 100644 mmv1/templates/terraform/constants/go/spanner_instance_config.go.tmpl create mode 100644 mmv1/templates/terraform/custom_check_destroy/go/kms_autokey_config.go.tmpl create mode 100644 mmv1/templates/terraform/custom_delete/go/only_remove_from_state.go.tmpl create mode 100644 mmv1/templates/terraform/custom_flatten/go/compute_router_range.go.tmpl create mode 100644 mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_secret_value.go.tmpl create mode 100644 mmv1/templates/terraform/decoders/go/app_engine_domain_mapping.go.tmpl create mode 100644 mmv1/templates/terraform/decoders/go/spanner_instance_config.go.tmpl create mode 100644 mmv1/templates/terraform/encoders/go/spanner_instance_config.go.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquery_connection_kms.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquery_dataset_external_reference_aws_test.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/clouddeploy_custom_target_type_gcb_repo_skaffold_modules.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/cloudrunv2_job_run_job.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/composer_user_workloads_config_map_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/compute_interconnect_basic.tf.tmpl create mode 100644 
mmv1/templates/terraform/examples/go/compute_interconnect_basic_test.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/compute_project_cloud_armor_tier_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/compute_project_cloud_armor_tier_project_set.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dataplex_aspect_type_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dataplex_aspect_type_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dataplex_entry_group_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dataplex_entry_group_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dataplex_entry_type_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dataplex_entry_type_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/discoveryengine_datastore_document_processing_config.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/discoveryengine_datastore_document_processing_config_ocr.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dlp_discovery_config_cloud_sql.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dlp_inspect_template_max_infotype_per_finding_default.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/gkebackup_backupplan_permissive.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/gkebackup_restoreplan_gitops_mode.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/gkebackup_restoreplan_restore_order.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/gkebackup_restoreplan_volume_res.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_full.tf.tmpl create mode 100644 
mmv1/templates/terraform/examples/go/integration_connectors_managed_zone.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/integrations_client_deprecated_fields.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/integrations_client_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/interconnect_attachment_dedicated.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/kms_autokey_config_all.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/kms_key_handle_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/managedkafka_cluster_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/managedkafka_cluster_cmek.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/managedkafka_topic_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/netapp_backup.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/network_connectivity_regional_endpoint_global_access.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/network_connectivity_regional_endpoint_regional_access.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/network_security_tls_inspection_policy_custom.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/network_services_service_lb_policies_advanced.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/network_services_service_lb_policies_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/privileged_access_manager_entitlement_basic.tf.tmpl create mode 100644 
mmv1/templates/terraform/examples/go/redis_cluster_ha_single_zone.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/region_network_endpoint_group_portmap.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/region_network_endpoint_portmap.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/region_security_policy_rule_with_preconfigured_waf_config.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/region_target_https_proxy_mtls.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_management_folder_security_health_analytics_custom_module_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_management_folder_security_health_analytics_custom_module_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_management_organization_event_threat_detection_custom_module.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_management_organization_project_security_health_analytics_custom_module_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_management_organization_project_security_health_analytics_custom_module_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_management_organization_security_health_analytics_custom_module_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_management_organization_security_health_analytics_custom_module_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/security_policy_rule_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/security_policy_rule_default_rule.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/security_policy_rule_multiple_rules.tf.tmpl create mode 100644 mmv1/templates/terraform/pre_create/go/integrations_client.go.tmpl create mode 100644 mmv1/templates/terraform/pre_update/go/network_services_gateway.tmpl create mode 100644 
mmv1/templates/terraform/pre_update/go/privileged_access_manager_entitlement.go.tmpl create mode 100644 mmv1/templates/terraform/update_encoder/go/spanner_instance_config_update.go.tmpl diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index 705a17fe484d..f41a02d98357 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -233,6 +233,7 @@ func (e *Examples) SetHCLText() { func ExecuteTemplate(e any, templatePath string, appendNewline bool) string { templates := []string{ templatePath, + "templates/terraform/expand_resource_ref.tmpl", } templateFileName := filepath.Base(templatePath) diff --git a/mmv1/products/compute/PublicAdvertisedPrefix.yaml b/mmv1/products/compute/PublicAdvertisedPrefix.yaml index f7a0ad456ddd..183e43cc64cb 100644 --- a/mmv1/products/compute/PublicAdvertisedPrefix.yaml +++ b/mmv1/products/compute/PublicAdvertisedPrefix.yaml @@ -50,7 +50,7 @@ examples: vars: prefixes_name: 'my-prefix' test_env_vars: - description: :PAP_DESCRIPTION + desc: :PAP_DESCRIPTION properties: - !ruby/object:Api::Type::String name: 'description' diff --git a/mmv1/products/compute/PublicDelegatedPrefix.yaml b/mmv1/products/compute/PublicDelegatedPrefix.yaml index acc9de931b7d..7c29df155474 100644 --- a/mmv1/products/compute/PublicDelegatedPrefix.yaml +++ b/mmv1/products/compute/PublicDelegatedPrefix.yaml @@ -50,7 +50,7 @@ examples: vars: prefixes_name: 'my-prefix' test_env_vars: - description: :PAP_DESCRIPTION + desc: :PAP_DESCRIPTION properties: - !ruby/object:Api::Type::String name: 'region' diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index 02249d8fd782..d336f47629b2 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -30,7 +30,8 @@ func find(root, ext string) []string { } func convertTemplates() { - folders := []string{"examples", "constants", "custom_check_destroy", "custom_create", "custom_delete", "custom_import", "custom_update", "decoders", "encoders", 
"extra_schema_entry", "post_create", "post_create_failure", "post_delete", "post_import", "post_update", "pre_create", "pre_delete", "pre_read", "pre_update", "state_migrations", "update_encoder", "custom_expand", "custom_flatten", "iam", "iam/example_config_body"} + // exculding iam + folders := []string{"examples", "constants", "custom_check_destroy", "custom_create", "custom_delete", "custom_import", "custom_update", "decoders", "encoders", "extra_schema_entry", "post_create", "post_create_failure", "post_delete", "post_import", "post_update", "pre_create", "pre_delete", "pre_read", "pre_update", "state_migrations", "update_encoder", "custom_expand", "custom_flatten", "iam/example_config_body"} counts := 0 for _, folder := range folders { counts += convertTemplate(folder) @@ -50,7 +51,11 @@ func convertTemplate(folder string) int { log.Printf("%d template files in folder %s", len(templates), folder) for _, file := range templates { - data, err := os.ReadFile(path.Join(rubyDir, file)) + filePath := path.Join(rubyDir, file) + if checkExceptionList(filePath) { + continue + } + data, err := os.ReadFile(filePath) if err != nil { log.Fatalf("Cannot open the file: %v", file) } @@ -342,3 +347,21 @@ func convertTemplate(folder string) int { return len(templates) } + +func checkExceptionList(filePath string) bool { + exceptionPaths := []string{ + "custom_flatten/bigquery_table_ref_load_destinationtable.go", + "custom_flatten/bigquery_table_ref.go", + "custom_flatten/bigquery_table_ref_copy_destinationtable.go", + "custom_flatten/bigquery_table_ref_extract_sourcetable.go", + "custom_flatten/bigquery_table_ref_query_destinationtable.go", + } + + for _, t := range exceptionPaths { + if strings.Contains(filePath, t) { + return true + } + } + + return false +} diff --git a/mmv1/templates/terraform/constants/go/monitoring_uptime_check_config.go.tmpl b/mmv1/templates/terraform/constants/go/monitoring_uptime_check_config.go.tmpl index b10157904afa..8b22300db65c 100644 --- 
a/mmv1/templates/terraform/constants/go/monitoring_uptime_check_config.go.tmpl +++ b/mmv1/templates/terraform/constants/go/monitoring_uptime_check_config.go.tmpl @@ -1,3 +1,12 @@ func resourceMonitoringUptimeCheckConfigHttpCheckPathDiffSuppress(k, old, new string, d *schema.ResourceData) bool { return old == "/"+new +} + +func resourceMonitoringUptimeCheckConfigMonitoredResourceLabelsDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // GCP adds the project_id to the labels if unset. + // We want to suppress the diff if not set in the config. + if strings.HasSuffix(k, "project_id") && new == "" && old != "" { + return true + } + return false } \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/privileged_access_manager_entitlement.go.tmpl b/mmv1/templates/terraform/constants/go/privileged_access_manager_entitlement.go.tmpl new file mode 100644 index 000000000000..43044924a06e --- /dev/null +++ b/mmv1/templates/terraform/constants/go/privileged_access_manager_entitlement.go.tmpl @@ -0,0 +1,23 @@ +const deletedRegexp = `^deleted:` + +func validateDeletedPrincipals(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if regexp.MustCompile(deletedRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "Terraform does not support IAM policies for deleted principals: %s", k)) + } + + return +} + +const entitlementIdRegexp = `^[a-z][a-z0-9-]{3,62}$` + +func validateEntitlementId(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if !regexp.MustCompile(entitlementIdRegexp).MatchString(value) { + errors = append(errors, fmt.Errorf( + "Entitlement Id should be 4-63 characters, and valid characters are '[a-z]', '[0-9]', and '-'. The first character should be from [a-z]. 
: %s", k)) + } + + return +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/spanner_instance_config.go.tmpl b/mmv1/templates/terraform/constants/go/spanner_instance_config.go.tmpl new file mode 100644 index 000000000000..42bbd47ae12f --- /dev/null +++ b/mmv1/templates/terraform/constants/go/spanner_instance_config.go.tmpl @@ -0,0 +1,55 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +func replicasHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["location"].(string)))) // ToLower just in case + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["type"].(string)))) + var isLeader interface{} + if m["defaultLeaderLocation"] != nil { + isLeader = m["defaultLeaderLocation"] + } else { + isLeader = false + } + buf.WriteString(fmt.Sprintf("%v-", isLeader.(bool))) + return tpgresource.Hashcode(buf.String()) +} + +func getBaseInstanceConfigReplicas(d *schema.ResourceData, config *transport_tpg.Config, baseConfigProp interface{}, billingProject, userAgent string) ([]interface{}, error) { + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}SpannerBasePath{{"}}"}}") + if err != nil { + return nil, err + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + 
RawURL: fmt.Sprintf("%s%s", url, baseConfigProp.(string)), + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return nil, fmt.Errorf("Error fetching base InstanceConfig: %s", err) + } + + data, ok := res["replicas"] + if !ok || data == nil { + log.Print("[DEBUG] No replicas in the base InstanceConfig.") + return nil, nil + } + + return data.([]interface{}), nil +} diff --git a/mmv1/templates/terraform/custom_check_destroy/go/kms_autokey_config.go.tmpl b/mmv1/templates/terraform/custom_check_destroy/go/kms_autokey_config.go.tmpl new file mode 100644 index 000000000000..ad28e276546e --- /dev/null +++ b/mmv1/templates/terraform/custom_check_destroy/go/kms_autokey_config.go.tmpl @@ -0,0 +1,22 @@ +config := acctest.GoogleProviderConfig(t) + +url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}KMSBasePath{{"}}"}}folders/{{"{{"}}folder{{"}}"}}/autokeyConfig") +if err != nil { + return err +} + +res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: config.UserAgent, +}) +if err != nil { + return nil +} + +if v := res["key_project"]; v != nil { + return fmt.Errorf("AutokeyConfig still exists at %s", url) +} + +return nil diff --git a/mmv1/templates/terraform/custom_delete/go/only_remove_from_state.go.tmpl b/mmv1/templates/terraform/custom_delete/go/only_remove_from_state.go.tmpl new file mode 100644 index 000000000000..0280b5d91c1d --- /dev/null +++ b/mmv1/templates/terraform/custom_delete/go/only_remove_from_state.go.tmpl @@ -0,0 +1,3 @@ +log.Printf("[WARNING] Resource [%s] will be only removed from Terraform state, but will be left intact on GCP. 
%s", d.Id(), userAgent) + +return schema.RemoveFromState(d, meta) \ No newline at end of file diff --git a/mmv1/templates/terraform/custom_flatten/go/compute_router_range.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/compute_router_range.go.tmpl new file mode 100644 index 000000000000..e4ce7115a853 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/go/compute_router_range.go.tmpl @@ -0,0 +1,42 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/}} +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + apiData := make([]map[string]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + apiData = append(apiData, map[string]interface{}{ + "description": original["description"], + "range": original["range"], + }) + } + configData := []map[string]interface{}{} + if v, ok := d.GetOk("advertised_ip_ranges"); ok { + for _, item := range v.([]interface{}) { + configData = append(configData, item.(map[string]interface{})) + } + } + sorted, err := tpgresource.SortMapsByConfigOrder(configData, apiData, "range") + if err != nil { + log.Printf("[ERROR] Could not support API response for advertisedIpRanges.0.range: %s", err) + return apiData + } + return sorted +} diff --git a/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_secret_value.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_secret_value.go.tmpl new file mode 100644 index 000000000000..8b22f15bc6f7 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_secret_value.go.tmpl @@ -0,0 +1,29 @@ +{{- /* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/}} +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["thumbprint"] = original["thumbprint"] + // Trigger a diff based on the plain_text if there is no change in the thumbprint, + // otherwise leave plain_text empty to always trigger a diff. + if original["thumbprint"].(string) == d.Get("extra_attributes_oauth2_client.0.client_secret.0.value.0.thumbprint").(string) { + transformed["plain_text"] = d.Get("extra_attributes_oauth2_client.0.client_secret.0.value.0.plain_text") + } + return []interface{}{transformed} +} diff --git a/mmv1/templates/terraform/decoders/go/app_engine_domain_mapping.go.tmpl b/mmv1/templates/terraform/decoders/go/app_engine_domain_mapping.go.tmpl new file mode 100644 index 000000000000..0d20873e2439 --- /dev/null +++ b/mmv1/templates/terraform/decoders/go/app_engine_domain_mapping.go.tmpl @@ -0,0 +1,18 @@ +// sslManagementType does not get returned with the beta endpoint. Hence, if sslSettings is set +// and sslManagementType is set, we return that value. 
Otherwise, we carry over the old value +// from state by calling d.Get("ssl_settings.0.ssl_management_type") +if v, ok := res["sslSettings"]; ok { + original := v.(map[string]interface{}) + if _, ok := original["sslManagementType"]; !ok { + original["sslManagementType"] = d.Get("ssl_settings.0.ssl_management_type") + } + res["sslSettings"] = original +} else { + // If ssl_settings is not set, we call d.Get("ssl_settings.0.ssl_management_type"), create sslSettings, + // and store the retrieved value in sslManagementType + transformed := make(map[string]interface{}) + transformed["sslManagementType"] = d.Get("ssl_settings.0.ssl_management_type") + res["sslSettings"] = transformed +} + +return res, nil diff --git a/mmv1/templates/terraform/decoders/go/spanner_instance_config.go.tmpl b/mmv1/templates/terraform/decoders/go/spanner_instance_config.go.tmpl new file mode 100644 index 000000000000..23a57a340b34 --- /dev/null +++ b/mmv1/templates/terraform/decoders/go/spanner_instance_config.go.tmpl @@ -0,0 +1,28 @@ +config := meta.(*transport_tpg.Config) +d.SetId(res["name"].(string)) +if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/instanceConfigs/(?P[^/]+)"}, d, config); err != nil { + return nil, err +} +res["project"] = d.Get("project").(string) +res["name"] = d.Get("name").(string) +id, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}project{{"}}"}}/{{"{{"}}name{{"}}"}}") +if err != nil { +return nil, err +} +baseReplicas, err := getBaseInstanceConfigReplicas(d, config, res["baseConfig"], res["project"].(string), config.UserAgent) +if err != nil { + return nil, err +} +customReplica := make(map[int]interface{}) +for _, b := range baseReplicas { + customReplica[replicasHash(b)] = b +} +var cR []interface{} +for _, r := range res["replicas"].([]interface{}) { + if _, ok := customReplica[replicasHash(r)]; !ok { + cR = append(cR, r) + } +} +res["replicas"] = cR +d.SetId(id) +return res, nil \ No newline at end of file diff --git 
a/mmv1/templates/terraform/encoders/go/spanner_instance_config.go.tmpl b/mmv1/templates/terraform/encoders/go/spanner_instance_config.go.tmpl new file mode 100644 index 000000000000..b12071a83112 --- /dev/null +++ b/mmv1/templates/terraform/encoders/go/spanner_instance_config.go.tmpl @@ -0,0 +1,19 @@ +config := meta.(*transport_tpg.Config) +project, err := tpgresource.GetProject(d, config) +if err != nil { + return nil, err +} +newObj := make(map[string]interface{}) +if obj["name"] == nil { + return nil, fmt.Errorf("Error setting instance config name") +} +newObj["instanceConfigId"] = obj["name"] +obj["name"] = fmt.Sprintf("projects/%s/instanceConfigs/%s", project, obj["name"]) +baseReplicas, err := getBaseInstanceConfigReplicas(d, config, obj["baseConfig"], project, meta.(*transport_tpg.Config).UserAgent) +if err != nil { + return nil, err +} +r := obj["replicas"].([]interface{}) +obj["replicas"] = append(r, baseReplicas...) +newObj["instanceConfig"] = obj +return newObj, nil diff --git a/mmv1/templates/terraform/examples/go/alloydb_backup_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_backup_basic.tf.tmpl index 5648cedc9bc3..a78ce7463f6a 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_backup_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_backup_basic.tf.tmpl @@ -9,7 +9,9 @@ resource "google_alloydb_backup" "{{$.PrimaryResourceId}}" { resource "google_alloydb_cluster" "{{$.PrimaryResourceId}}" { cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" location = "us-central1" - network = google_compute_network.default.id + network_config { + network = google_compute_network.default.id + } } resource "google_alloydb_instance" "{{$.PrimaryResourceId}}" { diff --git a/mmv1/templates/terraform/examples/go/alloydb_backup_full.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_backup_full.tf.tmpl index c73bae6badb7..563fdcbac298 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_backup_full.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/alloydb_backup_full.tf.tmpl @@ -14,7 +14,9 @@ resource "google_alloydb_backup" "{{$.PrimaryResourceId}}" { resource "google_alloydb_cluster" "{{$.PrimaryResourceId}}" { cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" location = "us-central1" - network = google_compute_network.default.id + network_config { + network = google_compute_network.default.id + } } resource "google_alloydb_instance" "{{$.PrimaryResourceId}}" { diff --git a/mmv1/templates/terraform/examples/go/alloydb_cluster_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_cluster_basic.tf.tmpl index b7cd61658634..c9bab8098b43 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_cluster_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_cluster_basic.tf.tmpl @@ -1,7 +1,9 @@ resource "google_alloydb_cluster" "{{$.PrimaryResourceId}}" { cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" location = "us-central1" - network = google_compute_network.default.id + network_config { + network = google_compute_network.default.id + } } data "google_project" "project" {} diff --git a/mmv1/templates/terraform/examples/go/alloydb_cluster_full.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_cluster_full.tf.tmpl index df82c062a985..2b0c9e4c9f63 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_cluster_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_cluster_full.tf.tmpl @@ -1,7 +1,9 @@ resource "google_alloydb_cluster" "{{$.PrimaryResourceId}}" { - cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" - location = "us-central1" - network = google_compute_network.default.id + cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" + location = "us-central1" + network_config { + network = google_compute_network.default.id + } database_version = "POSTGRES_15" initial_user { diff --git a/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl 
b/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl index f37b8bb0470d..2b3842496b21 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_cluster_restore.tf.tmpl @@ -31,7 +31,9 @@ resource "google_alloydb_backup" "{{$.PrimaryResourceId}}" { resource "google_alloydb_cluster" "restored_from_backup" { cluster_id = "{{index $.Vars "alloydb_backup_restored_cluster_name"}}" location = "us-central1" - network = data.google_compute_network.default.id + network_config { + network = data.google_compute_network.default.id + } restore_backup_source { backup_name = google_alloydb_backup.{{$.PrimaryResourceId}}.name } diff --git a/mmv1/templates/terraform/examples/go/alloydb_instance_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/alloydb_instance_basic.tf.tmpl index d1fb4671aa5a..575c2a6ed790 100644 --- a/mmv1/templates/terraform/examples/go/alloydb_instance_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/alloydb_instance_basic.tf.tmpl @@ -13,7 +13,9 @@ resource "google_alloydb_instance" "{{$.PrimaryResourceId}}" { resource "google_alloydb_cluster" "{{$.PrimaryResourceId}}" { cluster_id = "{{index $.Vars "alloydb_cluster_name"}}" location = "us-central1" - network = google_compute_network.default.id + network_config { + network = google_compute_network.default.id + } initial_user { password = "{{index $.Vars "alloydb_cluster_name"}}" diff --git a/mmv1/templates/terraform/examples/go/bigquery_connection_kms.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_connection_kms.tf.tmpl new file mode 100644 index 000000000000..b5195f1acb5f --- /dev/null +++ b/mmv1/templates/terraform/examples/go/bigquery_connection_kms.tf.tmpl @@ -0,0 +1,38 @@ +resource "google_sql_database_instance" "instance" { + name = "{{index $.Vars "database_instance_name"}}" + database_version = "POSTGRES_11" + region = "us-central1" + settings { + tier = "db-f1-micro" + } + + deletion_protection 
= "{{index $.Vars "deletion_protection"}}" +} + +resource "google_sql_database" "db" { + instance = google_sql_database_instance.instance.name + name = "db" +} + +resource "google_sql_user" "user" { + name = "{{index $.Vars "username"}}" + instance = google_sql_database_instance.instance.name + password = "tf-test-my-password%{random_suffix}" +} + +resource "google_bigquery_connection" "{{$.PrimaryResourceId}}" { + friendly_name = "👋" + description = "a riveting description" + location = "US" + kms_key_name = "{{index $.Vars "kms_key_name"}}" + cloud_sql { + instance_id = google_sql_database_instance.instance.connection_name + database = google_sql_database.db.name + type = "POSTGRES" + credential { + username = google_sql_user.user.name + password = google_sql_user.user.password + } + } +} + diff --git a/mmv1/templates/terraform/examples/go/bigquery_dataset_external_reference_aws.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_dataset_external_reference_aws.tf.tmpl index 6c9ef4dd12a6..3c6263691609 100644 --- a/mmv1/templates/terraform/examples/go/bigquery_dataset_external_reference_aws.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/bigquery_dataset_external_reference_aws.tf.tmpl @@ -1,12 +1,11 @@ resource "google_bigquery_dataset" "{{$.PrimaryResourceId}}" { - provider = google-beta dataset_id = "{{index $.Vars "dataset_id"}}" friendly_name = "test" description = "This is a test description" location = "aws-us-east-1" external_dataset_reference { - external_source = "aws-glue://arn:aws:glue:us-east-1:772042918353:database/db_other_formats_external" - connection = "projects/bigquerytestdefault/locations/aws-us-east-1/connections/external_test-connection" + external_source = "aws-glue://arn:aws:glue:us-east-1:999999999999:database/database" + connection = "projects/project/locations/aws-us-east-1/connections/connection" } } diff --git a/mmv1/templates/terraform/examples/go/bigquery_dataset_external_reference_aws_test.tf.tmpl 
b/mmv1/templates/terraform/examples/go/bigquery_dataset_external_reference_aws_test.tf.tmpl new file mode 100644 index 000000000000..909aac1f6f9e --- /dev/null +++ b/mmv1/templates/terraform/examples/go/bigquery_dataset_external_reference_aws_test.tf.tmpl @@ -0,0 +1,11 @@ +resource "google_bigquery_dataset" "{{$.PrimaryResourceId}}" { + dataset_id = "{{index $.Vars "dataset_id"}}" + friendly_name = "test" + description = "This is a test description" + location = "aws-us-east-1" + + external_dataset_reference { + external_source = "aws-glue://arn:aws:glue:us-east-1:772042918353:database/db_other_formats_external" + connection = "projects/bigquerytestdefault/locations/aws-us-east-1/connections/external_test-connection" + } +} diff --git a/mmv1/templates/terraform/examples/go/bigquery_job_copy.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_job_copy.tf.tmpl index 4cde9abdea69..88b2e20502ca 100644 --- a/mmv1/templates/terraform/examples/go/bigquery_job_copy.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/bigquery_job_copy.tf.tmpl @@ -1,6 +1,10 @@ +locals { + count = 2 +} + resource "google_bigquery_table" "source" { deletion_protection = false - count = length(google_bigquery_dataset.source) + count = local.count dataset_id = google_bigquery_dataset.source[count.index].dataset_id table_id = "{{index $.Vars "job_id"}}_${count.index}_table" @@ -27,7 +31,7 @@ EOF } resource "google_bigquery_dataset" "source" { - count = 2 + count = local.count dataset_id = "{{index $.Vars "job_id"}}_${count.index}_dataset" friendly_name = "test" diff --git a/mmv1/templates/terraform/examples/go/bigquery_job_copy_table_reference.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_job_copy_table_reference.tf.tmpl index cab8db69851f..aa52657f9063 100644 --- a/mmv1/templates/terraform/examples/go/bigquery_job_copy_table_reference.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/bigquery_job_copy_table_reference.tf.tmpl @@ -1,6 +1,10 @@ +locals { + count = 2 +} + resource 
"google_bigquery_table" "source" { deletion_protection = false - count = length(google_bigquery_dataset.source) + count = local.count dataset_id = google_bigquery_dataset.source[count.index].dataset_id table_id = "{{index $.Vars "job_id"}}_${count.index}_table" @@ -24,10 +28,12 @@ resource "google_bigquery_table" "source" { } ] EOF + + depends_on = [google_bigquery_dataset.source] } resource "google_bigquery_dataset" "source" { - count = 2 + count = local.count dataset_id = "{{index $.Vars "job_id"}}_${count.index}_dataset" friendly_name = "test" diff --git a/mmv1/templates/terraform/examples/go/cloudbuildv2_repository_ghe_doc.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudbuildv2_repository_ghe_doc.tf.tmpl index bf307ddefa98..aac9315c7584 100644 --- a/mmv1/templates/terraform/examples/go/cloudbuildv2_repository_ghe_doc.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudbuildv2_repository_ghe_doc.tf.tmpl @@ -64,6 +64,6 @@ resource "google_cloudbuildv2_connection" "my-connection" { resource "google_cloudbuildv2_repository" "my-repository" { name = "my-terraform-ghe-repo" location = "us-central1" - parent_connection = google_cloudbuildv2_connection.my-connection.id + parent_connection = google_cloudbuildv2_connection.my-connection.name remote_uri = "https://ghe.com/hashicorp/terraform-provider-google.git" } diff --git a/mmv1/templates/terraform/examples/go/clouddeploy_custom_target_type_gcb_repo_skaffold_modules.tf.tmpl b/mmv1/templates/terraform/examples/go/clouddeploy_custom_target_type_gcb_repo_skaffold_modules.tf.tmpl new file mode 100644 index 000000000000..51ac301c287a --- /dev/null +++ b/mmv1/templates/terraform/examples/go/clouddeploy_custom_target_type_gcb_repo_skaffold_modules.tf.tmpl @@ -0,0 +1,17 @@ +resource "google_clouddeploy_custom_target_type" "{{$.PrimaryResourceId}}" { + location = "us-central1" + name = "{{index $.Vars "custom_target_type_name"}}" + description = "My custom target type" + custom_actions { + render_action = "renderAction" 
+ deploy_action = "deployAction" + include_skaffold_modules { + configs = ["my-config"] + google_cloud_build_repo { + repository = "projects/example/locations/us-central1/connections/git/repositories/example-repo" + path = "configs/skaffold.yaml" + ref = "main" + } + } + } +} diff --git a/mmv1/templates/terraform/examples/go/cloudfunctions2_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudfunctions2_basic.tf.tmpl index 7e3dc298e394..4b21e2ea0f35 100644 --- a/mmv1/templates/terraform/examples/go/cloudfunctions2_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudfunctions2_basic.tf.tmpl @@ -36,4 +36,3 @@ resource "google_cloudfunctions2_function" "{{$.PrimaryResourceId}}" { timeout_seconds = 60 } } - diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_job_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_job_basic.tf.tmpl index 4607484d4070..69c80116cd54 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_job_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_job_basic.tf.tmpl @@ -5,7 +5,7 @@ resource "google_cloud_run_v2_job" "{{$.PrimaryResourceId}}" { template { template { containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" } } } diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_job_directvpc.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_job_directvpc.tf.tmpl index 6033510e7fb1..4a869bf081d1 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_job_directvpc.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_job_directvpc.tf.tmpl @@ -1,7 +1,7 @@ resource "google_cloud_run_v2_job" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "cloud_run_job_name"}}" location = "us-central1" - launch_stage = "BETA" + launch_stage = "GA" template { template{ containers { @@ -13,7 +13,6 @@ resource "google_cloud_run_v2_job" "{{$.PrimaryResourceId}}" { subnetwork = "default" tags = ["tag1", "tag2", 
"tag3"] } - egress = "ALL_TRAFFIC" } } } diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_job_emptydir.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_job_emptydir.tf.tmpl index 2c5e4564aed2..7231e35938a0 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_job_emptydir.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_job_emptydir.tf.tmpl @@ -6,7 +6,7 @@ resource "google_cloud_run_v2_job" "{{$.PrimaryResourceId}}" { template { template { containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" volume_mounts { name = "empty-dir-volume" mount_path = "/mnt" diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_job_limits.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_job_limits.tf.tmpl index eeac70139490..5d2ae17ba840 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_job_limits.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_job_limits.tf.tmpl @@ -5,7 +5,7 @@ resource "google_cloud_run_v2_job" "{{$.PrimaryResourceId}}" { template { template { containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" resources { limits = { cpu = "2" diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_job_run_job.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_job_run_job.tf.tmpl new file mode 100644 index 000000000000..df6a52af53d0 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_job_run_job.tf.tmpl @@ -0,0 +1,12 @@ +resource "google_cloud_run_v2_job" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "cloud_run_job_name"}}" + location = "us-central1" + start_execution_token = "start-once-created" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + } + } + } +} diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_job_secret.tf.tmpl 
b/mmv1/templates/terraform/examples/go/cloudrunv2_job_secret.tf.tmpl index 199e7b6a8cb6..34a753f31bba 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_job_secret.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_job_secret.tf.tmpl @@ -17,7 +17,7 @@ resource "google_cloud_run_v2_job" "{{$.PrimaryResourceId}}" { } } containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" volume_mounts { name = "a-volume" mount_path = "/secrets" diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_job_sql.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_job_sql.tf.tmpl index b7352d3638bd..e1f652e4145c 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_job_sql.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_job_sql.tf.tmpl @@ -12,7 +12,7 @@ resource "google_cloud_run_v2_job" "{{$.PrimaryResourceId}}" { } containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" env { name = "FOO" diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_job_vpcaccess.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_job_vpcaccess.tf.tmpl index 6070521641c7..a2f00a6fdf6f 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_job_vpcaccess.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudrunv2_job_vpcaccess.tf.tmpl @@ -5,7 +5,7 @@ resource "google_cloud_run_v2_job" "{{$.PrimaryResourceId}}" { template { template{ containers { - image = "us-docker.pkg.dev/cloudrun/container/hello" + image = "us-docker.pkg.dev/cloudrun/container/job" } vpc_access{ connector = google_vpc_access_connector.connector.id diff --git a/mmv1/templates/terraform/examples/go/cloudrunv2_service_directvpc.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudrunv2_service_directvpc.tf.tmpl index 4137a4c81dd9..502ffc36f77a 100644 --- a/mmv1/templates/terraform/examples/go/cloudrunv2_service_directvpc.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/cloudrunv2_service_directvpc.tf.tmpl @@ -12,7 +12,6 @@ resource "google_cloud_run_v2_service" "{{$.PrimaryResourceId}}" { subnetwork = "default" tags = ["tag1", "tag2", "tag3"] } - egress = "ALL_TRAFFIC" } } } diff --git a/mmv1/templates/terraform/examples/go/composer_user_workloads_config_map_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/composer_user_workloads_config_map_basic.tf.tmpl new file mode 100644 index 000000000000..a2641c1bf801 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/composer_user_workloads_config_map_basic.tf.tmpl @@ -0,0 +1,20 @@ +resource "google_composer_environment" "environment" { + provider = google-beta + name = "{{index $.Vars "environment_name"}}" + region = "us-central1" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_composer_user_workloads_config_map" "{{$.PrimaryResourceId}}" { + provider = google-beta + name = "{{index $.Vars "config_map_name"}}" + region = "us-central1" + environment = google_composer_environment.environment.name + data = { + api_host: "apihost:443", + } +} diff --git a/mmv1/templates/terraform/examples/go/compute_interconnect_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_interconnect_basic.tf.tmpl new file mode 100644 index 000000000000..cdbf39957e21 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/compute_interconnect_basic.tf.tmpl @@ -0,0 +1,10 @@ +data "google_project" "project" {} + +resource "google_compute_interconnect" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "interconnect_name"}}" + customer_name = "{{index $.Vars "customer_name"}}" + interconnect_type = "DEDICATED" + link_type = "LINK_TYPE_ETHERNET_10G_LR" + location = "https://www.googleapis.com/compute/v1/projects/${data.google_project.project.name}/global/interconnectLocations/iad-zone1-1" + requested_link_count = 1 +} diff --git a/mmv1/templates/terraform/examples/go/compute_interconnect_basic_test.tf.tmpl 
b/mmv1/templates/terraform/examples/go/compute_interconnect_basic_test.tf.tmpl new file mode 100644 index 000000000000..b9bbd64004f8 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/compute_interconnect_basic_test.tf.tmpl @@ -0,0 +1,15 @@ +data "google_project" "project" {} + +resource "google_compute_interconnect" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "interconnect_name"}}" + customer_name = "internal_customer" # Special customer only available for Google testing. + interconnect_type = "IT_PRIVATE" # Special type only available for Google testing. + link_type = "LINK_TYPE_ETHERNET_10G_LR" + location = "https://www.googleapis.com/compute/v1/projects/${data.google_project.project.name}/global/interconnectLocations/z2z-us-east4-zone1-lciadl-a" # Special location only available for Google testing. + requested_link_count = 1 + admin_enabled = true + description = "example description" + macsec_enabled = false + noc_contact_email = "user@example.com" + requested_features = [] +} diff --git a/mmv1/templates/terraform/examples/go/compute_project_cloud_armor_tier_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_project_cloud_armor_tier_basic.tf.tmpl new file mode 100644 index 000000000000..b7ac23580978 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/compute_project_cloud_armor_tier_basic.tf.tmpl @@ -0,0 +1,3 @@ +resource "google_compute_project_cloud_armor_tier" "{{$.PrimaryResourceId}}" { + cloud_armor_tier = "CA_STANDARD" +} diff --git a/mmv1/templates/terraform/examples/go/compute_project_cloud_armor_tier_project_set.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_project_cloud_armor_tier_project_set.tf.tmpl new file mode 100644 index 000000000000..a92fbbb8d92c --- /dev/null +++ b/mmv1/templates/terraform/examples/go/compute_project_cloud_armor_tier_project_set.tf.tmpl @@ -0,0 +1,17 @@ +resource "google_project" "project" { + project_id = "{{index $.Vars "project_id"}}" + name = "{{index $.Vars "project_id"}}" + org_id 
= "{{index $.TestEnvVars "org_id"}}" + billing_account = "{{index $.TestEnvVars "billing_account"}}" +} + +resource "google_project_service" "compute" { + project = google_project.project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_project_cloud_armor_tier" "{{$.PrimaryResourceId}}" { + project = google_project.project.project_id + cloud_armor_tier = "CA_STANDARD" + depends_on = [google_project_service.compute] +} diff --git a/mmv1/templates/terraform/examples/go/dataplex_aspect_type_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dataplex_aspect_type_basic.tf.tmpl new file mode 100644 index 000000000000..7422bb08572b --- /dev/null +++ b/mmv1/templates/terraform/examples/go/dataplex_aspect_type_basic.tf.tmpl @@ -0,0 +1,32 @@ +resource "google_dataplex_aspect_type" "{{$.PrimaryResourceId}}" { + aspect_type_id = "{{index $.Vars "aspect_type_name"}}" + project = "{{index $.TestEnvVars "project_name"}}" + location = "us-central1" + + metadata_template = < /var/www/html/index.html +
+      Name: $NAME
+      IP: $IP
+      Metadata: $METADATA
+      
+ EOF + EOF1 + } + + lifecycle { + create_before_destroy = true + } +} + +# health check +resource "google_compute_region_health_check" "default" { + name = "{{index $.Vars "hc_name"}}" + region = "us-west1" + + http_health_check { + port_specification = "USE_SERVING_PORT" + } +} + +# MIG +resource "google_compute_region_instance_group_manager" "mig" { + name = "{{index $.Vars "mig_name"}}" + region = "us-west1" + + base_instance_name = "vm" + target_size = 2 + + version { + instance_template = google_compute_instance_template.instance_template.id + name = "primary" + } +} + +# allow all access from IAP and health check ranges +resource "google_compute_firewall" "fw_iap" { + name = "{{index $.Vars "fw_allow_iap_hc_name"}}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "35.235.240.0/20"] + + allow { + protocol = "tcp" + } +} + +# allow http from proxy subnet to backends +resource "google_compute_firewall" "fw_ilb_to_backends" { + name = "{{index $.Vars "fw_allow_ilb_to_backends_name"}}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["10.0.0.0/24"] + target_tags = ["http-server"] + + allow { + protocol = "tcp" + ports = ["80", "443", "8080"] + } + + depends_on = [ + google_compute_firewall.fw_iap + ] +} +# [END cloudloadbalancing_int_http_gce] + +# [START lb_route_extension] +resource "google_network_services_lb_route_extension" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "lb_route_extension_name"}}" + description = "my route extension" + location = "us-west1" + load_balancing_scheme = "INTERNAL_MANAGED" + forwarding_rules = [google_compute_forwarding_rule.default.self_link] + + extension_chains { + name = "chain1" + + match_condition { + cel_expression = "request.path.startsWith('/extensions')" + } + + extensions { + name = "ext11" + authority = "ext11.com" + service = 
google_compute_region_backend_service.callouts_backend.self_link + timeout = "0.1s" + fail_open = false + + forward_headers = ["custom-header"] + } + } + + labels = { + foo = "bar" + } +} + +# test instance +resource "google_compute_instance" "vm_test" { + name = "{{index $.Vars "vm_test_name"}}" + zone = "us-west1-b" + machine_type = "e2-small" + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + } + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } +} + +# Route Extension Backend Instance +resource "google_compute_instance" "callouts_instance" { + name = "{{index $.Vars "callouts_instance_name"}}" + zone = "us-west1-a" + machine_type = "e2-small" + + labels = { + "container-vm" = "cos-stable-109-17800-147-54" + } + + tags = ["allow-ssh","load-balanced-backend"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + boot_disk { + auto_delete = true + + initialize_params { + type = "pd-standard" + size = 10 + image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-109-17800-147-54" + } + } + + # Initialize an Envoy's Ext Proc gRPC API based on a docker container + metadata = { + startup-script = <<-EOF1 + #! 
/bin/bash + apt-get update + apt-get install apache2 -y + a2ensite default-ssl + a2enmod ssl + echo "Page served from second backend service" | tee /var/www/html/index.html + systemctl restart apache2' + EOF1 + } + + lifecycle { + create_before_destroy = true + } + + deletion_protection = false + + depends_on = [ + google_compute_instance.vm_test + ] +} + +// callouts instance group +resource "google_compute_instance_group" "callouts_instance_group" { + name = "{{index $.Vars "callouts_instance_group"}}" + description = "Terraform test instance group" + zone = "us-west1-a" + + instances = [ + google_compute_instance.callouts_instance.id, + ] + + named_port { + name = "http" + port = "80" + } + + named_port { + name = "grpc" + port = "443" + } +} + +# callout health check +resource "google_compute_region_health_check" "callouts_health_check" { + name = "{{index $.Vars "callouts_hc_name"}}" + region = "us-west1" + + http_health_check { + port = 80 + } + + depends_on = [ + google_compute_region_health_check.default + ] +} + +# callout backend service +resource "google_compute_region_backend_service" "callouts_backend" { + name = "{{index $.Vars "callouts_backend_name"}}" + region = "us-west1" + protocol = "HTTP2" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + port_name = "grpc" + health_checks = [google_compute_region_health_check.callouts_health_check.id] + + backend { + group = google_compute_instance_group.callouts_instance_group.id + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + depends_on = [ + google_compute_region_backend_service.default + ] +} +# [END lb_route_extension] diff --git a/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl new file mode 100644 index 000000000000..abc31633df6b --- /dev/null +++ b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl @@ 
-0,0 +1,337 @@ +# Internal HTTP load balancer with a managed instance group backend +# VPC network +resource "google_compute_network" "ilb_network" { + name = "{{index $.Vars "ilb_network_name"}}" + auto_create_subnetworks = false +} + +# proxy-only subnet +resource "google_compute_subnetwork" "proxy_subnet" { + name = "{{index $.Vars "proxy_subnet_name"}}" + ip_cidr_range = "10.0.0.0/24" + region = "us-west1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +# backend subnet +resource "google_compute_subnetwork" "ilb_subnet" { + name = "{{index $.Vars "backend_subnet_name"}}" + ip_cidr_range = "10.0.1.0/24" + region = "us-west1" + network = google_compute_network.ilb_network.id + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# forwarding rule +resource "google_compute_forwarding_rule" "default" { + name = "{{index $.Vars "forwarding_rule_name"}}" + region = "us-west1" + ip_protocol = "TCP" + load_balancing_scheme = "INTERNAL_MANAGED" + port_range = "80" + target = google_compute_region_target_http_proxy.default.id + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + network_tier = "PREMIUM" + + depends_on = [ + google_compute_subnetwork.proxy_subnet + ] +} + +# HTTP target proxy +resource "google_compute_region_target_http_proxy" "default" { + name = "{{index $.Vars "target_http_proxy_name"}}" + region = "us-west1" + url_map = google_compute_region_url_map.default.id +} + +# URL map +resource "google_compute_region_url_map" "default" { + name = "{{index $.Vars "regional_url_map_name"}}" + region = "us-west1" + default_service = google_compute_region_backend_service.default.id +} + +# backend service +resource "google_compute_region_backend_service" "default" { + name = "{{index $.Vars "backend_service_name"}}" + region = "us-west1" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + health_checks 
= [google_compute_region_health_check.default.id] + + backend { + group = google_compute_region_instance_group_manager.mig.instance_group + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } +} + +# instance template +resource "google_compute_instance_template" "instance_template" { + name = "{{index $.Vars "mig_template_name"}}" + machine_type = "e2-small" + tags = ["http-server"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + } + + disk { + source_image = "debian-cloud/debian-10" + auto_delete = true + boot = true + } + + # install nginx and serve a simple web page + metadata = { + startup-script = <<-EOF1 + #! /bin/bash + set -euo pipefail + + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install -y nginx-light jq + + NAME=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/hostname") + IP=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip") + METADATA=$(curl -f -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=True" | jq 'del(.["startup-script"])') + + cat < /var/www/html/index.html +
+      Name: $NAME
+      IP: $IP
+      Metadata: $METADATA
+      
+ EOF + EOF1 + } + + lifecycle { + create_before_destroy = true + } +} + +# health check +resource "google_compute_region_health_check" "default" { + name = "{{index $.Vars "hc_name"}}" + region = "us-west1" + + http_health_check { + port_specification = "USE_SERVING_PORT" + } +} + +# MIG +resource "google_compute_region_instance_group_manager" "mig" { + name = "{{index $.Vars "mig_name"}}" + region = "us-west1" + + base_instance_name = "vm" + target_size = 2 + + version { + instance_template = google_compute_instance_template.instance_template.id + name = "primary" + } +} + +# allow all access from IAP and health check ranges +resource "google_compute_firewall" "fw_iap" { + name = "{{index $.Vars "fw_allow_iap_hc_name"}}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "35.235.240.0/20"] + + allow { + protocol = "tcp" + } +} + +# allow http from proxy subnet to backends +resource "google_compute_firewall" "fw_ilb_to_backends" { + name = "{{index $.Vars "fw_allow_ilb_to_backends_name"}}" + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["10.0.0.0/24"] + target_tags = ["http-server"] + + allow { + protocol = "tcp" + ports = ["80", "443", "8080"] + } + + depends_on = [ + google_compute_firewall.fw_iap + ] +} +# [END cloudloadbalancing_int_http_gce] + +# [START lb_traffic_extension] +resource "google_network_services_lb_traffic_extension" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "lb_traffic_extension_name"}}" + description = "my traffic extension" + location = "us-west1" + + load_balancing_scheme = "INTERNAL_MANAGED" + forwarding_rules = [google_compute_forwarding_rule.default.self_link] + + extension_chains { + name = "chain1" + + match_condition { + cel_expression = "request.host == 'example.com'" + } + + extensions { + name = "ext11" + authority = "ext11.com" + service = 
google_compute_region_backend_service.callouts_backend.self_link + timeout = "0.1s" + fail_open = false + + supported_events = ["REQUEST_HEADERS"] + forward_headers = ["custom-header"] + } + } + + labels = { + foo = "bar" + } +} + +# test instance +resource "google_compute_instance" "vm_test" { + name = "{{index $.Vars "vm_test_name"}}" + zone = "us-west1-b" + machine_type = "e2-small" + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + } + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } +} + +# Traffic Extension Backend Instance +resource "google_compute_instance" "callouts_instance" { + name = "{{index $.Vars "callouts_instance_name"}}" + zone = "us-west1-a" + machine_type = "e2-small" + + labels = { + "container-vm" = "cos-stable-109-17800-147-54" + } + + tags = ["allow-ssh","load-balanced-backend"] + + network_interface { + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + + access_config { + # add external ip to fetch packages + } + + } + + boot_disk { + auto_delete = true + + initialize_params { + type = "pd-standard" + size = 10 + image = "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-stable-109-17800-147-54" + } + } + + # Initialize an Envoy's Ext Proc gRPC API based on a docker container + metadata = { + gce-container-declaration = "# DISCLAIMER:\n# This container declaration format is not a public API and may change without\n# notice. 
Please use gcloud command-line tool or Google Cloud Console to run\n# Containers on Google Compute Engine.\n\nspec:\n containers:\n - image: us-docker.pkg.dev/service-extensions/ext-proc/service-callout-basic-example-python:latest\n name: callouts-vm\n securityContext:\n privileged: false\n stdin: false\n tty: false\n volumeMounts: []\n restartPolicy: Always\n volumes: []\n" + google-logging-enabled = "true" + } + + lifecycle { + create_before_destroy = true + } + + deletion_protection = false + + depends_on = [ + google_compute_instance.vm_test + ] +} + +// callouts instance group +resource "google_compute_instance_group" "callouts_instance_group" { + name = "{{index $.Vars "callouts_instance_group"}}" + description = "Terraform test instance group" + zone = "us-west1-a" + + instances = [ + google_compute_instance.callouts_instance.id, + ] + + named_port { + name = "http" + port = "80" + } + + named_port { + name = "grpc" + port = "443" + } +} + +# callout health check +resource "google_compute_region_health_check" "callouts_health_check" { + name = "{{index $.Vars "callouts_hc_name"}}" + region = "us-west1" + + http_health_check { + port = 80 + } + + depends_on = [ + google_compute_region_health_check.default + ] +} + +# callout backend service +resource "google_compute_region_backend_service" "callouts_backend" { + name = "{{index $.Vars "callouts_backend_name"}}" + region = "us-west1" + protocol = "HTTP2" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 + port_name = "grpc" + health_checks = [google_compute_region_health_check.callouts_health_check.id] + + backend { + group = google_compute_instance_group.callouts_instance_group.id + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + depends_on = [ + google_compute_region_backend_service.default + ] +} +# [END lb_traffic_extension] diff --git a/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_advanced.tf.tmpl 
b/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_advanced.tf.tmpl new file mode 100644 index 000000000000..51680aa0e867 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_advanced.tf.tmpl @@ -0,0 +1,30 @@ +resource "google_network_services_service_lb_policies" "{{$.PrimaryResourceId}}" { + provider = google-beta + + name = "{{index $.Vars "resource_name"}}" + location = "global" + description = "my description" + load_balancing_algorithm = "SPRAY_TO_REGION" + + auto_capacity_drain { + enable = true + } + + failover_config { + failover_health_threshold = 70 + } + + labels = { + foo = "bar" + } +} + +resource "google_compute_backend_service" "default" { + provider = google-beta + + name = "{{index $.Vars "backend_name"}}" + description = "my description" + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + protocol = "HTTP" + service_lb_policy = "//networkservices.googleapis.com/${google_network_services_service_lb_policies.{{$.PrimaryResourceId}}.id}" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_basic.tf.tmpl new file mode 100644 index 000000000000..db6ea6cd28e2 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/network_services_service_lb_policies_basic.tf.tmpl @@ -0,0 +1,6 @@ +resource "google_network_services_service_lb_policies" "{{$.PrimaryResourceId}}" { + provider = google-beta + + name = "{{index $.Vars "resource_name"}}" + location = "global" +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl new file mode 100644 index 000000000000..92deecf16d0b --- /dev/null +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl @@ 
-0,0 +1,53 @@ +# [START privateca_create_ca] +resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { + // This example assumes this pool already exists. + // Pools cannot be deleted in normal test circumstances, so we depend on static pools + pool = "{{index $.Vars "pool_name"}}" + certificate_authority_id = "{{index $.Vars "certificate_authority_id"}}" + location = "{{index $.Vars "pool_location"}}" + deletion_protection = "{{index $.Vars "deletion_protection"}}" + config { + subject_config { + subject { + organization = "HashiCorp" + common_name = "my-certificate-authority" + } + subject_alt_name { + dns_names = ["hashicorp.com"] + } + } + subject_key_id { + key_id = "4cf3372289b1d411b999dbb9ebcd44744b6b2fca" + } + x509_config { + ca_options { + is_ca = true + max_issuer_path_length = 10 + } + key_usage { + base_key_usage { + digital_signature = true + content_commitment = true + key_encipherment = false + data_encipherment = true + key_agreement = true + cert_sign = true + crl_sign = true + decipher_only = true + } + extended_key_usage { + server_auth = true + client_auth = false + email_protection = true + code_signing = true + time_stamping = true + } + } + } + } + lifetime = "86400s" + key_spec { + cloud_kms_key_version = "{{index $.Vars "kms_key_name"}}/cryptoKeyVersions/1" + } +} +# [END privateca_create_ca] diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl new file mode 100644 index 000000000000..e760da42aede --- /dev/null +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl @@ -0,0 +1,93 @@ +# [START privateca_create_certificate] +resource "google_privateca_ca_pool" "default" { + location = "us-central1" + name = "{{index $.Vars "ca_pool_id"}}" + tier = "ENTERPRISE" +} + +resource "google_privateca_certificate_authority" "default" { + location = "us-central1" + pool = 
google_privateca_ca_pool.default.name + certificate_authority_id = "my-authority" + config { + subject_config { + subject { + organization = "HashiCorp" + common_name = "my-certificate-authority" + } + subject_alt_name { + dns_names = ["hashicorp.com"] + } + } + x509_config { + ca_options { + is_ca = true + } + key_usage { + base_key_usage { + digital_signature = true + cert_sign = true + crl_sign = true + } + extended_key_usage { + server_auth = true + } + } + } + } + lifetime = "86400s" + key_spec { + algorithm = "RSA_PKCS1_4096_SHA256" + } + + // Disable CA deletion related safe checks for easier cleanup. + deletion_protection = false + skip_grace_period = true + ignore_active_certificates_on_deletion = true +} + + +resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { + location = "us-central1" + pool = google_privateca_ca_pool.default.name + name = "{{index $.Vars "certificate_name"}}" + lifetime = "860s" + config { + subject_config { + subject { + common_name = "san1.example.com" + country_code = "us" + organization = "google" + organizational_unit = "enterprise" + locality = "mountain view" + province = "california" + street_address = "1600 amphitheatre parkway" + postal_code = "94109" + } + } + subject_key_id { + key_id = "4cf3372289b1d411b999dbb9ebcd44744b6b2fca" + } + x509_config { + ca_options { + is_ca = false + } + key_usage { + base_key_usage { + crl_sign = true + } + extended_key_usage { + server_auth = true + } + } + } + public_key { + format = "PEM" + key = filebase64("test-fixtures/rsa_public.pem") + } + } + // Certificates require an authority to exist in the pool, though they don't + // need to be explicitly connected to it + depends_on = [google_privateca_certificate_authority.default] +} +# [END privateca_create_certificate] diff --git a/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl index cf9f8d4471e0..03909a72b673 100644 --- 
a/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl @@ -2,6 +2,7 @@ resource "google_privateca_certificate_template" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" + description = "A sample certificate template" identity_constraints { allow_subject_alt_names_passthrough = true @@ -14,5 +15,60 @@ resource "google_privateca_certificate_template" "{{$.PrimaryResourceId}}" { title = "Sample expression" } } + + maximum_lifetime = "86400s" + + passthrough_extensions { + additional_extensions { + object_id_path = [1, 6] + } + known_extensions = ["EXTENDED_KEY_USAGE"] + } + + predefined_values { + additional_extensions { + object_id { + object_id_path = [1, 6] + } + value = "c3RyaW5nCg==" + critical = true + } + aia_ocsp_servers = ["string"] + ca_options { + is_ca = false + max_issuer_path_length = 6 + } + key_usage { + base_key_usage { + cert_sign = false + content_commitment = true + crl_sign = false + data_encipherment = true + decipher_only = true + digital_signature = true + encipher_only = true + key_agreement = true + key_encipherment = true + } + extended_key_usage { + client_auth = true + code_signing = true + email_protection = true + ocsp_signing = true + server_auth = true + time_stamping = true + } + unknown_extended_key_usages { + object_id_path = [1, 6] + } + } + policy_ids { + object_id_path = [1, 6] + } + } + + labels = { + label-one = "value-one" + } } # [END privateca_create_certificate_template] diff --git a/mmv1/templates/terraform/examples/go/privileged_access_manager_entitlement_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privileged_access_manager_entitlement_basic.tf.tmpl new file mode 100644 index 000000000000..9bb8c5914991 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/privileged_access_manager_entitlement_basic.tf.tmpl @@ -0,0 +1,39 @@ +resource "google_privileged_access_manager_entitlement" 
"{{$.PrimaryResourceId}}" { + provider = google-beta + entitlement_id = "{{index $.Vars "entitlement_id"}}" + location = "global" + max_request_duration = "43200s" + parent = "projects/{{index $.TestEnvVars "project"}}" + requester_justification_config { + unstructured{} + } + eligible_users { + principals = ["group:test@google.com"] + } + privileged_access{ + gcp_iam_access{ + role_bindings{ + role = "roles/storage.admin" + condition_expression = "request.time < timestamp(\"2024-04-23T18:30:00.000Z\")" + } + resource = "//cloudresourcemanager.googleapis.com/projects/{{index $.TestEnvVars "project"}}" + resource_type = "cloudresourcemanager.googleapis.com/Project" + } + } + additional_notification_targets { + admin_email_recipients = ["user@example.com"] + requester_email_recipients = ["user@example.com"] + } + approval_workflow { + manual_approvals { + require_approver_justification = true + steps { + approvals_needed = 1 + approver_email_recipients = ["user@example.com"] + approvers { + principals = ["group:test@google.com"] + } + } + } + } +} diff --git a/mmv1/templates/terraform/examples/go/public_advertised_prefixes_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/public_advertised_prefixes_basic.tf.tmpl index 4890d3ee772d..a9f888e5edd4 100644 --- a/mmv1/templates/terraform/examples/go/public_advertised_prefixes_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/public_advertised_prefixes_basic.tf.tmpl @@ -1,6 +1,6 @@ resource "google_compute_public_advertised_prefix" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "prefixes_name"}}" - description = "{{index $.TestEnvVars "description"}}" + description = "{{index $.TestEnvVars "desc"}}" dns_verification_ip = "127.127.0.0" ip_cidr_range = "127.127.0.0/16" } diff --git a/mmv1/templates/terraform/examples/go/public_delegated_prefixes_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/public_delegated_prefixes_basic.tf.tmpl index b54d0f3fb88f..b878e52d00bd 100644 --- 
a/mmv1/templates/terraform/examples/go/public_delegated_prefixes_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/public_delegated_prefixes_basic.tf.tmpl @@ -1,6 +1,6 @@ resource "google_compute_public_advertised_prefix" "advertised" { name = "{{index $.Vars "prefixes_name"}}" - description = "{{index $.TestEnvVars "description"}}" + description = "{{index $.TestEnvVars "desc"}}" dns_verification_ip = "127.127.0.0" ip_cidr_range = "127.127.0.0/16" } diff --git a/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage.tf.tmpl b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage.tf.tmpl index 07543bc458d0..b5c1a710930f 100644 --- a/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage.tf.tmpl @@ -35,4 +35,4 @@ resource "google_storage_bucket_iam_member" "admin" { bucket = google_storage_bucket.{{$.PrimaryResourceId}}.name role = "roles/storage.admin" member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" -} \ No newline at end of file +} diff --git a/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage_avro.tf.tmpl b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage_avro.tf.tmpl index 4d78207e4c9f..3974c8593c5b 100644 --- a/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage_avro.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage_avro.tf.tmpl @@ -39,4 +39,4 @@ resource "google_storage_bucket_iam_member" "admin" { bucket = google_storage_bucket.{{$.PrimaryResourceId}}.name role = "roles/storage.admin" member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" -} \ No newline at end of file +} diff --git a/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl 
b/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl index 799c11f9108b..492a9153374d 100644 --- a/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/redis_cluster_ha.tf.tmpl @@ -9,6 +9,9 @@ resource "google_redis_cluster" "{{$.PrimaryResourceId}}" { node_type = "REDIS_SHARED_CORE_NANO" transit_encryption_mode = "TRANSIT_ENCRYPTION_MODE_DISABLED" authorization_mode = "AUTH_MODE_DISABLED" + redis_configs = { + maxmemory-policy = "volatile-ttl" + } zone_distribution_config { mode = "MULTI_ZONE" } diff --git a/mmv1/templates/terraform/examples/go/redis_cluster_ha_single_zone.tf.tmpl b/mmv1/templates/terraform/examples/go/redis_cluster_ha_single_zone.tf.tmpl new file mode 100644 index 000000000000..0d3bcb48f87a --- /dev/null +++ b/mmv1/templates/terraform/examples/go/redis_cluster_ha_single_zone.tf.tmpl @@ -0,0 +1,42 @@ +resource "google_redis_cluster" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "cluster_name"}}" + shard_count = 3 + psc_configs { + network = google_compute_network.producer_net.id + } + region = "us-central1" + zone_distribution_config { + mode = "SINGLE_ZONE" + zone = "us-central1-f" + } + depends_on = [ + google_network_connectivity_service_connection_policy.default + ] + + lifecycle { + prevent_destroy = {{index $.Vars "prevent_destroy"}} + } +} + +resource "google_network_connectivity_service_connection_policy" "default" { + name = "{{index $.Vars "policy_name"}}" + location = "us-central1" + service_class = "gcp-memorystore-redis" + description = "my basic service connection policy" + network = google_compute_network.producer_net.id + psc_config { + subnetworks = [google_compute_subnetwork.producer_subnet.id] + } +} + +resource "google_compute_subnetwork" "producer_subnet" { + name = "{{index $.Vars "subnet_name"}}" + ip_cidr_range = "10.0.0.248/29" + region = "us-central1" + network = google_compute_network.producer_net.id +} + +resource "google_compute_network" 
"producer_net" { + name = "{{index $.Vars "network_name"}}" + auto_create_subnetworks = false +} diff --git a/mmv1/templates/terraform/examples/go/region_network_endpoint_group_portmap.tf.tmpl b/mmv1/templates/terraform/examples/go/region_network_endpoint_group_portmap.tf.tmpl new file mode 100644 index 000000000000..e4f502062ad8 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/region_network_endpoint_group_portmap.tf.tmpl @@ -0,0 +1,22 @@ +resource "google_compute_region_network_endpoint_group" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "neg_name"}}" + region = "us-central1" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + + network_endpoint_type = "GCE_VM_IP_PORTMAP" + provider = google-beta +} + +resource "google_compute_network" "default" { + name = "{{index $.Vars "network_name"}}" + provider = google-beta +} + +resource "google_compute_subnetwork" "default" { + name = "{{index $.Vars "subnetwork_name"}}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id + provider = google-beta +} diff --git a/mmv1/templates/terraform/examples/go/region_network_endpoint_portmap.tf.tmpl b/mmv1/templates/terraform/examples/go/region_network_endpoint_portmap.tf.tmpl new file mode 100644 index 000000000000..bc9b61702ddc --- /dev/null +++ b/mmv1/templates/terraform/examples/go/region_network_endpoint_portmap.tf.tmpl @@ -0,0 +1,58 @@ +resource "google_compute_network" "default" { + name = "{{index $.Vars "network_name"}}" + auto_create_subnetworks = false + provider = google-beta +} + +resource "google_compute_subnetwork" "default" { + name = "{{index $.Vars "subnetwork_name"}}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id + provider = google-beta +} + +resource "google_compute_region_network_endpoint_group" default { + name = "{{index $.Vars "neg_name"}}" + region = "us-central1" + network = 
google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + + network_endpoint_type = "GCE_VM_IP_PORTMAP" + provider = google-beta +} + +resource "google_compute_region_network_endpoint" "{{$.PrimaryResourceId}}" { + region_network_endpoint_group = google_compute_region_network_endpoint_group.default.name + region = "us-central1" + instance = google_compute_instance.default.self_link + port = 80 + ip_address = google_compute_instance.default.network_interface[0].network_ip + client_destination_port = 8080 + provider = google-beta +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + provider = google-beta +} + +resource "google_compute_instance" "default" { + name = "{{index $.Vars "instance_name"}}" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.default.id + access_config { + } + } + provider = google-beta +} diff --git a/mmv1/templates/terraform/examples/go/region_security_policy_rule_with_preconfigured_waf_config.tf.tmpl b/mmv1/templates/terraform/examples/go/region_security_policy_rule_with_preconfigured_waf_config.tf.tmpl new file mode 100644 index 000000000000..370460df558e --- /dev/null +++ b/mmv1/templates/terraform/examples/go/region_security_policy_rule_with_preconfigured_waf_config.tf.tmpl @@ -0,0 +1,53 @@ +resource "google_compute_region_security_policy" "default" { + provider = google-beta + + region = "asia-southeast1" + name = "{{index $.Vars "sec_policy_name"}}" + description = "basic region security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "{{$.PrimaryResourceId}}" { + provider = google-beta + + region = "asia-southeast1" + security_policy = google_compute_region_security_policy.default.name + description = "new rule" + priority = 100 + match { + 
versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.10.0.0/16"] + } + } + preconfigured_waf_config { + exclusion { + request_uri { + operator = "STARTS_WITH" + value = "/admin" + } + target_rule_set = "rce-stable" + } + exclusion { + request_query_param { + operator = "CONTAINS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + request_query_param { + operator = "EQUALS" + value = "description" + } + target_rule_set = "xss-stable" + target_rule_ids = [ + "owasp-crs-v030001-id941330-xss", + "owasp-crs-v030001-id941340-xss", + ] + } + } + action = "allow" + preview = true +} diff --git a/mmv1/templates/terraform/examples/go/region_target_https_proxy_mtls.tf.tmpl b/mmv1/templates/terraform/examples/go/region_target_https_proxy_mtls.tf.tmpl new file mode 100644 index 000000000000..25cd56003eab --- /dev/null +++ b/mmv1/templates/terraform/examples/go/region_target_https_proxy_mtls.tf.tmpl @@ -0,0 +1,101 @@ +data "google_project" "project" { + provider = google-beta +} + +resource "google_compute_region_target_https_proxy" "{{$.PrimaryResourceId}}" { + provider = google-beta + region = "us-central1" + name = "{{index $.Vars "target_https_proxy_name"}}" + url_map = google_compute_region_url_map.default.id + ssl_certificates = [google_compute_region_ssl_certificate.default.id] + server_tls_policy = google_network_security_server_tls_policy.default.id +} + +resource "google_certificate_manager_trust_config" "default" { + provider = google-beta + location = "us-central1" + name = "{{index $.Vars "trust_config_name"}}" + description = "sample description for trust config" + + trust_stores { + trust_anchors { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + } + + labels = { + foo = "bar" + } +} + +resource "google_network_security_server_tls_policy" "default" { + provider = google-beta + location = "us-central1" + name = 
"{{index $.Vars "server_tls_policy_name"}}" + description = "my description" + allow_open = "false" + mtls_policy { + client_validation_mode = "REJECT_INVALID" + client_validation_trust_config = "projects/${data.google_project.project.number}/locations/us-central1/trustConfigs/${google_certificate_manager_trust_config.default.name}" + } +} + +resource "google_compute_region_ssl_certificate" "default" { + provider = google-beta + region = "us-central1" + name = "{{index $.Vars "ssl_certificate_name"}}" + private_key = file("path/to/private.key") + certificate = file("path/to/certificate.crt") +} + +resource "google_compute_region_url_map" "default" { + provider = google-beta + region = "us-central1" + name = "{{index $.Vars "url_map_name"}}" + description = "a description" + + default_service = google_compute_region_backend_service.default.id + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_service = google_compute_region_backend_service.default.id + + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.default.id + } + } +} + +resource "google_compute_region_backend_service" "default" { + provider = google-beta + region = "us-central1" + name = "{{index $.Vars "backend_service_name"}}" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + load_balancing_scheme = "INTERNAL_MANAGED" + + health_checks = [google_compute_region_health_check.default.id] +} + +resource "google_compute_region_health_check" "default" { + provider = google-beta + region = "us-central1" + name = "{{index $.Vars "http_health_check_name"}}" + check_interval_sec = 1 + timeout_sec = 1 + + http_health_check { + port = 80 + } +} diff --git a/mmv1/templates/terraform/examples/go/scc_management_folder_security_health_analytics_custom_module_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_management_folder_security_health_analytics_custom_module_basic.tf.tmpl new file mode 100644 
index 000000000000..49eb43a28cc2 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/scc_management_folder_security_health_analytics_custom_module_basic.tf.tmpl @@ -0,0 +1,24 @@ +resource "google_folder" "folder" { + parent = "organizations/{{index $.TestEnvVars "org_id"}}" + display_name = "{{index $.Vars "folder_display_name"}}" +} + +resource "google_scc_management_folder_security_health_analytics_custom_module" "{{$.PrimaryResourceId}}" { + folder = google_folder.folder.folder_id + location = "global" + display_name = "{{index $.Vars "display_name"}}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." + severity = "MEDIUM" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/scc_management_folder_security_health_analytics_custom_module_full.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_management_folder_security_health_analytics_custom_module_full.tf.tmpl new file mode 100644 index 000000000000..45964d5b11b8 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/scc_management_folder_security_health_analytics_custom_module_full.tf.tmpl @@ -0,0 +1,38 @@ +resource "google_folder" "folder" { + parent = "organizations/{{index $.TestEnvVars "org_id"}}" + display_name = "{{index $.Vars "folder_display_name"}}" +} + +resource "google_scc_management_folder_security_health_analytics_custom_module" "{{$.PrimaryResourceId}}" { + folder = google_folder.folder.folder_id + location = "global" + display_name = "{{index $.Vars "display_name"}}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + 
description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/scc_management_organization_event_threat_detection_custom_module.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_management_organization_event_threat_detection_custom_module.tf.tmpl new file mode 100644 index 000000000000..badc584445b3 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/scc_management_organization_event_threat_detection_custom_module.tf.tmpl @@ -0,0 +1,19 @@ +resource "google_scc_management_organization_event_threat_detection_custom_module" "{{$.PrimaryResourceId}}" { + organization = "{{index $.TestEnvVars "org_id"}}" + location = "global" + display_name = "{{index $.Vars "display_name"}}" + enablement_state = "ENABLED" + type = "{{index $.Vars "type"}}" + description = "My Event Threat Detection Custom Module" + config = jsonencode({ + "metadata": { + "severity": "LOW", + "description": "Flagged by Forcepoint as malicious", + "recommendation": "Contact the owner of the relevant project." 
+ }, + "ips": [ + "192.0.2.1", + "192.0.2.0/24" + ] + }) +} diff --git a/mmv1/templates/terraform/examples/go/scc_management_organization_project_security_health_analytics_custom_module_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_management_organization_project_security_health_analytics_custom_module_basic.tf.tmpl new file mode 100644 index 000000000000..39ea244c1f06 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/scc_management_organization_project_security_health_analytics_custom_module_basic.tf.tmpl @@ -0,0 +1,18 @@ +resource "google_scc_management_project_security_health_analytics_custom_module" "{{$.PrimaryResourceId}}" { + location = "global" + display_name = "{{index $.Vars "display_name"}}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." 
+ severity = "MEDIUM" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/scc_management_organization_project_security_health_analytics_custom_module_full.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_management_organization_project_security_health_analytics_custom_module_full.tf.tmpl new file mode 100644 index 000000000000..a986ac74a1ec --- /dev/null +++ b/mmv1/templates/terraform/examples/go/scc_management_organization_project_security_health_analytics_custom_module_full.tf.tmpl @@ -0,0 +1,32 @@ +resource "google_scc_management_project_security_health_analytics_custom_module" "{{$.PrimaryResourceId}}" { + location = "global" + display_name = "{{index $.Vars "display_name"}}" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/scc_management_organization_security_health_analytics_custom_module_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_management_organization_security_health_analytics_custom_module_basic.tf.tmpl new file mode 100644 index 000000000000..aec1c1fe7cbc --- /dev/null +++ b/mmv1/templates/terraform/examples/go/scc_management_organization_security_health_analytics_custom_module_basic.tf.tmpl @@ -0,0 +1,19 @@ +resource 
"google_scc_management_organization_security_health_analytics_custom_module" "{{$.PrimaryResourceId}}" { + organization = "{{index $.TestEnvVars "org_id"}}" + display_name = "{{index $.Vars "display_name"}}" + location = "global" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + description = "The rotation period of the identified cryptokey resource exceeds 30 days." + recommendation = "Set the rotation period to at most 30 days." + severity = "MEDIUM" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/scc_management_organization_security_health_analytics_custom_module_full.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_management_organization_security_health_analytics_custom_module_full.tf.tmpl new file mode 100644 index 000000000000..159ecea97f01 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/scc_management_organization_security_health_analytics_custom_module_full.tf.tmpl @@ -0,0 +1,33 @@ +resource "google_scc_management_organization_security_health_analytics_custom_module" "{{$.PrimaryResourceId}}" { + organization = "{{index $.TestEnvVars "org_id"}}" + display_name = "{{index $.Vars "display_name"}}" + location = "global" + enablement_state = "ENABLED" + custom_config { + predicate { + expression = "resource.rotationPeriod > duration(\"2592000s\")" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + custom_output { + properties { + name = "duration" + value_expression { + expression = "resource.rotationPeriod" + title = "Purpose of the expression" + description = "description of the expression" + location = "location of the expression" + } + } + } + resource_selector { + resource_types = [ + "cloudkms.googleapis.com/CryptoKey", + ] + } + severity = "LOW" + 
description = "Description of the custom module" + recommendation = "Steps to resolve violation" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/security_policy_rule_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/security_policy_rule_basic.tf.tmpl new file mode 100644 index 000000000000..4667ad26d55b --- /dev/null +++ b/mmv1/templates/terraform/examples/go/security_policy_rule_basic.tf.tmpl @@ -0,0 +1,19 @@ +resource "google_compute_security_policy" "default" { + name = "{{index $.Vars "sec_policy_name"}}" + description = "basic global security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_security_policy_rule" "{{$.PrimaryResourceId}}" { + security_policy = google_compute_security_policy.default.name + description = "new rule" + priority = 100 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.10.0.0/16"] + } + } + action = "allow" + preview = true +} diff --git a/mmv1/templates/terraform/examples/go/security_policy_rule_default_rule.tf.tmpl b/mmv1/templates/terraform/examples/go/security_policy_rule_default_rule.tf.tmpl new file mode 100644 index 000000000000..6e606b6624a3 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/security_policy_rule_default_rule.tf.tmpl @@ -0,0 +1,37 @@ +resource "google_compute_security_policy" "default" { + name = "{{index $.Vars "sec_policy_name"}}" + description = "basic global security policy" + type = "CLOUD_ARMOR" +} + +# A default rule is generated when creating the security_policy resource, import is needed to patch it +# import { +# id = "projects/{{index $.TestEnvVars "project_id"}}/global/securityPolicies/{{index $.Vars "sec_policy_name"}}/priority/2147483647" +# to = google_compute_security_policy_rule.default_rule +# } +resource "google_compute_security_policy_rule" "default_rule" { + security_policy = google_compute_security_policy.default.name + description = "default rule" + action = "allow" + priority = "2147483647" + match { + 
versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } +} + +resource "google_compute_security_policy_rule" "{{$.PrimaryResourceId}}" { + security_policy = google_compute_security_policy.default.name + description = "new rule" + priority = 100 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.10.0.0/16"] + } + } + action = "allow" + preview = true +} diff --git a/mmv1/templates/terraform/examples/go/security_policy_rule_multiple_rules.tf.tmpl b/mmv1/templates/terraform/examples/go/security_policy_rule_multiple_rules.tf.tmpl new file mode 100644 index 000000000000..cba6ad3c7a74 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/security_policy_rule_multiple_rules.tf.tmpl @@ -0,0 +1,33 @@ +resource "google_compute_security_policy" "default" { + name = "{{index $.Vars "sec_policy_name"}}" + description = "basic global security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_security_policy_rule" "{{$.PrimaryResourceId}}" { + security_policy = google_compute_security_policy.default.name + description = "new rule one" + priority = 100 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.10.0.0/16"] + } + } + action = "allow" + preview = true +} + +resource "google_compute_security_policy_rule" "policy_rule_two" { + security_policy = google_compute_security_policy.default.name + description = "new rule two" + priority = 101 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["192.168.0.0/16", "10.0.0.0/8"] + } + } + action = "allow" + preview = true +} diff --git a/mmv1/templates/terraform/examples/go/spanner_instance_config_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/spanner_instance_config_basic.tf.tmpl index c4c8e2723258..6aeb25862767 100644 --- a/mmv1/templates/terraform/examples/go/spanner_instance_config_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/spanner_instance_config_basic.tf.tmpl @@ -1,5 +1,5 @@ -resource 
"google_spanner_instance_config" "<%= ctx[:primary_resource_id] %>" { - name = "<%= ctx[:vars]['instance_config_name'] %>" +resource "google_spanner_instance_config" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "instance_config_name"}}" display_name = "Test Spanner Instance Config" base_config = "nam11" replicas { diff --git a/mmv1/templates/terraform/examples/go/uptime_check_config_https.tf.tmpl b/mmv1/templates/terraform/examples/go/uptime_check_config_https.tf.tmpl index 9930df6daef6..76468c0f860d 100644 --- a/mmv1/templates/terraform/examples/go/uptime_check_config_https.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/uptime_check_config_https.tf.tmpl @@ -7,6 +7,9 @@ resource "google_monitoring_uptime_check_config" "{{$.PrimaryResourceId}}" { port = "443" use_ssl = true validate_ssl = true + service_agent_authentication { + type = "OIDC_TOKEN" + } } monitored_resource { diff --git a/mmv1/templates/terraform/examples/go/workstation_config_boost.tf.tmpl b/mmv1/templates/terraform/examples/go/workstation_config_boost.tf.tmpl index 9a012edbcc0f..39ae0d19bf1d 100644 --- a/mmv1/templates/terraform/examples/go/workstation_config_boost.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/workstation_config_boost.tf.tmpl @@ -48,8 +48,11 @@ resource "google_workstations_workstation_config" "{{$.PrimaryResourceId}}" { } } boost_configs { - id = "boost-1" - machine_type = "e2-standard-2" + id = "boost-2" + machine_type = "n1-standard-2" + pool_size = 2 + boot_disk_size_gb = 30 + enable_nested_virtualization = true } } } diff --git a/mmv1/templates/terraform/examples/public_advertised_prefixes_basic.tf.erb b/mmv1/templates/terraform/examples/public_advertised_prefixes_basic.tf.erb index 79a7c65f34e0..8660344ed64f 100644 --- a/mmv1/templates/terraform/examples/public_advertised_prefixes_basic.tf.erb +++ b/mmv1/templates/terraform/examples/public_advertised_prefixes_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_public_advertised_prefix" "<%= 
ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['prefixes_name'] %>" - description = "<%= ctx[:test_env_vars]['description'] %>" + description = "<%= ctx[:test_env_vars]['desc'] %>" dns_verification_ip = "127.127.0.0" ip_cidr_range = "127.127.0.0/16" } diff --git a/mmv1/templates/terraform/examples/public_delegated_prefixes_basic.tf.erb b/mmv1/templates/terraform/examples/public_delegated_prefixes_basic.tf.erb index feb036bd752d..ec382f723627 100644 --- a/mmv1/templates/terraform/examples/public_delegated_prefixes_basic.tf.erb +++ b/mmv1/templates/terraform/examples/public_delegated_prefixes_basic.tf.erb @@ -1,6 +1,6 @@ resource "google_compute_public_advertised_prefix" "advertised" { name = "<%= ctx[:vars]['prefixes_name'] %>" - description = "<%= ctx[:test_env_vars]['description'] %>" + description = "<%= ctx[:test_env_vars]['desc'] %>" dns_verification_ip = "127.127.0.0" ip_cidr_range = "127.127.0.0/16" } diff --git a/mmv1/templates/terraform/post_create/go/iam_workforce_pool_provider.go.tmpl b/mmv1/templates/terraform/post_create/go/iam_workforce_pool_provider.go.tmpl index 25aaf9eb73b5..56594c36c4ac 100644 --- a/mmv1/templates/terraform/post_create/go/iam_workforce_pool_provider.go.tmpl +++ b/mmv1/templates/terraform/post_create/go/iam_workforce_pool_provider.go.tmpl @@ -1,17 +1,35 @@ -createdClientSecret := d.Get("oidc.0.client_secret.0.value.0.plain_text") -if createdClientSecret != nil && createdClientSecret != "" { +createdOidcClientSecret := d.Get("oidc.0.client_secret.0.value.0.plain_text") +createdExtraAttributesClientSecret := d.Get("extra_attributes_oauth2_client.0.client_secret.0.value.0.plain_text") + +if (createdOidcClientSecret != nil && createdOidcClientSecret != "") || (createdExtraAttributesClientSecret != nil && createdExtraAttributesClientSecret != "") { // After the create, reading from the API returns a new thumbprint // for the client secret value, which clears the plain_text. 
We set the plain_text since // this case should not warrant a diff. if err := resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta); err != nil { return err } - oidc := d.Get("oidc") - clientSecret := oidc.([]interface{})[0].(map[string]interface{})["client_secret"] - clientSecretValue := clientSecret.([]interface{})[0].(map[string]interface{})["value"] - clientSecretValue.([]interface{})[0].(map[string]interface{})["plain_text"] = createdClientSecret - if err := d.Set("oidc", oidc); err != nil { - return err + + // Populate ExtraAttributesOauth2Client the client secret plain text + if createdExtraAttributesClientSecret != nil && createdExtraAttributesClientSecret != "" { + extraAttributesOauth2Client := d.Get("extra_attributes_oauth2_client") + clientSecret := extraAttributesOauth2Client.([]interface{})[0].(map[string]interface{})["client_secret"] + clientSecretValue := clientSecret.([]interface{})[0].(map[string]interface{})["value"] + clientSecretValue.([]interface{})[0].(map[string]interface{})["plain_text"] = createdExtraAttributesClientSecret + if err := d.Set("extra_attributes_oauth2_client", extraAttributesOauth2Client); err != nil { + return err + } + } + + // Populate OIDC the client secret plain text + if createdOidcClientSecret != nil && createdOidcClientSecret != "" { + oidc := d.Get("oidc") + clientSecret := oidc.([]interface{})[0].(map[string]interface{})["client_secret"] + clientSecretValue := clientSecret.([]interface{})[0].(map[string]interface{})["value"] + clientSecretValue.([]interface{})[0].(map[string]interface{})["plain_text"] = createdOidcClientSecret + if err := d.Set("oidc", oidc); err != nil { + return err + } } return nil } + diff --git a/mmv1/templates/terraform/post_update/go/iam_workforce_pool_provider.go.tmpl b/mmv1/templates/terraform/post_update/go/iam_workforce_pool_provider.go.tmpl index f82d1d27a253..2dfb9e3db151 100644 --- a/mmv1/templates/terraform/post_update/go/iam_workforce_pool_provider.go.tmpl +++ 
b/mmv1/templates/terraform/post_update/go/iam_workforce_pool_provider.go.tmpl @@ -1,19 +1,31 @@ -if d.HasChange("oidc") { - updatedClientSecret := d.Get("oidc.0.client_secret.0.value.0.plain_text") - if updatedClientSecret != nil && updatedClientSecret != "" { - // After the update, reading from the API returns a different thumbprint - // for the client secret value, which clears the plain_text. We set the plain_text since - // this case should not warrant a diff. - if err := resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta); err != nil { - return err - } +if d.HasChange("oidc") || d.HasChange("extra_attributes_oauth2_client") { + updatedOidcClientSecret := d.Get("oidc.0.client_secret.0.value.0.plain_text") + updatedExtraAttributesOauth2ClientSecret := d.Get("extra_attributes_oauth2_client.0.client_secret.0.value.0.plain_text") + // After the update, reading from the API returns a different thumbprint + // for the client secret value, which clears the plain_text. We set the plain_text since + // this case should not warrant a diff. 
+ if err := resourceIAMWorkforcePoolWorkforcePoolProviderRead(d, meta); err != nil { + return err + } + + if updatedOidcClientSecret != nil && updatedOidcClientSecret != "" { oidc := d.Get("oidc") clientSecret := oidc.([]interface{})[0].(map[string]interface{})["client_secret"] clientSecretValue := clientSecret.([]interface{})[0].(map[string]interface{})["value"] - clientSecretValue.([]interface{})[0].(map[string]interface{})["plain_text"] = updatedClientSecret + clientSecretValue.([]interface{})[0].(map[string]interface{})["plain_text"] = updatedOidcClientSecret if err := d.Set("oidc", oidc); err != nil { return err } - return nil } -} + + if updatedExtraAttributesOauth2ClientSecret != nil && updatedExtraAttributesOauth2ClientSecret != "" { + extraAttributesOauth2Client := d.Get("extra_attributes_oauth2_client") + clientSecret := extraAttributesOauth2Client.([]interface{})[0].(map[string]interface{})["client_secret"] + clientSecretValue := clientSecret.([]interface{})[0].(map[string]interface{})["value"] + clientSecretValue.([]interface{})[0].(map[string]interface{})["plain_text"] = updatedExtraAttributesOauth2ClientSecret + if err := d.Set("extra_attributes_oauth2_client", extraAttributesOauth2Client); err != nil { + return err + } + } + return nil +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/go/integrations_client.go.tmpl b/mmv1/templates/terraform/pre_create/go/integrations_client.go.tmpl new file mode 100644 index 000000000000..8e40faf8ba58 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/integrations_client.go.tmpl @@ -0,0 +1,5 @@ +// Translate `createSampleIntegrations` to `createSampleWorkflows` +if val, ok := obj["createSampleIntegrations"]; ok { + delete(obj, "createSampleIntegrations") + obj["createSampleWorkflows"] = val +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl 
b/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl index 732a63059365..3d0169cd742b 100644 --- a/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl +++ b/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl @@ -27,6 +27,26 @@ if fqdnProp != "" { toDelete["fqdn"] = fqdnProp } +{{- if ne $.TargetVersionName "ga" }} +// Instance +instanceProp, err := expandNestedComputeRegionNetworkEndpointInstance(d.Get("instance"), d, config) +if err != nil { + return err +} +if instanceProp != "" { + toDelete["instance"] = instanceProp +} + +// Client Destination Port +clientDestinationPortProp, err := expandNestedComputeRegionNetworkEndpointClientDestinationPort(d.Get("client_destination_port"), d, config) +if err != nil { + return err +} +if clientDestinationPortProp != "" && d.Get("client_destination_port").(int) > 0 { + toDelete["clientDestinationPort"] = clientDestinationPortProp +} +{{- end }} + obj = map[string]interface{}{ "networkEndpoints": []map[string]interface{}{toDelete}, -} \ No newline at end of file +} diff --git a/mmv1/templates/terraform/pre_update/go/alloydb_cluster.go.tmpl b/mmv1/templates/terraform/pre_update/go/alloydb_cluster.go.tmpl index 0c893e92f2cd..dcbd075fb019 100644 --- a/mmv1/templates/terraform/pre_update/go/alloydb_cluster.go.tmpl +++ b/mmv1/templates/terraform/pre_update/go/alloydb_cluster.go.tmpl @@ -1,8 +1,3 @@ -// Restrict modification of cluster_type from PRIMARY to SECONDARY as it is an invalid operation -if d.HasChange("cluster_type") && d.Get("cluster_type") == "SECONDARY" { - return fmt.Errorf("Can not convert a primary cluster to a secondary cluster.") -} - // Restrict setting secondary_config if cluster_type is PRIMARY if d.Get("cluster_type") == "PRIMARY" && !tpgresource.IsEmptyValue(reflect.ValueOf(d.Get("secondary_config"))) { return fmt.Errorf("Can not set secondary config for primary cluster.") diff --git 
a/mmv1/templates/terraform/pre_update/go/network_services_gateway.tmpl b/mmv1/templates/terraform/pre_update/go/network_services_gateway.tmpl new file mode 100644 index 000000000000..feed1adcc26e --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/network_services_gateway.tmpl @@ -0,0 +1,4 @@ +if d.Get("type") == "SECURE_WEB_GATEWAY" { + obj["name"] = d.Get("name") + obj["type"] = d.Get("type") +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_update/go/privileged_access_manager_entitlement.go.tmpl b/mmv1/templates/terraform/pre_update/go/privileged_access_manager_entitlement.go.tmpl new file mode 100644 index 000000000000..3855742cd66c --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/privileged_access_manager_entitlement.go.tmpl @@ -0,0 +1,13 @@ + approvalWorkflowProp, err := expandPrivilegedAccessManagerEntitlementApprovalWorkflow(d.Get("approval_workflow"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("approval_workflow"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, approvalWorkflowProp)) { + obj["approvalWorkflow"] = approvalWorkflowProp + } + if d.HasChange("approval_workflow") { + updateMask = append(updateMask, "approvalWorkflow") + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } diff --git a/mmv1/templates/terraform/update_encoder/go/spanner_instance_config_update.go.tmpl b/mmv1/templates/terraform/update_encoder/go/spanner_instance_config_update.go.tmpl new file mode 100644 index 000000000000..6805f5d6b35d --- /dev/null +++ b/mmv1/templates/terraform/update_encoder/go/spanner_instance_config_update.go.tmpl @@ -0,0 +1,8 @@ +project, err := tpgresource.GetProject(d, meta.(*transport_tpg.Config)) +if err != nil { +return nil, err +} +obj["name"] = fmt.Sprintf("projects/%s/instanceConfigs/%s", project, obj["name"]) +newObj := make(map[string]interface{}) 
+newObj["instanceConfig"] = obj +return newObj, nil From d228ab0675d96ec513b7501a58cafd0ac40928ce Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Thu, 13 Jun 2024 10:34:04 -0700 Subject: [PATCH 142/356] Adding a breaking change for adding subfields to SchemaConfigModeAttr (#10770) --- .../breaking-changes/breaking-changes.md | 6 ++ tools/diff-processor/rules/rules_field.go | 33 ++++++++ .../diff-processor/rules/rules_field_test.go | 75 +++++++++++++++++++ 3 files changed, 114 insertions(+) diff --git a/docs/content/develop/breaking-changes/breaking-changes.md b/docs/content/develop/breaking-changes/breaking-changes.md index 6e2b93039bb6..b972fb023216 100644 --- a/docs/content/develop/breaking-changes/breaking-changes.md +++ b/docs/content/develop/breaking-changes/breaking-changes.md @@ -82,6 +82,12 @@ For more information, see * Removing diff suppression from a field. * For MMv1 resources, removing `diff_suppress_func` from a field. * For handwritten resources, removing `DiffSuppressFunc` from a field. +* Adding a subfield to + a SchemaConfigModeAttr field. + * For MMv1 resources, adding a subfield to a field that has + SchemaConfigModeAttr. + * For handwritten resources, adding a subfield to a field that has + SchemaConfigModeAttr. * Removing update support from a field. 
### Making validation more strict diff --git a/tools/diff-processor/rules/rules_field.go b/tools/diff-processor/rules/rules_field.go index efe5d4f2d028..34c32958d305 100644 --- a/tools/diff-processor/rules/rules_field.go +++ b/tools/diff-processor/rules/rules_field.go @@ -28,6 +28,7 @@ var FieldRules = []FieldRule{ fieldRule_GrowingMin, fieldRule_ShrinkingMax, fieldRule_RemovingDiffSuppress, + fieldRule_AddingSubfieldToConfigModeAttr, fieldRule_ChangingFieldDataFormat, } @@ -237,6 +238,38 @@ func fieldRule_RemovingDiffSuppress_func(old, new *schema.Schema, mc MessageCont return nil } +var fieldRule_AddingSubfieldToConfigModeAttr = FieldRule{ + name: "Adding a subfield to a SchemaConfigModeAttr field", + definition: "Subfields cannot be added to fields with SchemaConfigModeAttr because they will be treated as required even if optional.", + message: "Field {{field}} gained a subfield {{subfield}} when it has SchemaConfigModeAttr", + identifier: "field-adding-subfield-to-config-mode-attr", + isRuleBreak: fieldRule_AddingSubfieldToConfigModeAttr_func, +} + +func fieldRule_AddingSubfieldToConfigModeAttr_func(old, new *schema.Schema, mc MessageContext) *BreakingChange { + if old == nil || new == nil { + return nil + } + if new.ConfigMode == schema.SchemaConfigModeAttr { + newObj, ok := new.Elem.(*schema.Resource) + if !ok { + return nil + } + oldObj, ok := old.Elem.(*schema.Resource) + if !ok { + return nil + } + message := mc.message + for fieldName := range newObj.Schema { + if _, ok := oldObj.Schema[fieldName]; !ok { + message = strings.ReplaceAll(message, "{{subfield}}", fieldName) + return populateMessageContext(message, mc) + } + } + } + return nil +} + func fieldRulesToRuleArray(frs []FieldRule) []Rule { var rules []Rule for _, fr := range frs { diff --git a/tools/diff-processor/rules/rules_field_test.go b/tools/diff-processor/rules/rules_field_test.go index cb8f5ab6fb3a..f68847dab6bb 100644 --- a/tools/diff-processor/rules/rules_field_test.go +++ 
b/tools/diff-processor/rules/rules_field_test.go @@ -560,6 +560,81 @@ var fieldRule_ShrinkingMaxTestCases = []fieldTestCase{ }, } +func TestFieldRule_AddingSubfieldToConfigModeAttr(t *testing.T) { + for _, tc := range fieldRule_AddingSubfieldToConfigModeAttrTestCases { + tc.check(fieldRule_AddingSubfieldToConfigModeAttr, t) + } +} + +var fieldRule_AddingSubfieldToConfigModeAttrTestCases = []fieldTestCase{ + { + name: "no new subfields", + oldField: &schema.Schema{ + ConfigMode: schema.SchemaConfigModeAttr, + Description: "beep", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_one": {}, + }, + }, + }, + newField: &schema.Schema{ + ConfigMode: schema.SchemaConfigModeAttr, + Description: "beep", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_one": {}, + }, + }, + }, + expectedViolation: false, + }, + { + name: "adding a subfield with no SchemaConfigModeAttr", + oldField: &schema.Schema{ + Description: "beep", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_one": {}, + }, + }, + }, + newField: &schema.Schema{ + Description: "beep", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_one": {}, + "field_two": {}, + }, + }, + }, + expectedViolation: false, + }, + { + name: "adding a field with SchemaConfigModeAttr", + oldField: &schema.Schema{ + ConfigMode: schema.SchemaConfigModeAttr, + Description: "beep", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_one": {}, + }, + }, + }, + newField: &schema.Schema{ + ConfigMode: schema.SchemaConfigModeAttr, + Description: "beep", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "field_one": {}, + "field_two": {}, + }, + }, + }, + expectedViolation: true, + }, +} + func (tc *fieldTestCase) check(rule FieldRule, t *testing.T) { breakage := rule.isRuleBreak(tc.oldField, tc.newField, MessageContext{}) From 81488b892dcb7a9e33c3718a1fbcbdd434860d7c Mon Sep 17 00:00:00 2001 From: Rene Scheepers 
<38879286+renescheepers@users.noreply.github.com> Date: Thu, 13 Jun 2024 20:28:05 +0200 Subject: [PATCH 143/356] feat(billingbudget): support project level recicipients on budgets (#10926) --- mmv1/products/billingbudget/Budget.yaml | 17 ++++++++++++ ...ing_budget_notify_project_recipient.tf.erb | 27 +++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 mmv1/templates/terraform/examples/billing_budget_notify_project_recipient.tf.erb diff --git a/mmv1/products/billingbudget/Budget.yaml b/mmv1/products/billingbudget/Budget.yaml index dcddda4a0fdf..0ba4a2e3df58 100644 --- a/mmv1/products/billingbudget/Budget.yaml +++ b/mmv1/products/billingbudget/Budget.yaml @@ -68,6 +68,13 @@ examples: channel_name: 'Example Notification Channel' test_env_vars: billing_acct: :MASTER_BILLING_ACCT + - !ruby/object:Provider::Terraform::Examples + name: 'billing_budget_notify_project_recipient' + primary_resource_id: 'budget' + vars: + budget_name: 'Example Billing Budget' + test_env_vars: + billing_acct: :MASTER_BILLING_ACCT - !ruby/object:Provider::Terraform::Examples name: 'billing_budget_customperiod' primary_resource_id: 'budget' @@ -440,6 +447,7 @@ properties: - 'notificationsRule.schemaVersion' - 'notificationsRule.monitoringNotificationChannels' - 'notificationsRule.disableDefaultIamRecipients' + - 'notificationsRule.enableProjectLevelRecipients' properties: - !ruby/object:Api::Type::String name: pubsubTopic @@ -479,6 +487,15 @@ properties: when a threshold is exceeded. Default recipients are those with Billing Account Administrators and Billing Account Users IAM roles for the target account. + - !ruby/object:Api::Type::Boolean + name: enableProjectLevelRecipients + default_value: false + description: | + When set to true, and when the budget has a single project configured, + notifications will be sent to project level recipients of that project. + This field will be ignored if the budget has multiple or no project configured. 
+ + Currently, project level recipients are the users with Owner role on a cloud project. - !ruby/object:Api::Type::Enum name: ownershipScope description: | diff --git a/mmv1/templates/terraform/examples/billing_budget_notify_project_recipient.tf.erb b/mmv1/templates/terraform/examples/billing_budget_notify_project_recipient.tf.erb new file mode 100644 index 000000000000..d24ea5ebcb90 --- /dev/null +++ b/mmv1/templates/terraform/examples/billing_budget_notify_project_recipient.tf.erb @@ -0,0 +1,27 @@ +data "google_billing_account" "account" { + billing_account = "<%= ctx[:test_env_vars]['billing_acct'] -%>" +} + +data "google_project" "project" { +} + +resource "google_billing_budget" "<%= ctx[:primary_resource_id] %>" { + billing_account = data.google_billing_account.account.id + display_name = "<%= ctx[:vars]['budget_name'] %>" + + budget_filter { + projects = ["projects/${data.google_project.project.number}"] + } + + amount { + specified_amount { + currency_code = "USD" + units = "100000" + } + } + + all_updates_rule { + monitoring_notification_channels = [] + enable_project_level_recipients = true + } +} From 7f7c253af3ae1b79814a2893d9cf4e6b405b4049 Mon Sep 17 00:00:00 2001 From: Mike Laramie Date: Thu, 13 Jun 2024 17:00:58 -0400 Subject: [PATCH 144/356] fix: added ENTERPRISE option to GKE Hub Fleet Security Posture setting (#10895) --- mmv1/products/gkehub2/Fleet.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/mmv1/products/gkehub2/Fleet.yaml b/mmv1/products/gkehub2/Fleet.yaml index 88f2b389233c..e6541dc02414 100644 --- a/mmv1/products/gkehub2/Fleet.yaml +++ b/mmv1/products/gkehub2/Fleet.yaml @@ -130,6 +130,7 @@ properties: values: - DISABLED - BASIC + - ENTERPRISE - !ruby/object:Api::Type::Enum name: "vulnerabilityMode" description: Sets which mode to use for vulnerability scanning. 
From 11babe3e130039e22905bcf7f7440a7088ae1d97 Mon Sep 17 00:00:00 2001 From: patrickmoy <53500820+patrickmoy@users.noreply.github.com> Date: Fri, 14 Jun 2024 04:41:13 -0700 Subject: [PATCH 145/356] Fix flaky DiscoveryConfig.yaml tests (#10959) --- ...a_loss_prevention_discovery_config_test.go | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_discovery_config_test.go b/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_discovery_config_test.go index dfd6d137ff81..59c43de57ddb 100644 --- a/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_discovery_config_test.go +++ b/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_discovery_config_test.go @@ -52,7 +52,7 @@ func testAccDataLossPreventionDiscoveryConfig_BasicUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigUpdate(context), @@ -61,7 +61,7 @@ func testAccDataLossPreventionDiscoveryConfig_BasicUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, }, }) @@ -88,7 +88,7 @@ func testAccDataLossPreventionDiscoveryConfig_OrgUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: 
[]string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigOrgFolderPaused(context), @@ -97,7 +97,7 @@ func testAccDataLossPreventionDiscoveryConfig_OrgUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, }, }) @@ -123,7 +123,7 @@ func testAccDataLossPreventionDiscoveryConfig_ActionsUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigActions(context), @@ -132,7 +132,7 @@ func testAccDataLossPreventionDiscoveryConfig_ActionsUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigActionsSensitivity(context), @@ -141,7 +141,7 @@ func testAccDataLossPreventionDiscoveryConfig_ActionsUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: 
[]string{"location", "parent", "last_run_time", "update_time", "errors"}, }, }, }) @@ -167,7 +167,7 @@ func testAccDataLossPreventionDiscoveryConfig_ConditionsCadenceUpdate(t *testing ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigConditionsCadence(context), @@ -176,7 +176,7 @@ func testAccDataLossPreventionDiscoveryConfig_ConditionsCadenceUpdate(t *testing ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, }, }) @@ -202,7 +202,7 @@ func testAccDataLossPreventionDiscoveryConfig_FilterUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigFilterRegexesAndConditions(context), @@ -211,7 +211,7 @@ func testAccDataLossPreventionDiscoveryConfig_FilterUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, }, }) @@ -237,7 +237,7 @@ func 
testAccDataLossPreventionDiscoveryConfig_CloudSqlUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigUpdateCloudSql(context), @@ -246,7 +246,7 @@ func testAccDataLossPreventionDiscoveryConfig_CloudSqlUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, }, }) @@ -272,7 +272,7 @@ func testAccDataLossPreventionDiscoveryConfig_BqSingleTable(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigBqSingleUpdate(context), @@ -281,7 +281,7 @@ func testAccDataLossPreventionDiscoveryConfig_BqSingleTable(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, }, }) @@ -307,7 +307,7 @@ func testAccDataLossPreventionDiscoveryConfig_SqlSingleTable(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - 
ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigCloudSqlSingleUpdate(context), @@ -316,7 +316,7 @@ func testAccDataLossPreventionDiscoveryConfig_SqlSingleTable(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, }, }) @@ -342,7 +342,7 @@ func testAccDataLossPreventionDiscoveryConfig_SecretsUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, { Config: testAccDataLossPreventionDiscoveryConfig_dlpDiscoveryConfigSecretsUpdate(context), @@ -351,7 +351,7 @@ func testAccDataLossPreventionDiscoveryConfig_SecretsUpdate(t *testing.T) { ResourceName: "google_data_loss_prevention_discovery_config.basic", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time"}, + ImportStateVerifyIgnore: []string{"location", "parent", "last_run_time", "update_time", "errors"}, }, }, }) From 4da96e02caa24f40576076da88ed0d9f1c37f349 Mon Sep 17 00:00:00 2001 From: Kamal Aboul-Hosn Date: Fri, 14 Jun 2024 10:53:39 -0400 Subject: [PATCH 146/356] feat: Add Pub/Sub Subscription support for specifying a service account (#10967) --- mmv1/products/pubsub/Subscription.yaml | 29 +++ ...ubscription_push_bq_service_account.tf.erb | 56 ++++++ 
...n_push_cloudstorage_service_account.tf.erb | 46 +++++ .../resource_pubsub_subscription_test.go | 168 ++++++++++++++++-- 4 files changed, 286 insertions(+), 13 deletions(-) create mode 100644 mmv1/templates/terraform/examples/pubsub_subscription_push_bq_service_account.tf.erb create mode 100644 mmv1/templates/terraform/examples/pubsub_subscription_push_cloudstorage_service_account.tf.erb diff --git a/mmv1/products/pubsub/Subscription.yaml b/mmv1/products/pubsub/Subscription.yaml index 97a17de701cd..d70884b88cfa 100644 --- a/mmv1/products/pubsub/Subscription.yaml +++ b/mmv1/products/pubsub/Subscription.yaml @@ -64,6 +64,15 @@ examples: subscription_name: 'example-subscription' dataset_id: 'example_dataset' table_id: 'example_table' + - !ruby/object:Provider::Terraform::Examples + name: 'pubsub_subscription_push_bq_service_account' + primary_resource_id: 'example' + vars: + topic_name: 'example-topic' + subscription_name: 'example-subscription' + dataset_id: 'example_dataset' + table_id: 'example_table' + service_account_id: 'example-bqw' - !ruby/object:Provider::Terraform::Examples name: 'pubsub_subscription_push_cloudstorage' primary_resource_id: 'example' @@ -78,6 +87,14 @@ examples: topic_name: 'example-topic' subscription_name: 'example-subscription' bucket_name: 'example-bucket' + - !ruby/object:Provider::Terraform::Examples + name: 'pubsub_subscription_push_cloudstorage_service_account' + primary_resource_id: 'example' + vars: + topic_name: 'example-topic' + subscription_name: 'example-subscription' + bucket_name: 'example-bucket' + service_account_id: 'example-stw' docs: !ruby/object:Provider::Terraform::Docs note: | You can retrieve the email of the Google Managed Pub/Sub Service Account used for forwarding @@ -150,6 +167,12 @@ properties: When true and use_topic_schema or use_table_schema is true, any fields that are a part of the topic schema or message schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. 
Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. + - !ruby/object:Api::Type::String + name: 'serviceAccountEmail' + description: | + The service account to use to write to BigQuery. If not specified, the Pub/Sub + [service agent](https://cloud.google.com/iam/docs/service-agents), + service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. - !ruby/object:Api::Type::NestedObject name: 'cloudStorageConfig' conflicts: @@ -207,6 +230,12 @@ properties: name: 'writeMetadata' description: | When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. + - !ruby/object:Api::Type::String + name: 'serviceAccountEmail' + description: | + The service account to use to write to Cloud Storage. If not specified, the Pub/Sub + [service agent](https://cloud.google.com/iam/docs/service-agents), + service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. 
- !ruby/object:Api::Type::NestedObject name: 'pushConfig' conflicts: diff --git a/mmv1/templates/terraform/examples/pubsub_subscription_push_bq_service_account.tf.erb b/mmv1/templates/terraform/examples/pubsub_subscription_push_bq_service_account.tf.erb new file mode 100644 index 000000000000..9dc8061980cb --- /dev/null +++ b/mmv1/templates/terraform/examples/pubsub_subscription_push_bq_service_account.tf.erb @@ -0,0 +1,56 @@ +resource "google_pubsub_topic" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['topic_name'] %>" +} + +resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['subscription_name'] %>" + topic = google_pubsub_topic.<%= ctx[:primary_resource_id] %>.id + + bigquery_config { + table = "${google_bigquery_table.test.project}.${google_bigquery_table.test.dataset_id}.${google_bigquery_table.test.table_id}" + service_account_email = google_service_account.bq_write_service_account.email + } + + depends_on = [google_service_account.bq_write_service_account, google_project_iam_member.viewer, google_project_iam_member.editor] +} + +data "google_project" "project" { +} + +resource "google_service_account" "bq_write_service_account" { + account_id = "<%= ctx[:vars]['service_account_id'] %>" + display_name = "BQ Write Service Account" +} + +resource "google_project_iam_member" "viewer" { + project = data.google_project.project.project_id + role = "roles/bigquery.metadataViewer" + member = "serviceAccount:${google_service_account.bq_write_service_account.email}" +} + +resource "google_project_iam_member" "editor" { + project = data.google_project.project.project_id + role = "roles/bigquery.dataEditor" + member = "serviceAccount:${google_service_account.bq_write_service_account.email}" +} + +resource "google_bigquery_dataset" "test" { + dataset_id = "<%= ctx[:vars]['dataset_id'] %>" +} + +resource "google_bigquery_table" "test" { + deletion_protection = false + table_id = "<%= ctx[:vars]['table_id'] %>" 
+ dataset_id = google_bigquery_dataset.test.dataset_id + + schema = <" { + name = "<%= ctx[:vars]['bucket_name'] %>" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_pubsub_topic" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['topic_name'] %>" +} + +resource "google_pubsub_subscription" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['subscription_name'] %>" + topic = google_pubsub_topic.<%= ctx[:primary_resource_id] %>.id + + cloud_storage_config { + bucket = google_storage_bucket.<%= ctx[:primary_resource_id] %>.name + + filename_prefix = "pre-" + filename_suffix = "-%{random_suffix}" + filename_datetime_format = "YYYY-MM-DD/hh_mm_ssZ" + + max_bytes = 1000 + max_duration = "300s" + + service_account_email = google_service_account.storage_write_service_account.email + } + depends_on = [ + google_service_account.storage_write_service_account, + google_storage_bucket.<%= ctx[:primary_resource_id] %>, + google_storage_bucket_iam_member.admin, + ] +} + +data "google_project" "project" { +} + +resource "google_service_account" "storage_write_service_account" { + account_id = "<%= ctx[:vars]['service_account_id'] %>" + display_name = "Storage Write Service Account" +} + +resource "google_storage_bucket_iam_member" "admin" { + bucket = google_storage_bucket.<%= ctx[:primary_resource_id] %>.name + role = "roles/storage.admin" + member = "serviceAccount:${google_service_account.storage_write_service_account.email}" +} diff --git a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go index 4ae710112c1c..e368b8d1fa35 100644 --- a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go +++ b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go @@ -185,7 +185,7 @@ func TestAccPubsubSubscriptionBigQuery_update(t *testing.T) { CheckDestroy: 
testAccCheckPubsubSubscriptionDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, false), + Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, false, ""), }, { ResourceName: "google_pubsub_subscription.foo", @@ -194,7 +194,51 @@ func TestAccPubsubSubscriptionBigQuery_update(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, true), + Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, true, ""), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPubsubSubscriptionBigQuery_serviceAccount(t *testing.T) { + t.Parallel() + + dataset := fmt.Sprintf("tftestdataset%s", acctest.RandString(t, 10)) + table := fmt.Sprintf("tf-test-table-%s", acctest.RandString(t, 10)) + topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + subscriptionShort := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckPubsubSubscriptionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, false, "bq-test-sa"), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, true, ""), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscriptionShort, true, "bq-test-sa2"), }, { ResourceName: "google_pubsub_subscription.foo", @@ -219,7 +263,50 @@ func TestAccPubsubSubscriptionCloudStorage_update(t *testing.T) { CheckDestroy: testAccCheckPubsubSubscriptionDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, ""), + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", ""), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "pre-", "-suffix", "YYYY-MM-DD/hh_mm_ssZ", 1000, "300s", ""), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccPubsubSubscriptionCloudStorage_serviceAccount(t *testing.T) { + t.Parallel() + + bucket := fmt.Sprintf("tf-test-bucket-%s", acctest.RandString(t, 10)) + topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + subscriptionShort := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckPubsubSubscriptionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", "gcs-test-sa"), + }, + { + ResourceName: "google_pubsub_subscription.foo", + ImportStateId: subscriptionShort, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "pre-", "-suffix", 
"YYYY-MM-DD/hh_mm_ssZ", 1000, "300s", ""), }, { ResourceName: "google_pubsub_subscription.foo", @@ -228,7 +315,7 @@ func TestAccPubsubSubscriptionCloudStorage_update(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "pre-", "-suffix", "YYYY-MM-DD/hh_mm_ssZ", 1000, "300s"), + Config: testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscriptionShort, "", "", "", 0, "", "gcs-test-sa2"), }, { ResourceName: "google_pubsub_subscription.foo", @@ -435,10 +522,30 @@ resource "google_pubsub_subscription" "foo" { `, topic, subscription, label, deadline, exactlyOnceDelivery) } -func testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscription string, useTableSchema bool) string { - return fmt.Sprintf(` -data "google_project" "project" { } +func testAccPubsubSubscriptionBigQuery_basic(dataset, table, topic, subscription string, useTableSchema bool, serviceAccountId string) string { + serivceAccountEmailField := "" + serivceAccountResource := "" + if serviceAccountId != "" { + serivceAccountResource = fmt.Sprintf(` +resource "google_service_account" "bq_write_service_account" { + account_id = "%s" + display_name = "BQ Write Service Account" +} + +resource "google_project_iam_member" "viewer" { + project = data.google_project.project.project_id + role = "roles/bigquery.metadataViewer" + member = "serviceAccount:${google_service_account.bq_write_service_account.email}" +} +resource "google_project_iam_member" "editor" { + project = data.google_project.project.project_id + role = "roles/bigquery.dataEditor" + member = "serviceAccount:${google_service_account.bq_write_service_account.email}" +}`, serviceAccountId) + serivceAccountEmailField = "service_account_email = google_service_account.bq_write_service_account.email" + } else { + serivceAccountResource = fmt.Sprintf(` resource "google_project_iam_member" "viewer" { project = data.google_project.project.project_id role = 
"roles/bigquery.metadataViewer" @@ -450,6 +557,13 @@ resource "google_project_iam_member" "editor" { role = "roles/bigquery.dataEditor" member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" } + `) + } + + return fmt.Sprintf(` +data "google_project" "project" { } + +%s resource "google_bigquery_dataset" "test" { dataset_id = "%s" @@ -483,6 +597,7 @@ resource "google_pubsub_subscription" "foo" { bigquery_config { table = "${google_bigquery_table.test.project}.${google_bigquery_table.test.dataset_id}.${google_bigquery_table.test.table_id}" use_table_schema = %t + %s } depends_on = [ @@ -490,10 +605,10 @@ resource "google_pubsub_subscription" "foo" { google_project_iam_member.editor ] } -`, dataset, table, topic, subscription, useTableSchema) + `, serivceAccountResource, dataset, table, topic, subscription, useTableSchema, serivceAccountEmailField) } -func testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscription, filenamePrefix, filenameSuffix, filenameDatetimeFormat string, maxBytes int, maxDuration string) string { +func testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscription, filenamePrefix, filenameSuffix, filenameDatetimeFormat string, maxBytes int, maxDuration string, serviceAccountId string) string { filenamePrefixString := "" if filenamePrefix != "" { filenamePrefixString = fmt.Sprintf(`filename_prefix = "%s"`, filenamePrefix) @@ -514,20 +629,46 @@ func testAccPubsubSubscriptionCloudStorage_basic(bucket, topic, subscription, fi if maxDuration != "" { maxDurationString = fmt.Sprintf(`max_duration = "%s"`, maxDuration) } - return fmt.Sprintf(` -data "google_project" "project" { } + + serivceAccountEmailField := "" + serivceAccountResource := "" + if serviceAccountId != "" { + serivceAccountResource = fmt.Sprintf(` +resource "google_service_account" "storage_write_service_account" { + account_id = "%s" + display_name = "Write Service Account" +} resource 
"google_storage_bucket_iam_member" "admin" { bucket = google_storage_bucket.test.name role = "roles/storage.admin" - member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" + member = "serviceAccount:${google_service_account.storage_write_service_account.email}" } +resource "google_project_iam_member" "editor" { + project = data.google_project.project.project_id + role = "roles/bigquery.dataEditor" + member = "serviceAccount:${google_service_account.storage_write_service_account.email}" +}`, serviceAccountId) + serivceAccountEmailField = "service_account_email = google_service_account.storage_write_service_account.email" + } else { + serivceAccountResource = fmt.Sprintf(` +resource "google_storage_bucket_iam_member" "admin" { + bucket = google_storage_bucket.test.name + role = "roles/storage.admin" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-pubsub.iam.gserviceaccount.com" +}`) + } + return fmt.Sprintf(` +data "google_project" "project" { } + resource "google_storage_bucket" "test" { name = "%s" location = "US" } +%s + resource "google_pubsub_topic" "foo" { name = "%s" } @@ -543,6 +684,7 @@ resource "google_pubsub_subscription" "foo" { %s %s %s + %s } depends_on = [ @@ -550,7 +692,7 @@ resource "google_pubsub_subscription" "foo" { google_storage_bucket_iam_member.admin, ] } -`, bucket, topic, subscription, filenamePrefixString, filenameSuffixString, filenameDatetimeString, maxBytesString, maxDurationString) +`, bucket, serivceAccountResource, topic, subscription, filenamePrefixString, filenameSuffixString, filenameDatetimeString, maxBytesString, maxDurationString, serivceAccountEmailField) } func testAccPubsubSubscription_topicOnly(topic string) string { From 9156b0c736912c251b4b79f853b7436a6cba8d69 Mon Sep 17 00:00:00 2001 From: Douglas Bunker Date: Fri, 14 Jun 2024 09:02:33 -0700 Subject: [PATCH 147/356] Add artifact docker image data source (#9521) --- 
.../provider/provider_mmv1_resources.go.erb | 1 + ...a_source_artifact_registry_docker_image.go | 320 ++++++++++++++++++ ...rce_artifact_registry_docker_image_test.go | 149 ++++++++ ...tifact_registry_docker_image.html.markdown | 76 +++++ 4 files changed, 546 insertions(+) create mode 100644 mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_docker_image.go create mode 100644 mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_docker_image_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/artifact_registry_docker_image.html.markdown diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 35ee6bdf1d40..8f56a941f61a 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -28,6 +28,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_active_folder": resourcemanager.DataSourceGoogleActiveFolder(), "google_alloydb_locations": alloydb.DataSourceAlloydbLocations(), "google_alloydb_supported_database_flags": alloydb.DataSourceAlloydbSupportedDatabaseFlags(), + "google_artifact_registry_docker_image": artifactregistry.DataSourceArtifactRegistryDockerImage(), "google_artifact_registry_repository": artifactregistry.DataSourceArtifactRegistryRepository(), "google_apphub_discovered_workload": apphub.DataSourceApphubDiscoveredWorkload(), "google_app_engine_default_service_account": appengine.DataSourceGoogleAppEngineDefaultServiceAccount(), diff --git a/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_docker_image.go b/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_docker_image.go new file mode 100644 index 000000000000..f5dbc9d23e33 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_docker_image.go @@ -0,0 +1,320 @@ +package artifactregistry + +import ( + "fmt" + "net/url" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.dockerImages#DockerImage +type DockerImage struct { + name string + uri string + tags []string + imageSizeBytes string + mediaType string + uploadTime string + buildTime string + updateTime string +} + +func DataSourceArtifactRegistryDockerImage() *schema.Resource { + + return &schema.Resource{ + Read: DataSourceArtifactRegistryDockerImageRead, + + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: `Project ID of the project.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: `The region of the artifact registry repository. 
For example, "us-west1".`, + }, + "repository_id": { + Type: schema.TypeString, + Required: true, + Description: `The last part of the repository name to fetch from.`, + }, + "image_name": { + Type: schema.TypeString, + Required: true, + Description: `The image name to fetch.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The fully qualified name of the fetched image.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI to access the image.`, + }, + "tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `All tags associated with the image.`, + }, + "image_size_bytes": { + Type: schema.TypeString, + Computed: true, + Description: `Calculated size of the image in bytes.`, + }, + "media_type": { + Type: schema.TypeString, + Computed: true, + Description: `Media type of this image.`, + }, + "upload_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time, as a RFC 3339 string, the image was uploaded.`, + }, + "build_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time, as a RFC 3339 string, this image was built.`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The time, as a RFC 3339 string, this image was updated.`, + }, + }, + } +} + +func DataSourceArtifactRegistryDockerImageRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + var res DockerImage + + imageName, tag, digest := parseImage(d.Get("image_name").(string)) + + if digest != "" { + // fetch image by digest + // https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.dockerImages/get + imageUrlSafe := url.QueryEscape(imageName) + urlRequest, err := tpgresource.ReplaceVars(d, config, 
fmt.Sprintf("{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/dockerImages/%s@%s", imageUrlSafe, digest)) + if err != nil { + return fmt.Errorf("Error setting api endpoint") + } + + resGet, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: urlRequest, + UserAgent: userAgent, + }) + if err != nil { + return err + } + + res = convertResponseToStruct(resGet) + } else { + // fetch the list of images, ordered by update time + // https://cloud.google.com/artifact-registry/docs/reference/rest/v1/projects.locations.repositories.dockerImages/list + urlRequest, err := tpgresource.ReplaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/dockerImages") + if err != nil { + return fmt.Errorf("Error setting api endpoint") + } + + urlRequest, err = transport_tpg.AddQueryParams(urlRequest, map[string]string{"orderBy": "update_time desc"}) + if err != nil { + return err + } + + res, err = retrieveAndFilterImages(d, config, urlRequest, userAgent, imageName, tag) + if err != nil { + return err + } + } + + // set the schema data using the response + if err := d.Set("name", res.name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + + if err := d.Set("self_link", res.uri); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + + if err := d.Set("tags", res.tags); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + + if err := d.Set("image_size_bytes", res.imageSizeBytes); err != nil { + return fmt.Errorf("Error setting image_size_bytes: %s", err) + } + + if err := d.Set("media_type", res.mediaType); err != nil { + return fmt.Errorf("Error setting media_type: %s", err) + } + + if err := d.Set("upload_time", res.uploadTime); err != nil { + return fmt.Errorf("Error setting upload_time: %s", err) + } + + if err := d.Set("build_time", res.buildTime); 
err != nil { + return fmt.Errorf("Error setting build_time: %s", err) + } + + if err := d.Set("update_time", res.updateTime); err != nil { + return fmt.Errorf("Error setting update_time: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "{{ArtifactRegistryBasePath}}projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/dockerImages/{{image_name}}") + if err != nil { + return fmt.Errorf("Error constructing the data source id: %s", err) + } + + d.SetId(id) + + return nil +} + +func parseImage(image string) (imageName string, tag string, digest string) { + splitByAt := strings.Split(image, "@") + splitByColon := strings.Split(image, ":") + + if len(splitByAt) == 2 { + imageName = splitByAt[0] + digest = splitByAt[1] + } else if len(splitByColon) == 2 { + imageName = splitByColon[0] + tag = splitByColon[1] + } else { + imageName = image + } + + return imageName, tag, digest +} + +func retrieveAndFilterImages(d *schema.ResourceData, config *transport_tpg.Config, urlRequest string, userAgent string, imageName string, tag string) (DockerImage, error) { + // Paging through the list method until either: + // if a tag was provided, the matching image name and tag pair + // otherwise, return the first matching image name + + for { + resListImages, token, err := retrieveListOfDockerImages(config, urlRequest, userAgent) + if err != nil { + return DockerImage{}, err + } + + var resFiltered []DockerImage + for _, image := range resListImages { + if strings.Contains(image.name, "/"+url.QueryEscape(imageName)+"@") { + resFiltered = append(resFiltered, image) + } + } + + if tag != "" { + for _, image := range resFiltered { + for _, iterTag := range image.tags { + if iterTag == tag { + return image, nil + } + } + } + } else if len(resFiltered) > 0 { + return resFiltered[0], nil + } + + if token == "" { + return DockerImage{}, fmt.Errorf("Requested image was not found.") + } + + urlRequest, err = transport_tpg.AddQueryParams(urlRequest, 
map[string]string{"pageToken": token}) + if err != nil { + return DockerImage{}, err + } + } +} + +func retrieveListOfDockerImages(config *transport_tpg.Config, urlRequest string, userAgent string) ([]DockerImage, string, error) { + resList, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: urlRequest, + UserAgent: userAgent, + }) + if err != nil { + return make([]DockerImage, 0), "", err + } + + if nextPageToken, ok := resList["nextPageToken"].(string); ok { + return flattenDataSourceListResponse(resList), nextPageToken, nil + } else { + return flattenDataSourceListResponse(resList), "", nil + } +} + +func flattenDataSourceListResponse(res map[string]interface{}) []DockerImage { + var dockerImages []DockerImage + + resDockerImages, _ := res["dockerImages"].([]interface{}) + + for _, resImage := range resDockerImages { + image, _ := resImage.(map[string]interface{}) + dockerImages = append(dockerImages, convertResponseToStruct(image)) + } + + return dockerImages +} + +func convertResponseToStruct(res map[string]interface{}) DockerImage { + var dockerImage DockerImage + + if name, ok := res["name"].(string); ok { + dockerImage.name = name + } + + if uri, ok := res["uri"].(string); ok { + dockerImage.uri = uri + } + + if tags, ok := res["tags"].([]interface{}); ok { + var stringTags []string + + for _, tag := range tags { + strTag := tag.(string) + stringTags = append(stringTags, strTag) + } + dockerImage.tags = stringTags + } + + if imageSizeBytes, ok := res["imageSizeBytes"].(string); ok { + dockerImage.imageSizeBytes = imageSizeBytes + } + + if mediaType, ok := res["mediaType"].(string); ok { + dockerImage.mediaType = mediaType + } + + if uploadTime, ok := res["uploadTime"].(string); ok { + dockerImage.uploadTime = uploadTime + } + + if buildTime, ok := res["buildTime"].(string); ok { + dockerImage.buildTime = buildTime + } + + if updateTime, ok := res["updateTime"].(string); ok { + 
dockerImage.updateTime = updateTime + } + + return dockerImage +} diff --git a/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_docker_image_test.go b/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_docker_image_test.go new file mode 100644 index 000000000000..30dd01e71ba3 --- /dev/null +++ b/mmv1/third_party/terraform/services/artifactregistry/data_source_artifact_registry_docker_image_test.go @@ -0,0 +1,149 @@ +package artifactregistry_test + +import ( + "fmt" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceArtifactRegistryDockerImage(t *testing.T) { + t.Parallel() + + resourceName := "data.google_artifact_registry_docker_image.test" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceArtifactRegistryDockerImageConfig, + Check: resource.ComposeTestCheckFunc( + // Data source using a tag + checkTaggedDataSources(resourceName+"Tag", "latest"), + resource.TestCheckResourceAttrSet(resourceName+"Tag", "image_size_bytes"), + validateTimeStamps(resourceName+"Tag"), + + // Data source using a digest + checkDigestDataSources( + resourceName+"Digest", + "projects/cloudrun/locations/us/repositories/container/dockerImages/hello@sha256:7a6e0dfb0142464ce0ba14a2cfcac75e383e36f39f47539c870132c826314ad6", + "us-docker.pkg.dev/cloudrun/container/hello@sha256:7a6e0dfb0142464ce0ba14a2cfcac75e383e36f39f47539c870132c826314ad6", + ), + resource.TestCheckResourceAttrSet(resourceName+"Digest", "image_size_bytes"), + validateTimeStamps(resourceName+"Digest"), + + // url safe docker name using a tag + checkTaggedDataSources(resourceName+"UrlTag", "latest"), + + // url 
safe docker name using a digest + checkDigestDataSources( + resourceName+"UrlDigest", + "projects/go-containerregistry/locations/us/repositories/gcr.io/dockerImages/krane%2Fdebug@sha256:26903bf659994649af0b8ccb2675b76318b2bc3b2c85feea9a1f9d5b98eff363", + "us-docker.pkg.dev/go-containerregistry/gcr.io/krane/debug@sha256:26903bf659994649af0b8ccb2675b76318b2bc3b2c85feea9a1f9d5b98eff363", + ), + + // Data source using no tag or digest + resource.TestCheckResourceAttrSet(resourceName+"None", "repository_id"), + resource.TestCheckResourceAttrSet(resourceName+"None", "image_name"), + resource.TestCheckResourceAttrSet(resourceName+"None", "name"), + resource.TestCheckResourceAttrSet(resourceName+"None", "self_link"), + ), + }, + }, + }) +} + +// Test the data source against the public AR repos +// https://console.cloud.google.com/artifacts/docker/cloudrun/us/container +// https://console.cloud.google.com/artifacts/docker/go-containerregistry/us/gcr.io +// Currently, gcr.io does not provide a imageSizeBytes or buildTime field in the JSON response +const testAccDataSourceArtifactRegistryDockerImageConfig = ` +data "google_artifact_registry_docker_image" "testTag" { + project = "cloudrun" + location = "us" + repository_id = "container" + image_name = "hello:latest" +} + +data "google_artifact_registry_docker_image" "testDigest" { + project = "cloudrun" + location = "us" + repository_id = "container" + image_name = "hello@sha256:7a6e0dfb0142464ce0ba14a2cfcac75e383e36f39f47539c870132c826314ad6" +} + +data "google_artifact_registry_docker_image" "testUrlTag" { + project = "go-containerregistry" + location = "us" + repository_id = "gcr.io" + image_name = "krane/debug:latest" +} + +data "google_artifact_registry_docker_image" "testUrlDigest" { + project = "go-containerregistry" + location = "us" + repository_id = "gcr.io" + image_name = "krane/debug@sha256:26903bf659994649af0b8ccb2675b76318b2bc3b2c85feea9a1f9d5b98eff363" +} + +data "google_artifact_registry_docker_image" 
"testNone" { + project = "go-containerregistry" + location = "us" + repository_id = "gcr.io" + image_name = "crane" +} +` + +func checkTaggedDataSources(resourceName string, expectedTag string) resource.TestCheckFunc { + return resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resourceName, "repository_id"), + resource.TestCheckResourceAttrSet(resourceName, "image_name"), + resource.TestCheckResourceAttrSet(resourceName, "name"), + resource.TestCheckResourceAttrSet(resourceName, "self_link"), + resource.TestCheckTypeSetElemAttr(resourceName, "tags.*", expectedTag), + resource.TestCheckResourceAttrSet(resourceName, "media_type"), + ) +} + +func checkDigestDataSources(resourceName string, expectedName string, expectedSelfLink string) resource.TestCheckFunc { + return resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet(resourceName, "repository_id"), + resource.TestCheckResourceAttrSet(resourceName, "image_name"), + resource.TestCheckResourceAttr(resourceName, "name", expectedName), + resource.TestCheckResourceAttr(resourceName, "self_link", expectedSelfLink), + resource.TestCheckResourceAttrSet(resourceName, "media_type"), + ) +} + +func validateTimeStamps(dataSourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // check that the timestamps are RFC3339 + ds, ok := s.RootModule().Resources[dataSourceName] + if !ok { + return fmt.Errorf("can't find %s in state", dataSourceName) + } + + if !isRFC3339(ds.Primary.Attributes["upload_time"]) { + return fmt.Errorf("upload_time is not RFC3339: %s", ds.Primary.Attributes["upload_time"]) + } + + if !isRFC3339(ds.Primary.Attributes["build_time"]) { + return fmt.Errorf("build_time is not RFC3339: %s", ds.Primary.Attributes["build_time"]) + } + + if !isRFC3339(ds.Primary.Attributes["update_time"]) { + return fmt.Errorf("update_time is not RFC3339: %s", ds.Primary.Attributes["update_time"]) + } + + return nil + } +} + +func isRFC3339(s string) bool { + _, err := 
time.Parse(time.RFC3339, s)
+	return err == nil
+}
diff --git a/mmv1/third_party/terraform/website/docs/d/artifact_registry_docker_image.html.markdown b/mmv1/third_party/terraform/website/docs/d/artifact_registry_docker_image.html.markdown
new file mode 100644
index 000000000000..4065bd033515
--- /dev/null
+++ b/mmv1/third_party/terraform/website/docs/d/artifact_registry_docker_image.html.markdown
@@ -0,0 +1,76 @@
+---
+subcategory: "Artifact Registry"
+description: |-
+  Get information about a Docker Image within a Google Artifact Registry Repository.
+---
+
+# google\_artifact\_registry\_docker\_image
+
+This data source fetches information from a provided Artifact Registry repository, including the fully qualified name and URI for an image, based on the latest version of the image name and an optional digest or tag.
+
+~> **Note**
+Requires one of the following OAuth scopes: `https://www.googleapis.com/auth/cloud-platform` or `https://www.googleapis.com/auth/cloud-platform.read-only`.
+
+## Example Usage
+
+```hcl
+resource "google_artifact_registry_repository" "my_repo" {
+  location      = "us-west1"
+  repository_id = "my-repository"
+  format        = "DOCKER"
+}
+
+data "google_artifact_registry_docker_image" "my_image" {
+  location      = google_artifact_registry_repository.my_repo.location
+  repository_id = google_artifact_registry_repository.my_repo.repository_id
+  image_name    = "my-image:my-tag"
+}
+
+resource "google_cloud_run_v2_service" "default" {
+  # ...
+
+  template {
+    containers {
+      image = data.google_artifact_registry_docker_image.my_image.self_link
+    }
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `location` - (Required) The location of the artifact registry.
+
+* `repository_id` - (Required) The last part of the repository name to fetch from.
+
+* `image_name` - (Required) The image name to fetch. If no digest or tag is provided, then the latest modified image will be used.
+
+* `project` - (Optional) The project ID in which the resource belongs. If it is not provided, the provider project is used.
+
+## Attributes Reference
+
+The following computed attributes are exported:
+
+* `name` - The fully qualified name of the fetched image. This name has the form: `projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}/dockerImages/{{docker_image}}`. For example,
+```
+projects/test-project/locations/us-west4/repositories/test-repo/dockerImages/nginx@sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf
+```
+
+* `self_link` - The URI to access the image. For example,
+```
+us-west4-docker.pkg.dev/test-project/test-repo/nginx@sha256:e9954c1fc875017be1c3e36eca16be2d9e9bccc4bf072163515467d6a823c7cf
+```
+
+* `tags` - A list of all tags associated with the image.
+
+* `image_size_bytes` - Calculated size of the image in bytes.
+
+* `media_type` - Media type of this image, e.g. `application/vnd.docker.distribution.manifest.v2+json`.
+
+* `upload_time` - The time, as an RFC 3339 string, the image was uploaded. For example, `2014-10-02T15:01:23.045123456Z`.
+
+* `build_time` - The time, as an RFC 3339 string, this image was built.
+
+* `update_time` - The time, as an RFC 3339 string, this image was updated.

From e463dfa4d6b6b1802f5ce9deb54f3868b15a0c9f Mon Sep 17 00:00:00 2001
From: Julio Castillo
Date: Fri, 14 Jun 2024 22:05:36 +0200
Subject: [PATCH 148/356] Add Service Networking VPC Service Controls resource.
(#10899) --- .../servicenetworking/VPCServiceControls.yaml | 107 ++++++++++++++++++ mmv1/products/servicenetworking/product.yaml | 22 ++++ ...ice_networking_vpc_service_controls.go.erb | 76 +++++++++++++ ...ice_networking_vpc_service_controls.go.erb | 1 + ...tworking_vpc_service_controls_basic.tf.erb | 28 +++++ ...ice_networking_vpc_service_controls.go.erb | 16 +++ .../terraform/acctest/bootstrap_test_utils.go | 2 +- .../terraform/fwmodels/provider_model.go.erb | 1 - .../fwprovider/framework_provider.go.erb | 8 +- .../terraform/provider/provider.go.erb | 1 - .../provider/provider_mmv1_resources.go.erb | 1 - ...le_service_networking_peered_dns_domain.go | 2 +- .../resource_service_networking_connection.go | 6 +- ...ce_networking_vpc_service_controls_test.go | 72 ++++++++++++ ....go => service_networking_operation_hw.go} | 8 +- .../terraform/transport/config.go.erb | 4 - 16 files changed, 332 insertions(+), 23 deletions(-) create mode 100644 mmv1/products/servicenetworking/VPCServiceControls.yaml create mode 100644 mmv1/products/servicenetworking/product.yaml create mode 100644 mmv1/templates/terraform/constants/service_networking_vpc_service_controls.go.erb create mode 100644 mmv1/templates/terraform/custom_create/service_networking_vpc_service_controls.go.erb create mode 100644 mmv1/templates/terraform/examples/service_networking_vpc_service_controls_basic.tf.erb create mode 100644 mmv1/templates/terraform/pre_read/service_networking_vpc_service_controls.go.erb create mode 100644 mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_vpc_service_controls_test.go rename mmv1/third_party/terraform/services/servicenetworking/{service_networking_operation.go => service_networking_operation_hw.go} (71%) diff --git a/mmv1/products/servicenetworking/VPCServiceControls.yaml b/mmv1/products/servicenetworking/VPCServiceControls.yaml new file mode 100644 index 000000000000..82939e2d720e --- /dev/null +++ 
b/mmv1/products/servicenetworking/VPCServiceControls.yaml @@ -0,0 +1,107 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'VPCServiceControls' +base_url: '' +skip_delete: true +exclude_tgc: true # excluding tgc because of helpers in custom_code.constants +id_format: 'services/{{service}}/projects/{{project}}/networks/{{network}}' +import_format: + - 'services/{{service}}/projects/{{project}}/networks/{{network}}' +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Private Google Access with VPC Service Controls': 'https://cloud.google.com/vpc-service-controls/docs/private-connectivity' + 'Set up private connectivity to Google APIs and services': 'https://cloud.google.com/vpc-service-controls/docs/set-up-private-connectivity' + 'Enable VPC Service Controls for service networking': 'https://cloud.google.com/sdk/gcloud/reference/services/vpc-peerings/enable-vpc-service-controls' + api: 'https://cloud.google.com/service-infrastructure/docs/service-networking/reference/rest/v1/services' +description: | + Manages the VPC Service Controls configuration for a service + networking connection + + When enabled, Google Cloud makes the following + route configuration changes in the service producer VPC network: + - Removes the IPv4 default route (destination 0.0.0.0/0, + next hop default internet gateway), Google Cloud then creates an + IPv4 route for destination 199.36.153.4/30 using the default 
+ internet gateway next hop. + - Creates Cloud DNS managed private zones and authorizes those zones + for the service producer VPC network. The zones include + googleapis.com, gcr.io, pkg.dev, notebooks.cloud.google.com, + kernels.googleusercontent.com, backupdr.cloud.google.com, and + backupdr.googleusercontent.com as necessary domains or host names + for Google APIs and services that are compatible with VPC Service + Controls. Record data in the zones resolves all host names to + 199.36.153.4, 199.36.153.5, 199.36.153.6, and 199.36.153.7. + + When disabled, Google Cloud makes the following route configuration + changes in the service producer VPC network: + - Restores a default route (destination 0.0.0.0/0, next hop default + internet gateway) + - Deletes the Cloud DNS managed private zones that provided the host + name overrides. +docs: !ruby/object:Provider::Terraform::Docs + note: | + Destroying a `google_service_networking_vpc_service_controls` + resource will remove it from state, but will not change the + underlying VPC Service Controls configuration for the service + producer network. 
+examples: + - !ruby/object:Provider::Terraform::Examples + name: "service_networking_vpc_service_controls_basic" + primary_resource_id: "default" + vars: + network_name: "example-network" + psa_range_name: "psa-range" +autogen_async: true +async: !ruby/object:Api::OpAsync + actions: [] # we just need the boilerplate async code, we'll call the methods manually + include_project: true + operation: !ruby/object:Api::OpAsync::Operation + base_url: '{{op_id}}' +custom_code: !ruby/object:Provider::Terraform::CustomCode + custom_create: templates/terraform/custom_create/service_networking_vpc_service_controls.go.erb + custom_update: templates/terraform/custom_create/service_networking_vpc_service_controls.go.erb + constants: templates/terraform/constants/service_networking_vpc_service_controls.go.erb + pre_read: templates/terraform/pre_read/service_networking_vpc_service_controls.go.erb +parameters: + - !ruby/object:Api::Type::String + name: 'network' + required: true + immutable: true + url_param_only: true + description: | + The network that the consumer is using to connect with services. + - !ruby/object:Api::Type::String + name: 'service' + required: true + url_param_only: true + immutable: true + description: | + The service that is managing peering connectivity for a service + producer's organization. For Google services that support this + functionality, this value is `servicenetworking.googleapis.com`. + - !ruby/object:Api::Type::String + name: 'project' + # required: true + immutable: true + ignore_read: true + description: |- + The id of the Google Cloud project containing the consumer network. +properties: + - !ruby/object:Api::Type::Boolean + name: 'enabled' + required: true + description: |- + Desired VPC Service Controls state service producer VPC network, as + described at the top of this page. 
diff --git a/mmv1/products/servicenetworking/product.yaml b/mmv1/products/servicenetworking/product.yaml new file mode 100644 index 000000000000..c79d864be72f --- /dev/null +++ b/mmv1/products/servicenetworking/product.yaml @@ -0,0 +1,22 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Product +name: ServiceNetworking +display_name: Service Networking +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://servicenetworking.googleapis.com/v1/ +scopes: + - https://www.googleapis.com/auth/cloud-platform diff --git a/mmv1/templates/terraform/constants/service_networking_vpc_service_controls.go.erb b/mmv1/templates/terraform/constants/service_networking_vpc_service_controls.go.erb new file mode 100644 index 000000000000..9133cfdfe634 --- /dev/null +++ b/mmv1/templates/terraform/constants/service_networking_vpc_service_controls.go.erb @@ -0,0 +1,76 @@ +func resourceServiceNetworkingVPCServiceControlsSet(d *schema.ResourceData, meta interface{}, config *transport_tpg.Config) error { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + projectNumber, err := getProjectNumber(d, config, project, userAgent) + if err != nil { + return err + } + + network := d.Get("network").(string) + enabled := d.Get("enabled").(bool) + + obj := 
make(map[string]interface{}) + obj["consumerNetwork"] = fmt.Sprintf("projects/%s/global/networks/%s", projectNumber, network) + + url, err := tpgresource.ReplaceVars(d, config, "{{ServiceNetworkingBasePath}}services/{{service}}") + if err != nil { + return err + } + + if enabled { + url = url + ":enableVpcServiceControls" + } else { + url = url + ":disableVpcServiceControls" + } + + log.Printf("[DEBUG] Setting service networking VPC service controls: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating VPCServiceControls: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "services/{{service}}/projects/{{project}}/networks/{{network}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ServiceNetworkingOperationWaitTime( + config, res, "Setting service networking VPC service controls", userAgent, project, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to set service networking VPC service controls: %s", err) + } + + log.Printf("[DEBUG] Finished setting service networking VPC service controls %q: %#v", d.Id(), res) + + return resourceServiceNetworkingVPCServiceControlsRead(d, meta) +} diff --git a/mmv1/templates/terraform/custom_create/service_networking_vpc_service_controls.go.erb b/mmv1/templates/terraform/custom_create/service_networking_vpc_service_controls.go.erb new file mode 100644 index 000000000000..6779a2b7246c 
--- /dev/null +++ b/mmv1/templates/terraform/custom_create/service_networking_vpc_service_controls.go.erb @@ -0,0 +1 @@ +return resourceServiceNetworkingVPCServiceControlsSet(d, meta, config) diff --git a/mmv1/templates/terraform/examples/service_networking_vpc_service_controls_basic.tf.erb b/mmv1/templates/terraform/examples/service_networking_vpc_service_controls_basic.tf.erb new file mode 100644 index 000000000000..3ab1b6f308a9 --- /dev/null +++ b/mmv1/templates/terraform/examples/service_networking_vpc_service_controls_basic.tf.erb @@ -0,0 +1,28 @@ +# Create a VPC +resource "google_compute_network" "default" { + name = "<%= ctx[:vars]['network_name'] %>" +} + +# Create an IP address +resource "google_compute_global_address" "default" { + name = "<%= ctx[:vars]['psa_range_name'] %>" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.default.id +} + +# Create a private connection +resource "google_service_networking_connection" "default" { + network = google_compute_network.default.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.default.name] +} + +# Enable VPC-SC on the producer network +resource "google_service_networking_vpc_service_controls" "<%= ctx[:primary_resource_id] %>" { + network = google_compute_network.default.name + service = "servicenetworking.googleapis.com" + enabled = true + depends_on = [google_service_networking_connection.default] +} diff --git a/mmv1/templates/terraform/pre_read/service_networking_vpc_service_controls.go.erb b/mmv1/templates/terraform/pre_read/service_networking_vpc_service_controls.go.erb new file mode 100644 index 000000000000..9d797f67b310 --- /dev/null +++ b/mmv1/templates/terraform/pre_read/service_networking_vpc_service_controls.go.erb @@ -0,0 +1,16 @@ +project, err := tpgresource.GetProject(d, config) +if err != nil { + return err +} +projectNumber, err := getProjectNumber(d, config, project, 
userAgent) +if err != nil { + return err +} + +service := d.Get("service").(string) +network := d.Get("network").(string) +parent := fmt.Sprintf("services/%s/projects/%s/global/networks/%s", service, projectNumber, network) +url, err = tpgresource.ReplaceVars(d, config, "{{ServiceNetworkingBasePath}}"+parent+"/vpcServiceControls") +if err != nil { + return err +} diff --git a/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go b/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go index 330f4bf2b5eb..b169e89456d3 100644 --- a/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go +++ b/mmv1/third_party/terraform/acctest/bootstrap_test_utils.go @@ -551,7 +551,7 @@ func BootstrapSharedServiceNetworkingConnection(t *testing.T, testId string, par } log.Printf("[DEBUG] Waiting for service networking connection creation to finish") - if err := tpgservicenetworking.ServiceNetworkingOperationWaitTime(config, op, "Create Service Networking Connection", config.UserAgent, projectId, 4*time.Minute); err != nil { + if err := tpgservicenetworking.ServiceNetworkingOperationWaitTimeHW(config, op, "Create Service Networking Connection", config.UserAgent, projectId, 4*time.Minute); err != nil { t.Fatalf("Error bootstrapping shared test service networking connection: %s", err) } } diff --git a/mmv1/third_party/terraform/fwmodels/provider_model.go.erb b/mmv1/third_party/terraform/fwmodels/provider_model.go.erb index 90da65b6f40f..aad2449e042f 100644 --- a/mmv1/third_party/terraform/fwmodels/provider_model.go.erb +++ b/mmv1/third_party/terraform/fwmodels/provider_model.go.erb @@ -41,7 +41,6 @@ type ProviderModel struct { RuntimeconfigCustomEndpoint types.String `tfsdk:"runtimeconfig_custom_endpoint"` <% end -%> IAMCustomEndpoint types.String `tfsdk:"iam_custom_endpoint"` - ServiceNetworkingCustomEndpoint types.String `tfsdk:"service_networking_custom_endpoint"` TagsLocationCustomEndpoint types.String `tfsdk:"tags_location_custom_endpoint"` // dcl diff --git 
a/mmv1/third_party/terraform/fwprovider/framework_provider.go.erb b/mmv1/third_party/terraform/fwprovider/framework_provider.go.erb index bb982c8ae97c..4ebbf6709f37 100644 --- a/mmv1/third_party/terraform/fwprovider/framework_provider.go.erb +++ b/mmv1/third_party/terraform/fwprovider/framework_provider.go.erb @@ -200,12 +200,6 @@ func (p *FrameworkProvider) Schema(_ context.Context, _ provider.SchemaRequest, transport_tpg.CustomEndpointValidator(), }, }, - "service_networking_custom_endpoint": &schema.StringAttribute{ - Optional: true, - Validators: []validator.String{ - transport_tpg.CustomEndpointValidator(), - }, - }, "tags_location_custom_endpoint": &schema.StringAttribute{ Optional: true, Validators: []validator.String{ @@ -299,4 +293,4 @@ func (p *FrameworkProvider) Functions(_ context.Context) []func() function.Funct functions.NewRegionFromZoneFunction, functions.NewZoneFromIdFunction, } -} \ No newline at end of file +} diff --git a/mmv1/third_party/terraform/provider/provider.go.erb b/mmv1/third_party/terraform/provider/provider.go.erb index 2400c070bc7d..59a6b338d90a 100644 --- a/mmv1/third_party/terraform/provider/provider.go.erb +++ b/mmv1/third_party/terraform/provider/provider.go.erb @@ -364,7 +364,6 @@ func ProviderConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.RuntimeConfigBasePath = d.Get(transport_tpg.RuntimeConfigCustomEndpointEntryKey).(string) <% end -%> config.IAMBasePath = d.Get(transport_tpg.IAMCustomEndpointEntryKey).(string) - config.ServiceNetworkingBasePath = d.Get(transport_tpg.ServiceNetworkingCustomEndpointEntryKey).(string) config.ServiceUsageBasePath = d.Get(transport_tpg.ServiceUsageCustomEndpointEntryKey).(string) config.BigtableAdminBasePath = d.Get(transport_tpg.BigtableAdminCustomEndpointEntryKey).(string) config.TagsLocationBasePath = d.Get(transport_tpg.TagsLocationCustomEndpointEntryKey).(string) diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb 
b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 8f56a941f61a..b8231d948fd0 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -15,7 +15,6 @@ import ( "github.com/hashicorp/terraform-provider-google/google/services/containeraws" "github.com/hashicorp/terraform-provider-google/google/services/containerazure" "github.com/hashicorp/terraform-provider-google/google/services/dataflow" - "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" ) diff --git a/mmv1/third_party/terraform/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go b/mmv1/third_party/terraform/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go index bb9aba314e34..9c93f55eb655 100644 --- a/mmv1/third_party/terraform/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go +++ b/mmv1/third_party/terraform/services/servicenetworking/resource_google_service_networking_peered_dns_domain.go @@ -135,7 +135,7 @@ func resourceGoogleServiceNetworkingPeeredDNSDomainCreate(d *schema.ResourceData return err } - if err := ServiceNetworkingOperationWaitTime(config, op, "Create Service Networking Peered DNS Domain", userAgent, project, d.Timeout(schema.TimeoutCreate)); err != nil { + if err := ServiceNetworkingOperationWaitTimeHW(config, op, "Create Service Networking Peered DNS Domain", userAgent, project, d.Timeout(schema.TimeoutCreate)); err != nil { return err } diff --git a/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_connection.go b/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_connection.go index bf4692e7af9c..2352ddda26bc 100644 --- 
a/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_connection.go +++ b/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_connection.go @@ -117,7 +117,7 @@ func resourceServiceNetworkingConnectionCreate(d *schema.ResourceData, meta inte return err } - if err := ServiceNetworkingOperationWaitTime(config, op, "Create Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutCreate)); err != nil { + if err := ServiceNetworkingOperationWaitTimeHW(config, op, "Create Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutCreate)); err != nil { return err } @@ -246,7 +246,7 @@ func resourceServiceNetworkingConnectionUpdate(d *schema.ResourceData, meta inte if err != nil { return err } - if err := ServiceNetworkingOperationWaitTime(config, op, "Update Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := ServiceNetworkingOperationWaitTimeHW(config, op, "Update Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutUpdate)); err != nil { return err } } @@ -297,7 +297,7 @@ func resourceServiceNetworkingConnectionDelete(d *schema.ResourceData, meta inte return err } - if err := ServiceNetworkingOperationWaitTime(config, op, "Delete Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutCreate)); err != nil { + if err := ServiceNetworkingOperationWaitTimeHW(config, op, "Delete Service Networking Connection", userAgent, project, d.Timeout(schema.TimeoutCreate)); err != nil { return errwrap.Wrapf("Unable to remove Service Networking Connection, err: {{err}}", err) } diff --git a/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_vpc_service_controls_test.go b/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_vpc_service_controls_test.go new file mode 100644 index 000000000000..7e1cf81b65fe --- /dev/null +++ 
b/mmv1/third_party/terraform/services/servicenetworking/resource_service_networking_vpc_service_controls_test.go @@ -0,0 +1,72 @@ +package servicenetworking_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccServiceNetworkingVPCServiceControls_update(t *testing.T) { + t.Parallel() + suffix := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceNetworkingVPCServiceControls_full(suffix, "true"), + }, + { + ResourceName: "google_service_networking_vpc_service_controls.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network", "project", "service"}, + }, + { + Config: testAccServiceNetworkingVPCServiceControls_full(suffix, "false"), + }, + { + ResourceName: "google_service_networking_vpc_service_controls.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network", "project", "service"}, + }, + }, + }) +} + +func testAccServiceNetworkingVPCServiceControls_full(suffix, enabled string) string { + return fmt.Sprintf(` +# Create a VPC +resource "google_compute_network" "default" { + name = "tf-test-example-network%s" +} + +# Create an IP address +resource "google_compute_global_address" "default" { + name = "tf-test-psa-range%s" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.default.id +} + +# Create a private connection +resource "google_service_networking_connection" "default" { + network = google_compute_network.default.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.default.name] +} + +# Enable VPC-SC on the producer network +resource 
"google_service_networking_vpc_service_controls" "default" { + network = google_compute_network.default.name + service = "servicenetworking.googleapis.com" + enabled = %s + depends_on = [google_service_networking_connection.default] +} +`, suffix, suffix, enabled) +} diff --git a/mmv1/third_party/terraform/services/servicenetworking/service_networking_operation.go b/mmv1/third_party/terraform/services/servicenetworking/service_networking_operation_hw.go similarity index 71% rename from mmv1/third_party/terraform/services/servicenetworking/service_networking_operation.go rename to mmv1/third_party/terraform/services/servicenetworking/service_networking_operation_hw.go index 516b60a79d50..89144132461f 100644 --- a/mmv1/third_party/terraform/services/servicenetworking/service_networking_operation.go +++ b/mmv1/third_party/terraform/services/servicenetworking/service_networking_operation_hw.go @@ -8,14 +8,14 @@ import ( "google.golang.org/api/servicenetworking/v1" ) -type ServiceNetworkingOperationWaiter struct { +type ServiceNetworkingOperationWaiterHW struct { Service *servicenetworking.APIService Project string UserProjectOverride bool tpgresource.CommonOperationWaiter } -func (w *ServiceNetworkingOperationWaiter) QueryOp() (interface{}, error) { +func (w *ServiceNetworkingOperationWaiterHW) QueryOp() (interface{}, error) { opGetCall := w.Service.Operations.Get(w.Op.Name) if w.UserProjectOverride { opGetCall.Header().Add("X-Goog-User-Project", w.Project) @@ -23,8 +23,8 @@ func (w *ServiceNetworkingOperationWaiter) QueryOp() (interface{}, error) { return opGetCall.Do() } -func ServiceNetworkingOperationWaitTime(config *transport_tpg.Config, op *servicenetworking.Operation, activity, userAgent, project string, timeout time.Duration) error { - w := &ServiceNetworkingOperationWaiter{ +func ServiceNetworkingOperationWaitTimeHW(config *transport_tpg.Config, op *servicenetworking.Operation, activity, userAgent, project string, timeout time.Duration) error { + w := 
&ServiceNetworkingOperationWaiterHW{ Service: config.NewServiceNetworkingClient(userAgent), Project: project, UserProjectOverride: config.UserProjectOverride, diff --git a/mmv1/third_party/terraform/transport/config.go.erb b/mmv1/third_party/terraform/transport/config.go.erb index e20f818b5545..821ff6cdef08 100644 --- a/mmv1/third_party/terraform/transport/config.go.erb +++ b/mmv1/third_party/terraform/transport/config.go.erb @@ -216,7 +216,6 @@ type Config struct { ResourceManagerV3BasePath string IAMBasePath string CloudIoTBasePath string - ServiceNetworkingBasePath string BigtableAdminBasePath string TagsLocationBasePath string @@ -237,7 +236,6 @@ const DataflowBasePathKey = "Dataflow" const IAMBasePathKey = "IAM" const IamCredentialsBasePathKey = "IamCredentials" const ResourceManagerV3BasePathKey = "ResourceManagerV3" -const ServiceNetworkingBasePathKey = "ServiceNetworking" const BigtableAdminBasePathKey = "BigtableAdmin" const ContainerAwsBasePathKey = "ContainerAws" const ContainerAzureBasePathKey = "ContainerAzure" @@ -258,7 +256,6 @@ var DefaultBasePaths = map[string]string{ IAMBasePathKey : "https://iam.googleapis.com/v1/", IamCredentialsBasePathKey : "https://iamcredentials.googleapis.com/v1/", ResourceManagerV3BasePathKey : "https://cloudresourcemanager.googleapis.com/v3/", - ServiceNetworkingBasePathKey : "https://servicenetworking.googleapis.com/v1/", BigtableAdminBasePathKey : "https://bigtableadmin.googleapis.com/v2/", ContainerAwsBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", ContainerAzureBasePathKey: "https://{{location}}-gkemulticloud.googleapis.com/v1/", @@ -1274,7 +1271,6 @@ func ConfigureBasePaths(c *Config) { c.IamCredentialsBasePath = DefaultBasePaths[IamCredentialsBasePathKey] c.ResourceManagerV3BasePath = DefaultBasePaths[ResourceManagerV3BasePathKey] c.IAMBasePath = DefaultBasePaths[IAMBasePathKey] - c.ServiceNetworkingBasePath = DefaultBasePaths[ServiceNetworkingBasePathKey] c.BigQueryBasePath = 
DefaultBasePaths[BigQueryBasePathKey] c.BigtableAdminBasePath = DefaultBasePaths[BigtableAdminBasePathKey] c.TagsLocationBasePath = DefaultBasePaths[TagsLocationBasePathKey] From cf775785f7d7e371c3019f89bc5b69e3e3bde6ab Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Fri, 14 Jun 2024 15:10:34 -0500 Subject: [PATCH 149/356] go rewrite - some compute and AD diffs (#10968) --- mmv1/api/resource.go | 54 +++++++++++++------ mmv1/api/resource/examples.go | 22 ++++---- mmv1/api/type.go | 13 +++++ mmv1/products/activedirectory/Domain.yaml | 4 +- .../products/activedirectory/DomainTrust.yaml | 4 +- mmv1/products/activedirectory/go_Domain.yaml | 41 ++++++++------ .../activedirectory/go_DomainTrust.yaml | 27 ++++++---- mmv1/products/activedirectory/go_Peering.yaml | 23 ++++---- mmv1/provider/template_data.go | 2 +- mmv1/provider/terraform.go | 24 +-------- mmv1/template-converter.go | 3 +- .../custom_flatten/go/default_if_empty.tmpl | 2 +- .../terraform/env_var_context.go.tmpl | 28 +++++----- .../base_configs/iam_test_file.go.tmpl | 27 +++++----- ...act_registry_repository_remote_yum.tf.tmpl | 6 +-- ...ng_budget_notify_project_recipient.tf.tmpl | 27 ++++++++++ .../terraform/expand_property_method.go.tmpl | 2 +- .../terraform/flatten_property_method.go.tmpl | 6 +-- mmv1/templates/terraform/nested_query.go.tmpl | 2 +- mmv1/templates/terraform/resource.go.tmpl | 31 ++++++----- .../terraform/schema_property.go.tmpl | 4 +- .../terraform/schema_subresource.go.tmpl | 4 +- .../unordered_list_customize_diff.go.tmpl | 4 +- mmv1/templates/terraform/update_mask.go.tmpl | 2 +- mmv1/templates/terraform/yaml_conversion.erb | 2 +- .../terraform/yaml_conversion_field.erb | 4 +- 26 files changed, 214 insertions(+), 154 deletions(-) create mode 100644 mmv1/templates/terraform/examples/go/billing_budget_notify_project_recipient.tf.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 3cfcdfc49508..f1726a744761 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ 
-321,8 +321,12 @@ func (r *Resource) UnmarshalYAML(n *yaml.Node) error { return err } - r.ApiName = r.Name - r.CollectionUrlKey = google.Camelize(google.Plural(r.Name), "lower") + if r.ApiName == "" { + r.ApiName = r.Name + } + if r.CollectionUrlKey == "" { + r.CollectionUrlKey = google.Camelize(google.Plural(r.Name), "lower") + } return nil } @@ -849,12 +853,26 @@ func (r Resource) IncludeProjectForOperation() bool { // def region? func (r Resource) HasRegion() bool { - return strings.Contains(r.BaseUrl, "{{region}}") || strings.Contains(r.CreateUrl, "{{region}}") + found := false + for _, p := range r.Parameters { + if p.Name == "region" && p.IgnoreRead { + found = true + break + } + } + return found && strings.Contains(r.BaseUrl, "{{region}}") } // def zone? func (r Resource) HasZone() bool { - return strings.Contains(r.BaseUrl, "{{zone}}") || strings.Contains(r.CreateUrl, "{{zone}}") + found := false + for _, p := range r.Parameters { + if p.Name == "zone" && p.IgnoreRead { + found = true + break + } + } + return found && strings.Contains(r.BaseUrl, "{{zone}}") } // resource functions needed for template that previously existed in terraform.go but due to how files are being inherited here it was easier to put in here @@ -1124,7 +1142,7 @@ func (r Resource) ImportIdRegexesFromIam() string { transformed = append(transformed, s) } - return strings.Join(transformed[:], "\", \"") + return strings.Join(slices.Compact(transformed[:]), "\", \"") } // For example, "projects/{{project}}/schemas/{{name}}", "{{project}}/{{name}}", "{{name}}" @@ -1380,9 +1398,9 @@ func (r Resource) FirstIdentityProp() *Type { } type UpdateGroup struct { - UpdateUrl string - UpdateVerb string - UpdateId string + UpdateUrl string + UpdateVerb string + UpdateId string FingerprintName string } @@ -1397,9 +1415,9 @@ func (r Resource) PropertiesByCustomUpdate() map[UpdateGroup][]*Type { customUpdateProps := r.propertiesWithCustomUpdate(r.RootProperties()) groupedCustomUpdateProps := 
map[UpdateGroup][]*Type{} for _, prop := range customUpdateProps { - groupedProperty := UpdateGroup{ UpdateUrl: prop.UpdateUrl, - UpdateVerb: prop.UpdateVerb, - UpdateId: prop.UpdateId, + groupedProperty := UpdateGroup{UpdateUrl: prop.UpdateUrl, + UpdateVerb: prop.UpdateVerb, + UpdateId: prop.UpdateId, FingerprintName: prop.FingerprintName} groupedCustomUpdateProps[groupedProperty] = append(groupedCustomUpdateProps[groupedProperty], prop) } @@ -1412,17 +1430,21 @@ func (r Resource) FieldSpecificUpdateMethods() bool { func (r Resource) CustomUpdatePropertiesByKey(updateUrl string, updateId string, fingerprintName string, updateVerb string) []*Type { groupedProperties := r.PropertiesByCustomUpdate() - groupedProperty := UpdateGroup{ UpdateUrl: updateUrl, - UpdateVerb: updateVerb, - UpdateId: updateId, - FingerprintName: fingerprintName} + groupedProperty := UpdateGroup{UpdateUrl: updateUrl, + UpdateVerb: updateVerb, + UpdateId: updateId, + FingerprintName: fingerprintName} return groupedProperties[groupedProperty] } -func (r Resource) PropertyNamesToStrings (properties []*Type) []string{ +func (r Resource) PropertyNamesToStrings(properties []*Type) []string { var propertyNames []string for _, prop := range properties { propertyNames = append(propertyNames, google.Underscore(prop.Name)) } return propertyNames } + +func (r Resource) IsExcluded() bool { + return r.Exclude || r.ExcludeResource +} diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index f41a02d98357..31d59450e64b 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -179,8 +179,8 @@ func (e *Examples) UnmarshalYAML(n *yaml.Node) error { // Executes example templates for documentation and tests func (e *Examples) SetHCLText() { - docCopy := e - testCopy := e + docVars := make(map[string]string) + testVars := e.TestEnvVars docs_defaults := map[string]string{ "PROJECT_NAME": "my-project-name", "CREDENTIALS": "my/credentials/filename.json", @@ -197,15 
+197,17 @@ func (e *Examples) SetHCLText() { } // Apply doc defaults to test_env_vars from YAML - for key := range docCopy.TestEnvVars { - docCopy.TestEnvVars[key] = docs_defaults[docCopy.TestEnvVars[key]] + for key := range e.TestEnvVars { + docVars[key] = docs_defaults[e.TestEnvVars[key]] } - e.DocumentationHCLText = ExecuteTemplate(docCopy, docCopy.ConfigPath, true) + e.TestEnvVars = docVars + e.DocumentationHCLText = ExecuteTemplate(e, e.ConfigPath, true) + e.TestEnvVars = testVars // Override vars to inject test values into configs - will have // - "a-example-var-value%{random_suffix}"" // - "%{my_var}" for overrides that have custom Golang values - for key, value := range testCopy.Vars { + for key, value := range e.Vars { var newVal string if strings.Contains(value, "-") { newVal = fmt.Sprintf("tf-test-%s", value) @@ -219,15 +221,15 @@ func (e *Examples) SetHCLText() { if len(newVal) > 54 { newVal = newVal[:54] } - testCopy.Vars[key] = fmt.Sprintf("%s%%{random_suffix}", newVal) + e.Vars[key] = fmt.Sprintf("%s%%{random_suffix}", newVal) } // Apply overrides from YAML - for key := range testCopy.TestVarsOverrides { - testCopy.Vars[key] = fmt.Sprintf("%%{%s}", key) + for key := range e.TestVarsOverrides { + e.Vars[key] = fmt.Sprintf("%%{%s}", key) } - e.TestHCLText = ExecuteTemplate(testCopy, testCopy.ConfigPath, true) + e.TestHCLText = ExecuteTemplate(e, e.ConfigPath, true) } func ExecuteTemplate(e any, templatePath string, appendNewline bool) string { diff --git a/mmv1/api/type.go b/mmv1/api/type.go index 67b110c936c4..7e125f55f44b 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -1323,3 +1323,16 @@ func (t *Type) GoLiteral(value interface{}) string { panic(fmt.Errorf("unknown go literal type %+v", value)) } } + +// def force_new?(property, resource) +func (t *Type) IsForceNew() bool { + parent := t.Parent() + return (((!t.Output || t.IsA("KeyValueEffectiveLabels")) && + (t.Immutable || + (t.ResourceMetadata.Immutable && t.UpdateUrl == "" && 
!t.Immutable && + (parent == nil || + (parent.IsForceNew() && + !(parent.FlattenObject && t.IsA("KeyValueLabels"))))))) || + (t.IsA("KeyValueTerraformLabels") && + t.ResourceMetadata.Updatable() && !t.ResourceMetadata.RootLabels())) +} diff --git a/mmv1/products/activedirectory/Domain.yaml b/mmv1/products/activedirectory/Domain.yaml index 7ca646ef3565..b9aaf2e7f287 100644 --- a/mmv1/products/activedirectory/Domain.yaml +++ b/mmv1/products/activedirectory/Domain.yaml @@ -74,8 +74,8 @@ parameters: url_param_only: true immutable: true description: | - The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, - https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains. + The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions + of https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains. validation: !ruby/object:Provider::Terraform::Validation function: 'verify.ValidateADDomainName()' properties: diff --git a/mmv1/products/activedirectory/DomainTrust.yaml b/mmv1/products/activedirectory/DomainTrust.yaml index e29652b1f4c1..60dcfd8d5a7d 100644 --- a/mmv1/products/activedirectory/DomainTrust.yaml +++ b/mmv1/products/activedirectory/DomainTrust.yaml @@ -78,8 +78,8 @@ parameters: url_param_only: true immutable: true description: | - The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, - https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains. + The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions + of https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains. 
properties: - !ruby/object:Api::Type::String name: 'targetDomainName' diff --git a/mmv1/products/activedirectory/go_Domain.yaml b/mmv1/products/activedirectory/go_Domain.yaml index 71935c55730c..6b385c90e0ad 100644 --- a/mmv1/products/activedirectory/go_Domain.yaml +++ b/mmv1/products/activedirectory/go_Domain.yaml @@ -15,8 +15,7 @@ --- name: 'Domain' kind: 'activedirectory#domain' -description: | - Creates a Microsoft AD domain +description: Creates a Microsoft AD domain references: guides: 'Managed Microsoft Active Directory Quickstart': 'https://cloud.google.com/managed-microsoft-ad/docs/quickstarts' @@ -53,7 +52,7 @@ async: path: 'error' message: 'message' custom_code: - custom_import: 'templates/terraform/custom_import/self_link_as_name.erb' + custom_import: 'templates/terraform/custom_import/go/self_link_as_name.tmpl' error_abort_predicates: - 'transport_tpg.Is429QuotaError' @@ -67,8 +66,9 @@ examples: parameters: - name: 'domainName' type: String - description: "The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, -https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains." + description: | + The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions + of https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains. url_param_only: true required: true immutable: true @@ -77,40 +77,47 @@ https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locatio properties: - name: 'name' type: String - description: "The unique name of the domain using the format: `projects/{project}/locations/global/domains/{domainName}`." + description: + 'The unique name of the domain using the format: + `projects/{project}/locations/global/domains/{domainName}`.' 
output: true - name: 'labels' type: KeyValueLabels - description: "Resource labels that can contain user-provided metadata" + description: 'Resource labels that can contain user-provided metadata' immutable: false - name: 'authorizedNetworks' type: Array - description: "The full names of the Google Compute Engine networks the domain instance is connected to. The domain is only available on networks listed in authorizedNetworks. -If CIDR subnets overlap between networks, domain creation will fail." + description: | + The full names of the Google Compute Engine networks the domain instance is connected to. The domain is only available on networks listed in authorizedNetworks. + If CIDR subnets overlap between networks, domain creation will fail. is_set: true item_type: type: String - name: 'reservedIpRange' type: String - description: "The CIDR range of internal addresses that are reserved for this domain. Reserved networks must be /24 or larger. -Ranges must be unique and non-overlapping with existing subnets in authorizedNetworks" + description: | + The CIDR range of internal addresses that are reserved for this domain. Reserved networks must be /24 or larger. + Ranges must be unique and non-overlapping with existing subnets in authorizedNetworks required: true immutable: true - name: 'locations' type: Array - description: "Locations where domain needs to be provisioned. [regions][compute/docs/regions-zones/] -e.g. us-west1 or us-east4 Service supports up to 4 locations at once. Each location will use a /26 block." + description: | + Locations where domain needs to be provisioned. [regions][compute/docs/regions-zones/] + e.g. us-west1 or us-east4 Service supports up to 4 locations at once. Each location will use a /26 block. required: true item_type: type: String - name: 'admin' type: String - description: "The name of delegated administrator account used to perform Active Directory operations. -If not specified, setupadmin will be used." 
+ description: | + The name of delegated administrator account used to perform Active Directory operations. + If not specified, setupadmin will be used. immutable: true default_value: setupadmin - name: 'fqdn' type: String - description: "The fully-qualified domain name of the exposed domain used by clients to connect to the service. -Similar to what would be chosen for an Active Directory set up on an internal network." + description: | + The fully-qualified domain name of the exposed domain used by clients to connect to the service. + Similar to what would be chosen for an Active Directory set up on an internal network. output: true diff --git a/mmv1/products/activedirectory/go_DomainTrust.yaml b/mmv1/products/activedirectory/go_DomainTrust.yaml index 9b69dd0caa28..5540fc424a36 100644 --- a/mmv1/products/activedirectory/go_DomainTrust.yaml +++ b/mmv1/products/activedirectory/go_DomainTrust.yaml @@ -15,8 +15,7 @@ --- name: 'DomainTrust' kind: 'activedirectory#trust' -description: | - Adds a trust between Active Directory domains +description: Adds a trust between Active Directory domains references: guides: 'Active Directory Trust': 'https://cloud.google.com/managed-microsoft-ad/docs/create-one-way-trust' @@ -68,19 +67,22 @@ examples: parameters: - name: 'domain' type: String - description: "The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions, -https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains." + description: | + The fully qualified domain name. e.g. mydomain.myorganization.com, with the restrictions + of https://cloud.google.com/managed-microsoft-ad/reference/rest/v1/projects.locations.global.domains. url_param_only: true required: true immutable: true properties: - name: 'targetDomainName' type: String - description: "The fully qualified target domain name which will be in trust with the current domain." 
+ description: + 'The fully qualified target domain name which will be in trust with the + current domain.' required: true - name: 'trustType' type: Enum - description: "The type of trust represented by the trust resource." + description: 'The type of trust represented by the trust resource.' required: true immutable: true enum_values: @@ -88,7 +90,9 @@ properties: - 'EXTERNAL' - name: 'trustDirection' type: Enum - description: "The trust direction, which decides if the current domain is trusted, trusting, or both." + description: + 'The trust direction, which decides if the current domain is trusted, + trusting, or both.' required: true immutable: true enum_values: @@ -97,18 +101,21 @@ properties: - 'BIDIRECTIONAL' - name: 'selectiveAuthentication' type: Boolean - description: "Whether the trusted side has forest/domain wide access or selective access to an approved set of resources." + description: | + Whether the trusted side has forest/domain wide access or selective access to an approved set of resources. immutable: true - name: 'targetDnsIpAddresses' type: Array - description: "The target DNS server IP addresses which can resolve the remote domain involved in the trust." + description: | + The target DNS server IP addresses which can resolve the remote domain involved in the trust. is_set: true required: true item_type: type: String - name: 'trustHandshakeSecret' type: String - description: "The trust secret used for the handshake with the target domain. This will not be stored." + description: | + The trust secret used for the handshake with the target domain. This will not be stored. 
required: true immutable: true ignore_read: true diff --git a/mmv1/products/activedirectory/go_Peering.yaml b/mmv1/products/activedirectory/go_Peering.yaml index bc798f5bf0f0..97a81343d131 100644 --- a/mmv1/products/activedirectory/go_Peering.yaml +++ b/mmv1/products/activedirectory/go_Peering.yaml @@ -15,8 +15,7 @@ --- name: 'Peering' kind: 'activedirectory#peering' -description: | - Creates a Peering for Managed AD instance. +description: Creates a Peering for Managed AD instance. min_version: 'beta' references: guides: @@ -63,7 +62,7 @@ examples: parameters: - name: 'peeringId' type: String - description: "" + description: '' min_version: 'beta' url_param_only: true required: true @@ -71,34 +70,40 @@ parameters: properties: - name: 'name' type: String - description: "Unique name of the peering in this scope including projects and location using the form: projects/{projectId}/locations/global/peerings/{peeringId}." + description: | + Unique name of the peering in this scope including projects and location using the form: projects/{projectId}/locations/global/peerings/{peeringId}. min_version: 'beta' output: true - name: 'labels' type: KeyValueLabels - description: "Resource labels that can contain user-provided metadata" + description: 'Resource labels that can contain user-provided metadata' min_version: 'beta' immutable: false - name: 'authorizedNetwork' type: String - description: "The full names of the Google Compute Engine networks to which the instance is connected. Caller needs to make sure that CIDR subnets do not overlap between networks, else peering creation will fail." + description: | + The full names of the Google Compute Engine networks to which the instance is connected. Caller needs to make sure that CIDR subnets do not overlap between networks, else peering creation will fail. 
min_version: 'beta' required: true immutable: true - name: 'domainResource' type: String - description: "Full domain resource path for the Managed AD Domain involved in peering. The resource path should be in the form projects/{projectId}/locations/global/domains/{domainName}" + description: | + Full domain resource path for the Managed AD Domain involved in peering. The resource path should be in the form projects/{projectId}/locations/global/domains/{domainName} min_version: 'beta' required: true immutable: true - name: 'status' type: String - description: "The current state of this Peering." + description: | + The current state of this Peering. min_version: 'beta' url_param_only: true ignore_read: true - name: 'statusMessage' type: String - description: "Additional information about the current status of this peering, if available." + description: | + Additional information about the current status of this peering, if available. + min_version: 'beta' ignore_read: true diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index dfbab55293d6..cf8aa1898400 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -171,7 +171,7 @@ func (td *TemplateData) GenerateSweeperFile(filePath string, resource api.Resour } func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, goFormat bool, templates ...string) { - log.Printf("Generating %s", filePath) + // log.Printf("Generating %s", filePath) templateFileName := filepath.Base(templatePath) diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index fb20d03e2d7b..4e22602653d7 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -99,7 +99,7 @@ func (t *Terraform) GenerateObjects(outputFolder string, generateCode, generateD func (t *Terraform) GenerateObject(object api.Resource, outputFolder, productPath string, generateCode, generateDocs bool) { templateData := NewTemplateData(outputFolder, t.Version) - if 
!object.ExcludeResource { + if !object.IsExcluded() { log.Printf("Generating %s resource", object.Name) t.GenerateResource(object, *templateData, outputFolder, generateCode, generateDocs) @@ -826,26 +826,6 @@ func (t Terraform) ImportPathFromVersion(v string) string { // // end // -// def force_new?(property, resource) -// -// ( -// (!property.output || property.is_a?(Api::Type::KeyValueEffectiveLabels)) && -// (property.immutable || -// (resource.immutable && property.update_url.nil? && property.immutable.nil? && -// (property.parent.nil? || -// (force_new?(property.parent, resource) && -// !(property.parent.flatten_object && property.is_a?(Api::Type::KeyValueLabels)) -// ) -// ) -// ) -// ) -// ) || -// (property.is_a?(Api::Type::KeyValueTerraformLabels) && -// !updatable?(resource, resource.all_user_properties) && !resource.root_labels? -// ) -// -// end -// // # Returns tuples of (fieldName, list of update masks) for // # top-level updatable fields. Schema path refers to a given Terraform // # field name (e.g. 
d.GetChange('fieldName)') @@ -931,7 +911,7 @@ func (t *Terraform) generateResourcesForVersion(products []map[string]interface{ var resourceName string - if !object.ExcludeResource { + if !object.IsExcluded() { t.ResourceCount++ resourceName = fmt.Sprintf("%s.Resource%s", service, object.ResourceName()) } diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index d336f47629b2..e423c017745e 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -254,7 +254,7 @@ func convertTemplate(folder string) int { if err != nil { log.Fatalf("Cannot compile the regular expression: %v", err) } - data = r.ReplaceAll(data, []byte(`{{$.DefaultValue}}`)) + data = r.ReplaceAll(data, []byte(`{{$.GoLiteral $.DefaultValue}}`)) // Replace <%= build_expand_resource_ref('v.(string)', property, pwd) %> r, err = regexp.Compile(`<%= build_expand_resource_ref\('v\.\(string\)', property, pwd\) %>`) @@ -355,6 +355,7 @@ func checkExceptionList(filePath string) bool { "custom_flatten/bigquery_table_ref_copy_destinationtable.go", "custom_flatten/bigquery_table_ref_extract_sourcetable.go", "custom_flatten/bigquery_table_ref_query_destinationtable.go", + "unordered_list_customize_diff", } for _, t := range exceptionPaths { diff --git a/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl b/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl index 4f06178aa9f6..55dfa218c445 100644 --- a/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl @@ -12,7 +12,7 @@ */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { - return {{$.DefaultValue}} + return {{$.GoLiteral $.DefaultValue}} } {{- if $.IsA "Integer" }} // Handles the string fixed64 format diff --git a/mmv1/templates/terraform/env_var_context.go.tmpl 
b/mmv1/templates/terraform/env_var_context.go.tmpl index 9cf8a91260f6..41f63c5245e6 100644 --- a/mmv1/templates/terraform/env_var_context.go.tmpl +++ b/mmv1/templates/terraform/env_var_context.go.tmpl @@ -1,32 +1,32 @@ {{- define "EnvVarContext" }} {{- range $varKey, $varVal := $.TestEnvVars }} - {{- if eq $varVal $.ORGID }} + {{- if eq $varVal "ORG_ID" }} "{{$varKey}}": envvar.GetTestOrgFromEnv(t), - {{- else if eq $varVal $.ORG_DOMAIN }} + {{- else if eq $varVal "ORG_DOMAIN" }} "{{$varKey}}": envvar.GetTestOrgDomainFromEnv(t), - {{- else if eq $varVal $.CREDENTIALS }} + {{- else if eq $varVal "CREDENTIALS" }} "{{$varKey}}": envvar.GetTestCredsFromEnv(t), - {{- else if eq $varVal $.REGION }} + {{- else if eq $varVal "REGION" }} "{{$varKey}}": envvar.GetTestRegionFromEnv(), - {{- else if eq $varVal $.ORG_TARGET }} + {{- else if eq $varVal "ORG_TARGET" }} "{{$varKey}}": envvar.GetTestOrgTargetFromEnv(t), - {{- else if eq $varVal $.BILLING_ACCT }} + {{- else if eq $varVal "BILLING_ACCT" }} "{{$varKey}}": envvar.GetTestBillingAccountFromEnv(t), - {{- else if eq $varVal $.MASTER_BILLING_ACCT }} + {{- else if eq $varVal "MASTER_BILLING_ACCT" }} "{{$varKey}}": envvar.GetTestMasterBillingAccountFromEnv(t), - {{- else if eq $varVal $.SERVICE_ACCT }} + {{- else if eq $varVal "SERVICE_ACCT" }} "{{$varKey}}": envvar.GetTestServiceAccountFromEnv(t), - {{- else if eq $varVal $.PROJECT_NAME }} + {{- else if eq $varVal "PROJECT_NAME" }} "{{$varKey}}": envvar.GetTestProjectFromEnv(), - {{- else if eq $varVal $.PROJECT_NUMBER }} + {{- else if eq $varVal "PROJECT_NUMBER" }} "{{$varKey}}": envvar.GetTestProjectNumberFromEnv(), - {{- else if eq $varVal $.CUST_ID }} + {{- else if eq $varVal "CUST_ID" }} "{{$varKey}}": envvar.GetTestCustIdFromEnv(t), - {{- else if eq $varVal $.IDENTITY_USER }} + {{- else if eq $varVal "IDENTITY_USER" }} "{{$varKey}}": envvar.GetTestIdentityUserFromEnv(t), - {{- else if eq $varVal $.PAP_DESCRIPTION }} + {{- else if eq $varVal "PAP_DESCRIPTION" }} 
"{{$varKey}}": envvar.GetTestPublicAdvertisedPrefixDescriptionFromEnv(t), - {{- else if eq $varVal $.ZONE }} + {{- else if eq $varVal "ZONE" }} "{{$varKey}}": envvar.GetTestZoneFromEnv(), {{- end }} {{- end }} diff --git a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl index 80e05c9afce9..2af462ec3716 100644 --- a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl +++ b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl @@ -164,8 +164,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated(t *testing.T) { }, }) } - -{{- if $.IamPolicy.IamConditionsRequestType }} +{{ if $.IamPolicy.IamConditionsRequestType }} func TestAcc{{ $.ResourceName }}IamBindingGenerated_withCondition(t *testing.T) { t.Parallel() {{ template "IamContext" $ }} @@ -191,7 +190,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated_withCondition(t *testing.T) {{- if not $.IamPolicy.SkipImportTest }} { ResourceName: "{{ $.IamTerraformName }}_binding.foo", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }} %s", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), ImportState: true, ImportStateVerify: true, }, @@ -233,13 +232,13 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated_withAndWithoutCondition(t *t }, { ResourceName: "{{ $.IamTerraformName }}_binding.foo2", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }} %s", {{ $.IamImportQualifiersForTest }}, 
{{ $example.PrimaryResourceName }}, context["condition_title"]), ImportState: true, ImportStateVerify: true, }, { ResourceName: "{{ $.IamTerraformName }}_binding.foo3", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title_no_desc"]), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }} %s", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title_no_desc"]), ImportState: true, ImportStateVerify: true, }, @@ -250,6 +249,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated_withAndWithoutCondition(t *t func TestAcc{{ $.ResourceName }}IamMemberGenerated_withCondition(t *testing.T) { t.Parallel() + {{ template "IamContext" $ }} acctest.VcrTest(t, resource.TestCase{ @@ -273,7 +273,7 @@ func TestAcc{{ $.ResourceName }}IamMemberGenerated_withCondition(t *testing.T) { {{- if not $.IamPolicy.SkipImportTest }} { ResourceName: "{{ $.IamTerraformName }}_member.foo", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com %s", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), ImportState: true, ImportStateVerify: true, }, @@ -309,19 +309,19 @@ func TestAcc{{ $.ResourceName }}IamMemberGenerated_withAndWithoutCondition(t *te {{- if not $.IamPolicy.SkipImportTest }} { ResourceName: "{{ $.IamTerraformName }}_member.foo", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ 
$.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), ImportState: true, ImportStateVerify: true, }, { ResourceName: "{{ $.IamTerraformName }}_member.foo2", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com %s", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title"]), ImportState: true, ImportStateVerify: true, }, { ResourceName: "{{ $.IamTerraformName }}_member.foo3", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title_no_desc"]), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com %s", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}, context["condition_title_no_desc"]), ImportState: true, ImportStateVerify: true, }, @@ -332,8 +332,7 @@ func TestAcc{{ $.ResourceName }}IamMemberGenerated_withAndWithoutCondition(t *te func TestAcc{{ $.ResourceName }}IamPolicyGenerated_withCondition(t *testing.T) { t.Parallel() - -{{- if $.IamPolicy.AdminIamRole }} +{{ if $.IamPolicy.AdminIamRole }} // This may skip test, so do it first sa := envvar.GetTestServiceAccountFromEnv(t) {{- end }} @@ -341,8 +340,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated_withCondition(t *testing.T) { {{- if $.IamPolicy.AdminIamRole }} context["service_account"] = sa {{- end }} - -{{- if $.IamPolicy.AdminIamRole }} +{{ if not $.IamPolicy.AdminIamRole }} // Test should have 2 bindings: one with a description and one without. Any < chars are converted to a unicode character by the API. 
expectedPolicyData := acctest.Nprintf(`{"bindings":[{"condition":{"description":"%{condition_desc}","expression":"%{condition_expr}","title":"%{condition_title}"},"members":["user:admin@hashicorptest.com"],"role":"%{role}"},{"condition":{"expression":"%{condition_expr}","title":"%{condition_title}-no-description"},"members":["user:admin@hashicorptest.com"],"role":"%{role}"}]}`, context) {{- else }} @@ -487,8 +485,7 @@ resource "{{ $.IamTerraformName }}_binding" "foo" { } `, context) } - -{{- if $.IamPolicy.IamConditionsRequestType }} +{{ if $.IamPolicy.IamConditionsRequestType }} func testAcc{{ $.ResourceName }}IamBinding_withConditionGenerated(context map[string]interface{}) string { return acctest.Nprintf(` {{ $example.TestHCLText }} diff --git a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl index 543b4b1604fb..b8ac1ab35a0a 100644 --- a/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/artifact_registry_repository_remote_yum.tf.tmpl @@ -5,11 +5,11 @@ resource "google_artifact_registry_repository" "{{$.PrimaryResourceId}}" { format = "YUM" mode = "REMOTE_REPOSITORY" remote_repository_config { - description = "Centos 8 remote repository" + description = "Rocky 9 remote repository" yum_repository { public_repository { - repository_base = "CENTOS" - repository_path = "centos/8-stream/BaseOS/x86_64/os" + repository_base = "ROCKY" + repository_path = "pub/rocky/9/BaseOS/x86_64/os" } } } diff --git a/mmv1/templates/terraform/examples/go/billing_budget_notify_project_recipient.tf.tmpl b/mmv1/templates/terraform/examples/go/billing_budget_notify_project_recipient.tf.tmpl new file mode 100644 index 000000000000..88251760bc65 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/billing_budget_notify_project_recipient.tf.tmpl @@ -0,0 +1,27 @@ +data 
"google_billing_account" "account" { + billing_account = "{{index $.TestEnvVars "billing_acct"}}" +} + +data "google_project" "project" { +} + +resource "google_billing_budget" "{{$.PrimaryResourceId}}" { + billing_account = data.google_billing_account.account.id + display_name = "{{index $.Vars "budget_name"}}" + + budget_filter { + projects = ["projects/${data.google_project.project.number}"] + } + + amount { + specified_amount { + currency_code = "USD" + units = "100000" + } + } + + all_updates_rule { + monitoring_notification_channels = [] + enable_project_level_recipients = true + } +} diff --git a/mmv1/templates/terraform/expand_property_method.go.tmpl b/mmv1/templates/terraform/expand_property_method.go.tmpl index 63acf1b35813..46ed73dc3b99 100644 --- a/mmv1/templates/terraform/expand_property_method.go.tmpl +++ b/mmv1/templates/terraform/expand_property_method.go.tmpl @@ -65,7 +65,7 @@ func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.T func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { transformed := make(map[string]interface{}) {{- range $prop := $.NestedProperties }} - {{- if not (and (hasPrefix $.prop.Type "KeyValue") $prop.IgnoreWrite) }} + {{- if not (and (hasPrefix $prop.Type "KeyValue") $prop.IgnoreWrite) }} transformed{{$prop.TitlelizeProperty}}, err := expand{{$.GetPrefix}}{{$.TitlelizeProperty}}{{$prop.TitlelizeProperty}}({{ if $prop.FlattenObject }}nil{{ else }}d.Get("{{ underscore $prop.Name }}"), d, config) if err != nil { return nil, err diff --git a/mmv1/templates/terraform/flatten_property_method.go.tmpl b/mmv1/templates/terraform/flatten_property_method.go.tmpl index f7b1ecf26eb8..4976f5747529 100644 --- a/mmv1/templates/terraform/flatten_property_method.go.tmpl +++ b/mmv1/templates/terraform/flatten_property_method.go.tmpl @@ -15,7 +15,7 @@ {{- define "flattenPropertyMethod" }} {{- if $.CustomFlatten }} {{- 
$.CustomTemplate $.CustomFlatten true -}} -{{- else }} +{{ else }} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { {{- if $.IsA "NestedObject" }} if v == nil { @@ -122,7 +122,7 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso } return v // let terraform core handle it otherwise - {{- else if and ($.IsA "Integer") ($.ItemType.IsA "ResourceRef")}} + {{- else if and ($.IsA "Array") ($.ItemType.IsA "ResourceRef")}} if v == nil { return v } @@ -147,7 +147,7 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso } {{- if $.NestedProperties }} {{- range $prop := $.NestedProperties }} - {{- template "flattenPropertyMethod" $prop -}} + {{ template "flattenPropertyMethod" $prop -}} {{- end }} {{- end }} {{- end }} diff --git a/mmv1/templates/terraform/nested_query.go.tmpl b/mmv1/templates/terraform/nested_query.go.tmpl index 6a05bb981b8a..d44729050971 100644 --- a/mmv1/templates/terraform/nested_query.go.tmpl +++ b/mmv1/templates/terraform/nested_query.go.tmpl @@ -56,7 +56,7 @@ func resource{{ $.ResourceName }}FindNestedObjectInList(d *schema.ResourceData, {{- if $.NestedQuery.IsListOfIds }} // List response only contains the ID - construct a response object. 
item := map[string]interface{}{ - "{{ $.FirstIdentity.ApiName }}": itemRaw, + "{{ $.FirstIdentityProp.ApiName }}": itemRaw, } {{- else }} item := itemRaw.(map[string]interface{}) diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 2d014e226cb5..cd6475b97068 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -138,15 +138,15 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { {{- if $.VirtualFields -}} {{- range $field := $.VirtualFields }} "{{ $field.Name -}}": { - Type: schema.{{ $field.ItemType -}}, + Type: schema.{{ $field.Type -}}, Optional: true, {{ if $field.Immutable -}} ForceNew: true, {{- end}} {{ if $field.DefaultValue -}} - Default: {{ $field.GoLiteral $field.Default_Value -}}, + Default: {{ $field.GoLiteral $field.DefaultValue -}}, {{- end}} - Description: `{{ $field.Description -}}`, + Description: `{{ replace $field.GetDescription "`" "'" -1 -}}`, }, {{- end}} {{- end}} @@ -198,7 +198,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ obj := make(map[string]interface{}) {{- range $prop := $.SettableProperties }} - {{ $prop.ApiName -}}Prop, err := expand{{ if $.NestedQuery -}}Nested{{ end }}{{ $.ResourceName -}}{{ camelize $prop.Name "upper" -}}({{ if $prop.FlattenObject }}nil{{ else }}d.Get("{{ underscore $prop.Name }}{{ end }}"), d, config) + {{ $prop.ApiName -}}Prop, err := expand{{ if $.NestedQuery -}}Nested{{ end }}{{ $.ResourceName -}}{{ camelize $prop.Name "upper" -}}({{ if $prop.FlattenObject }}nil{{ else }}d.Get("{{ underscore $prop.Name }}"){{ end }}, d, config) if err != nil { return err {{- if $prop.SendEmptyValue -}} @@ -361,7 +361,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{- end}} {{- end}} {{- range $prop := $.GettableProperties }} -{{ if $.IsInIdentity $prop }} +{{- if $.IsInIdentity $prop }} if err := d.Set("{{ underscore $prop.Name -}}", 
flatten{{ if $.NestedQuery -}}Nested{{ end }}{{ $.ResourceName -}}{{ camelize $prop.Name "upper" -}}(opRes["{{ $prop.ApiName -}}"], d, config)); err != nil { return err } @@ -384,7 +384,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{if $.CustomCode.PostCreateFailure -}} resource{{ $.ResourceName -}}PostCreateFailure(d, meta) {{- end}} -{{ if not $.TaintResourceOnFailedCreate -}} +{{- if not $.TaintResourceOnFailedCreate -}} // The resource didn't actually create d.SetId("") {{- end}} @@ -599,8 +599,7 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) d.SetId("") return nil } - -{{- end}} +{{ end}} {{- if $.VirtualFields -}} // Explicitly set virtual fields to default values if unset {{- range $prop := $.VirtualFields }} @@ -748,11 +747,11 @@ func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Updating {{ $.Name }} %q: %#v", d.Id(), obj) headers := make(http.Header) -{{- if $.UpdateMask -}} -{{template "UpdateMask" $ -}} +{{- if $.UpdateMask }} +{{ template "UpdateMask" $ -}} {{ end}} -{{- if $.CustomCode.PreUpdate -}} - {{- $.CustomTemplate $.CustomCode.PreUpdate true -}} +{{ if $.CustomCode.PreUpdate -}} + {{ $.CustomTemplate $.CustomCode.PreUpdate true -}} {{ end}} {{ if $.NestedQuery -}} {{ if $.NestedQuery.ModifyByPatch -}} @@ -831,7 +830,7 @@ if len(updateMask) > 0 { if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\""}}") { obj := make(map[string]interface{}) {{ if $index.FingerprintName }} - getUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.SelfLinkUri}}" -}}") + getUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.SelfLinkUri}}") if err != nil { return err } @@ -983,7 +982,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" {{ if $.CustomCode.PostUpdate -}} //TODO POST 
UPDATE {{end}} return resource{{ $.ResourceName -}}Read(d, meta) -{{ end -}}{{/*if CustomUpdate*/}} +{{- end }}{{/*if CustomUpdate*/}} } {{ else if $.RootLabels -}}{{/*if not immutable*/}} func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{}) error { @@ -1143,8 +1142,8 @@ func resource{{ $.ResourceName }}Import(d *schema.ResourceData, meta interface{} // Explicitly set virtual fields to default values on import {{- range $vf := $.VirtualFields }} {{- if $vf.DefaultValue }} - if err := d.Set("{{ $.vf.Name }}", {{ $.vf.DefaultValue }}); err != nil { - return nil, fmt.Errorf("Error setting {{ $.vf.Name }}: %s", err) + if err := d.Set("{{ $vf.Name }}", {{ $vf.DefaultValue }}); err != nil { + return nil, fmt.Errorf("Error setting {{ $vf.Name }}: %s", err) } {{- end }} {{- end }} diff --git a/mmv1/templates/terraform/schema_property.go.tmpl b/mmv1/templates/terraform/schema_property.go.tmpl index 7673032aded5..14fae40f8165 100644 --- a/mmv1/templates/terraform/schema_property.go.tmpl +++ b/mmv1/templates/terraform/schema_property.go.tmpl @@ -40,7 +40,7 @@ {{ if .DeprecationMessage -}} Deprecated: "{{ .DeprecationMessage }}", {{ end -}} -{{ if .Immutable -}} +{{ if .IsForceNew -}} ForceNew: true, {{ end -}} {{ if .Validation -}} @@ -140,7 +140,7 @@ Possible values: [{{- .EnumValuesToString "\"" false -}}] {{ if .KeyDiffSuppressFunc -}} DiffSuppressFunc: {{ .KeyDiffSuppressFunc }}, {{ end -}} - {{ if .Immutable -}} + {{ if .IsForceNew -}} ForceNew: true, {{ end -}} }, diff --git a/mmv1/templates/terraform/schema_subresource.go.tmpl b/mmv1/templates/terraform/schema_subresource.go.tmpl index ac96751eaa37..4a59519fa060 100644 --- a/mmv1/templates/terraform/schema_subresource.go.tmpl +++ b/mmv1/templates/terraform/schema_subresource.go.tmpl @@ -14,7 +14,7 @@ */}} {{define "SchemaSubResource"}} {{ if and (.IsSet) (eq .Type "Array") (eq .ItemType.Type "NestedObject") -}} -func{{ .NamespaceProperty }}Schema() *schema.Resource { +func {{ 
.NamespaceProperty }}Schema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ {{- range $prop := $.ItemType.Properties }} @@ -26,7 +26,7 @@ func{{ .NamespaceProperty }}Schema() *schema.Resource { {{ end -}} {{ if .NestedProperties }} - {{- range $prop := $.ItemType.Properties }} + {{- range $prop := $.NestedProperties }} {{template "SchemaSubResource" $prop}} {{- end }} {{- end }} diff --git a/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl b/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl index eb42e9f0f556..f05a76bf00fe 100644 --- a/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl +++ b/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl @@ -28,8 +28,8 @@ for i := 0; i < count; i++ { } } -oldSet := schema.NewSet(schema.HashResource(Resource{{ $.ResourceName }}().Schema[{{ underscore $.Name }}].Elem.(*schema.Resource)), old) -newSet := schema.NewSet(schema.HashResource(Resource{{ $.ResourceName }}().Schema[{{ underscore $.Name }}].Elem.(*schema.Resource)), new) +oldSet := schema.NewSet(schema.HashResource(Resource{{ $.ResourceMetadata.ResourceName }}().Schema[{{ underscore $.Name }}].Elem.(*schema.Resource)), old) +newSet := schema.NewSet(schema.HashResource(Resource{{ $.ResourceMetadata.ResourceName }}().Schema[{{ underscore $.Name }}].Elem.(*schema.Resource)), new) if oldSet.Equal(newSet) { if err := diff.Clear({{ underscore $.Name }}); err != nil { diff --git a/mmv1/templates/terraform/update_mask.go.tmpl b/mmv1/templates/terraform/update_mask.go.tmpl index b4bca9377c55..6702943ade2e 100644 --- a/mmv1/templates/terraform/update_mask.go.tmpl +++ b/mmv1/templates/terraform/update_mask.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/}} -{{- define "UpdateMask" }} +{{- define "UpdateMask" -}} updateMask := []string{} {{- $maskGroups := $.GetPropertyUpdateMasksGroups $.UpdateBodyProperties "" }} {{- range $key := $.GetPropertyUpdateMasksGroupKeys $.UpdateBodyProperties }} diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index f7208713bbf9..6cf23b2f7a84 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -533,7 +533,7 @@ virtual_fields: - name: '<%= vfield.name %>' description: <% unless vfield.type.nil? -%> - type: <%= tf_type(vfield.type) %> + type: <%= vfield.type.to_s.gsub("Api::Type::", "") %> <% end -%> <% unless vfield.default_value.nil? -%> default_value: <%= go_literal(vfield.default_value) %> diff --git a/mmv1/templates/terraform/yaml_conversion_field.erb b/mmv1/templates/terraform/yaml_conversion_field.erb index 3a56f8e8f0aa..9c5b119b340e 100644 --- a/mmv1/templates/terraform/yaml_conversion_field.erb +++ b/mmv1/templates/terraform/yaml_conversion_field.erb @@ -173,7 +173,7 @@ <% end -%> <% end -%> <% elsif property.item_type.is_a?(Api::Type::ResourceRef) -%> - item_type: + item_type: name: '<%= property.item_type.name -%>' type: ResourceRef <% unless property.item_type.description.nil? || property.item_type.description == "A nested object resource" -%> @@ -186,7 +186,7 @@ imports: '<%= property.item_type.imports.to_s -%>' <% end -%> <% else -%> - item_type: + item_type: <% if property.item_type.is_a?(Api::Type::Enum) -%> type: Enum <% unless property.item_type.description.nil? 
|| property.item_type.description == "A nested object resource" -%> From 5adb6e569cf7e6f3b6faec5a95186c7c70f2e2b8 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Fri, 14 Jun 2024 15:30:50 -0700 Subject: [PATCH 150/356] Moved test reader to a separate directory (#10853) --- .github/workflows/unit-test-tools.yml | 20 +++ .../cmd/detect_missing_tests.go | 2 +- tools/diff-processor/detector/detector.go | 2 +- .../diff-processor/detector/detector_test.go | 4 +- tools/diff-processor/go.mod | 52 ++++---- tools/diff-processor/go.sum | 126 ++++++++---------- tools/test-reader/.gitignore | 1 + tools/test-reader/README.md | 15 +++ tools/test-reader/cmd/read_tests.go | 63 +++++++++ tools/test-reader/cmd/root.go | 43 ++++++ tools/test-reader/go.mod | 23 ++++ tools/test-reader/go.sum | 40 ++++++ tools/test-reader/main.go | 9 ++ .../reader/reader.go | 0 .../reader/reader_test.go | 0 .../testdata/service/config_variable_test.go | 0 .../testdata/service/covered_resource_test.go | 0 .../testdata/service/cross_file_1_test.go | 0 .../testdata/service/cross_file_2_test.go | 0 .../testdata/service/function_call_test.go | 0 .../service/multiple_resource_test.go | 0 .../testdata/service/serial_resource_test.go | 0 .../service/uncovered_resource_test.go | 0 23 files changed, 304 insertions(+), 96 deletions(-) create mode 100644 tools/test-reader/.gitignore create mode 100644 tools/test-reader/README.md create mode 100644 tools/test-reader/cmd/read_tests.go create mode 100644 tools/test-reader/cmd/root.go create mode 100644 tools/test-reader/go.mod create mode 100644 tools/test-reader/go.sum create mode 100644 tools/test-reader/main.go rename tools/{diff-processor => test-reader}/reader/reader.go (100%) rename tools/{diff-processor => test-reader}/reader/reader_test.go (100%) rename tools/{diff-processor => test-reader}/reader/testdata/service/config_variable_test.go (100%) rename tools/{diff-processor => test-reader}/reader/testdata/service/covered_resource_test.go (100%) 
rename tools/{diff-processor => test-reader}/reader/testdata/service/cross_file_1_test.go (100%) rename tools/{diff-processor => test-reader}/reader/testdata/service/cross_file_2_test.go (100%) rename tools/{diff-processor => test-reader}/reader/testdata/service/function_call_test.go (100%) rename tools/{diff-processor => test-reader}/reader/testdata/service/multiple_resource_test.go (100%) rename tools/{diff-processor => test-reader}/reader/testdata/service/serial_resource_test.go (100%) rename tools/{diff-processor => test-reader}/reader/testdata/service/uncovered_resource_test.go (100%) diff --git a/.github/workflows/unit-test-tools.yml b/.github/workflows/unit-test-tools.yml index 5710ce054295..6c37fa63d6fe 100644 --- a/.github/workflows/unit-test-tools.yml +++ b/.github/workflows/unit-test-tools.yml @@ -103,4 +103,24 @@ jobs: - name: Test template-check run: | cd tools/template-check + go test -v ./... + + test-reader: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.2 + + - name: Set up Go + uses: actions/setup-go@0c52d547c9bc32b1aa3301fd7a9cb496313a4491 # v5.0.0 + with: + go-version: '^1.21.0' + + - name: Build test-reader + run: | + cd tools/test-reader + go build + + - name: Test test-reader + run: | + cd tools/test-reader go test -v ./... 
\ No newline at end of file diff --git a/tools/diff-processor/cmd/detect_missing_tests.go b/tools/diff-processor/cmd/detect_missing_tests.go index 044a7943d372..84d027e58a1d 100644 --- a/tools/diff-processor/cmd/detect_missing_tests.go +++ b/tools/diff-processor/cmd/detect_missing_tests.go @@ -11,7 +11,7 @@ import ( "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/detector" "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" - "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/reader" + "github.com/GoogleCloudPlatform/magic-modules/tools/test-reader/reader" "github.com/golang/glog" "github.com/spf13/cobra" ) diff --git a/tools/diff-processor/detector/detector.go b/tools/diff-processor/detector/detector.go index bbd0f4295a6c..92ff4f384420 100644 --- a/tools/diff-processor/detector/detector.go +++ b/tools/diff-processor/detector/detector.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" - "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/reader" + "github.com/GoogleCloudPlatform/magic-modules/tools/test-reader/reader" "github.com/hashicorp/hcl/v2/hclwrite" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/zclconf/go-cty/cty" diff --git a/tools/diff-processor/detector/detector_test.go b/tools/diff-processor/detector/detector_test.go index 1739ef73edfa..8c339d2694d3 100644 --- a/tools/diff-processor/detector/detector_test.go +++ b/tools/diff-processor/detector/detector_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/diff" - "github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor/reader" + "github.com/GoogleCloudPlatform/magic-modules/tools/test-reader/reader" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) @@ -61,7 +61,7 @@ func TestGetChangedFieldsFromSchemaDiff(t *testing.T) { } func TestGetMissingTestsForChanges(t *testing.T) 
{ - allTests, errs := reader.ReadAllTests("../reader/testdata") + allTests, errs := reader.ReadAllTests("../../test-reader/reader/testdata") if len(errs) > 0 { t.Errorf("errors reading tests before testing detect missing tests: %v", errs) } diff --git a/tools/diff-processor/go.mod b/tools/diff-processor/go.mod index d89297e03fe9..687b17d402e7 100644 --- a/tools/diff-processor/go.mod +++ b/tools/diff-processor/go.mod @@ -10,14 +10,19 @@ replace github.com/GoogleCloudPlatform/magic-modules/tools/diff-processor => ./ replace github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler => ../issue-labeler +replace github.com/GoogleCloudPlatform/magic-modules/tools/test-reader => ../test-reader + require ( - github.com/GoogleCloudPlatform/magic-modules/tools/issue-labeler v0.0.0-00010101000000-000000000000 + github.com/GoogleCloudPlatform/magic-modules/tools/test-reader v0.0.0-00010101000000-000000000000 github.com/davecgh/go-spew v1.1.1 + github.com/golang/glog v1.2.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 - github.com/spf13/cobra v1.7.0 + github.com/hashicorp/hcl/v2 v2.20.1 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 + github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 + github.com/zclconf/go-cty v1.14.2 golang.org/x/exp v0.0.0-20230905200255-921286631fa9 google/provider/new v0.0.0-00010101000000-000000000000 google/provider/old v0.0.0-00010101000000-000000000000 @@ -31,26 +36,25 @@ require ( cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.6 // indirect cloud.google.com/go/longrunning v0.5.5 // indirect - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.63.0 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.64.0 // indirect + github.com/ProtonMail/go-crypto 
v1.1.0-alpha.0 // indirect github.com/agext/levenshtein v1.2.2 // indirect github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cloudflare/circl v1.3.3 // indirect + github.com/cloudflare/circl v1.3.7 // indirect github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect - github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 // indirect - github.com/envoyproxy/go-control-plane v0.11.1 // indirect - github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect + github.com/envoyproxy/go-control-plane v0.12.0 // indirect + github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect + github.com/fatih/color v1.16.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 // indirect github.com/gammazero/workerpool v0.0.0-20181230203049-86a96b5d5d92 // indirect github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/go-cpy v0.0.0-20211218193943-a9c933c06932 // indirect @@ -67,23 +71,22 @@ require ( github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.6.2 // indirect - github.com/hashicorp/hcl/v2 v2.19.1 // indirect + github.com/hashicorp/hc-install v0.6.3 // indirect github.com/hashicorp/logutils v1.0.0 // indirect - github.com/hashicorp/terraform-exec v0.19.0 // indirect - 
github.com/hashicorp/terraform-json v0.18.0 // indirect - github.com/hashicorp/terraform-plugin-framework v1.5.0 // indirect + github.com/hashicorp/terraform-exec v0.20.0 // indirect + github.com/hashicorp/terraform-json v0.21.0 // indirect + github.com/hashicorp/terraform-plugin-framework v1.7.0 // indirect github.com/hashicorp/terraform-plugin-framework-validators v0.9.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.20.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.22.1 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect - github.com/hashicorp/terraform-provider-google-beta v1.20.0 // indirect + github.com/hashicorp/terraform-provider-google v1.20.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect @@ -98,7 +101,6 @@ require ( github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect - github.com/zclconf/go-cty v1.14.1 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect @@ -106,20 +108,20 @@ require ( go.opentelemetry.io/otel/metric v1.23.0 // indirect go.opentelemetry.io/otel/trace v1.23.0 // indirect golang.org/x/crypto v0.19.0 // 
indirect - golang.org/x/mod v0.14.0 // indirect + golang.org/x/mod v0.15.0 // indirect golang.org/x/net v0.21.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.13.0 // indirect google.golang.org/api v0.167.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/grpc v1.61.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + google.golang.org/grpc v1.62.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tools/diff-processor/go.sum b/tools/diff-processor/go.sum index 372104100426..71e1d1bb56e9 100644 --- a/tools/diff-processor/go.sum +++ b/tools/diff-processor/go.sum @@ -16,12 +16,12 @@ cloud.google.com/go/longrunning v0.5.5/go.mod h1:WV2LAxD8/rg5Z1cNW6FJ/ZpX4E4VnDn dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.63.0 h1:eSOBYPZVnU2fZul9sAJFGLVCgv6stNVKkmsogKF7UeY= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.63.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.64.0 h1:QA90iKudX8ijAW795f/jVbo0oEo7VoevwxLCNyi2qRc= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.64.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= 
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU= @@ -31,7 +31,6 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -40,15 +39,15 @@ github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod 
h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= +github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= @@ -62,13 +61,14 @@ github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FM github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= 
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.11.1 h1:wSUXTlLfiAQRWs2F+p+EKOY9rUyis1MyGqJ2DIk5HpM= -github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g= +github.com/envoyproxy/go-control-plane v0.12.0 h1:4X+VP1GHd1Mhj6IB5mMeGbLCleqxjletLK6K0rbxyZI= +github.com/envoyproxy/go-control-plane v0.12.0/go.mod h1:ZBTaoJ23lqITozF0M6G4/IragXCQKCnYbmlmtHvwRG0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= -github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/gammazero/deque v0.0.0-20180920172122-f6adf94963e4 h1:R+19WKQClnfMXS60cP5BmMe1wjZ4u0evY2p2Ar0ZTXo= @@ -79,8 +79,8 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= 
github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= -github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -94,8 +94,8 @@ github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3a github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= -github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -159,30 +159,30 @@ github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/C github.com/hashicorp/go-uuid v1.0.3/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= -github.com/hashicorp/hc-install v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= -github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= -github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= +github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= +github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= +github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= -github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= -github.com/hashicorp/terraform-json v0.18.0 h1:pCjgJEqqDESv4y0Tzdqfxr/edOIGkjs8keY42xfNBwU= -github.com/hashicorp/terraform-json v0.18.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= -github.com/hashicorp/terraform-plugin-framework v1.5.0 h1:8kcvqJs/x6QyOFSdeAyEgsenVOUeC/IyKpi2ul4fjTg= -github.com/hashicorp/terraform-plugin-framework v1.5.0/go.mod h1:6waavirukIlFpVpthbGd2PUNYaFedB0RwW3MDzJ/rtc= +github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= +github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= +github.com/hashicorp/terraform-json v0.21.0 
h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= +github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/terraform-plugin-framework v1.7.0 h1:wOULbVmfONnJo9iq7/q+iBOBJul5vRovaYJIu2cY/Pw= +github.com/hashicorp/terraform-plugin-framework v1.7.0/go.mod h1:jY9Id+3KbZ17OMpulgnWLSfwxNVYSoYBQFTgsx044CI= github.com/hashicorp/terraform-plugin-framework-validators v0.9.0 h1:LYz4bXh3t7bTEydXOmPDPupRRnA480B/9+jV8yZvxBA= github.com/hashicorp/terraform-plugin-framework-validators v0.9.0/go.mod h1:+BVERsnfdlhYR2YkXMBtPnmn9UsL19U3qUtSZ+Y/5MY= -github.com/hashicorp/terraform-plugin-go v0.20.0 h1:oqvoUlL+2EUbKNsJbIt3zqqZ7wi6lzn4ufkn/UA51xQ= -github.com/hashicorp/terraform-plugin-go v0.20.0/go.mod h1:Rr8LBdMlY53a3Z/HpP+ZU3/xCDqtKNCkeI9qOyT10QE= +github.com/hashicorp/terraform-plugin-go v0.22.1 h1:iTS7WHNVrn7uhe3cojtvWWn83cm2Z6ryIUDTRO0EV7w= +github.com/hashicorp/terraform-plugin-go v0.22.1/go.mod h1:qrjnqRghvQ6KnDbB12XeZ4FluclYwptntoWCr9QaXTI= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= -github.com/hashicorp/terraform-plugin-mux v0.13.0 h1:79U401/3nd8CWwDGtTHc8F3miSCAS9XGtVarxSTDgwA= -github.com/hashicorp/terraform-plugin-mux v0.13.0/go.mod h1:Ndv0FtwDG2ogzH59y64f2NYimFJ6I0smRgFUKfm6dyQ= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 h1:Bl3e2ei2j/Z3Hc2HIS15Gal2KMKyLAZ2om1HCEvK6es= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0/go.mod h1:i2C41tszDjiWfziPQDL5R/f3Zp0gahXe5No/MIO9rCE= -github.com/hashicorp/terraform-provider-google-beta v1.20.0 h1:rxZwjTPOQgmSaBINGCRhGTf9svsFU3n1iaF5i3rYIbo= -github.com/hashicorp/terraform-provider-google-beta v1.20.0/go.mod h1:t8+8q1zjjAREhGZHvwPU35evEHk9FqNvCpP8+HwJ3Cw= +github.com/hashicorp/terraform-plugin-mux v0.15.0 h1:+/+lDx0WUsIOpkAmdwBIoFU8UP9o2eZASoOnLsWbKME= +github.com/hashicorp/terraform-plugin-mux 
v0.15.0/go.mod h1:9ezplb1Dyq394zQ+ldB0nvy/qbNAz3mMoHHseMTMaKo= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0 h1:qHprzXy/As0rxedphECBEQAh3R4yp6pKksKHcqZx5G8= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.33.0/go.mod h1:H+8tjs9TjV2w57QFVSMBQacf8k/E1XwLXGCARgViC6A= +github.com/hashicorp/terraform-provider-google v1.20.0 h1:dVzBoqMHZA4PDAJaH3ztIey2cxFx6e+kRDAr3bMSrmI= +github.com/hashicorp/terraform-provider-google v1.20.0/go.mod h1:19QAcvJTh1z3BfW6cxR5MQd89aIurcIIur99oJGbv/E= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -201,19 +201,23 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-colorable v0.1.9/go.mod 
h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= @@ -237,6 +241,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= 
github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -245,8 +251,8 @@ github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -273,8 +279,10 @@ github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= -github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= +github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= go.opencensus.io 
v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= @@ -296,8 +304,6 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -309,9 +315,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -323,9 +328,6 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -337,7 +339,6 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -355,25 +356,17 @@ golang.org/x/sys 
v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= 
-golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= @@ -387,7 +380,6 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -416,8 +408,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= -google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= 
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -429,8 +421,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/tools/test-reader/.gitignore b/tools/test-reader/.gitignore new file mode 100644 index 000000000000..bf17282febdb --- /dev/null +++ b/tools/test-reader/.gitignore @@ -0,0 +1 @@ +test-reader \ No newline at end of file diff --git a/tools/test-reader/README.md b/tools/test-reader/README.md new file mode 100644 index 000000000000..1cbb878a3b68 --- /dev/null +++ b/tools/test-reader/README.md @@ -0,0 +1,15 @@ +# test-reader + +Tool for analyzing provider tests. + +## Run + +```bash +go run . read-tests ./reader/testdata/ +``` + +## Test + +```bash +go test ./... 
+``` \ No newline at end of file diff --git a/tools/test-reader/cmd/read_tests.go b/tools/test-reader/cmd/read_tests.go new file mode 100644 index 000000000000..cb9b51f2fc81 --- /dev/null +++ b/tools/test-reader/cmd/read_tests.go @@ -0,0 +1,63 @@ +package cmd + +import ( + "fmt" + "strings" + + "github.com/GoogleCloudPlatform/magic-modules/tools/test-reader/reader" + "github.com/spf13/cobra" +) + +const readTestsDesc = "Run the missing test detector using the given services directory" + +type readTestsOptions struct { + rootOptions *rootOptions + testPrefix string +} + +func newReadTestsCmd(rootOptions *rootOptions) *cobra.Command { + o := &readTestsOptions{ + rootOptions: rootOptions, + } + cmd := &cobra.Command{ + Use: "read-tests SERVICES_DIR", + Short: readTestsDesc, + Long: readTestsDesc, + Args: cobra.ExactArgs(1), + RunE: func(c *cobra.Command, args []string) error { + return o.run(args) + }, + } + cmd.Flags().StringVar(&o.testPrefix, "test-prefix", "", "Only display results for matching tests") + return cmd +} + +func (o *readTestsOptions) run(args []string) error { + allTests, errs := reader.ReadAllTests(args[0]) + for path, err := range errs { + fmt.Printf("error reading path: %s, err: %v\n", path, err) + } + + total := 0 + for _, test := range allTests { + if !strings.HasPrefix(test.Name, o.testPrefix) { + continue + } + fmt.Printf("%s:\n", test.Name) + for index, step := range test.Steps { + fmt.Printf(" Step %d:\n", index) + for resourceType, resources := range step { + for _, resource := range resources { + fmt.Printf(" %s:\n", resourceType) + for field, value := range resource { + fmt.Printf(" %s: %v\n", field, value) + } + } + } + } + fmt.Println("") + total += 1 + } + fmt.Printf("Found %d tests\n", total) + return nil +} diff --git a/tools/test-reader/cmd/root.go b/tools/test-reader/cmd/root.go new file mode 100644 index 000000000000..4be6b45f3cf0 --- /dev/null +++ b/tools/test-reader/cmd/root.go @@ -0,0 +1,43 @@ +package cmd + +import ( + "fmt" + 
"os" + + "github.com/spf13/cobra" +) + +const rootCmdDesc = "Utilities for analyzing provider tests." + +type rootOptions struct { +} + +func newRootCmd() (*cobra.Command, *rootOptions, error) { + o := &rootOptions{} + cmd := &cobra.Command{ + Use: "test-reader", + Short: rootCmdDesc, + Long: rootCmdDesc, + SilenceUsage: true, + SilenceErrors: true, + } + cmd.AddCommand(newReadTestsCmd(o)) + return cmd, o, nil +} + +// Execute is the entry-point for all commands. +// This lets us keep all new command functions private. +func Execute() { + rootCmd, _, err := newRootCmd() + if err != nil { + fmt.Printf("Error creating root logger: %s", err) + os.Exit(1) + } + err = rootCmd.Execute() + if err == nil { + os.Exit(0) + } else { + fmt.Println(err.Error()) + os.Exit(1) + } +} diff --git a/tools/test-reader/go.mod b/tools/test-reader/go.mod new file mode 100644 index 000000000000..db575d4e5472 --- /dev/null +++ b/tools/test-reader/go.mod @@ -0,0 +1,23 @@ +module github.com/GoogleCloudPlatform/magic-modules/tools/test-reader + +go 1.21 + +require ( + github.com/hashicorp/hcl/v2 v2.20.1 + github.com/spf13/cobra v1.8.0 +) + +require ( + github.com/agext/levenshtein v1.2.1 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/zclconf/go-cty v1.13.0 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.11.0 // indirect + golang.org/x/tools v0.6.0 // indirect +) diff --git a/tools/test-reader/go.sum b/tools/test-reader/go.sum new file mode 100644 index 000000000000..0a25c71ee4fd --- /dev/null +++ b/tools/test-reader/go.sum @@ -0,0 +1,40 @@ +github.com/agext/levenshtein v1.2.1 
h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= +github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/hashicorp/hcl/v2 v2.20.1 h1:M6hgdyz7HYt1UN9e61j+qKJBqR3orTWbI1HKBJEdxtc= +github.com/hashicorp/hcl/v2 v2.20.1/go.mod h1:TZDqQ4kNKCbh1iJp99FdPiUaVDDUPivbqxZulxDYqL4= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM= +github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod 
h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/zclconf/go-cty v1.13.0 h1:It5dfKTTZHe9aeppbNOda3mN7Ag7sg6QkBNm6TkyFa0= +github.com/zclconf/go-cty v1.13.0/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b h1:FosyBZYxY34Wul7O/MSKey3txpPYyCqVO5ZyceuQJEI= +github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/tools/test-reader/main.go b/tools/test-reader/main.go new file mode 100644 index 000000000000..2a6ef450de9d --- /dev/null +++ b/tools/test-reader/main.go @@ -0,0 +1,9 @@ +package main + +import ( + "github.com/GoogleCloudPlatform/magic-modules/tools/test-reader/cmd" +) + +func main() { + cmd.Execute() +} diff --git a/tools/diff-processor/reader/reader.go b/tools/test-reader/reader/reader.go 
similarity index 100% rename from tools/diff-processor/reader/reader.go rename to tools/test-reader/reader/reader.go diff --git a/tools/diff-processor/reader/reader_test.go b/tools/test-reader/reader/reader_test.go similarity index 100% rename from tools/diff-processor/reader/reader_test.go rename to tools/test-reader/reader/reader_test.go diff --git a/tools/diff-processor/reader/testdata/service/config_variable_test.go b/tools/test-reader/reader/testdata/service/config_variable_test.go similarity index 100% rename from tools/diff-processor/reader/testdata/service/config_variable_test.go rename to tools/test-reader/reader/testdata/service/config_variable_test.go diff --git a/tools/diff-processor/reader/testdata/service/covered_resource_test.go b/tools/test-reader/reader/testdata/service/covered_resource_test.go similarity index 100% rename from tools/diff-processor/reader/testdata/service/covered_resource_test.go rename to tools/test-reader/reader/testdata/service/covered_resource_test.go diff --git a/tools/diff-processor/reader/testdata/service/cross_file_1_test.go b/tools/test-reader/reader/testdata/service/cross_file_1_test.go similarity index 100% rename from tools/diff-processor/reader/testdata/service/cross_file_1_test.go rename to tools/test-reader/reader/testdata/service/cross_file_1_test.go diff --git a/tools/diff-processor/reader/testdata/service/cross_file_2_test.go b/tools/test-reader/reader/testdata/service/cross_file_2_test.go similarity index 100% rename from tools/diff-processor/reader/testdata/service/cross_file_2_test.go rename to tools/test-reader/reader/testdata/service/cross_file_2_test.go diff --git a/tools/diff-processor/reader/testdata/service/function_call_test.go b/tools/test-reader/reader/testdata/service/function_call_test.go similarity index 100% rename from tools/diff-processor/reader/testdata/service/function_call_test.go rename to tools/test-reader/reader/testdata/service/function_call_test.go diff --git 
a/tools/diff-processor/reader/testdata/service/multiple_resource_test.go b/tools/test-reader/reader/testdata/service/multiple_resource_test.go similarity index 100% rename from tools/diff-processor/reader/testdata/service/multiple_resource_test.go rename to tools/test-reader/reader/testdata/service/multiple_resource_test.go diff --git a/tools/diff-processor/reader/testdata/service/serial_resource_test.go b/tools/test-reader/reader/testdata/service/serial_resource_test.go similarity index 100% rename from tools/diff-processor/reader/testdata/service/serial_resource_test.go rename to tools/test-reader/reader/testdata/service/serial_resource_test.go diff --git a/tools/diff-processor/reader/testdata/service/uncovered_resource_test.go b/tools/test-reader/reader/testdata/service/uncovered_resource_test.go similarity index 100% rename from tools/diff-processor/reader/testdata/service/uncovered_resource_test.go rename to tools/test-reader/reader/testdata/service/uncovered_resource_test.go From 808def1f5573d57add94260a6000315c3ce2c58d Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 17 Jun 2024 08:19:40 -0500 Subject: [PATCH 151/356] Update sql_user type (#10835) --- mmv1/third_party/terraform/services/sql/resource_sql_user.go | 5 ++--- .../terraform/website/docs/r/sql_user.html.markdown | 4 +++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_user.go b/mmv1/third_party/terraform/services/sql/resource_sql_user.go index 3a79b15f99d3..cc97ffad3ee1 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_user.go +++ b/mmv1/third_party/terraform/services/sql/resource_sql_user.go @@ -93,7 +93,7 @@ func ResourceSqlUser() *schema.Resource { Optional: true, Sensitive: true, Description: `The password for the user. Can be updated. 
For Postgres instances this is a Required field, unless type is set to - either CLOUD_IAM_USER or CLOUD_IAM_SERVICE_ACCOUNT.`, + either CLOUD_IAM_USER or CLOUD_IAM_SERVICE_ACCOUNT.`, }, "type": { @@ -102,8 +102,7 @@ func ResourceSqlUser() *schema.Resource { ForceNew: true, DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("BUILT_IN"), Description: `The user type. It determines the method to authenticate the user during login. - The default is the database's built-in user type. Flags include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_GROUP" or "CLOUD_IAM_SERVICE_ACCOUNT".`, - ValidateFunc: validation.StringInSlice([]string{"BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_GROUP", "CLOUD_IAM_SERVICE_ACCOUNT", ""}, false), + The default is the database's built-in user type.`, }, "sql_server_user_details": { Type: schema.TypeList, diff --git a/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown index 2f23a59718e7..9f2c3cde444a 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_user.html.markdown @@ -119,7 +119,9 @@ The following arguments are supported: * `type` - (Optional) The user type. It determines the method to authenticate the user during login. The default is the database's built-in user type. Flags - include "BUILT_IN", "CLOUD_IAM_USER", "CLOUD_IAM_GROUP" or "CLOUD_IAM_SERVICE_ACCOUNT". + include "BUILT_IN", "CLOUD_IAM_USER", and "CLOUD_IAM_SERVICE_ACCOUNT" for both + [Postgres](https://cloud.google.com/sql/docs/postgres/admin-api/rest/v1beta4/users#sqlusertype) and [MySQL](https://cloud.google.com/sql/docs/mysql/admin-api/rest/v1beta4/users#sqlusertype). + MySQL also includes "CLOUD_IAM_GROUP", "CLOUD_IAM_GROUP_USER" and "CLOUD_IAM_GROUP_SERVICE_ACCOUNT". * `deletion_policy` - (Optional) The deletion policy for the user. Setting `ABANDON` allows the resource to be abandoned rather than deleted. 
This is useful From bc527bd05d95340af8f53282d470651186279c97 Mon Sep 17 00:00:00 2001 From: wj-chen Date: Mon, 17 Jun 2024 06:26:41 -0700 Subject: [PATCH 152/356] Add resource tags to BigQuery Dataset in the Beta provider (#10971) --- mmv1/products/bigquery/Dataset.yaml | 22 +++ .../bigquery_dataset_resource_tags.tf.erb | 41 ++++++ ... => resource_bigquery_dataset_test.go.erb} | 131 ++++++++++++++++++ 3 files changed, 194 insertions(+) create mode 100644 mmv1/templates/terraform/examples/bigquery_dataset_resource_tags.tf.erb rename mmv1/third_party/terraform/services/bigquery/{resource_bigquery_dataset_test.go => resource_bigquery_dataset_test.go.erb} (85%) diff --git a/mmv1/products/bigquery/Dataset.yaml b/mmv1/products/bigquery/Dataset.yaml index 25e64c26e4ec..323e5f06d8b3 100644 --- a/mmv1/products/bigquery/Dataset.yaml +++ b/mmv1/products/bigquery/Dataset.yaml @@ -90,6 +90,19 @@ examples: skip_test: true vars: dataset_id: 'example_dataset' + - !ruby/object:Provider::Terraform::Examples + name: 'bigquery_dataset_resource_tags' + min_version: beta + primary_resource_id: 'dataset' + primary_resource_name: + 'fmt.Sprintf("tf_test_dataset%s", context["random_suffix"])' + skip_docs: true + vars: + dataset_id: 'dataset' + tag_key1: 'tag_key1' + tag_value1: 'tag_value1' + tag_key2: 'tag_key2' + tag_value2: 'tag_value2' virtual_fields: - !ruby/object:Api::Type::Boolean name: 'delete_contents_on_destroy' @@ -400,3 +413,12 @@ properties: LOGICAL is the default if this flag isn't specified. default_from_api: true + - !ruby/object:Api::Type::KeyValuePairs + name: 'resourceTags' + min_version: beta + description: | + The tags attached to this table. Tag keys are globally unique. Tag key is expected to be + in the namespaced format, for example "123456789012/environment" where 123456789012 is the + ID of the parent organization or project resource for this tag key. Tag value is expected + to be the short name, for example "Production". 
See [Tag definitions](/iam/docs/tags-access-control#definitions) + for more details. diff --git a/mmv1/templates/terraform/examples/bigquery_dataset_resource_tags.tf.erb b/mmv1/templates/terraform/examples/bigquery_dataset_resource_tags.tf.erb new file mode 100644 index 000000000000..8f177846e4e9 --- /dev/null +++ b/mmv1/templates/terraform/examples/bigquery_dataset_resource_tags.tf.erb @@ -0,0 +1,41 @@ +data "google_project" "project" { + provider = "google-beta" +} + +resource "google_tags_tag_key" "tag_key1" { + provider = "google-beta" + parent = "projects/${data.google_project.project.number}" + short_name = "<%= ctx[:vars]['tag_key1'] %>" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = "google-beta" + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "<%= ctx[:vars]['tag_value1'] %>" +} + +resource "google_tags_tag_key" "tag_key2" { + provider = "google-beta" + parent = "projects/${data.google_project.project.number}" + short_name = "<%= ctx[:vars]['tag_key2'] %>" +} + +resource "google_tags_tag_value" "tag_value2" { + provider = "google-beta" + parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" + short_name = "<%= ctx[:vars]['tag_value2'] %>" +} + +resource "google_bigquery_dataset" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + + dataset_id = "<%= ctx[:vars]['dataset_id'] %>" + friendly_name = "test" + description = "This is a test description" + location = "EU" + + resource_tags = { + "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key1.short_name}" = "${google_tags_tag_value.tag_value1.short_name}" + "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key2.short_name}" = "${google_tags_tag_value.tag_value2.short_name}" + } +} diff --git a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_test.go b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_test.go.erb similarity index 85% rename from 
mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_test.go rename to mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_test.go.erb index eec2a3a37c56..b317cc27bb90 100644 --- a/mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_test.go +++ b/mmv1/third_party/terraform/services/bigquery/resource_bigquery_dataset_test.go.erb @@ -1,3 +1,6 @@ +<% autogen_exception -%> +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 package bigquery_test import ( @@ -418,6 +421,42 @@ func TestAccBigQueryDataset_invalidLongID(t *testing.T) { }) } +<% unless version == 'ga' -%> +func TestAccBigQueryDataset_bigqueryDatasetResourceTags_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset_bigqueryDatasetResourceTags_basic(context), + }, + { + ResourceName: "google_bigquery_dataset.dataset", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDataset_bigqueryDatasetResourceTags_update(context), + }, + { + ResourceName: "google_bigquery_dataset.dataset", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +<% end -%> func testAccAddTable(t *testing.T, datasetID string, tableID string) resource.TestCheckFunc { // Not actually a check, but adds a table independently of terraform return func(s *terraform.State) error { @@ -735,3 +774,95 @@ resource "google_bigquery_dataset" "test" { } `, datasetID) } +<% unless version == 'ga' -%> + +func 
testAccBigQueryDataset_bigqueryDatasetResourceTags_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + provider = "google-beta" +} + +resource "google_tags_tag_key" "tag_key1" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key1%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "tf_test_tag_value1%{random_suffix}" +} + +resource "google_tags_tag_key" "tag_key2" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key2%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value2" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" + short_name = "tf_test_tag_value2%{random_suffix}" +} + +resource "google_bigquery_dataset" "dataset" { + provider = google-beta + + dataset_id = "dataset%{random_suffix}" + friendly_name = "test" + description = "This is a test description" + location = "EU" + + resource_tags = { + "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key1.short_name}" = "${google_tags_tag_value.tag_value1.short_name}" + "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key2.short_name}" = "${google_tags_tag_value.tag_value2.short_name}" + } +} +`, context) +} + +func testAccBigQueryDataset_bigqueryDatasetResourceTags_update(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + provider = "google-beta" +} + +resource "google_tags_tag_key" "tag_key1" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key1%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name 
= "tf_test_tag_value1%{random_suffix}" +} + +resource "google_tags_tag_key" "tag_key2" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key2%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value2" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" + short_name = "tf_test_tag_value2%{random_suffix}" +} + +resource "google_bigquery_dataset" "dataset" { + provider = google-beta + + dataset_id = "dataset%{random_suffix}" + friendly_name = "test" + description = "This is a test description" + location = "EU" + + resource_tags = { + } +} +`, context) +} +<% end -%> From 4c0b1d9ce480bb5a8f56c0643da68e1415c147cf Mon Sep 17 00:00:00 2001 From: Mauricio Alvarez Leon <65101411+BBBmau@users.noreply.github.com> Date: Mon, 17 Jun 2024 08:36:02 -0700 Subject: [PATCH 153/356] `google_storage_bucket`: fix `custom_placement_config` values not normalized (#10936) --- .../storage/resource_storage_bucket.go.erb | 11 ++- .../resource_storage_bucket_test.go.erb | 88 +++++++++++++++++++ 2 files changed, 98 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb index ecab8783ddfc..0829abe8f772 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket.go.erb @@ -496,6 +496,9 @@ func ResourceStorageBucket() *schema.Resource { MinItems: 2, Elem: &schema.Schema{ Type: schema.TypeString, + StateFunc: func(s interface{}) string { + return strings.ToUpper(s.(string)) + }, }, Description: `The list of individual regions that comprise a dual-region bucket. See the docs for a list of acceptable regions. 
Note: If any of the data_locations changes, it will recreate the bucket.`, }, @@ -1169,9 +1172,15 @@ func flattenBucketCustomPlacementConfig(cfc *storage.BucketCustomPlacementConfig func expandBucketDataLocations(configured interface{}) []string { l := configured.(*schema.Set).List() + // Since we only want uppercase values to prevent unnecessary diffs, we can do a comparison + // to determine whether or not to include the value as part of the request. + + // This extra check comes from the limitations of both DiffStateFunc and StateFunc towards types of Sets,Lists, and Maps. req := make([]string, 0, len(l)) for _, raw := range l { - req = append(req, raw.(string)) + if raw.(string) == strings.ToUpper(raw.(string)) { + req = append(req, raw.(string)) + } } return req } diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb index 5c0d66bed764..af3ef062a21c 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_test.go.erb @@ -249,6 +249,81 @@ func TestAccStorageBucket_dualLocation(t *testing.T) { }) } +func TestAccStorageBucket_dualLocation_lowercase(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_dualLocation_lowercase(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_dualLocation_versionChange(t *testing.T) { + // Test is not parallel because ENVs are set. 
+ // Need to skip VCR as this test downloads providers from the Terraform Registry + acctest.SkipIfVcr(t) + + creds := envvar.GetTestCredsFromEnv() + project := envvar.GetTestProjectFromEnv() + t.Setenv("GOOGLE_CREDENTIALS", creds) + t.Setenv("GOOGLE_PROJECT", project) + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_dualLocation(bucketName), + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "5.30.0", + Source: "hashicorp/google", + }, + }, + }, + { + ResourceName: "google_storage_bucket.bucket", + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "5.30.0", + Source: "hashicorp/google", + }, + }, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_dualLocation(bucketName), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + }, + { + ResourceName: "google_storage_bucket.bucket", + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + func TestAccStorageBucket_dualLocation_rpo(t *testing.T) { t.Parallel() bucketName := acctest.TestBucketName(t) @@ -1714,6 +1789,19 @@ resource "google_storage_bucket" "bucket" { `, bucketName) } +func testAccStorageBucket_dualLocation_lowercase(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "ASIA" + force_destroy = true + custom_placement_config { + data_locations = ["asia-east1", "asia-southeast1"] + } +} +`, bucketName) +} + func testAccStorageBucket_dualLocation_rpo(bucketName string,rpo string) string { return fmt.Sprintf(` resource 
"google_storage_bucket" "bucket" { From a54f94786cdf00542ad2c31bf2419255b8929e47 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Mon, 17 Jun 2024 09:19:07 -0700 Subject: [PATCH 154/356] Remove redundant `resource.TestStep` (#10970) --- ..._binary_authorization_attestor_test.go.erb | 2 +- .../resource_compute_autoscaler_test.go.erb | 28 ++--- ...source_compute_backend_service_test.go.erb | 98 ++++++++--------- .../compute/resource_compute_disk_test.go.erb | 20 ++-- ...source_compute_forwarding_rule_test.go.erb | 28 ++--- ...esource_compute_global_address_test.go.erb | 8 +- ...urce_compute_instance_group_manager.go.erb | 4 +- .../resource_compute_instance_test.go.erb | 20 ++-- ...urce_compute_region_autoscaler_test.go.erb | 16 +-- ...compute_region_backend_service_test.go.erb | 12 +- ...mpute_region_instance_template_test.go.erb | 2 +- .../services/container/node_config.go.erb | 36 +++--- .../resource_container_cluster.go.erb | 2 +- ...esource_container_cluster_migratev1.go.erb | 2 +- .../resource_container_cluster_test.go.erb | 18 +-- .../resource_container_node_pool.go.erb | 20 ++-- .../resource_container_node_pool_test.go.erb | 104 +++++++++--------- .../dns/resource_dns_managed_zone_test.go.erb | 32 +++--- .../dns/resource_dns_policy_test.go.erb | 8 +- ...ource_dns_response_policy_rule_test.go.erb | 8 +- .../resource_dns_response_policy_test.go.erb | 12 +- 21 files changed, 240 insertions(+), 240 deletions(-) diff --git a/mmv1/third_party/terraform/services/binaryauthorization/resource_binary_authorization_attestor_test.go.erb b/mmv1/third_party/terraform/services/binaryauthorization/resource_binary_authorization_attestor_test.go.erb index cf47a5373cb0..42bfb8659d84 100644 --- a/mmv1/third_party/terraform/services/binaryauthorization/resource_binary_authorization_attestor_test.go.erb +++ b/mmv1/third_party/terraform/services/binaryauthorization/resource_binary_authorization_attestor_test.go.erb @@ -127,7 +127,7 @@ func TestAccBinaryAuthorizationAttestor_update(t 
*testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckBinaryAuthorizationAttestorDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccBinaryAuthorizationAttestorBasic(name), }, { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_autoscaler_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_autoscaler_test.go.erb index fa3e0215c841..477521c6baed 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_autoscaler_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_autoscaler_test.go.erb @@ -22,18 +22,18 @@ func TestAccComputeAutoscaler_update(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeAutoscaler_basic(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_autoscaler.foobar", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeAutoscaler_update(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_autoscaler.foobar", ImportState: true, ImportStateVerify: true, @@ -56,10 +56,10 @@ func TestAccComputeAutoscaler_multicondition(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeAutoscaler_multicondition(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_autoscaler.foobar", ImportState: true, ImportStateVerify: true, @@ -81,10 +81,10 @@ func TestAccComputeAutoscaler_scaleDownControl(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: 
testAccCheckComputeAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeAutoscaler_scaleDownControl(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_autoscaler.foobar", ImportState: true, ImportStateVerify: true, @@ -106,10 +106,10 @@ func TestAccComputeAutoscaler_scalingSchedule(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeAutoscaler_scalingSchedule(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_autoscaler.foobar", ImportState: true, ImportStateVerify: true, @@ -131,10 +131,10 @@ func TestAccComputeAutoscaler_scaleInControl(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeAutoscaler_scaleInControl(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_autoscaler.foobar", ImportState: true, ImportStateVerify: true, @@ -156,10 +156,10 @@ func TestAccComputeAutoscaler_scaleInControlFixed(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeAutoscaler_scaleInControlFixed(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_autoscaler.foobar", ImportState: true, ImportStateVerify: true, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_backend_service_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_backend_service_test.go.erb index c27ee1637e96..8e1ecf66cb8b 100644 --- 
a/mmv1/third_party/terraform/services/compute/resource_compute_backend_service_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_backend_service_test.go.erb @@ -21,19 +21,19 @@ func TestAccComputeBackendService_basic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_basic(serviceName, checkName), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeBackendService_basicModified( serviceName, checkName, extraCheckName), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, @@ -54,20 +54,20 @@ func TestAccComputeBackendService_withBackend(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withBackend( serviceName, igName, itName, checkName, 10), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeBackendService_withBackend( serviceName, igName, itName, checkName, 20), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, @@ -86,26 +86,26 @@ func TestAccComputeBackendService_withBackendAndMaxUtilization(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withBackend( serviceName, igName, itName, checkName, 
10), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeBackendService_withBackendAndMaxUtilization( serviceName, igName, itName, checkName, 10), PlanOnly: true, ExpectNonEmptyPlan: true, }, - resource.TestStep{ + { Config: testAccComputeBackendService_withBackendAndMaxUtilization( serviceName, igName, itName, checkName, 10), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, @@ -124,21 +124,21 @@ func TestAccComputeBackendService_withBackendAndIAP(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withBackendAndIAP( serviceName, igName, itName, checkName, 10), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"iap.0.oauth2_client_secret"}, }, - resource.TestStep{ + { Config: testAccComputeBackendService_withBackend( serviceName, igName, itName, checkName, 10), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, @@ -158,20 +158,20 @@ func TestAccComputeBackendService_updatePreservesOptionalParameters(t *testing.T ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withSessionAffinity( serviceName, checkName, "initial-description", "GENERATED_COOKIE"), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: 
testAccComputeBackendService_withSessionAffinity( serviceName, checkName, "updated-description", "GENERATED_COOKIE"), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, @@ -191,10 +191,10 @@ func TestAccComputeBackendService_withConnectionDraining(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withConnectionDraining(serviceName, checkName, 10), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, @@ -214,18 +214,18 @@ func TestAccComputeBackendService_withConnectionDrainingAndUpdate(t *testing.T) ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withConnectionDraining(serviceName, checkName, 10), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeBackendService_basic(serviceName, checkName), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, @@ -245,10 +245,10 @@ func TestAccComputeBackendService_withHttpsHealthCheck(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withHttpsHealthCheck(serviceName, checkName), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, @@ -356,11 +356,11 @@ func 
TestAccComputeBackendService_withCDNEnabled(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withCDNEnabled( serviceName, checkName), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, @@ -380,20 +380,20 @@ func TestAccComputeBackendService_withSessionAffinity(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withSessionAffinity( serviceName, checkName, "description", "CLIENT_IP"), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeBackendService_withSessionAffinity( serviceName, checkName, "description", "GENERATED_COOKIE"), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, @@ -413,11 +413,11 @@ func TestAccComputeBackendService_withAffinityCookieTtlSec(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withAffinityCookieTtlSec( serviceName, checkName, "description", "GENERATED_COOKIE", 300), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, @@ -439,20 +439,20 @@ func TestAccComputeBackendService_withMaxConnections(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: 
[]resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withMaxConnections( serviceName, igName, itName, checkName, 10), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeBackendService_withMaxConnections( serviceName, igName, itName, checkName, 20), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, @@ -474,20 +474,20 @@ func TestAccComputeBackendService_withMaxConnectionsPerInstance(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withMaxConnectionsPerInstance( serviceName, igName, itName, checkName, 10), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeBackendService_withMaxConnectionsPerInstance( serviceName, igName, itName, checkName, 20), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.lipsum", ImportState: true, ImportStateVerify: true, @@ -581,18 +581,18 @@ func TestAccComputeBackendService_withCustomHeaders(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeBackendService_withCustomHeaders(serviceName, checkName), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeBackendService_basic(serviceName, checkName), }, - resource.TestStep{ + { ResourceName: "google_compute_backend_service.foobar", ImportState: 
true, ImportStateVerify: true, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb index 67e3f01f2730..37180c71ddc4 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb @@ -537,7 +537,7 @@ func TestAccComputeDisk_fromSnapshot(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeDiskDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeDisk_fromSnapshot(projectName, firstDiskName, snapshotName, diskName, "self_link"), }, { @@ -545,7 +545,7 @@ func TestAccComputeDisk_fromSnapshot(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeDisk_fromSnapshot(projectName, firstDiskName, snapshotName, diskName, "name"), }, { @@ -568,7 +568,7 @@ func TestAccComputeDisk_encryption(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeDiskDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeDisk_encryption(diskName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeDiskExists( @@ -668,7 +668,7 @@ func TestAccComputeDisk_deleteDetach(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeDiskDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeDisk_deleteDetach(instanceName, diskName), }, { @@ -680,7 +680,7 @@ func TestAccComputeDisk_deleteDetach(t *testing.T) { // listed as attached to the disk; the instance is created after the // disk. 
and the disk's properties aren't refreshed unless there's // another step - resource.TestStep{ + { Config: testAccComputeDisk_deleteDetach(instanceName, diskName), }, { @@ -706,7 +706,7 @@ func TestAccComputeDisk_deleteDetachIGM(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeDiskDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeDisk_deleteDetachIGM(diskName, mgrName), }, { @@ -718,7 +718,7 @@ func TestAccComputeDisk_deleteDetachIGM(t *testing.T) { // listed as attached to the disk; the instance is created after the // disk. and the disk's properties aren't refreshed unless there's // another step - resource.TestStep{ + { Config: testAccComputeDisk_deleteDetachIGM(diskName, mgrName), }, { @@ -727,7 +727,7 @@ func TestAccComputeDisk_deleteDetachIGM(t *testing.T) { ImportStateVerify: true, }, // Change the disk name to recreate the instances - resource.TestStep{ + { Config: testAccComputeDisk_deleteDetachIGM(diskName2, mgrName), }, { @@ -736,7 +736,7 @@ func TestAccComputeDisk_deleteDetachIGM(t *testing.T) { ImportStateVerify: true, }, // Add the extra step like before - resource.TestStep{ + { Config: testAccComputeDisk_deleteDetachIGM(diskName2, mgrName), }, { @@ -1308,7 +1308,7 @@ func TestAccComputeDisk_encryptionWithRSAEncryptedKey(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeDiskDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeDisk_encryptionWithRSAEncryptedKey(diskName), Check: resource.ComposeTestCheckFunc( testAccCheckComputeDiskExists( diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_forwarding_rule_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_forwarding_rule_test.go.erb index d31b00d97263..d39c8291820d 100644 --- 
a/mmv1/third_party/terraform/services/compute/resource_compute_forwarding_rule_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_forwarding_rule_test.go.erb @@ -20,19 +20,19 @@ func TestAccComputeForwardingRule_update(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeForwardingRule_basic(poolName, ruleName), }, - resource.TestStep{ + { ResourceName: "google_compute_forwarding_rule.foobar", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, }, - resource.TestStep{ + { Config: testAccComputeForwardingRule_update(poolName, ruleName), }, - resource.TestStep{ + { ResourceName: "google_compute_forwarding_rule.foobar", ImportState: true, ImportStateVerify: true, @@ -56,19 +56,19 @@ func TestAccComputeForwardingRule_ip(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeForwardingRule_ip(addrName, poolName, ruleName, addressRefFieldID), }, - resource.TestStep{ + { ResourceName: "google_compute_forwarding_rule.foobar", ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{"ip_address"}, // ignore ip_address because we've specified it by ID }, - resource.TestStep{ + { Config: testAccComputeForwardingRule_ip(addrName, poolName, ruleName, addressRefFieldRaw), }, - resource.TestStep{ + { ResourceName: "google_compute_forwarding_rule.foobar", ImportState: true, ImportStateVerify: true, @@ -124,11 +124,11 @@ func TestAccComputeForwardingRule_networkTier(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), Steps: []resource.TestStep{ - 
resource.TestStep{ + { Config: testAccComputeForwardingRule_networkTier(poolName, ruleName), }, - resource.TestStep{ + { ResourceName: "google_compute_forwarding_rule.foobar", ImportState: true, ImportStateVerify: true, @@ -151,11 +151,11 @@ func TestAccComputeForwardingRule_serviceDirectoryRegistrations(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeForwardingRule_serviceDirectoryRegistrations(poolName, ruleName, svcDirNamespace, serviceName), }, - resource.TestStep{ + { ResourceName: "google_compute_forwarding_rule.foobar", ImportState: true, ImportStateVerify: true, @@ -177,10 +177,10 @@ func TestAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(t *testing.T ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeForwardingRule_forwardingRuleVpcPscExample(context), }, - resource.TestStep{ + { ResourceName: "google_compute_forwarding_rule.default", ImportState: true, ImportStateVerify: true, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_global_address_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_global_address_test.go.erb index 5f15ed97ec42..a1aae92ed456 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_global_address_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_global_address_test.go.erb @@ -17,10 +17,10 @@ func TestAccComputeGlobalAddress_ipv6(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeGlobalAddressDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeGlobalAddress_ipv6(acctest.RandString(t, 10)), }, - 
resource.TestStep{ + { ResourceName: "google_compute_global_address.foobar", ImportState: true, ImportStateVerify: true, @@ -37,10 +37,10 @@ func TestAccComputeGlobalAddress_internal(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeGlobalAddressDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeGlobalAddress_internal(acctest.RandString(t, 10), acctest.RandString(t, 10)), }, - resource.TestStep{ + { ResourceName: "google_compute_global_address.foobar", ImportState: true, ImportStateVerify: true, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb index e549fd33182b..25e44caccacc 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_group_manager.go.erb @@ -1529,12 +1529,12 @@ func expandAllInstancesConfig(old []interface{}, new []interface{}) *compute.Ins for _, raw := range old { if raw != nil { data := raw.(map[string]interface{}) - for k, _ := range data["metadata"].(map[string]interface{}) { + for k := range data["metadata"].(map[string]interface{}) { if _, exist := properties.Metadata[k]; !exist { properties.NullFields = append(properties.NullFields, fmt.Sprintf("Metadata.%s", k)) } } - for k, _ := range data["labels"].(map[string]interface{}) { + for k := range data["labels"].(map[string]interface{}) { if _, exist := properties.Labels[k]; !exist { properties.NullFields = append(properties.NullFields, fmt.Sprintf("Labels.%s", k)) } diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index 586fd0dd7739..e2eb240b706a 100644 --- 
a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -906,17 +906,17 @@ func TestAccComputeInstance_with375GbScratchDisk(t *testing.T) { testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceScratchDisk(&instance, []map[string]string{ - map[string]string{ + { "interface": "NVME", }, - map[string]string{ + { "interface": "SCSI", }, - map[string]string{ + { "interface": "NVME", "deviceName": "nvme-local-ssd", }, - map[string]string{ + { "interface": "SCSI", "deviceName": "scsi-local-ssd", }, @@ -948,22 +948,22 @@ func TestAccComputeInstance_with18TbScratchDisk(t *testing.T) { testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceScratchDisk(&instance, []map[string]string{ - map[string]string{ + { "interface": "NVME", }, - map[string]string{ + { "interface": "NVME", }, - map[string]string{ + { "interface": "NVME", }, - map[string]string{ + { "interface": "NVME", }, - map[string]string{ + { "interface": "NVME", }, - map[string]string{ + { "interface": "NVME", }, }), diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_autoscaler_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_autoscaler_test.go.erb index 1d6f65491513..842f6792ebe4 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_autoscaler_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_autoscaler_test.go.erb @@ -20,7 +20,7 @@ func TestAccComputeRegionAutoscaler_update(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeRegionAutoscaler_basic(itName, tpName, igmName, autoscalerName), }, { 
@@ -28,7 +28,7 @@ func TestAccComputeRegionAutoscaler_update(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeRegionAutoscaler_update(itName, tpName, igmName, autoscalerName), }, { @@ -53,10 +53,10 @@ func TestAccComputeRegionAutoscaler_scaleDownControl(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeRegionAutoscaler_scaleDownControl(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_region_autoscaler.foobar", ImportState: true, ImportStateVerify: true, @@ -78,10 +78,10 @@ func TestAccComputeRegionAutoscaler_scalingSchedule(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeRegionAutoscaler_scalingSchedule(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_region_autoscaler.foobar", ImportState: true, ImportStateVerify: true, @@ -103,10 +103,10 @@ func TestAccComputeRegionAutoscaler_scaleInControl(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionAutoscalerDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeRegionAutoscaler_scaleInControl(itName, tpName, igmName, autoscalerName), }, - resource.TestStep{ + { ResourceName: "google_compute_region_autoscaler.foobar", ImportState: true, ImportStateVerify: true, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_backend_service_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_backend_service_test.go.erb index 69876a12839d..029a3dd22ca7 100644 --- 
a/mmv1/third_party/terraform/services/compute/resource_compute_region_backend_service_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_backend_service_test.go.erb @@ -22,7 +22,7 @@ func TestAccComputeRegionBackendService_basic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeRegionBackendService_basic(serviceName, checkName), }, { @@ -30,7 +30,7 @@ func TestAccComputeRegionBackendService_basic(t *testing.T) { ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeRegionBackendService_basicModified( serviceName, checkName, extraCheckName), }, @@ -54,7 +54,7 @@ func TestAccComputeRegionBackendService_ilbBasic_withUnspecifiedProtocol(t *test ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeRegionBackendService_ilbBasic_withUnspecifiedProtocol(serviceName, checkName), }, { @@ -142,7 +142,7 @@ func TestAccComputeRegionBackendService_withBackendMultiNic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeRegionBackendService_withBackendMultiNic( serviceName, net1Name, net2Name, igName, itName, checkName, 10), }, @@ -166,7 +166,7 @@ func TestAccComputeRegionBackendService_withConnectionDrainingAndUpdate(t *testi ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccComputeRegionBackendService_withConnectionDraining(serviceName, checkName, 10), 
}, { @@ -174,7 +174,7 @@ func TestAccComputeRegionBackendService_withConnectionDrainingAndUpdate(t *testi ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccComputeRegionBackendService_basic(serviceName, checkName), }, { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb index 61fa831bd9bd..bbba450d3d5a 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb @@ -3405,7 +3405,7 @@ resource "google_compute_region_instance_template" "foobar" { automatic_restart = false provisioning_model = "SPOT" instance_termination_action = "DELETE" - <% unless version == 'ga' -%> +<% unless version == 'ga' -%> max_run_duration { nanos = 123 seconds = 60 diff --git a/mmv1/third_party/terraform/services/container/node_config.go.erb b/mmv1/third_party/terraform/services/container/node_config.go.erb index 8dc9247c46ac..73bb45d887b4 100644 --- a/mmv1/third_party/terraform/services/container/node_config.go.erb +++ b/mmv1/third_party/terraform/services/container/node_config.go.erb @@ -35,35 +35,35 @@ func schemaContainerdConfig() *schema.Schema { Description: "Parameters for containerd configuration.", MaxItems: 1, Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "private_registry_access_config": &schema.Schema{ + "private_registry_access_config": { Type: schema.TypeList, Optional: true, Description: "Parameters for private container registries configuration.", MaxItems: 1, Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "enabled": &schema.Schema{ + "enabled": { Type: schema.TypeBool, Required: true, Description: "Whether or not private registries are configured.", }, - "certificate_authority_domain_config": 
&schema.Schema{ + "certificate_authority_domain_config": { Type: schema.TypeList, Optional: true, Description: "Parameters for configuring CA certificate and domains.", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "fqdns": &schema.Schema{ + "fqdns": { Type: schema.TypeList, Required: true, Description: "List of fully-qualified-domain-names. IPv4s and port specification are supported.", Elem: &schema.Schema{Type: schema.TypeString}, }, - "gcp_secret_manager_certificate_config": &schema.Schema{ + "gcp_secret_manager_certificate_config": { Type: schema.TypeList, Required: true, Description: "Parameters for configuring a certificate hosted in GCP SecretManager.", MaxItems: 1, Elem: &schema.Resource{Schema: map[string]*schema.Schema{ - "secret_uri": &schema.Schema{ + "secret_uri": { Type: schema.TypeString, Required: true, Description: "URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.", @@ -134,7 +134,7 @@ func schemaNodeConfig() *schema.Schema { Description: `Type of the disk attached to each node. 
Such as pd-standard, pd-balanced or pd-ssd`, }, - "guest_accelerator": &schema.Schema{ + "guest_accelerator": { Type: schema.TypeList, Optional: true, Computed: true, @@ -145,20 +145,20 @@ func schemaNodeConfig() *schema.Schema { Description: `List of the type and count of accelerator cards attached to the instance.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "count": &schema.Schema{ + "count": { Type: schema.TypeInt, Required: true, ForceNew: true, Description: `The number of the accelerator cards exposed to an instance.`, }, - "type": &schema.Schema{ + "type": { Type: schema.TypeString, Required: true, ForceNew: true, DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, Description: `The accelerator type resource name.`, }, - "gpu_driver_installation_config": &schema.Schema{ + "gpu_driver_installation_config": { Type: schema.TypeList, MaxItems: 1, Optional: true, @@ -167,7 +167,7 @@ func schemaNodeConfig() *schema.Schema { Description: `Configuration for auto installation of GPU driver.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "gpu_driver_version": &schema.Schema{ + "gpu_driver_version": { Type: schema.TypeString, Required: true, ForceNew: true, @@ -177,13 +177,13 @@ func schemaNodeConfig() *schema.Schema { }, }, }, - "gpu_partition_size": &schema.Schema{ + "gpu_partition_size": { Type: schema.TypeString, Optional: true, ForceNew: true, Description: `Size of partitions to create on the GPU. 
Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)`, }, - "gpu_sharing_config": &schema.Schema{ + "gpu_sharing_config": { Type: schema.TypeList, MaxItems: 1, Optional: true, @@ -192,13 +192,13 @@ func schemaNodeConfig() *schema.Schema { Description: `Configuration for GPU sharing.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "gpu_sharing_strategy": &schema.Schema{ + "gpu_sharing_strategy": { Type: schema.TypeString, Required: true, ForceNew: true, Description: `The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)`, }, - "max_shared_clients_per_gpu": &schema.Schema{ + "max_shared_clients_per_gpu": { Type: schema.TypeInt, Required: true, ForceNew: true, @@ -322,7 +322,7 @@ func schemaNodeConfig() *schema.Schema { ForceNew: true, Description: `Disk image to create the secondary boot disk from`, }, - "mode": &schema.Schema{ + "mode": { Type: schema.TypeString, Optional: true, ForceNew: true, @@ -455,7 +455,7 @@ func schemaNodeConfig() *schema.Schema { Description: `The list of instance tags applied to all nodes.`, }, - "shielded_instance_config": &schema.Schema{ + "shielded_instance_config": { Type: schema.TypeList, Optional: true, Computed: true, @@ -641,7 +641,7 @@ func schemaNodeConfig() *schema.Schema { Description: `Setting this field will assign instances of this pool to run on the specified node group. 
This is useful for running workloads on sole tenant nodes.`, }, - "advanced_machine_features": &schema.Schema{ + "advanced_machine_features": { Type: schema.TypeList, Optional: true, MaxItems: 1, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index c68a9d196f63..c128e2604441 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -629,7 +629,7 @@ func ResourceContainerCluster() *schema.Resource { ForceNew: true, Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`, }, - "shielded_instance_config": &schema.Schema{ + "shielded_instance_config": { Type: schema.TypeList, Optional: true, Description: `Shielded Instance options.`, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb index f1e76dc1b3cd..5e73abe2175f 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb @@ -398,7 +398,7 @@ func resourceContainerClusterResourceV1() *schema.Resource { ForceNew: true, Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`, }, - "shielded_instance_config": &schema.Schema{ + "shielded_instance_config": { Type: schema.TypeList, Optional: true, Description: `Shielded Instance options.`, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb index d405dc7daf90..5a917f5cf280 100644 --- 
a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb @@ -2059,7 +2059,7 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerCluster_withNodePoolAutoscaling(clusterName, npName, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "1"), @@ -2072,7 +2072,7 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"deletion_protection"}, }, - resource.TestStep{ + { Config: testAccContainerCluster_withNodePoolUpdateAutoscaling(clusterName, npName, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "1"), @@ -2085,7 +2085,7 @@ func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"deletion_protection"}, }, - resource.TestStep{ + { Config: testAccContainerCluster_withNodePoolBasic(clusterName, npName, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count"), @@ -2115,7 +2115,7 @@ func TestAccContainerCluster_withNodePoolCIA(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerRegionalCluster_withNodePoolCIA(clusterName, npName, 
networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "0"), @@ -2131,7 +2131,7 @@ func TestAccContainerCluster_withNodePoolCIA(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, - resource.TestStep{ + { Config: testAccContainerRegionalClusterUpdate_withNodePoolCIA(clusterName, npName, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "0"), @@ -2147,7 +2147,7 @@ func TestAccContainerCluster_withNodePoolCIA(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, }, - resource.TestStep{ + { Config: testAccContainerRegionalCluster_withNodePoolBasic(clusterName, npName, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count"), @@ -10768,7 +10768,7 @@ func TestAccContainerCluster_privateRegistry(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerCluster_privateRegistryEnabled(secretID, clusterName, networkName, subnetworkName), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr( @@ -10816,7 +10816,7 @@ func TestAccContainerCluster_privateRegistry(t *testing.T) { ), ), }, - resource.TestStep{ + { Config: testAccContainerCluster_withNodePoolPrivateRegistry(secretID, clusterName, nodePoolName, networkName, subnetworkName), }, { @@ -10825,7 +10825,7 @@ func TestAccContainerCluster_privateRegistry(t *testing.T) { ImportStateVerify: true, 
ImportStateVerifyIgnore: []string{"deletion_protection"}, }, - resource.TestStep{ + { Config: testAccContainerCluster_withNodeConfigPrivateRegistry(secretID, clusterName, networkName, subnetworkName), }, { diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb index 370354cdbfce..b26abb2d57a2 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool.go.erb @@ -134,42 +134,42 @@ var schemaBlueGreenSettings = &schema.Schema{ } var schemaNodePool = map[string]*schema.Schema{ - "autoscaling": &schema.Schema{ + "autoscaling": { Type: schema.TypeList, Optional: true, MaxItems: 1, Description: `Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "min_node_count": &schema.Schema{ + "min_node_count": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntAtLeast(0), Description: `Minimum number of nodes per zone in the node pool. Must be >=0 and <= max_node_count. Cannot be used with total limits.`, }, - "max_node_count": &schema.Schema{ + "max_node_count": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntAtLeast(0), Description: `Maximum number of nodes per zone in the node pool. Must be >= min_node_count. Cannot be used with total limits.`, }, - "total_min_node_count": &schema.Schema{ + "total_min_node_count": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntAtLeast(0), Description: `Minimum number of all nodes in the node pool. Must be >=0 and <= total_max_node_count. 
Cannot be used with per zone limits.`, }, - "total_max_node_count": &schema.Schema{ + "total_max_node_count": { Type: schema.TypeInt, Optional: true, ValidateFunc: validation.IntAtLeast(0), Description: `Maximum number of all nodes in the node pool. Must be >= total_min_node_count. Cannot be used with per zone limits.`, }, - "location_policy": &schema.Schema{ + "location_policy": { Type: schema.TypeString, Optional: true, Computed: true, @@ -226,7 +226,7 @@ var schemaNodePool = map[string]*schema.Schema{ }, }, - "max_pods_per_node": &schema.Schema{ + "max_pods_per_node": { Type: schema.TypeInt, Optional: true, ForceNew: true, @@ -279,7 +279,7 @@ var schemaNodePool = map[string]*schema.Schema{ }, }, - "initial_node_count": &schema.Schema{ + "initial_node_count": { Type: schema.TypeInt, Optional: true, ForceNew: true, @@ -326,7 +326,7 @@ var schemaNodePool = map[string]*schema.Schema{ }, }, - "name": &schema.Schema{ + "name": { Type: schema.TypeString, Optional: true, Computed: true, @@ -334,7 +334,7 @@ var schemaNodePool = map[string]*schema.Schema{ Description: `The name of the node pool. 
If left blank, Terraform will auto-generate a unique name.`, }, - "name_prefix": &schema.Schema{ + "name_prefix": { Type: schema.TypeString, Optional: true, Computed: true, diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index b89e0bae993d..c6b941511c48 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -25,10 +25,10 @@ func TestAccContainerNodePool_basic(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -180,10 +180,10 @@ func TestAccContainerNodePool_namePrefix(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_namePrefix(cluster, "tf-np-", networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -207,10 +207,10 @@ func TestAccContainerNodePool_noName(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_noName(cluster, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -273,10 +273,10 @@ func 
TestAccContainerNodePool_withNodeConfig(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_withNodeConfig(cluster, nodePool, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np_with_node_config", ImportState: true, ImportStateVerify: true, @@ -284,10 +284,10 @@ func TestAccContainerNodePool_withNodeConfig(t *testing.T) { // but will still cause an import diff ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint"}, }, - resource.TestStep{ + { Config: testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np_with_node_config", ImportState: true, ImportStateVerify: true, @@ -312,18 +312,18 @@ func TestAccContainerNodePool_withTaintsUpdate(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_basic(cluster, nodePool, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccContainerNodePool_withTaintsUpdate(cluster, nodePool, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -926,10 +926,10 @@ func TestAccContainerNodePool_withGPU(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_withGPU(cluster, np, networkName, subnetworkName), }, - 
resource.TestStep{ + { ResourceName: "google_container_node_pool.np_with_gpu", ImportState: true, ImportStateVerify: true, @@ -957,7 +957,7 @@ func TestAccContainerNodePool_withManagement(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_withManagement(cluster, nodePool, "", networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( @@ -968,12 +968,12 @@ func TestAccContainerNodePool_withManagement(t *testing.T) { "google_container_node_pool.np_with_management", "management.0.auto_upgrade", "true"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np_with_management", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccContainerNodePool_withManagement(cluster, nodePool, management, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr( @@ -984,7 +984,7 @@ func TestAccContainerNodePool_withManagement(t *testing.T) { "google_container_node_pool.np_with_management", "management.0.auto_upgrade", "false"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np_with_management", ImportState: true, ImportStateVerify: true, @@ -1006,10 +1006,10 @@ func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_withNodeConfigScopeAlias(cluster, np, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np_with_node_config_scope_alias", ImportState: true, ImportStateVerify: true, @@ -1032,38 +1032,38 @@ func TestAccContainerNodePool_regionalAutoscaling(t *testing.T) { 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_regionalAutoscaling(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "1"), resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "3"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccContainerNodePool_updateAutoscaling(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "0"), resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "5"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"), resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -1089,7 +1089,7 @@ func TestAccContainerNodePool_totalSize(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_totalSize(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( 
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_min_node_count", "4"), @@ -1097,12 +1097,12 @@ func TestAccContainerNodePool_totalSize(t *testing.T) { resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.location_policy", "BALANCED"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccContainerNodePool_updateTotalSize(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_min_node_count", "2"), @@ -1110,19 +1110,19 @@ func TestAccContainerNodePool_totalSize(t *testing.T) { resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.location_policy", "ANY"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccContainerNodePool_basicTotalSize(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"), resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -1147,38 +1147,38 @@ func TestAccContainerNodePool_autoscaling(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_autoscaling(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "1"), 
resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "3"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccContainerNodePool_updateAutoscaling(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "0"), resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "5"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName), Check: resource.ComposeTestCheckFunc( resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"), resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"), ), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -1283,10 +1283,10 @@ func TestAccContainerNodePool_regionalClusters(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_regionalClusters(cluster, np, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -1388,10 +1388,10 @@ func TestAccContainerNodePool_shieldedInstanceConfig(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_shieldedInstanceConfig(cluster, np, 
networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -1486,10 +1486,10 @@ func TestAccContainerNodePool_ephemeralStorageConfig(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_ephemeralStorageConfig(cluster, np, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -1657,15 +1657,15 @@ func TestAccContainerNodePool_secondaryBootDisks(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_secondaryBootDisks(cluster, np, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np-no-mode", ImportState: true, ImportStateVerify: true, @@ -1738,10 +1738,10 @@ func TestAccContainerNodePool_gcfsConfig(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName), }, - resource.TestStep{ + { ResourceName: "google_container_node_pool.np", ImportState: true, ImportStateVerify: true, @@ -4772,7 +4772,7 @@ func TestAccContainerNodePool_privateRegistry(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: 
testAccContainerNodePool_privateRegistryEnabled(secretID, cluster, nodepool, networkName, subnetworkName), Check: resource.ComposeAggregateTestCheckFunc( resource.TestCheckResourceAttr( diff --git a/mmv1/third_party/terraform/services/dns/resource_dns_managed_zone_test.go.erb b/mmv1/third_party/terraform/services/dns/resource_dns_managed_zone_test.go.erb index fbbd8ad3bbcf..9bf41938da95 100644 --- a/mmv1/third_party/terraform/services/dns/resource_dns_managed_zone_test.go.erb +++ b/mmv1/third_party/terraform/services/dns/resource_dns_managed_zone_test.go.erb @@ -57,18 +57,18 @@ func TestAccDNSManagedZone_privateUpdate(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccDnsManagedZone_privateUpdate(zoneSuffix, "network-1", "network-2"), }, - resource.TestStep{ + { ResourceName: "google_dns_managed_zone.private", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccDnsManagedZone_privateUpdate(zoneSuffix, "network-2", "network-3"), }, - resource.TestStep{ + { ResourceName: "google_dns_managed_zone.private", ImportState: true, ImportStateVerify: true, @@ -87,18 +87,18 @@ func TestAccDNSManagedZone_dnssec_update(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccDnsManagedZone_dnssec_on(zoneSuffix), }, - resource.TestStep{ + { ResourceName: "google_dns_managed_zone.foobar", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccDnsManagedZone_dnssec_off(zoneSuffix), }, - resource.TestStep{ + { ResourceName: "google_dns_managed_zone.foobar", ImportState: true, ImportStateVerify: true, @@ -117,10 +117,10 @@ func TestAccDNSManagedZone_dnssec_empty(t *testing.T) { ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccDnsManagedZone_dnssec_empty(zoneSuffix), }, - resource.TestStep{ + { ResourceName: "google_dns_managed_zone.foobar", ImportState: true, ImportStateVerify: true, @@ -139,18 +139,18 @@ func TestAccDNSManagedZone_privateForwardingUpdate(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccDnsManagedZone_privateForwardingUpdate(zoneSuffix, "172.16.1.10", "172.16.1.20", "default", "private"), }, - resource.TestStep{ + { ResourceName: "google_dns_managed_zone.private", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccDnsManagedZone_privateForwardingUpdate(zoneSuffix, "172.16.1.10", "192.168.1.1", "private", "default"), }, - resource.TestStep{ + { ResourceName: "google_dns_managed_zone.private", ImportState: true, ImportStateVerify: true, @@ -211,10 +211,10 @@ func TestAccDNSManagedZone_reverseLookup(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccDnsManagedZone_reverseLookup(zoneSuffix), }, - resource.TestStep{ + { ResourceName: "google_dns_managed_zone.reverse", ImportState: true, ImportStateVerify: true, diff --git a/mmv1/third_party/terraform/services/dns/resource_dns_policy_test.go.erb b/mmv1/third_party/terraform/services/dns/resource_dns_policy_test.go.erb index 5ccb7820e919..fe4f51451333 100644 --- a/mmv1/third_party/terraform/services/dns/resource_dns_policy_test.go.erb +++ b/mmv1/third_party/terraform/services/dns/resource_dns_policy_test.go.erb @@ -19,18 +19,18 @@ func TestAccDNSPolicy_update(t *testing.T) { ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckDNSPolicyDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccDnsPolicy_privateUpdate(policySuffix, "true", "172.16.1.10", "172.16.1.30", "network-1"), }, - resource.TestStep{ + { ResourceName: "google_dns_policy.example-policy", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccDnsPolicy_privateUpdate(policySuffix, "false", "172.16.1.20", "172.16.1.40", "network-2"), }, - resource.TestStep{ + { ResourceName: "google_dns_policy.example-policy", ImportState: true, ImportStateVerify: true, diff --git a/mmv1/third_party/terraform/services/dns/resource_dns_response_policy_rule_test.go.erb b/mmv1/third_party/terraform/services/dns/resource_dns_response_policy_rule_test.go.erb index f87e8da12235..be2387214533 100644 --- a/mmv1/third_party/terraform/services/dns/resource_dns_response_policy_rule_test.go.erb +++ b/mmv1/third_party/terraform/services/dns/resource_dns_response_policy_rule_test.go.erb @@ -20,18 +20,18 @@ func TestAccDNSResponsePolicyRule_update(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), CheckDestroy: testAccCheckDNSResponsePolicyRuleDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccDnsResponsePolicyRule_privateUpdate(responsePolicyRuleSuffix, "network-1"), }, - resource.TestStep{ + { ResourceName: "google_dns_response_policy_rule.example-response-policy-rule", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccDnsResponsePolicyRule_privateUpdate(responsePolicyRuleSuffix, "network-2"), }, - resource.TestStep{ + { ResourceName: "google_dns_response_policy_rule.example-response-policy-rule", ImportState: true, ImportStateVerify: true, diff --git a/mmv1/third_party/terraform/services/dns/resource_dns_response_policy_test.go.erb b/mmv1/third_party/terraform/services/dns/resource_dns_response_policy_test.go.erb index 
5029770f1cef..8d89b291831a 100644 --- a/mmv1/third_party/terraform/services/dns/resource_dns_response_policy_test.go.erb +++ b/mmv1/third_party/terraform/services/dns/resource_dns_response_policy_test.go.erb @@ -20,26 +20,26 @@ func TestAccDNSResponsePolicy_update(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), CheckDestroy: testAccCheckDNSResponsePolicyDestroyProducer(t), Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccDnsResponsePolicy_privateUpdate(responsePolicySuffix, "network-1"), }, - resource.TestStep{ + { ResourceName: "google_dns_response_policy.example-response-policy", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccDnsResponsePolicy_privateUpdate(responsePolicySuffix, "network-2"), }, - resource.TestStep{ + { ResourceName: "google_dns_response_policy.example-response-policy", ImportState: true, ImportStateVerify: true, }, - resource.TestStep{ + { Config: testAccDnsResponsePolicy_removeNetworks(responsePolicySuffix), }, - resource.TestStep{ + { ResourceName: "google_dns_response_policy.example-response-policy", ImportState: true, ImportStateVerify: true, From 818d8b7a2cfc71c9fc8f3db2de3b991bdd0b2247 Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Mon, 17 Jun 2024 09:59:23 -0700 Subject: [PATCH 155/356] Increase timeouts on `google_sql_database_instance` to 90m (#10960) --- .../services/sql/resource_sql_database_instance.go.erb | 6 +++--- .../website/docs/r/sql_database_instance.html.markdown | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb index 61772ba8567c..84de993c8d7e 100644 --- a/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb +++ b/mmv1/third_party/terraform/services/sql/resource_sql_database_instance.go.erb @@ -129,9 +129,9 @@ func 
ResourceSqlDatabaseInstance() *schema.Resource { }, Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(30 * time.Minute), - Delete: schema.DefaultTimeout(30 * time.Minute), + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: schema.DefaultTimeout(90 * time.Minute), }, CustomizeDiff: customdiff.All( diff --git a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown index 806bcd2bdc02..3fbc06d8fc57 100644 --- a/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/sql_database_instance.html.markdown @@ -578,9 +578,9 @@ performing filtering in a Terraform config. `google_sql_database_instance` provides the following [Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: -- `create` - Default is 40 minutes. -- `update` - Default is 30 minutes. -- `delete` - Default is 30 minutes. +- `create` - Default is 90 minutes. +- `update` - Default is 90 minutes. +- `delete` - Default is 90 minutes. 
## Import From 82c34caf69f1ddd049c3f4d947dea3449998b68e Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 17 Jun 2024 12:29:01 -0500 Subject: [PATCH 156/356] go rewrite - refresh templates and add compute go_yaml (#10976) --- mmv1/products/compute/go_Address.yaml | 237 ++ mmv1/products/compute/go_Autoscaler.yaml | 423 +++ mmv1/products/compute/go_BackendBucket.yaml | 281 ++ .../compute/go_BackendBucketSignedUrlKey.yaml | 98 + mmv1/products/compute/go_BackendService.yaml | 1319 +++++++++ .../go_BackendServiceSignedUrlKey.yaml | 98 + mmv1/products/compute/go_Disk.yaml | 504 ++++ .../go_DiskResourcePolicyAttachment.yaml | 88 + mmv1/products/compute/go_DiskType.yaml | 114 + .../compute/go_ExternalVpnGateway.yaml | 123 + mmv1/products/compute/go_Firewall.yaml | 331 +++ mmv1/products/compute/go_ForwardingRule.yaml | 652 +++++ mmv1/products/compute/go_GlobalAddress.yaml | 155 + .../compute/go_GlobalForwardingRule.yaml | 524 ++++ .../compute/go_GlobalNetworkEndpoint.yaml | 105 + .../go_GlobalNetworkEndpointGroup.yaml | 93 + mmv1/products/compute/go_HaVpnGateway.yaml | 162 ++ mmv1/products/compute/go_HealthCheck.yaml | 848 ++++++ mmv1/products/compute/go_HttpHealthCheck.yaml | 124 + .../products/compute/go_HttpsHealthCheck.yaml | 124 + mmv1/products/compute/go_Image.yaml | 275 ++ mmv1/products/compute/go_Instance.yaml | 659 +++++ mmv1/products/compute/go_InstanceGroup.yaml | 121 + .../compute/go_InstanceGroupManager.yaml | 219 ++ .../compute/go_InstanceGroupMembership.yaml | 107 + .../compute/go_InstanceGroupNamedPort.yaml | 105 + .../products/compute/go_InstanceSettings.yaml | 80 + mmv1/products/compute/go_Interconnect.yaml | 403 +++ .../compute/go_InterconnectAttachment.yaml | 340 +++ mmv1/products/compute/go_License.yaml | 47 + mmv1/products/compute/go_MachineImage.yaml | 140 + mmv1/products/compute/go_MachineType.yaml | 131 + .../compute/go_ManagedSslCertificate.yaml | 147 + mmv1/products/compute/go_Network.yaml | 181 ++ .../compute/go_NetworkAttachment.yaml | 
206 ++ .../go_NetworkEdgeSecurityService.yaml | 120 + mmv1/products/compute/go_NetworkEndpoint.yaml | 127 + .../compute/go_NetworkEndpointGroup.yaml | 148 + .../products/compute/go_NetworkEndpoints.yaml | 138 + .../compute/go_NetworkFirewallPolicy.yaml | 81 + .../go_NetworkPeeringRoutesConfig.yaml | 109 + mmv1/products/compute/go_NodeGroup.yaml | 213 ++ mmv1/products/compute/go_NodeTemplate.yaml | 155 + .../go_OrganizationSecurityPolicy.yaml | 92 + ...OrganizationSecurityPolicyAssociation.yaml | 75 + .../go_OrganizationSecurityPolicyRule.yaml | 201 ++ mmv1/products/compute/go_PacketMirroring.yaml | 202 ++ .../compute/go_PerInstanceConfig.yaml | 242 ++ .../compute/go_ProjectCloudArmorTier.yaml | 73 + .../compute/go_PublicAdvertisedPrefix.yaml | 77 + .../compute/go_PublicDelegatedPrefix.yaml | 88 + mmv1/products/compute/go_Region.yaml | 132 + .../products/compute/go_RegionAutoscaler.yaml | 403 +++ .../compute/go_RegionBackendService.yaml | 1279 +++++++++ .../products/compute/go_RegionCommitment.yaml | 200 ++ mmv1/products/compute/go_RegionDisk.yaml | 369 +++ ...go_RegionDiskResourcePolicyAttachment.yaml | 89 + mmv1/products/compute/go_RegionDiskType.yaml | 110 + .../compute/go_RegionHealthCheck.yaml | 857 ++++++ .../go_RegionInstanceGroupManager.yaml | 226 ++ .../compute/go_RegionNetworkEndpoint.yaml | 148 + .../go_RegionNetworkEndpointGroup.yaml | 320 +++ .../go_RegionNetworkFirewallPolicy.yaml | 87 + .../compute/go_RegionPerInstanceConfig.yaml | 243 ++ .../compute/go_RegionSecurityPolicy.yaml | 195 ++ .../compute/go_RegionSecurityPolicyRule.yaml | 574 ++++ .../compute/go_RegionSslCertificate.yaml | 136 + mmv1/products/compute/go_RegionSslPolicy.yaml | 137 + .../compute/go_RegionTargetHttpProxy.yaml | 106 + .../compute/go_RegionTargetHttpsProxy.yaml | 183 ++ .../compute/go_RegionTargetTcpProxy.yaml | 116 + mmv1/products/compute/go_RegionUrlMap.yaml | 2073 ++++++++++++++ mmv1/products/compute/go_Reservation.yaml | 243 ++ mmv1/products/compute/go_ResourcePolicy.yaml | 
363 +++ mmv1/products/compute/go_Route.yaml | 256 ++ mmv1/products/compute/go_Router.yaml | 199 ++ mmv1/products/compute/go_RouterNat.yaml | 464 +++ .../compute/go_SecurityPolicyRule.yaml | 447 +++ .../compute/go_ServiceAttachment.yaml | 243 ++ mmv1/products/compute/go_Snapshot.yaml | 248 ++ mmv1/products/compute/go_SslCertificate.yaml | 124 + mmv1/products/compute/go_SslPolicy.yaml | 136 + mmv1/products/compute/go_Subnetwork.yaml | 402 +++ mmv1/products/compute/go_TargetGrpcProxy.yaml | 117 + mmv1/products/compute/go_TargetHttpProxy.yaml | 118 + .../products/compute/go_TargetHttpsProxy.yaml | 220 ++ mmv1/products/compute/go_TargetInstance.yaml | 142 + mmv1/products/compute/go_TargetSslProxy.yaml | 145 + mmv1/products/compute/go_TargetTcpProxy.yaml | 110 + mmv1/products/compute/go_UrlMap.yaml | 2530 +++++++++++++++++ mmv1/products/compute/go_VpnGateway.yaml | 107 + mmv1/products/compute/go_VpnTunnel.yaml | 229 ++ mmv1/products/compute/go_Zone.yaml | 117 + mmv1/products/compute/go_product.yaml | 24 + mmv1/template-converter.go | 5 +- .../constants/go/access_approval.go.tmpl | 4 +- .../go/artifact_registry_repository.go.tmpl | 4 +- .../constants/go/backend_service.go.tmpl | 4 +- .../constants/go/billing_budget.tmpl | 4 +- .../go/binaryauthorization_policy.tmpl | 4 +- .../terraform/constants/go/cert_manager.tmpl | 4 +- .../go/compute_service_attachment.go.tmpl | 4 +- .../constants/go/datastream_stream.go.tmpl | 4 +- .../constants/go/dlp_stored_info_type.go.tmpl | 4 +- .../terraform/constants/go/firewall.tmpl | 4 +- .../go/monitoring_alert_policy.go.tmpl | 4 +- .../constants/go/network_endpoints.go.tmpl | 4 +- .../go/network_services_gateway.go.tmpl | 4 +- .../go/region_backend_service.go.tmpl | 4 +- .../constants/go/region_ssl_policy.tmpl | 4 +- .../terraform/constants/go/router.go.tmpl | 4 +- .../terraform/constants/go/router_nat.go.tmpl | 4 +- ...ce_networking_vpc_service_controls.go.tmpl | 76 + .../go/source_repo_repository.go.tmpl | 4 +- 
.../constants/go/spanner_database.go.tmpl | 4 +- .../constants/go/spanner_instance.go.tmpl | 4 +- .../go/spanner_instance_config.go.tmpl | 4 +- .../terraform/constants/go/ssl_policy.tmpl | 4 +- .../constants/go/subscription.go.tmpl | 4 +- .../constants/go/tagtemplate_fields.go.tmpl | 4 +- .../go/monitoring_monitored_project.go.tmpl | 4 +- ...ce_networking_vpc_service_controls.go.tmpl | 1 + .../array_resourceref_with_validation.go.tmpl | 4 +- .../terraform/custom_expand/go/base64.go.tmpl | 4 +- .../go/bigquery_access_role.go.tmpl | 4 +- .../go/bigquery_dataset_ref.go.tmpl | 4 +- .../go/bigquery_table_ref.go.tmpl | 4 +- .../go/bigquery_table_ref_array.go.tmpl | 4 +- .../go/bigtable_app_profile_routing.tmpl | 4 +- .../go/binaryauthorization_attestors.tmpl | 4 +- .../custom_expand/go/bool_to_object.go.tmpl | 4 +- .../go/bool_to_upper_string.tmpl | 4 +- ...ger_certificate_construct_full_url.go.tmpl | 4 +- .../custom_expand/go/compute_full_url.tmpl | 4 +- .../go/computed_lite_subscription_topic.tmpl | 4 +- .../go/computed_subscription_topic.tmpl | 4 +- .../go/container_analysis_note.tmpl | 4 +- ...ttached_cluster_authorization_user.go.tmpl | 4 +- ...inerattached_cluster_empty_logging.go.tmpl | 4 +- .../custom_expand/go/data_catalog_tag.go.tmpl | 4 +- .../go/datastream_stream_dataset_id.go.tmpl | 4 +- .../go/days_to_duration_string.go.tmpl | 4 +- .../go/default_to_project.go.tmpl | 4 +- .../go/disk_consistency_group_policy.tmpl | 4 +- ...ged_zone_private_visibility_config.go.tmpl | 4 +- .../go/firewall_log_config.go.tmpl | 4 +- .../custom_expand/go/gke_hub_membership.tmpl | 4 +- .../custom_expand/go/json_schema.tmpl | 4 +- .../custom_expand/go/json_value.tmpl | 4 +- .../go/name_or_name_prefix.go.tmpl | 4 +- ..._management_connectivity_test_name.go.tmpl | 4 +- .../go/preserved_state_disks.go.tmpl | 4 +- ...tion_config_throughput_reservation.go.tmpl | 4 +- .../go/redis_instance_authorized_network.tmpl | 4 +- .../go/reference_to_backend.tmpl | 4 +- 
.../go/resourceref_with_validation.go.tmpl | 4 +- .../go/secret_version_enable.go.tmpl | 4 +- .../go/secret_version_secret_data.go.tmpl | 4 +- .../custom_expand/go/self_link_from_name.tmpl | 4 +- .../custom_expand/go/shortname_to_url.go.tmpl | 4 +- .../go/spanner_instance_config.go.tmpl | 4 +- .../go/subnetwork_log_config.go.tmpl | 4 +- ..._backup_policy_start_times_flatten.go.tmpl | 4 +- ...alloydb_cluster_input_user_flatten.go.tmpl | 4 +- .../go/apigee_organization_property.go.tmpl | 4 +- ...ersion_automatic_scaling_handlenil.go.tmpl | 4 +- .../go/bigquery_connection_flatten.go.tmpl | 4 +- .../go/bigquery_dataset_location.go.tmpl | 4 +- .../go/bigquery_dataset_ref.go.tmpl | 4 +- .../go/bigquery_kms_version.go.tmpl | 4 +- ...gquery_table_ref_copy_sourcetables.go.tmpl | 4 +- .../go/bigtable_app_profile_routing.tmpl | 4 +- .../go/cloudbuild_approval_required.go.tmpl | 4 +- .../clouddomains_ignore_numbers_admin.go.tmpl | 4 +- ...ddomains_ignore_numbers_registrant.go.tmpl | 4 +- ...uddomains_ignore_numbers_technical.go.tmpl | 4 +- ...dfunctions2_function_source_bucket.go.tmpl | 4 +- ...dfunctions2_function_source_object.go.tmpl | 4 +- .../go/cloudrun_ignore_force_override.go.tmpl | 4 +- ...loudscheduler_job_appenginerouting.go.tmpl | 4 +- .../cloudtasks_queue_appenginerouting.go.tmpl | 4 +- .../go/compute_router_range.go.tmpl | 4 +- ...apshot_snapshot_encryption_raw_key.go.tmpl | 4 +- ...umer_quote_override_override_value.go.tmpl | 4 +- ...ttached_cluster_authorization_user.go.tmpl | 4 +- .../go/data_catalog_tag.go.tmpl | 4 +- ...ydb_settings_initial_user_password.go.tmpl | 4 +- ...le_cloudsql_settings_root_password.go.tmpl | 4 +- ..._connection_profile_mysql_password.go.tmpl | 4 +- ...n_profile_mysql_ssl_ca_certificate.go.tmpl | 4 +- ...ofile_mysql_ssl_client_certificate.go.tmpl | 4 +- ...ction_profile_mysql_ssl_client_key.go.tmpl | 4 +- ...rofile_oracle_forward_ssh_password.go.tmpl | 4 +- ...ile_oracle_forward_ssh_private_key.go.tmpl | 4 +- 
...connection_profile_oracle_password.go.tmpl | 4 +- ..._profile_oracle_ssl_ca_certificate.go.tmpl | 4 +- ...file_oracle_ssl_client_certificate.go.tmpl | 4 +- ...tion_profile_oracle_ssl_client_key.go.tmpl | 4 +- ...ection_profile_postgresql_password.go.tmpl | 4 +- ...file_postgresql_ssl_ca_certificate.go.tmpl | 4 +- ..._postgresql_ssl_client_certificate.go.tmpl | 4 +- ..._profile_postgresql_ssl_client_key.go.tmpl | 4 +- ...lex_datascan_ignore_profile_result.go.tmpl | 4 +- ..._forward_ssh_connectivity_password.go.tmpl | 4 +- ...rward_ssh_connectivity_private_key.go.tmpl | 4 +- ...ion_profile_mysql_profile_password.go.tmpl | 4 +- ..._profile_ssl_config_ca_certificate.go.tmpl | 4 +- ...file_ssl_config_client_certificate.go.tmpl | 4 +- ...ysql_profile_ssl_config_client_key.go.tmpl | 4 +- ...on_profile_oracle_profile_password.go.tmpl | 4 +- ...rofile_postgresql_profile_password.go.tmpl | 4 +- ...rofile_sql_server_profile_password.go.tmpl | 4 +- .../custom_flatten/go/default_if_empty.tmpl | 4 +- ...tegration_settings_github_settings.go.tmpl | 4 +- .../go/disk_consistency_group_policy.tmpl | 4 +- .../go/duration_string_to_days.go.tmpl | 4 +- ...nstance_networks_reserved_ip_range.go.tmpl | 4 +- .../go/firewall_log_config.go.tmpl | 4 +- .../custom_flatten/go/guard_self_link.go.tmpl | 4 +- .../go/guard_self_link_array.go.tmpl | 4 +- .../go/health_check_log_config.go.tmpl | 4 +- .../custom_flatten/go/http_headers.tmpl | 4 +- ..._oauth2_config_client_secret_value.go.tmpl | 4 +- ..._provider_oidc_client_secret_value.go.tmpl | 4 +- .../custom_flatten/go/id_from_name.tmpl | 4 +- .../go/image_kms_key_name.go.tmpl | 4 +- .../custom_flatten/go/json_schema.tmpl | 4 +- .../go/json_to_string_map.go.tmpl | 4 +- .../monitoring_slo_availability_sli.go.tmpl | 4 +- .../go/name_from_self_link.tmpl | 4 +- .../network_services_timeout_mirror.go.tmpl | 4 +- .../custom_flatten/go/object_to_bool.go.tmpl | 4 +- ...ent_recurring_schedule_time_of_day.go.tmpl | 4 +- 
.../go/preserved_state_disks.go.tmpl | 4 +- ..._no_wrapper_write_metadata_flatten.go.tmpl | 4 +- .../repository_short_name_from_name.go.tmpl | 4 +- .../go/secret_version_access.go.tmpl | 4 +- .../go/secret_version_enable.go.tmpl | 4 +- .../go/secret_version_version.go.tmpl | 4 +- ...rityposture_custom_constraint_name.go.tmpl | 4 +- .../custom_flatten/go/set_to_project.go.tmpl | 4 +- .../go/subnetwork_log_config.go.tmpl | 4 +- .../go/tags_tag_binding_name.tmpl | 4 +- ...i_feature_group_ignore_description.go.tmpl | 4 +- ...int_private_service_connect_config.go.tmpl | 4 +- ...ai_index_ignore_contents_delta_uri.go.tmpl | 4 +- ...index_ignore_is_complete_overwrite.go.tmpl | 4 +- ...ch_instance_boot_disk_type_flatten.go.tmpl | 4 +- ...ch_instance_data_disk_type_flatten.go.tmpl | 4 +- ...orkbench_instance_vm_image_flatten.go.tmpl | 4 +- ...tions_config_confidential_instance.go.tmpl | 4 +- ...kstations_config_shielded_instance.go.tmpl | 4 +- ...er_service_perimeter_egress_policy.go.tmpl | 4 +- ...r_service_perimeter_ingress_policy.go.tmpl | 4 +- ...manager_service_perimeter_resource.go.tmpl | 4 +- .../go/monitoring_monitored_project.go.tmpl | 4 +- ...s_policy_parent_from_access_policy.go.tmpl | 4 +- ...ccess_policy_parent_from_self_link.go.tmpl | 4 +- .../cloud_identity_group_membership.go.tmpl | 4 +- .../custom_update/go/secret_version.go.tmpl | 4 +- .../decoders/go/backend_service.go.tmpl | 4 +- .../go/bigquery_data_transfer.go.tmpl | 4 +- .../terraform/decoders/go/cloud_run.go.tmpl | 4 +- ...neranalysis_attestation_field_name.go.tmpl | 4 +- .../go/containeranalysis_occurrence.go.tmpl | 4 +- .../decoders/go/dlp_job_trigger.go.tmpl | 4 +- .../decoders/go/dlp_template_id.go.tmpl | 4 +- .../decoders/go/firestore_document.go.tmpl | 4 +- .../terraform/decoders/go/kms.go.tmpl | 4 +- .../go/long_name_to_self_link.go.tmpl | 4 +- .../go/monitoring_monitored_project.go.tmpl | 4 +- .../monitoring_notification_channel.go.tmpl | 4 +- .../decoders/go/network_endpoint.go.tmpl | 4 +- 
.../decoders/go/network_endpoints.go.tmpl | 4 +- .../go/region_backend_service.go.tmpl | 4 +- .../terraform/decoders/go/snapshot.go.tmpl | 4 +- ...sql_source_representation_instance.go.tmpl | 4 +- .../go/treat_destroyed_state_as_gone.tmpl | 4 +- .../decoders/go/unwrap_global_neg.go.tmpl | 4 +- .../decoders/go/unwrap_resource.go.tmpl | 4 +- .../active_directory_domain_trust.go.erb | 1 - .../go/access_level_never_send_parent.go.tmpl | 4 +- .../go/active_directory_domain_trust.go.tmpl | 5 +- .../encoders/go/backend_service.go.tmpl | 4 +- .../go/bigquery_data_transfer.go.tmpl | 4 +- .../encoders/go/bigquery_job.go.tmpl | 4 +- .../encoders/go/bigtable_app_profile.go.tmpl | 4 +- .../go/cloud_run_domain_mapping.go.tmpl | 4 +- .../encoders/go/cloud_run_service.go.tmpl | 4 +- .../go/cloudbuildv2_repository.go.tmpl | 4 +- .../go/clouddomains_registration.go.tmpl | 4 +- .../compute_global_network_endpoint.go.tmpl | 4 +- .../compute_instance_group_membership.go.tmpl | 4 +- .../go/compute_network_endpoint.go.tmpl | 4 +- .../go/compute_network_endpoints.go.tmpl | 4 +- .../go/compute_per_instance_config.go.tmpl | 4 +- .../compute_region_network_endpoint.go.tmpl | 4 +- ...neranalysis_attestation_field_name.go.tmpl | 4 +- .../go/containeranalysis_occurrence.go.tmpl | 4 +- .../encoders/go/datastream_stream.go.tmpl | 4 +- .../encoders/go/dlp_job_trigger.go.tmpl | 4 +- .../encoders/go/dlp_stored_info_type.go.tmpl | 4 +- .../encoders/go/health_check_type.tmpl | 4 +- .../terraform/encoders/go/index.go.tmpl | 4 +- .../encoders/go/kms_crypto_key.go.tmpl | 4 +- .../encoders/go/location_from_region.go.tmpl | 4 +- .../go/logging_linked_dataset.go.tmpl | 4 +- .../encoders/go/logging_log_view.go.tmpl | 4 +- .../go/monitoring_monitored_project.go.tmpl | 4 +- .../monitoring_notification_channel.go.tmpl | 4 +- .../encoders/go/monitoring_service.go.tmpl | 4 +- .../encoders/go/monitoring_slo.go.tmpl | 4 +- .../go/network_peering_routes_config.go.tmpl | 4 +- .../encoders/go/no_send_name.go.tmpl | 4 
+- ...edis_location_id_for_fallback_zone.go.tmpl | 4 +- .../go/region_backend_service.go.tmpl | 4 +- ...sql_source_representation_instance.go.tmpl | 4 +- .../terraform/encoders/go/wrap_object.go.tmpl | 4 +- ...t_with_deployment_resource_pool_id.go.tmpl | 4 +- .../go/wrap_object_with_template_id.go.tmpl | 4 +- ...bscription_push_bq_service_account.tf.tmpl | 56 + ..._push_cloudstorage_service_account.tf.tmpl | 46 + ...working_vpc_service_controls_basic.tf.tmpl | 28 + .../go/bigquery_dataset_access.go.tmpl | 4 +- .../go/bigtable_app_profile.go.tmpl | 4 +- .../extra_schema_entry/go/firewall.tmpl | 4 +- ...cret_version_is_secret_data_base64.go.tmpl | 4 +- .../extra_schema_entry/go/subnetwork.tmpl | 4 +- .../go/bigquery_dataset_access.go.tmpl | 4 +- .../post_create/go/datastream_stream.go.tmpl | 4 +- .../go/firebase_database_instance.go.tmpl | 4 +- .../terraform/post_create/go/index.go.tmpl | 4 +- .../cloud_identity_group_membership.go.tmpl | 4 +- .../post_import/go/cloudbuild_trigger.go.tmpl | 4 +- .../post_import/go/datastream_stream.go.tmpl | 4 +- .../post_update/go/datastream_stream.go.tmpl | 4 +- .../go/os_login_ssh_public_key.go.tmpl | 4 +- .../go/privateca_certificate.go.tmpl | 4 +- .../go/firebase_database_instance.go.tmpl | 4 +- ...re_default_binaryauthorization_policy.tmpl | 4 +- .../go/secret_version_deletion_policy.go.tmpl | 4 +- .../pre_read/go/cloudbuild_trigger.go.tmpl | 4 +- .../go/monitoring_monitored_project.go.tmpl | 4 +- ...cret_version_is_secret_data_base64.go.tmpl | 4 +- ...ce_networking_vpc_service_controls.go.tmpl | 16 + .../go/bigquerydatatransfer_config.tmpl | 4 +- .../go/bigtable_app_profile.go.tmpl | 4 +- .../pre_update/go/cloudbuild_trigger.go.tmpl | 4 +- .../pre_update/go/containeranalysis_note.tmpl | 4 +- .../go/datafusion_instance_update.go.tmpl | 4 +- .../pre_update/go/datastream_stream.go.tmpl | 4 +- .../go/firebase_database_instance.go.tmpl | 4 +- .../go/secret_manager_secret.go.tmpl | 4 +- .../pre_update/go/spanner_database.go.tmpl | 4 
+- .../go/active_directory_domain_trust.go.tmpl | 4 +- .../go/compute_per_instance_config.go.tmpl | 4 +- .../go/compute_service_attachment.go.tmpl | 4 +- .../go/containeranalysis_occurrence.go.tmpl | 4 +- .../go/dlp_stored_info_type.go.tmpl | 4 +- .../update_encoder/go/kms_crypto_key.go.tmpl | 4 +- .../update_encoder/go/pubsub_schema.tmpl | 4 +- .../go/pubsub_subscription.tmpl | 4 +- .../update_encoder/go/pubsub_topic.tmpl | 4 +- .../update_encoder/go/reservation.go.tmpl | 4 +- .../go/source_repo_repository.tmpl | 4 +- .../go/spanner_database.go.tmpl | 4 +- .../update_encoder/go/ssl_policy.tmpl | 4 +- .../terraform/yaml_conversion_field.erb | 2 +- 368 files changed, 27429 insertions(+), 535 deletions(-) create mode 100644 mmv1/products/compute/go_Address.yaml create mode 100644 mmv1/products/compute/go_Autoscaler.yaml create mode 100644 mmv1/products/compute/go_BackendBucket.yaml create mode 100644 mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml create mode 100644 mmv1/products/compute/go_BackendService.yaml create mode 100644 mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml create mode 100644 mmv1/products/compute/go_Disk.yaml create mode 100644 mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml create mode 100644 mmv1/products/compute/go_DiskType.yaml create mode 100644 mmv1/products/compute/go_ExternalVpnGateway.yaml create mode 100644 mmv1/products/compute/go_Firewall.yaml create mode 100644 mmv1/products/compute/go_ForwardingRule.yaml create mode 100644 mmv1/products/compute/go_GlobalAddress.yaml create mode 100644 mmv1/products/compute/go_GlobalForwardingRule.yaml create mode 100644 mmv1/products/compute/go_GlobalNetworkEndpoint.yaml create mode 100644 mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml create mode 100644 mmv1/products/compute/go_HaVpnGateway.yaml create mode 100644 mmv1/products/compute/go_HealthCheck.yaml create mode 100644 mmv1/products/compute/go_HttpHealthCheck.yaml create mode 100644 
mmv1/products/compute/go_HttpsHealthCheck.yaml create mode 100644 mmv1/products/compute/go_Image.yaml create mode 100644 mmv1/products/compute/go_Instance.yaml create mode 100644 mmv1/products/compute/go_InstanceGroup.yaml create mode 100644 mmv1/products/compute/go_InstanceGroupManager.yaml create mode 100644 mmv1/products/compute/go_InstanceGroupMembership.yaml create mode 100644 mmv1/products/compute/go_InstanceGroupNamedPort.yaml create mode 100644 mmv1/products/compute/go_InstanceSettings.yaml create mode 100644 mmv1/products/compute/go_Interconnect.yaml create mode 100644 mmv1/products/compute/go_InterconnectAttachment.yaml create mode 100644 mmv1/products/compute/go_License.yaml create mode 100644 mmv1/products/compute/go_MachineImage.yaml create mode 100644 mmv1/products/compute/go_MachineType.yaml create mode 100644 mmv1/products/compute/go_ManagedSslCertificate.yaml create mode 100644 mmv1/products/compute/go_Network.yaml create mode 100644 mmv1/products/compute/go_NetworkAttachment.yaml create mode 100644 mmv1/products/compute/go_NetworkEdgeSecurityService.yaml create mode 100644 mmv1/products/compute/go_NetworkEndpoint.yaml create mode 100644 mmv1/products/compute/go_NetworkEndpointGroup.yaml create mode 100644 mmv1/products/compute/go_NetworkEndpoints.yaml create mode 100644 mmv1/products/compute/go_NetworkFirewallPolicy.yaml create mode 100644 mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml create mode 100644 mmv1/products/compute/go_NodeGroup.yaml create mode 100644 mmv1/products/compute/go_NodeTemplate.yaml create mode 100644 mmv1/products/compute/go_OrganizationSecurityPolicy.yaml create mode 100644 mmv1/products/compute/go_OrganizationSecurityPolicyAssociation.yaml create mode 100644 mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml create mode 100644 mmv1/products/compute/go_PacketMirroring.yaml create mode 100644 mmv1/products/compute/go_PerInstanceConfig.yaml create mode 100644 
mmv1/products/compute/go_ProjectCloudArmorTier.yaml create mode 100644 mmv1/products/compute/go_PublicAdvertisedPrefix.yaml create mode 100644 mmv1/products/compute/go_PublicDelegatedPrefix.yaml create mode 100644 mmv1/products/compute/go_Region.yaml create mode 100644 mmv1/products/compute/go_RegionAutoscaler.yaml create mode 100644 mmv1/products/compute/go_RegionBackendService.yaml create mode 100644 mmv1/products/compute/go_RegionCommitment.yaml create mode 100644 mmv1/products/compute/go_RegionDisk.yaml create mode 100644 mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml create mode 100644 mmv1/products/compute/go_RegionDiskType.yaml create mode 100644 mmv1/products/compute/go_RegionHealthCheck.yaml create mode 100644 mmv1/products/compute/go_RegionInstanceGroupManager.yaml create mode 100644 mmv1/products/compute/go_RegionNetworkEndpoint.yaml create mode 100644 mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml create mode 100644 mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml create mode 100644 mmv1/products/compute/go_RegionPerInstanceConfig.yaml create mode 100644 mmv1/products/compute/go_RegionSecurityPolicy.yaml create mode 100644 mmv1/products/compute/go_RegionSecurityPolicyRule.yaml create mode 100644 mmv1/products/compute/go_RegionSslCertificate.yaml create mode 100644 mmv1/products/compute/go_RegionSslPolicy.yaml create mode 100644 mmv1/products/compute/go_RegionTargetHttpProxy.yaml create mode 100644 mmv1/products/compute/go_RegionTargetHttpsProxy.yaml create mode 100644 mmv1/products/compute/go_RegionTargetTcpProxy.yaml create mode 100644 mmv1/products/compute/go_RegionUrlMap.yaml create mode 100644 mmv1/products/compute/go_Reservation.yaml create mode 100644 mmv1/products/compute/go_ResourcePolicy.yaml create mode 100644 mmv1/products/compute/go_Route.yaml create mode 100644 mmv1/products/compute/go_Router.yaml create mode 100644 mmv1/products/compute/go_RouterNat.yaml create mode 100644 
mmv1/products/compute/go_SecurityPolicyRule.yaml create mode 100644 mmv1/products/compute/go_ServiceAttachment.yaml create mode 100644 mmv1/products/compute/go_Snapshot.yaml create mode 100644 mmv1/products/compute/go_SslCertificate.yaml create mode 100644 mmv1/products/compute/go_SslPolicy.yaml create mode 100644 mmv1/products/compute/go_Subnetwork.yaml create mode 100644 mmv1/products/compute/go_TargetGrpcProxy.yaml create mode 100644 mmv1/products/compute/go_TargetHttpProxy.yaml create mode 100644 mmv1/products/compute/go_TargetHttpsProxy.yaml create mode 100644 mmv1/products/compute/go_TargetInstance.yaml create mode 100644 mmv1/products/compute/go_TargetSslProxy.yaml create mode 100644 mmv1/products/compute/go_TargetTcpProxy.yaml create mode 100644 mmv1/products/compute/go_UrlMap.yaml create mode 100644 mmv1/products/compute/go_VpnGateway.yaml create mode 100644 mmv1/products/compute/go_VpnTunnel.yaml create mode 100644 mmv1/products/compute/go_Zone.yaml create mode 100644 mmv1/products/compute/go_product.yaml create mode 100644 mmv1/templates/terraform/constants/go/service_networking_vpc_service_controls.go.tmpl create mode 100644 mmv1/templates/terraform/custom_create/go/service_networking_vpc_service_controls.go.tmpl create mode 100644 mmv1/templates/terraform/examples/go/pubsub_subscription_push_bq_service_account.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/pubsub_subscription_push_cloudstorage_service_account.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/service_networking_vpc_service_controls_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/pre_read/go/service_networking_vpc_service_controls.go.tmpl diff --git a/mmv1/products/compute/go_Address.yaml b/mmv1/products/compute/go_Address.yaml new file mode 100644 index 000000000000..a44d0393d9b2 --- /dev/null +++ b/mmv1/products/compute/go_Address.yaml @@ -0,0 +1,237 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Address' +kind: 'compute#address' +description: | + Represents an Address resource. + + Each virtual machine instance has an ephemeral internal IP address and, + optionally, an external IP address. To communicate between instances on + the same network, you can use an instance's internal IP address. To + communicate with the Internet and instances outside of the same network, + you must specify the instance's external IP address. + + Internal IP addresses are ephemeral and only belong to an instance for + the lifetime of the instance; if the instance is deleted and recreated, + the instance is assigned a new internal IP address, either by Compute + Engine or by you. External IP addresses can be either ephemeral or + static. 
+references: + guides: + 'Reserving a Static External IP Address': 'https://cloud.google.com/compute/docs/instances-and-network' + 'Reserving a Static Internal IP Address': 'https://cloud.google.com/compute/docs/ip-addresses/reserve-static-internal-ip-address' + api: 'https://cloud.google.com/compute/docs/reference/beta/addresses' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/addresses' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + post_create: 'templates/terraform/post_create/go/labels.tmpl' +examples: + - name: 'address_basic' + primary_resource_id: 'ip_address' + vars: + address_name: 'my-address' + - name: 'address_with_subnetwork' + primary_resource_id: 'internal_with_subnet_and_address' + vars: + address_name: 'my-internal-address' + network_name: 'my-network' + subnetwork_name: 'my-subnet' + - name: 'address_with_gce_endpoint' + primary_resource_id: 'internal_with_gce_endpoint' + vars: + address_name: 'my-internal-address-' + - name: 'address_with_shared_loadbalancer_vip' + primary_resource_id: 'internal_with_shared_loadbalancer_vip' + vars: + address_name: 'my-internal-address' + skip_docs: true + - name: 'instance_with_ip' + primary_resource_id: 'static' + vars: + address_name: 'ipv4-address' + instance_name: 'vm-instance' + - name: 'compute_address_ipsec_interconnect' + primary_resource_id: 'ipsec-interconnect-address' + vars: + address_name: 'test-address' + network_name: 'test-network' +parameters: + - name: 'region' + type: ResourceRef + description: | + The Region in which the created address should reside. 
+ If it is not provided, the provider region is used. + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'address' + type: String + description: | + The static external IP address represented by this resource. + The IP address must be inside the specified subnetwork, + if any. Set by the API if undefined. + default_from_api: true + - name: 'addressType' + type: Enum + description: | + The type of address to reserve. + Note: if you set this argument's value as `INTERNAL` you need to leave the `network_tier` argument unset in that resource block. + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: EXTERNAL + enum_values: + - 'INTERNAL' + - 'EXTERNAL' + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + - name: 'name' + type: String + description: | + Name of the resource. The name must be 1-63 characters long, and + comply with RFC1035. Specifically, the name must be 1-63 characters + long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + validation: + regex: '^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$' + - name: 'purpose' + type: String + description: | + The purpose of this resource, which can be one of the following values. + + * GCE_ENDPOINT for addresses that are used by VM instances, alias IP + ranges, load balancers, and similar resources. 
+ + * SHARED_LOADBALANCER_VIP for an address that can be used by multiple + internal load balancers. + + * VPC_PEERING for addresses that are reserved for VPC peer networks. + + * IPSEC_INTERCONNECT for addresses created from a private IP range that + are reserved for a VLAN attachment in an HA VPN over Cloud Interconnect + configuration. These addresses are regional resources. + + * PRIVATE_SERVICE_CONNECT for a private network address that is used to + configure Private Service Connect. Only global internal addresses can use + this purpose. + + + This should only be set when using an Internal address. + default_from_api: true + - name: 'networkTier' + type: Enum + description: | + The networking tier used for configuring this address. If this field is not + specified, it is assumed to be PREMIUM. + This argument should not be used when configuring Internal addresses, because [network tier cannot be set for internal traffic; it's always Premium](https://cloud.google.com/network-tiers/docs/overview). + default_from_api: true + enum_values: + - 'PREMIUM' + - 'STANDARD' + - name: 'subnetwork' + type: ResourceRef + description: | + The URL of the subnetwork in which to reserve the address. If an IP + address is specified, it must be within the subnetwork's IP range. + This field can only be used with INTERNAL type with + GCE_ENDPOINT/DNS_RESOLVER purposes. + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'users' + type: Array + description: 'The URLs of the resources that are using this address.' + output: true + item_type: + type: String + - name: 'labels' + type: KeyValueLabels + description: | + Labels to apply to this address. A list of key->value pairs. 
+ immutable: false + update_url: 'projects/{{project}}/regions/{{region}}/addresses/{{name}}/setLabels' + update_verb: 'POST' + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + output: true + update_url: 'projects/{{project}}/regions/{{region}}/addresses/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'network' + type: ResourceRef + description: | + The URL of the network in which to reserve the address. This field + can only be used with INTERNAL type with the VPC_PEERING and + IPSEC_INTERCONNECT purposes. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'prefixLength' + type: Integer + description: | + The prefix length if the resource represents an IP range. + default_from_api: true + - name: 'ipVersion' + type: Enum + description: | + The IP Version that will be used by this address. The default value is `IPV4`. + diff_suppress_func: 'tpgresource.EmptyOrDefaultStringSuppress("IPV4")' + enum_values: + - 'IPV4' + - 'IPV6' + - name: 'ipv6EndpointType' + type: Enum + description: | + The endpoint type of this address, which should be VM or NETLB. This is + used for deciding which type of endpoint this address can be used after + the external IPv6 address reservation. + enum_values: + - 'VM' + - 'NETLB' diff --git a/mmv1/products/compute/go_Autoscaler.yaml b/mmv1/products/compute/go_Autoscaler.yaml new file mode 100644 index 000000000000..703a28c379a3 --- /dev/null +++ b/mmv1/products/compute/go_Autoscaler.yaml @@ -0,0 +1,423 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Autoscaler' +kind: 'compute#autoscaler' +description: | + Represents an Autoscaler resource. + + Autoscalers allow you to automatically scale virtual machine instances in + managed instance groups according to an autoscaling policy that you + define. +references: + guides: + 'Autoscaling Groups of Instances': 'https://cloud.google.com/compute/docs/autoscaler/' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/autoscalers' +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/autoscalers' +has_self_link: true +update_url: 'projects/{{project}}/zones/{{zone}}/autoscalers?autoscaler={{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'autoscaler_single_instance' + primary_resource_id: 'default' + min_version: 'beta' + vars: + autoscaler_name: 'my-autoscaler' + instance_template_name: 'my-instance-template' + target_pool_name: 'my-target-pool' + igm_name: 'my-igm' + provider_name: 'google-beta' + provider_alias: '' + test_vars_overrides: + 'provider_name': '"google-beta.us-central1"' + 'provider_alias': '"alias = \"us-central1\""' + - name: 'autoscaler_basic' + primary_resource_id: 
'foobar' + vars: + autoscaler_name: 'my-autoscaler' + instance_template_name: 'my-instance-template' + target_pool_name: 'my-target-pool' + igm_name: 'my-igm' +parameters: + - name: 'zone' + type: ResourceRef + description: | + URL of the zone where the instance group resides. + required: false + immutable: true + ignore_read: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'name' + type: String + description: | + Name of the resource. The name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + validation: + function: 'verify.ValidateGCEName' + - name: 'description' + type: String + description: | + An optional description of this resource. + - name: 'autoscalingPolicy' + type: NestedObject + description: | + The configuration parameters for the autoscaling algorithm. You can + define one or more of the policies for an autoscaler: cpuUtilization, + customMetricUtilizations, and loadBalancingUtilization. + + If none of these are specified, the default will be to autoscale based + on cpuUtilization to 0.6 or 60%. + required: true + properties: + - name: 'minReplicas' + type: Integer + description: | + The minimum number of replicas that the autoscaler can scale down + to. This cannot be less than 0. If not provided, autoscaler will + choose a default value depending on maximum number of instances + allowed. 
+ api_name: minNumReplicas + required: true + send_empty_value: true + - name: 'maxReplicas' + type: Integer + description: | + The maximum number of instances that the autoscaler can scale up + to. This is required when creating or updating an autoscaler. The + maximum number of replicas should not be lower than minimal number + of replicas. + api_name: maxNumReplicas + required: true + - name: 'cooldownPeriod' + type: Integer + description: | + The number of seconds that the autoscaler should wait before it + starts collecting information from a new instance. This prevents + the autoscaler from collecting information when the instance is + initializing, during which the collected usage would not be + reliable. The default time autoscaler waits is 60 seconds. + + Virtual machine initialization times might vary because of + numerous factors. We recommend that you test how long an + instance may take to initialize. To do this, create an instance + and time the startup process. + api_name: coolDownPeriodSec + default_value: 60 + - name: 'mode' + type: String + description: | + Defines operating mode for this policy. + default_value: ON + - name: 'scaleDownControl' + type: NestedObject + description: | + Defines scale down controls to reduce the risk of response latency + and outages due to abrupt scale-in events + min_version: 'beta' + required: false + default_from_api: true + properties: + - name: 'maxScaledDownReplicas' + type: NestedObject + at_least_one_of: + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas' + - 'autoscaling_policy.0.scale_down_control.0.time_window_sec' + properties: + - name: 'fixed' + type: Integer + description: | + Specifies a fixed number of VM instances. This must be a positive + integer. 
+ at_least_one_of: + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas.0.fixed' + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas.0.percent' + - name: 'percent' + type: Integer + description: | + Specifies a percentage of instances between 0 to 100%, inclusive. + For example, specify 80 for 80%. + at_least_one_of: + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas.0.fixed' + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas.0.percent' + - name: 'timeWindowSec' + type: Integer + description: | + How long back autoscaling should look when computing recommendations + to include directives regarding slower scale down, as described above. + at_least_one_of: + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas' + - 'autoscaling_policy.0.scale_down_control.0.time_window_sec' + - name: 'scaleInControl' + type: NestedObject + description: | + Defines scale in controls to reduce the risk of response latency + and outages due to abrupt scale-in events + properties: + - name: 'maxScaledInReplicas' + type: NestedObject + at_least_one_of: + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas' + - 'autoscaling_policy.0.scale_in_control.0.time_window_sec' + properties: + - name: 'fixed' + type: Integer + description: | + Specifies a fixed number of VM instances. This must be a positive + integer. + at_least_one_of: + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.fixed' + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.percent' + - name: 'percent' + type: Integer + description: | + Specifies a percentage of instances between 0 to 100%, inclusive. + For example, specify 80 for 80%. 
+ at_least_one_of: + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.fixed' + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.percent' + - name: 'timeWindowSec' + type: Integer + description: | + How long back autoscaling should look when computing recommendations + to include directives regarding slower scale down, as described above. + at_least_one_of: + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas' + - 'autoscaling_policy.0.scale_in_control.0.time_window_sec' + - name: 'cpuUtilization' + type: NestedObject + description: | + Defines the CPU utilization policy that allows the autoscaler to + scale based on the average CPU utilization of a managed instance + group. + default_from_api: true + properties: + - name: 'target' + type: Double + description: | + The target CPU utilization that the autoscaler should maintain. + Must be a float value in the range (0, 1]. If not specified, the + default is 0.6. + + If the CPU level is below the target utilization, the autoscaler + scales down the number of instances until it reaches the minimum + number of instances you specified or until the average CPU of + your instances reaches the target utilization. + + If the average CPU is above the target utilization, the autoscaler + scales up until it reaches the maximum number of instances you + specified or until the average utilization reaches the target + utilization. + api_name: utilizationTarget + required: true + - name: 'predictiveMethod' + type: String + description: | + Indicates whether predictive autoscaling based on CPU metric is enabled. Valid values are: + + - NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. + + - OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. 
+ custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: NONE + - name: 'metric' + type: Array + description: | + Configuration parameters of autoscaling based on a custom metric. + api_name: customMetricUtilizations + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The identifier (type) of the Stackdriver Monitoring metric. + The metric cannot have negative values. + + The metric must have a value type of INT64 or DOUBLE. + api_name: metric + required: true + - name: 'singleInstanceAssignment' + type: Double + description: | + If scaling is based on a per-group metric value that represents the + total amount of work to be done or resource usage, set this value to + an amount assigned for a single instance of the scaled group. + The autoscaler will keep the number of instances proportional to the + value of this metric, the metric itself should not change value due + to group resizing. + + For example, a good metric to use with the target is + `pubsub.googleapis.com/subscription/num_undelivered_messages` + or a custom metric exporting the total number of requests coming to + your instances. + + A bad example would be a metric exporting an average or median + latency, since this value can't include a chunk assignable to a + single instance, it could be better used with utilization_target + instead. + min_version: 'beta' + - name: 'target' + type: Double + description: | + The target value of the metric that autoscaler should + maintain. This must be a positive value. A utilization + metric scales number of virtual machines handling requests + to increase or decrease proportionally to the metric. + + For example, a good metric to use as a utilizationTarget is + www.googleapis.com/compute/instance/network/received_bytes_count. + The autoscaler will work to keep this value constant for each + of the instances. 
+ api_name: utilizationTarget + - name: 'type' + type: Enum + description: | + Defines how target utilization value is expressed for a + Stackdriver Monitoring metric. + api_name: utilizationTargetType + enum_values: + - 'GAUGE' + - 'DELTA_PER_SECOND' + - 'DELTA_PER_MINUTE' + - name: 'filter' + type: String + description: | + A filter string to be used as the filter string for + a Stackdriver Monitoring TimeSeries.list API call. + This filter is used to select a specific TimeSeries for + the purpose of autoscaling and to determine whether the metric + is exporting per-instance or per-group data. + + You can only use the AND operator for joining selectors. + You can only use direct equality comparison operator (=) without + any functions for each selector. + You can specify the metric in both the filter string and in the + metric field. However, if specified in both places, the metric must + be identical. + + The monitored resource type determines what kind of values are + expected for the metric. If it is a gce_instance, the autoscaler + expects the metric to include a separate TimeSeries for each + instance in a group. In such a case, you cannot filter on resource + labels. + + If the resource type is any other value, the autoscaler expects + this metric to contain values that apply to the entire autoscaled + instance group and resource label filtering can be performed to + point autoscaler at the correct TimeSeries to scale upon. + This is called a per-group metric for the purpose of autoscaling. + + If not specified, the type defaults to gce_instance. + + You should provide a filter that is selective enough to pick just + one TimeSeries for the autoscaled group or for each of the instances + (if you are using gce_instance resource type). If multiple + TimeSeries are returned upon the query execution, the autoscaler + will sum their respective values to obtain its scaling value. 
+ min_version: 'beta' + default_value: resource.type = gce_instance + - name: 'loadBalancingUtilization' + type: NestedObject + description: | + Configuration parameters of autoscaling based on a load balancer. + properties: + - name: 'target' + type: Double + description: | + Fraction of backend capacity utilization (set in HTTP(s) load + balancing configuration) that autoscaler should maintain. Must + be a positive float value. If not defined, the default is 0.8. + api_name: utilizationTarget + required: true + - name: 'scalingSchedules' + type: Map + description: | + Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. + key_name: 'name' + key_description: | + A name for the schedule. + value_type: + type: NestedObject + properties: + - name: 'minRequiredReplicas' + type: Integer + description: | + Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule. + required: true + send_empty_value: true + - name: 'schedule' + type: String + description: | + The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field). + required: true + - name: 'timeZone' + type: String + description: | + The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. + default_value: UTC + - name: 'durationSec' + type: Integer + description: | + The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300. + required: true + - name: 'disabled' + type: Boolean + description: | + A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect. 
+ default_value: false + - name: 'description' + type: String + description: | + A description of a scaling schedule. + - name: 'target' + type: ResourceRef + description: | + URL of the managed instance group that this autoscaler will scale. + required: true + custom_expand: 'templates/terraform/custom_expand/go/compute_full_url.tmpl' + resource: 'InstanceGroupManager' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_BackendBucket.yaml b/mmv1/products/compute/go_BackendBucket.yaml new file mode 100644 index 000000000000..5296bba84b1b --- /dev/null +++ b/mmv1/products/compute/go_BackendBucket.yaml @@ -0,0 +1,281 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'BackendBucket' +kind: 'compute#backendBucket' +description: | + Backend buckets allow you to use Google Cloud Storage buckets with HTTP(S) + load balancing. + + An HTTP(S) load balancer can direct traffic to specified URLs to a + backend bucket rather than a backend service. It can send requests for + static content to a Cloud Storage bucket and requests for dynamic content + to a virtual machine instance. 
+references: + guides: + 'Using a Cloud Storage bucket as a load balancer backend': 'https://cloud.google.com/compute/docs/load-balancing/http/backend-bucket' + api: 'https://cloud.google.com/compute/docs/reference/v1/backendBuckets' +docs: +base_url: 'projects/{{project}}/global/backendBuckets' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + parent_resource_attribute: 'name' + import_format: + - 'projects/{{project}}/global/backendBuckets/{{name}}' + - '{{name}}' + min_version: 'beta' +custom_code: + encoder: 'templates/terraform/encoders/go/compute_backend_bucket.go.tmpl' + post_create: 'templates/terraform/post_create/go/compute_backend_bucket_security_policy.go.tmpl' + post_update: 'templates/terraform/post_create/go/compute_backend_bucket_security_policy.go.tmpl' +examples: + - name: 'backend_bucket_basic' + primary_resource_id: 'image_backend' + primary_resource_name: 'fmt.Sprintf("tf-test-image-backend-bucket%s", context["random_suffix"])' + vars: + backend_bucket_name: 'image-backend-bucket' + bucket_name: 'image-store-bucket' + - name: 'backend_bucket_full' + primary_resource_id: 'image_backend_full' + vars: + backend_bucket_name: 'image-backend-bucket-full' + bucket_name: 'image-store-bucket-full' + skip_docs: true + - name: 'backend_bucket_security_policy' + primary_resource_id: 'image_backend' + vars: + backend_bucket_name: 'image-backend-bucket' + bucket_name: 'image-store-bucket' + - name: 'backend_bucket_query_string_whitelist' + primary_resource_id: 'image_backend' + vars: + backend_bucket_name: 'image-backend-bucket' + - name: 'backend_bucket_include_http_headers' + 
primary_resource_id: 'image_backend' + vars: + backend_bucket_name: 'image-backend-bucket' + - name: 'external_cdn_lb_with_backend_bucket' + primary_resource_id: 'default' + vars: + my_bucket: 'my-bucket' + index_page: 'index-page' + 404_page: '404-page' + test_object: 'test-object' + example_ip: 'example-ip' + http_lb_forwarding_rule: 'http-lb-forwarding-rule' + http_lb_proxy: 'http-lb-proxy' + http_lb: 'http-lb' + cat_backend_bucket: 'cat-backend-bucket' + skip_docs: true + - name: 'backend_bucket_bypass_cache' + primary_resource_id: 'image_backend' + vars: + backend_bucket_name: 'image-backend-bucket' + bucket_name: 'image-store-bucket' + skip_docs: true + - name: 'backend_bucket_coalescing' + primary_resource_id: 'image_backend' + vars: + backend_bucket_name: 'image-backend-bucket' + bucket_name: 'image-store-bucket' + skip_docs: true +parameters: +properties: + - name: 'bucketName' + type: String + description: 'Cloud Storage bucket name.' + required: true + - name: 'cdnPolicy' + type: NestedObject + description: 'Cloud CDN configuration for this Backend Bucket.' + default_from_api: true + properties: + - name: 'cacheKeyPolicy' + type: NestedObject + description: 'The CacheKeyPolicy for this CdnPolicy.' + properties: + - name: 'queryStringWhitelist' + type: Array + description: | + Names of query string parameters to include in cache keys. + Default parameters are always included. '&' and '=' will + be percent encoded and not treated as delimiters. + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_http_headers' + item_type: + type: String + - name: 'includeHttpHeaders' + type: Array + description: | + Allows HTTP request headers (by name) to be used in the + cache key. 
+ send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_http_headers' + item_type: + type: String + - name: 'signedUrlCacheMaxAgeSec' + type: Integer + description: | + Maximum number of seconds the response to a signed URL request will + be considered fresh. After this time period, + the response will be revalidated before being served. + When serving responses to signed URL requests, + Cloud CDN will internally behave as though + all responses from this backend had a "Cache-Control: public, + max-age=[TTL]" header, regardless of any existing Cache-Control + header. The actual headers served in responses will not be altered. + send_empty_value: true + - name: 'defaultTtl' + type: Integer + description: | + Specifies the default TTL for cached content served by this origin for responses + that do not have an existing valid TTL (max-age or s-max-age). + default_from_api: true + send_empty_value: true + - name: 'maxTtl' + type: Integer + description: | + Specifies the maximum allowed TTL for cached content served by this origin. + default_from_api: true + - name: 'clientTtl' + type: Integer + description: | + Specifies the maximum allowed TTL for cached content served by this origin. + default_from_api: true + send_empty_value: true + - name: 'negativeCaching' + type: Boolean + description: | + Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. + default_from_api: true + send_empty_value: true + - name: 'negativeCachingPolicy' + type: Array + description: | + Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. + Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs. 
+ item_type: + type: NestedObject + properties: + - name: 'code' + type: Integer + description: | + The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 + can be specified as values, and you cannot specify a status code more than once. + - name: 'ttl' + type: Integer + description: | + The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s + (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + send_empty_value: true + - name: 'cacheMode' + type: Enum + description: | + Specifies the cache setting for all responses from this backend. + The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + default_from_api: true + enum_values: + - 'USE_ORIGIN_HEADERS' + - 'FORCE_CACHE_ALL' + - 'CACHE_ALL_STATIC' + - name: 'serveWhileStale' + type: Integer + description: | + Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. + default_from_api: true + send_empty_value: true + - name: 'requestCoalescing' + type: Boolean + description: | + If true then Cloud CDN will combine multiple concurrent cache fill requests into a small number of requests to the origin. + send_empty_value: true + - name: 'bypassCacheOnRequestHeaders' + type: Array + description: | + Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. The cache is bypassed for all cdnPolicy.cacheMode settings. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The header field name to match on when bypassing cache. Values are case-insensitive. 
+ max_size: 5 + - name: 'compressionMode' + type: Enum + description: | + Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header. + enum_values: + - 'AUTOMATIC' + - 'DISABLED' + - name: 'edgeSecurityPolicy' + type: String + description: | + The security policy associated with this backend bucket. + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'customResponseHeaders' + type: Array + description: | + Headers that the HTTP/S load balancer should add to proxied responses. + item_type: + type: String + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional textual description of the resource; provided by the + client when the resource is created. + - name: 'enableCdn' + type: Boolean + description: 'If true, enable Cloud CDN for this BackendBucket.' + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the + last character, which cannot be a dash. + required: true + immutable: true + validation: + regex: '^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$' diff --git a/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml new file mode 100644 index 000000000000..cb6e3777442c --- /dev/null +++ b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml @@ -0,0 +1,98 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'BackendBucketSignedUrlKey' +kind: 'compute#BackendBucketSignedUrlKey' +description: | + A key for signing Cloud CDN signed URLs for BackendBuckets. +references: + guides: + 'Using Signed URLs': 'https://cloud.google.com/cdn/docs/using-signed-urls/' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/backendBuckets' +docs: +base_url: 'projects/{{project}}/global/backendBuckets/{{backend_bucket}}' +self_link: 'projects/{{project}}/global/backendBuckets/{{backend_bucket}}' +create_url: 'projects/{{project}}/global/backendBuckets/{{backend_bucket}}/addSignedUrlKey' +delete_url: 'projects/{{project}}/global/backendBuckets/{{backend_bucket}}/deleteSignedUrlKey?keyName={{name}}' +delete_verb: 'POST' +immutable: true +mutex: signedUrlKey/{{project}}/backendBuckets/{{backend_bucket}}/ +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - name +nested_query: + keys: + - cdnPolicy + - signedUrlKeyNames + is_list_of_ids: true + modify_by_patch: false +custom_code: +exclude_tgc: true +examples: + - name: 
'backend_bucket_signed_url_key'
+    primary_resource_id: 'backend_key'
+    vars:
+      key_name: 'test-key'
+      backend_name: 'test-signed-backend-bucket'
+      bucket_name: 'test-storage-bucket'
+    external_providers: ["random", "time"]
+    skip_test: true
+parameters:
+  - name: 'backendBucket'
+    type: ResourceRef
+    description: |
+      The backend bucket this signed URL key belongs to.
+    required: true
+    immutable: true
+    ignore_read: true
+    custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl'
+    resource: 'BackendBucket'
+    imports: 'name'
+properties:
+  - name: 'name'
+    type: String
+    description: |
+      Name of the signed URL key.
+    api_name: keyName
+    required: true
+    immutable: true
+    validation:
+      regex: '^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$'
+  - name: 'keyValue'
+    type: String
+    description: |
+      128-bit key value used for signing the URL. The key value must be a
+      valid RFC 4648 Section 5 base64url encoded string.
+    required: true
+    immutable: true
+    ignore_read: true
+    sensitive: true
diff --git a/mmv1/products/compute/go_BackendService.yaml b/mmv1/products/compute/go_BackendService.yaml
new file mode 100644
index 000000000000..90a748f145cd
--- /dev/null
+++ b/mmv1/products/compute/go_BackendService.yaml
@@ -0,0 +1,1319 @@
+# Copyright 2024 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'BackendService' +kind: 'compute#backendService' +description: | + A Backend Service defines a group of virtual machines that will serve + traffic for load balancing. This resource is a global backend service, + appropriate for external load balancing or self-managed internal load balancing. + For managed internal load balancing, use a regional backend service instead. + + Currently self-managed internal load balancing is only available in beta. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/load-balancing/http/backend-service' + api: 'https://cloud.google.com/compute/docs/reference/v1/backendServices' +docs: +base_url: 'projects/{{project}}/global/backendServices' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + allowed_iam_role: 'roles/compute.admin' + parent_resource_attribute: 'name' + iam_conditions_request_type: 'QUERY_PARAM' + min_version: 'beta' +custom_code: + constants: 'templates/terraform/constants/go/backend_service.go.tmpl' + encoder: 'templates/terraform/encoders/go/backend_service.go.tmpl' + decoder: 'templates/terraform/decoders/go/backend_service.go.tmpl' + post_create: 'templates/terraform/post_create/go/compute_backend_service_security_policy.go.tmpl' + post_update: 'templates/terraform/post_create/go/compute_backend_service_security_policy.go.tmpl' +schema_version: 1 +examples: + - name: 'backend_service_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-backend-service%s", context["random_suffix"])' + vars: + 
backend_service_name: 'backend-service' + http_health_check_name: 'health-check' + - name: 'backend_service_external_iap' + primary_resource_id: 'default' + vars: + backend_service_name: 'tf-test-backend-service-external' + - name: 'backend_service_cache_simple' + primary_resource_id: 'default' + vars: + backend_service_name: 'backend-service' + http_health_check_name: 'health-check' + - name: 'backend_service_cache_include_http_headers' + primary_resource_id: 'default' + vars: + backend_service_name: 'backend-service' + - name: 'backend_service_cache_include_named_cookies' + primary_resource_id: 'default' + vars: + backend_service_name: 'backend-service' + - name: 'backend_service_cache' + primary_resource_id: 'default' + vars: + backend_service_name: 'backend-service' + http_health_check_name: 'health-check' + - name: 'backend_service_cache_bypass_cache_on_request_headers' + primary_resource_id: 'default' + vars: + backend_service_name: 'backend-service' + http_health_check_name: 'health-check' + - name: 'backend_service_traffic_director_round_robin' + primary_resource_id: 'default' + min_version: 'beta' + vars: + backend_service_name: 'backend-service' + health_check_name: 'health-check' + - name: 'backend_service_traffic_director_ring_hash' + primary_resource_id: 'default' + min_version: 'beta' + vars: + backend_service_name: 'backend-service' + health_check_name: 'health-check' + - name: 'backend_service_network_endpoint' + primary_resource_id: 'default' + min_version: 'beta' + vars: + backend_service_name: 'backend-service' + neg_name: 'network-endpoint' + - name: 'backend_service_external_managed' + primary_resource_id: 'default' + vars: + backend_service_name: 'backend-service' + health_check_name: 'health-check' +parameters: +properties: + - name: 'affinityCookieTtlSec' + type: Integer + description: | + Lifetime of cookies in seconds if session_affinity is + GENERATED_COOKIE. 
If set to 0, the cookie is non-persistent and lasts + only until the end of the browser session (or equivalent). The + maximum allowed value for TTL is one day. + + When the load balancing scheme is INTERNAL, this field is not used. + - name: 'backend' + type: Array + description: | + The set of backends that serve this BackendService. + api_name: backends + is_set: true + set_hash_func: 'resourceGoogleComputeBackendServiceBackendHash' + item_type: + type: NestedObject + properties: + - name: 'balancingMode' + type: Enum + description: | + Specifies the balancing mode for this backend. + + For global HTTP(S) or TCP/SSL load balancing, the default is + UTILIZATION. Valid values are UTILIZATION, RATE (for HTTP(S)) + and CONNECTION (for TCP/SSL). + + See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) + for an explanation of load balancing modes. + + From version 6.0.0 default value will be UTILIZATION to match default GCP value. + default_value: UTILIZATION + enum_values: + - 'UTILIZATION' + - 'RATE' + - 'CONNECTION' + - name: 'capacityScaler' + type: Double + description: | + A multiplier applied to the group's maximum servicing capacity + (based on UTILIZATION, RATE or CONNECTION). + + Default value is 1, which means the group will serve up to 100% + of its configured capacity (depending on balancingMode). A + setting of 0 means the group is completely drained, offering + 0% of its available Capacity. Valid range is [0.0,1.0]. + send_empty_value: true + default_value: 1.0 + - name: 'description' + type: String + description: | + An optional description of this resource. + Provide this property when you create the resource. + - name: 'group' + type: String + description: | + The fully-qualified URL of an Instance Group or Network Endpoint + Group resource. In case of instance group this defines the list + of instances that serve traffic. 
Member virtual machine
+            instances from each instance group must live in the same zone as
+            the instance group itself. No two backends in a backend service
+            are allowed to use the same Instance Group resource.
+
+            For Network Endpoint Groups this defines the list of endpoints. All
+            endpoints of the Network Endpoint Group must be hosted on instances
+            located in the same zone as the Network Endpoint Group.
+
+            Backend services cannot mix Instance Group and
+            Network Endpoint Group backends.
+
+            Note that you must specify an Instance Group or Network Endpoint
+            Group resource using the fully-qualified URL, rather than a
+            partial URL.
+          required: true
+          diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths'
+          custom_flatten: 'templates/terraform/custom_flatten/go/guard_self_link.go.tmpl'
+        - name: 'maxConnections'
+          type: Integer
+          description: |
+            The max number of simultaneous connections for the group. Can
+            be used with either CONNECTION or UTILIZATION balancing modes.
+
+            For CONNECTION mode, either maxConnections or one
+            of maxConnectionsPerInstance or maxConnectionsPerEndpoint,
+            as appropriate for group type, must be set.
+          default_from_api: true
+        - name: 'maxConnectionsPerInstance'
+          type: Integer
+          description: |
+            The max number of simultaneous connections that a single
+            backend instance can handle. This is used to calculate the
+            capacity of the group. Can be used in either CONNECTION or
+            UTILIZATION balancing modes.
+
+            For CONNECTION mode, either maxConnections or
+            maxConnectionsPerInstance must be set.
+          default_from_api: true
+        - name: 'maxConnectionsPerEndpoint'
+          type: Integer
+          description: |
+            The max number of simultaneous connections that a single backend
+            network endpoint can handle. This is used to calculate the
+            capacity of the group. Can be used in either CONNECTION or
+            UTILIZATION balancing modes.
+
+            For CONNECTION mode, either
+            maxConnections or maxConnectionsPerEndpoint must be set.
+ default_from_api: true + - name: 'maxRate' + type: Integer + description: | + The max requests per second (RPS) of the group. + + Can be used with either RATE or UTILIZATION balancing modes, + but required if RATE mode. For RATE mode, either maxRate or one + of maxRatePerInstance or maxRatePerEndpoint, as appropriate for + group type, must be set. + default_from_api: true + - name: 'maxRatePerInstance' + type: Double + description: | + The max requests per second (RPS) that a single backend + instance can handle. This is used to calculate the capacity of + the group. Can be used in either balancing mode. For RATE mode, + either maxRate or maxRatePerInstance must be set. + default_from_api: true + - name: 'maxRatePerEndpoint' + type: Double + description: | + The max requests per second (RPS) that a single backend network + endpoint can handle. This is used to calculate the capacity of + the group. Can be used in either balancing mode. For RATE mode, + either maxRate or maxRatePerEndpoint must be set. + default_from_api: true + - name: 'maxUtilization' + type: Double + description: | + Used when balancingMode is UTILIZATION. This ratio defines the + CPU utilization target for the group. Valid range is [0.0, 1.0]. + default_from_api: true + - name: 'circuitBreakers' + type: NestedObject + description: | + Settings controlling the volume of connections to a backend service. This field + is applicable only when the load_balancing_scheme is set to INTERNAL_SELF_MANAGED. + properties: + - name: 'connectTimeout' + type: NestedObject + description: | + The timeout for new network connections to hosts. 
+ min_version: 'beta' + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + properties: + - name: 'seconds' + type: Integer + description: | + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + required: true + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + - name: 'maxRequestsPerConnection' + type: Integer + description: | + Maximum requests for a single backend connection. This parameter + is respected by both the HTTP/1.1 and HTTP/2 implementations. If + not specified, there is no limit. Setting this parameter to 1 + will effectively disable keep alive. + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + - name: 'maxConnections' + type: Integer + description: | + The maximum number of connections to the backend cluster. + Defaults to 1024. + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + default_value: 1024 + - name: 'maxPendingRequests' + type: Integer + description: | + The maximum number of pending requests to the backend cluster. + Defaults to 1024. 
+ at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + default_value: 1024 + - name: 'maxRequests' + type: Integer + description: | + The maximum number of parallel requests to the backend cluster. + Defaults to 1024. + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + default_value: 1024 + - name: 'maxRetries' + type: Integer + description: | + The maximum number of parallel retries to the backend cluster. + Defaults to 3. + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + default_value: 3 + - name: 'compressionMode' + type: Enum + description: | + Compress text responses using Brotli or gzip compression, based on the client's Accept-Encoding header. + enum_values: + - 'AUTOMATIC' + - 'DISABLED' + - name: 'consistentHash' + type: NestedObject + description: | + Consistent Hash-based load balancing can be used to provide soft session + affinity based on HTTP headers, cookies or other properties. This load balancing + policy is applicable only for HTTP connections. The affinity to a particular + destination host will be lost when one or more hosts are added/removed from the + destination service. This field specifies parameters that control consistent + hashing. This field only applies if the load_balancing_scheme is set to + INTERNAL_SELF_MANAGED. 
This field is only applicable when locality_lb_policy is + set to MAGLEV or RING_HASH. + properties: + - name: 'httpCookie' + type: NestedObject + description: | + Hash is based on HTTP Cookie. This field describes a HTTP cookie + that will be used as the hash key for the consistent hash load + balancer. If the cookie is not present, it will be generated. + This field is applicable if the sessionAffinity is set to HTTP_COOKIE. + at_least_one_of: + - 'consistent_hash.0.http_cookie' + - 'consistent_hash.0.http_header_name' + - 'consistent_hash.0.minimum_ring_size' + properties: + - name: 'ttl' + type: NestedObject + description: | + Lifetime of the cookie. + at_least_one_of: + - 'consistent_hash.0.http_cookie.0.ttl' + - 'consistent_hash.0.http_cookie.0.name' + - 'consistent_hash.0.http_cookie.0.path' + properties: + - name: 'seconds' + type: Integer + description: | + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + required: true + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + - name: 'name' + type: String + description: | + Name of the cookie. + at_least_one_of: + - 'consistent_hash.0.http_cookie.0.ttl' + - 'consistent_hash.0.http_cookie.0.name' + - 'consistent_hash.0.http_cookie.0.path' + - name: 'path' + type: String + description: | + Path to set for the cookie. + at_least_one_of: + - 'consistent_hash.0.http_cookie.0.ttl' + - 'consistent_hash.0.http_cookie.0.name' + - 'consistent_hash.0.http_cookie.0.path' + - name: 'httpHeaderName' + type: String + description: | + The hash based on the value of the specified header field. + This field is applicable if the sessionAffinity is set to HEADER_FIELD. 
+ at_least_one_of: + - 'consistent_hash.0.http_cookie' + - 'consistent_hash.0.http_header_name' + - 'consistent_hash.0.minimum_ring_size' + - name: 'minimumRingSize' + type: Integer + description: | + The minimum number of virtual nodes to use for the hash ring. + Larger ring sizes result in more granular load + distributions. If the number of hosts in the load balancing pool + is larger than the ring size, each host will be assigned a single + virtual node. + Defaults to 1024. + at_least_one_of: + - 'consistent_hash.0.http_cookie' + - 'consistent_hash.0.http_header_name' + - 'consistent_hash.0.minimum_ring_size' + default_value: 1024 + - name: 'cdnPolicy' + type: NestedObject + description: 'Cloud CDN configuration for this BackendService.' + default_from_api: true + properties: + - name: 'cacheKeyPolicy' + type: NestedObject + description: 'The CacheKeyPolicy for this CdnPolicy.' + at_least_one_of: + - 'cdn_policy.0.cache_key_policy' + - 'cdn_policy.0.signed_url_cache_max_age_sec' + properties: + - name: 'includeHost' + type: Boolean + description: | + If true requests to different hosts will be cached separately. + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_http_headers' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + - name: 'includeProtocol' + type: Boolean + description: | + If true, http and https requests will be cached separately. 
+ send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_http_headers' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + - name: 'includeQueryString' + type: Boolean + description: | + If true, include query string parameters in the cache key + according to query_string_whitelist and + query_string_blacklist. If neither is set, the entire query + string will be included. + + If false, the query string will be excluded from the cache + key entirely. + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_http_headers' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + - name: 'queryStringBlacklist' + type: Array + description: | + Names of query string parameters to exclude in cache keys. + + All other parameters will be included. Either specify + query_string_whitelist or query_string_blacklist, not both. + '&' and '=' will be percent encoded and not treated as + delimiters. 
+ is_set: true + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_http_headers' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + item_type: + type: String + - name: 'queryStringWhitelist' + type: Array + description: | + Names of query string parameters to include in cache keys. + + All other parameters will be excluded. Either specify + query_string_whitelist or query_string_blacklist, not both. + '&' and '=' will be percent encoded and not treated as + delimiters. + is_set: true + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_http_headers' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + item_type: + type: String + - name: 'includeHttpHeaders' + type: Array + description: | + Allows HTTP request headers (by name) to be used in the + cache key. 
+ send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_http_headers' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + item_type: + type: String + - name: 'includeNamedCookies' + type: Array + description: | + Names of cookies to include in cache keys. + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_http_headers' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + item_type: + type: String + - name: 'signedUrlCacheMaxAgeSec' + type: Integer + description: | + Maximum number of seconds the response to a signed URL request + will be considered fresh, defaults to 1hr (3600s). After this + time period, the response will be revalidated before + being served. + + When serving responses to signed URL requests, Cloud CDN will + internally behave as though all responses from this backend had a + "Cache-Control: public, max-age=[TTL]" header, regardless of any + existing Cache-Control header. The actual headers served in + responses will not be altered. + at_least_one_of: + - 'cdn_policy.0.cache_key_policy' + - 'cdn_policy.0.signed_url_cache_max_age_sec' + default_value: 3600 + - name: 'defaultTtl' + type: Integer + description: | + Specifies the default TTL for cached content served by this origin for responses + that do not have an existing valid TTL (max-age or s-max-age). 
+        default_from_api: true
+      - name: 'maxTtl'
+        type: Integer
+        description: |
+          Specifies the maximum allowed TTL for cached content served by this origin.
+        default_from_api: true
+      - name: 'clientTtl'
+        type: Integer
+        description: |
+          Specifies a separate client (e.g. browser client) maximum TTL.
+          This is used to clamp the max-age (or Expires) value sent to the client.
+        default_from_api: true
+      - name: 'negativeCaching'
+        type: Boolean
+        description: |
+          Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects.
+        default_from_api: true
+        send_empty_value: true
+      - name: 'negativeCachingPolicy'
+        type: Array
+        description: |
+          Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy.
+          Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs.
+        item_type:
+          type: NestedObject
+          properties:
+            - name: 'code'
+              type: Integer
+              description: |
+                The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501
+                can be specified as values, and you cannot specify a status code more than once.
+            - name: 'ttl'
+              type: Integer
+              description: |
+                The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s
+                (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL.
+              send_empty_value: true
+      - name: 'cacheMode'
+        type: Enum
+        description: |
+          Specifies the cache setting for all responses from this backend.
+ The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + default_from_api: true + enum_values: + - 'USE_ORIGIN_HEADERS' + - 'FORCE_CACHE_ALL' + - 'CACHE_ALL_STATIC' + - name: 'serveWhileStale' + type: Integer + description: | + Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. + default_from_api: true + send_empty_value: true + - name: 'bypassCacheOnRequestHeaders' + type: Array + description: | + Bypass the cache when the specified request headers are matched - e.g. Pragma or Authorization headers. Up to 5 headers can be specified. + The cache is bypassed for all cdnPolicy.cacheMode settings. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The header field name to match on when bypassing cache. Values are case-insensitive. + required: true + - name: 'connectionDraining' + type: NestedObject + description: | + Settings for connection draining + flatten_object: true + properties: + - name: 'connection_draining_timeout_sec' + type: Integer + description: | + Time for which instance will be drained (not accept new + connections, but still work to finish started). + api_name: drainingTimeoutSec + default_value: 300 + - name: 'creationTimestamp' + type: Time + description: | + Creation timestamp in RFC3339 text format. + output: true + - name: 'customRequestHeaders' + type: Array + description: | + Headers that the HTTP/S load balancer should add to proxied + requests. + is_set: true + item_type: + type: String + - name: 'customResponseHeaders' + type: Array + description: | + Headers that the HTTP/S load balancer should add to proxied + responses. + is_set: true + item_type: + type: String + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. A hash of the contents stored in this + object. This field is used in optimistic locking. 
+ output: true + - name: 'description' + type: String + description: | + An optional description of this resource. + - name: 'enableCDN' + type: Boolean + description: | + If true, enable Cloud CDN for this BackendService. + - name: 'healthChecks' + type: Array + description: | + The set of URLs to the HttpHealthCheck or HttpsHealthCheck resource + for health checking this BackendService. Currently at most one health + check can be specified. + + A health check must be specified unless the backend service uses an internet + or serverless NEG as a backend. + + For internal load balancing, a URL to a HealthCheck resource must be specified instead. + is_set: true + set_hash_func: 'tpgresource.SelfLinkRelativePathHash' + custom_flatten: 'templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl' + item_type: + type: String + min_size: 1 + max_size: 1 + - name: 'generated_id' + type: Integer + description: + 'The unique identifier for the resource. This identifier is defined by the + server.' + api_name: id + output: true + - name: 'iap' + type: NestedObject + description: Settings for enabling Cloud Identity Aware Proxy + send_empty_value: true + properties: + - name: 'oauth2ClientId' + type: String + description: | + OAuth2 Client ID for IAP + required: true + - name: 'oauth2ClientSecret' + type: String + description: | + OAuth2 Client Secret for IAP + required: true + ignore_read: true + sensitive: true + send_empty_value: true + - name: 'oauth2ClientSecretSha256' + type: String + description: | + OAuth2 Client Secret SHA-256 for IAP + sensitive: true + output: true + - name: 'loadBalancingScheme' + type: Enum + description: | + Indicates whether the backend service will be used with internal or + external load balancing. A backend service created for one type of + load balancing cannot be used with the other. For more information, refer to + [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). 
+ immutable: true + default_value: EXTERNAL + enum_values: + - 'EXTERNAL' + - 'INTERNAL_SELF_MANAGED' + - 'INTERNAL_MANAGED' + - 'EXTERNAL_MANAGED' + - name: 'localityLbPolicy' + type: Enum + description: | + The load balancing algorithm used within the scope of the locality. + The possible values are: + + * `ROUND_ROBIN`: This is a simple policy in which each healthy backend + is selected in round robin order. + + * `LEAST_REQUEST`: An O(1) algorithm which selects two random healthy + hosts and picks the host which has fewer active requests. + + * `RING_HASH`: The ring/modulo hash load balancer implements consistent + hashing to backends. The algorithm has the property that the + addition/removal of a host from a set of N hosts only affects + 1/N of the requests. + + * `RANDOM`: The load balancer selects a random healthy host. + + * `ORIGINAL_DESTINATION`: Backend host is selected based on the client + connection metadata, i.e., connections are opened + to the same address as the destination address of + the incoming connection before the connection + was redirected to the load balancer. + + * `MAGLEV`: used as a drop in replacement for the ring hash load balancer. + Maglev is not as stable as ring hash but has faster table lookup + build times and host selection times. For more information about + Maglev, refer to https://ai.google/research/pubs/pub44824 + + * `WEIGHTED_MAGLEV`: Per-instance weighted Load Balancing via health check + reported weights. If set, the Backend Service must + configure a non legacy HTTP-based Health Check, and + health check replies are expected to contain + non-standard HTTP response header field + X-Load-Balancing-Endpoint-Weight to specify the + per-instance weights. If set, Load Balancing is weight + based on the per-instance weights reported in the last + processed health check replies, as long as every + instance either reported a valid weight or had + UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains + equal-weight. 
+ + + This field is applicable to either: + + * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, + and loadBalancingScheme set to INTERNAL_MANAGED. + * A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. + * A regional backend service with loadBalancingScheme set to EXTERNAL (External Network + Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External + Network Load Balancing. The default is MAGLEV. + + + If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, + or RING_HASH, session affinity settings will not take effect. + + Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced + by a URL map that is bound to target gRPC proxy that has validate_for_proxyless + field set to true. + enum_values: + - 'ROUND_ROBIN' + - 'LEAST_REQUEST' + - 'RING_HASH' + - 'RANDOM' + - 'ORIGINAL_DESTINATION' + - 'MAGLEV' + - 'WEIGHTED_MAGLEV' + - name: 'localityLbPolicies' + type: Array + description: | + A list of locality load balancing policies to be used in order of + preference. Either the policy or the customPolicy field should be set. + Overrides any value set in the localityLbPolicy field. + + localityLbPolicies is only supported when the BackendService is referenced + by a URL Map that is referenced by a target gRPC proxy that has the + validateForProxyless field set to true. + item_type: + description: | + Container for either a built-in LB policy supported by gRPC or Envoy or + a custom one implemented by the end user. + type: NestedObject + properties: + - name: 'policy' + type: NestedObject + description: | + The configuration for a built-in load balancing policy. + exactly_one_of: + - 'policy' + - 'customPolicy' + properties: + - name: 'name' + type: Enum + description: | + The name of a locality load balancer policy to be used. 
The value + should be one of the predefined ones as supported by localityLbPolicy, + although at the moment only ROUND_ROBIN is supported. + + This field should only be populated when the customPolicy field is not + used. + + Note that specifying the same policy more than once for a backend is + not a valid configuration and will be rejected. + + The possible values are: + + * `ROUND_ROBIN`: This is a simple policy in which each healthy backend + is selected in round robin order. + + * `LEAST_REQUEST`: An O(1) algorithm which selects two random healthy + hosts and picks the host which has fewer active requests. + + * `RING_HASH`: The ring/modulo hash load balancer implements consistent + hashing to backends. The algorithm has the property that the + addition/removal of a host from a set of N hosts only affects + 1/N of the requests. + + * `RANDOM`: The load balancer selects a random healthy host. + + * `ORIGINAL_DESTINATION`: Backend host is selected based on the client + connection metadata, i.e., connections are opened + to the same address as the destination address of + the incoming connection before the connection + was redirected to the load balancer. + + * `MAGLEV`: used as a drop in replacement for the ring hash load balancer. + Maglev is not as stable as ring hash but has faster table lookup + build times and host selection times. For more information about + Maglev, refer to https://ai.google/research/pubs/pub44824 + required: true + enum_values: + - 'ROUND_ROBIN' + - 'LEAST_REQUEST' + - 'RING_HASH' + - 'RANDOM' + - 'ORIGINAL_DESTINATION' + - 'MAGLEV' + - name: 'customPolicy' + type: NestedObject + description: | + The configuration for a custom policy implemented by the user and + deployed with the client. + exactly_one_of: + - 'policy' + - 'customPolicy' + properties: + - name: 'name' + type: String + description: | + Identifies the custom policy. + + The value should match the type the custom implementation is registered + with on the gRPC clients. 
 It should follow protocol buffer + message naming conventions and include the full path (e.g. + myorg.CustomLbPolicy). The maximum length is 256 characters. + + Note that specifying the same custom policy more than once for a + backend is not a valid configuration and will be rejected. + required: true + - name: 'data' + type: String + description: | + An optional, arbitrary JSON object with configuration data, understood + by a locally installed custom policy implementation. + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'outlierDetection' + type: NestedObject + description: | + Settings controlling eviction of unhealthy hosts from the load balancing pool. + Applicable backend service types can be a global backend service with the + loadBalancingScheme set to INTERNAL_SELF_MANAGED or EXTERNAL_MANAGED. + + From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. + Default values are enforced by GCP without providing them. + properties: + - name: 'baseEjectionTime' + type: NestedObject + description: | + The base time that a host is ejected for. The real time is equal to the base + time multiplied by the number of times the host has been ejected. Defaults to + 30000ms or 30s. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + properties: + - name: 'seconds' + type: Integer + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'consecutiveErrors' + type: Integer + description: | + Number of errors before a host is ejected from the connection pool. When the + backend host is accessed over HTTP, a 5xx return code qualifies as an error. + Defaults to 5. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 5 + - name: 'consecutiveGatewayFailure' + type: Integer + description: | + The number of consecutive gateway failures (502, 503, 504 status or connection + errors that are mapped to one of those status codes) before a consecutive + gateway failure ejection occurs. Defaults to 5. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 5 + - name: 'enforcingConsecutiveErrors' + type: Integer + description: | + The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive 5xx. This setting can be used to disable + ejection or to ramp it up slowly. Defaults to 100. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 100 + - name: 'enforcingConsecutiveGatewayFailure' + type: Integer + description: | + The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive gateway failures. This setting can be + used to disable ejection or to ramp it up slowly. Defaults to 0. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 0 + - name: 'enforcingSuccessRate' + type: Integer + description: | + The percentage chance that a host will be actually ejected when an outlier + status is detected through success rate statistics. This setting can be used to + disable ejection or to ramp it up slowly. Defaults to 100. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 100 + - name: 'interval' + type: NestedObject + description: | + Time interval between ejection sweep analysis. This can result in both new + ejections as well as hosts being returned to service. Defaults to 10 seconds. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + properties: + - name: 'seconds' + type: Integer + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'maxEjectionPercent' + type: Integer + description: | + Maximum percentage of hosts in the load balancing pool for the backend service + that can be ejected. 
Defaults to 10%. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 10 + - name: 'successRateMinimumHosts' + type: Integer + description: | + The number of hosts in a cluster that must have enough request volume to detect + success rate outliers. If the number of hosts is less than this setting, outlier + detection via success rate statistics is not performed for any host in the + cluster. Defaults to 5. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 5 + - name: 'successRateRequestVolume' + type: Integer + description: | + The minimum number of total requests that must be collected in one interval (as + defined by the interval duration above) to include this host in success rate + based outlier detection. If the volume is lower than this setting, outlier + detection via success rate statistics is not performed for that host. Defaults + to 100. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 100 + - name: 'successRateStdevFactor' + type: Integer + description: | + This factor is used to determine the ejection threshold for success rate outlier + ejection. The ejection threshold is the difference between the mean success + rate, and the product of this factor and the standard deviation of the mean + success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided + by a thousand to get a double. That is, if the desired factor is 1.9, the + runtime value should be 1900. Defaults to 1900. + # 'port' is deprecated + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 1900 + - name: 'portName' + type: String + description: | + Name of backend port. The same name should appear in the instance + groups referenced by this service. Required when the load balancing + scheme is EXTERNAL. 
+ default_from_api: true + - name: 'protocol' + type: Enum + description: | + The protocol this BackendService uses to communicate with backends. + The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer + types and may result in errors if used with the GA API. **NOTE**: With protocol “UNSPECIFIED”, + the backend service can be used by Layer 4 Internal Load Balancing or Network Load Balancing + with TCP/UDP/L3_DEFAULT Forwarding Rule protocol. + default_from_api: true + enum_values: + - 'HTTP' + - 'HTTPS' + - 'HTTP2' + - 'TCP' + - 'SSL' + - 'GRPC' + - 'UNSPECIFIED' + - name: 'securityPolicy' + type: String + description: | + The security policy associated with this backend service. + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'edgeSecurityPolicy' + type: String + description: | + The resource URL for the edge security policy associated with this backend service. + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'securitySettings' + type: NestedObject + description: | + The security settings that apply to this backend service. This field is applicable to either + a regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, and + load_balancing_scheme set to INTERNAL_MANAGED; or a global backend service with the + load_balancing_scheme set to INTERNAL_SELF_MANAGED. + properties: + - name: 'clientTlsPolicy' + type: ResourceRef + description: | + ClientTlsPolicy is a resource that specifies how a client should authenticate + connections to backends of a service. This resource itself does not affect + configuration unless it is attached to a backend service resource. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' + - name: 'subjectAltNames' + type: Array + description: | + A list of alternate names to verify the subject identity in the certificate. 
+ If specified, the client will verify that the server certificate's subject + alt name matches one of the specified values. + required: true + item_type: + type: String + - name: 'sessionAffinity' + type: Enum + description: | + Type of session affinity to use. The default is NONE. Session affinity is + not applicable if the protocol is UDP. + default_from_api: true + enum_values: + - 'NONE' + - 'CLIENT_IP' + - 'CLIENT_IP_PORT_PROTO' + - 'CLIENT_IP_PROTO' + - 'GENERATED_COOKIE' + - 'HEADER_FIELD' + - 'HTTP_COOKIE' + - name: 'timeoutSec' + type: Integer + description: | + The backend service timeout has a different meaning depending on the type of load balancer. + For more information see, [Backend service settings](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + The default is 30 seconds. + The full range of timeout values allowed goes from 1 through 2,147,483,647 seconds. + default_from_api: true + - name: 'logConfig' + type: NestedObject + description: | + This field denotes the logging options for the load balancer traffic served by this backend service. + If logging is enabled, logs will be exported to Stackdriver. + default_from_api: true + properties: + - name: 'enable' + type: Boolean + description: | + Whether to enable logging for the load balancer traffic served by this backend service. + send_empty_value: true + at_least_one_of: + - 'log_config.0.enable' + - 'log_config.0.sample_rate' + - name: 'sampleRate' + type: Double + description: | + This field can only be specified if logging is enabled for this backend service. The value of + the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer + where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. + The default value is 1.0. 
+ at_least_one_of: + - 'log_config.0.enable' + - 'log_config.0.sample_rate' + diff_suppress_func: 'suppressWhenDisabled' + default_value: 1.0 + - name: 'serviceLbPolicy' + type: String + description: | + URL to networkservices.ServiceLbPolicy resource. + Can only be set if load balancing scheme is EXTERNAL, EXTERNAL_MANAGED, INTERNAL_MANAGED or INTERNAL_SELF_MANAGED and the scope is global. diff --git a/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml new file mode 100644 index 000000000000..b12dcdc74bb0 --- /dev/null +++ b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml @@ -0,0 +1,98 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'BackendServiceSignedUrlKey' +kind: 'compute#BackendServiceSignedUrlKey' +description: | + A key for signing Cloud CDN signed URLs for Backend Services. 
 + +references: + guides: + 'Using Signed URLs': 'https://cloud.google.com/cdn/docs/using-signed-urls/' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/backendServices' +docs: +base_url: 'projects/{{project}}/global/backendServices/{{backend_service}}' +self_link: 'projects/{{project}}/global/backendServices/{{backend_service}}' +create_url: 'projects/{{project}}/global/backendServices/{{backend_service}}/addSignedUrlKey' +delete_url: 'projects/{{project}}/global/backendServices/{{backend_service}}/deleteSignedUrlKey?keyName={{name}}' +delete_verb: 'POST' +immutable: true +mutex: signedUrlKey/{{project}}/backendServices/{{backend_service}}/ +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - name +nested_query: + keys: + - cdnPolicy + - signedUrlKeyNames + is_list_of_ids: true + modify_by_patch: false +custom_code: +exclude_tgc: true +examples: + - name: 'backend_service_signed_url_key' + primary_resource_id: 'backend_key' + vars: + key_name: 'test-key' + backend_name: 'my-backend-service' + igm_name: 'my-webservers' + external_providers: ["random", "time"] + skip_test: true +parameters: + - name: 'backendService' + type: ResourceRef + description: | + The backend service this signed URL key belongs to. + required: true + immutable: true + ignore_read: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'BackendService' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + Name of the signed URL key. 
+ api_name: keyName + required: true + immutable: true + validation: + regex: '^(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)$' + - name: 'keyValue' + type: String + description: | + 128-bit key value used for signing the URL. The key value must be a + valid RFC 4648 Section 5 base64url encoded string. + required: true + immutable: true + ignore_read: true + sensitive: true diff --git a/mmv1/products/compute/go_Disk.yaml b/mmv1/products/compute/go_Disk.yaml new file mode 100644 index 000000000000..1a1b1d207055 --- /dev/null +++ b/mmv1/products/compute/go_Disk.yaml @@ -0,0 +1,504 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Disk' +kind: 'compute#disk' +description: | + Persistent disks are durable storage devices that function similarly to + the physical disks in a desktop or a server. Compute Engine manages the + hardware behind these devices to ensure data redundancy and optimize + performance for you. Persistent disks are available as either standard + hard disk drives (HDD) or solid-state drives (SSD). + + Persistent disks are located independently from your virtual machine + instances, so you can detach or move persistent disks to keep your data + even after you delete your instances. 
Persistent disk performance scales + automatically with size, so you can resize your existing persistent disks + or add more persistent disks to an instance to meet your performance and + storage space requirements. + + Add a persistent disk to your instance when you need reliable and + affordable storage with consistent performance characteristics. +references: + guides: + 'Adding a persistent disk': 'https://cloud.google.com/compute/docs/disks/add-persistent-disk' + api: 'https://cloud.google.com/compute/docs/reference/v1/disks' +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/disks' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + parent_resource_attribute: 'name' + base_url: 'projects/{{project}}/zones/{{zone}}/disks/{{name}}' + import_format: + - 'projects/{{project}}/zones/{{zone}}/disks/{{name}}' + - '{{name}}' +custom_code: + constants: 'templates/terraform/constants/go/disk.tmpl' + encoder: 'templates/terraform/encoders/go/disk.tmpl' + update_encoder: 'templates/terraform/update_encoder/go/hyper_disk.go.tmpl' + decoder: 'templates/terraform/decoders/go/disk.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/detach_disk.tmpl' +custom_diff: + - 'customdiff.ForceNewIfChange("size", IsDiskShrinkage)' + - 'hyperDiskIopsUpdateDiffSupress' + - 'tpgresource.SetLabelsDiff' +examples: + - name: 'disk_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-test-disk%s", context["random_suffix"])' + vars: + disk_name: 'test-disk' + - name: 'disk_async' + primary_resource_id: 'primary' + primary_resource_name: 
'fmt.Sprintf("tf-test-test-disk%s", context["random_suffix"])' + vars: + disk_name: 'async-test-disk' + secondary_disk_name: 'async-secondary-test-disk' + - name: 'disk_features' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-test-disk%s", context["random_suffix"])' + vars: + disk_name: 'test-disk-features' +parameters: + - name: 'zone' + type: ResourceRef + description: 'A reference to the zone where the disk resides.' + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' + - name: 'sourceImageEncryptionKey' + type: NestedObject + description: | + The customer-supplied encryption key of the source image. Required if + the source image is protected by a customer-supplied encryption key. + immutable: true + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + - name: 'sha256' + type: String + description: | + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + output: true + - name: 'kmsKeySelfLink' + type: String + description: | + The self link of the encryption key used to encrypt the disk. Also called KmsKeyName + in the cloud console. Your project's Compute Engine System service account + (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) must have + `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. 
+ See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys + api_name: kmsKeyName + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'kmsKeyServiceAccount' + type: String + description: | + The service account used for the encryption request for the given KMS key. + If absent, the Compute Engine Service Agent service account is used. + - name: 'sourceImageId' + type: String + description: | + The ID value of the image used to create this disk. This value + identifies the exact image that was used to create this persistent + disk. For example, if you created the persistent disk from an image + that was later deleted and recreated under the same name, the source + image ID would identify the exact version of the image that was used. + output: true + - name: 'diskEncryptionKey' + type: NestedObject + description: | + Encrypts the disk using a customer-supplied encryption key. + + After you encrypt a disk with a customer-supplied key, you must + provide the same key if you use the disk later (e.g. to create a disk + snapshot or an image, or to attach the disk to a virtual machine). + + Customer-supplied encryption keys do not protect access to metadata of + the disk. + + If you do not provide an encryption key when creating the disk, then + the disk will be encrypted using an automatically generated key and + you do not need to provide a key to use the disk later. + immutable: true + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + sensitive: true + - name: 'rsaEncryptedKey' + type: String + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped 2048-bit + customer-supplied encryption key to either encrypt or decrypt + this resource. You can provide either the rawKey or the rsaEncryptedKey. 
+ sensitive: true + - name: 'sha256' + type: String + description: | + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + output: true + - name: 'kmsKeySelfLink' + type: String + description: | + The self link of the encryption key used to encrypt the disk. Also called KmsKeyName + in the cloud console. Your project's Compute Engine System service account + (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) must have + `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys + api_name: kmsKeyName + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'kmsKeyServiceAccount' + type: String + description: | + The service account used for the encryption request for the given KMS key. + If absent, the Compute Engine Service Agent service account is used. + - name: 'snapshot' + type: ResourceRef + description: | + The source snapshot used to create this disk. You can provide this as + a partial or full URL to the resource. If the snapshot is in another + project than this disk, you must supply a full URL. For example, the + following are valid values: + + * `https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot` + * `projects/project/global/snapshots/snapshot` + * `global/snapshots/snapshot` + * `snapshot` + api_name: sourceSnapshot + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Snapshot' + imports: 'selfLink' + - name: 'sourceSnapshotEncryptionKey' + type: NestedObject + description: | + The customer-supplied encryption key of the source snapshot. Required + if the source snapshot is protected by a customer-supplied encryption + key. 
+ immutable: true + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + # TODO(chrisst) Change to ResourceRef once KMS is in Magic Modules + - name: 'kmsKeySelfLink' + type: String + description: | + The self link of the encryption key used to encrypt the disk. Also called KmsKeyName + in the cloud console. Your project's Compute Engine System service account + (`service-{{PROJECT_NUMBER}}@compute-system.iam.gserviceaccount.com`) must have + `roles/cloudkms.cryptoKeyEncrypterDecrypter` to use this feature. + See https://cloud.google.com/compute/docs/disks/customer-managed-encryption#encrypt_a_new_persistent_disk_with_your_own_keys + api_name: kmsKeyName + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'sha256' + type: String + description: | + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + output: true + - name: 'kmsKeyServiceAccount' + type: String + description: | + The service account used for the encryption request for the given KMS key. + If absent, the Compute Engine Service Agent service account is used. + - name: 'sourceSnapshotId' + type: String + description: | + The unique ID of the snapshot used to create this disk. This value + identifies the exact snapshot that was used to create this persistent + disk. For example, if you created the persistent disk from a snapshot + that was later deleted and recreated under the same name, the source + snapshot ID would identify the exact version of the snapshot that was + used. + output: true +properties: + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. 
+ output: true + update_url: 'projects/{{project}}/zones/{{zone}}/disks/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + - name: 'lastAttachTimestamp' + type: Time + description: 'Last attach timestamp in RFC3339 text format.' + output: true + - name: 'lastDetachTimestamp' + type: Time + description: 'Last detach timestamp in RFC3339 text format.' + output: true + - name: 'labels' + type: KeyValueLabels + description: | + Labels to apply to this disk. A list of key->value pairs. + immutable: false + update_url: 'projects/{{project}}/zones/{{zone}}/disks/{{name}}/setLabels' + update_verb: 'POST' + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'size' + type: Integer + description: | + Size of the persistent disk, specified in GB. You can specify this + field when creating a persistent disk using the `image` or + `snapshot` parameter, or specify it alone to create an empty + persistent disk. + + If you specify this field along with `image` or `snapshot`, + the value must not be less than the size of the image + or the size of the snapshot. + + ~>**NOTE** If you change the size, Terraform updates the disk size + if upsizing is detected but recreates the disk if downsizing is requested. 
+ You can add `lifecycle.prevent_destroy` in the config to prevent destroying + and recreating. + api_name: sizeGb + default_from_api: true + update_url: 'projects/{{project}}/zones/{{zone}}/disks/{{name}}/resize' + update_verb: 'POST' + - name: 'users' + type: Array + description: | + Links to the users of the disk (attached instances) in form: + project/zones/zone/instances/instance + output: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'user' + type: ResourceRef + description: 'A reference to a user of this disk' + resource: 'Instance' + imports: 'selfLink' + - name: 'physicalBlockSizeBytes' + type: Integer + description: | + Physical block size of the persistent disk, in bytes. If not present + in a request, a default value is used. Currently supported sizes + are 4096 and 16384, other sizes may be added in the future. + If an unsupported value is requested, the error message will list + the supported values for the caller's project. + default_from_api: true + - name: 'interface' + type: String + description: | + Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. + min_version: 'beta' + url_param_only: true + diff_suppress_func: 'tpgresource.AlwaysDiffSuppress' + default_value: SCSI + deprecation_message: '`interface` is deprecated and will be removed in a future major release. This field is no longer used and can be safely removed from your configurations; disk interfaces are automatically determined on attachment.' + - name: 'sourceDisk' + type: String + description: | + The source disk used to create this disk. You can provide this as a partial or full URL to the resource. 
+ For example, the following are valid values: + + * https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{disk} + * https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{disk} + * projects/{project}/zones/{zone}/disks/{disk} + * projects/{project}/regions/{region}/disks/{disk} + * zones/{zone}/disks/{disk} + * regions/{region}/disks/{disk} + diff_suppress_func: 'sourceDiskDiffSupress' + - name: 'sourceDiskId' + type: String + description: | + The ID value of the disk used to create this image. This value may + be used to determine whether the image was taken from the current + or a previous instance of a given disk name. + output: true + - name: 'DiskId' + type: String + description: | + The unique identifier for the resource. This identifier is defined by the server. + api_name: id + output: true + - name: 'type' + type: ResourceRef + description: | + URL of the disk type resource describing which disk type to use to + create the disk. Provide this when creating the disk. + diff_suppress_func: 'tpgresource.CompareResourceNames' + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + default_value: pd-standard + resource: 'DiskType' + imports: 'selfLink' + - name: 'image' + type: String + description: | + The image from which to initialize this disk. This can be + one of: the image's `self_link`, `projects/{project}/global/images/{image}`, + `projects/{project}/global/images/family/{family}`, `global/images/{image}`, + `global/images/family/{family}`, `family/{family}`, `{project}/{family}`, + `{project}/{image}`, `{family}`, or `{image}`. If referred by family, the + images names must include the family name. If they don't, use the + [google_compute_image data source](/docs/providers/google/d/compute_image.html). + For instance, the image `centos-6-v20180104` includes its family name `centos-6`. 
+ These images can be referred by family name here. + api_name: sourceImage + immutable: true + diff_suppress_func: 'DiskImageDiffSuppress' + - name: 'resourcePolicies' + type: Array + description: | + Resource policies applied to this disk for automatic snapshot creations. + + ~>**NOTE** This value does not support updating the + resource policy, as resource policies can not be updated more than + one at a time. Use + [`google_compute_disk_resource_policy_attachment`](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_disk_resource_policy_attachment) + to allow for updating the resource policy attached to the disk. + min_version: 'beta' + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'resourcePolicy' + type: ResourceRef + description: + 'A resource policy applied to this disk for automatic snapshot + creations.' + resource: 'ResourcePolicy' + imports: 'selfLink' + - name: 'enableConfidentialCompute' + type: Boolean + description: | + Whether this disk is using confidential compute mode. + Note: Only supported on hyperdisk skus, disk_encryption_key is required when setting to true + required: false + default_from_api: true + - name: 'multiWriter' + type: Boolean + description: | + Indicates whether or not the disk can be read/write attached to more than one instance. + min_version: 'beta' + - name: 'provisionedIops' + type: Integer + description: | + Indicates how many IOPS must be provisioned for the disk. + Note: Updating currently is only supported by hyperdisk skus without the need to delete and recreate the disk, hyperdisk + allows for an update of IOPS every 4 hours. 
To update your hyperdisk more frequently, you'll need to manually delete and recreate it + required: false + default_from_api: true + update_url: 'projects/{{project}}/zones/{{zone}}/disks/{{name}}?paths=provisionedIops' + update_verb: 'PATCH' + - name: 'provisionedThroughput' + type: Integer + description: | + Indicates how much Throughput must be provisioned for the disk. + Note: Updating currently is only supported by hyperdisk skus without the need to delete and recreate the disk, hyperdisk + allows for an update of Throughput every 4 hours. To update your hyperdisk more frequently, you'll need to manually delete and recreate it + default_from_api: true + update_url: 'projects/{{project}}/zones/{{zone}}/disks/{{name}}?paths=provisionedThroughput' + update_verb: 'PATCH' + - name: 'asyncPrimaryDisk' + type: NestedObject + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + properties: + - name: 'disk' + type: String + description: | + Primary disk for asynchronous disk replication. + required: true + - name: 'guestOsFeatures' + type: Array + description: | + A list of features to enable on the guest operating system. + Applicable only for bootable disks. + is_set: true + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'type' + type: String + description: | + The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. + required: true + - name: 'licenses' + type: Array + description: Any applicable license URI. 
+ immutable: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'license' + type: ResourceRef + description: 'An applicable license URI' + resource: 'License' + imports: 'selfLink' + - name: 'storagePool' + type: String + description: | + The URL of the storage pool in which the new disk is created. + For example: + * https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/storagePools/{storagePool} + * /projects/{project}/zones/{zone}/storagePools/{storagePool} + required: false + immutable: true + diff_suppress_func: 'tpgresource.CompareResourceNames' + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' diff --git a/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml b/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml new file mode 100644 index 000000000000..72cb814abaa1 --- /dev/null +++ b/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml @@ -0,0 +1,88 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'DiskResourcePolicyAttachment' +description: | + Adds existing resource policies to a disk. You can only add one policy + which will be applied to this disk for scheduling snapshot creation. + + ~> **Note:** This resource does not support regional disks (`google_compute_region_disk`). 
For regional disks, please refer to [`google_compute_region_disk_resource_policy_attachment`](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_region_disk_resource_policy_attachment) +docs: +id_format: '{{project}}/{{zone}}/{{disk}}/{{name}}' +base_url: 'projects/{{project}}/zones/{{zone}}/disks/{{disk}}' +self_link: 'projects/{{project}}/zones/{{zone}}/disks/{{disk}}' +create_url: 'projects/{{project}}/zones/{{zone}}/disks/{{disk}}/addResourcePolicies' +delete_url: 'projects/{{project}}/zones/{{zone}}/disks/{{disk}}/removeResourcePolicies' +delete_verb: 'POST' +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - name +nested_query: + keys: + - resourcePolicies + is_list_of_ids: true + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/compute_disk_resource_policies_attachment.go.tmpl' + decoder: 'templates/terraform/decoders/go/compute_disk_resource_policies_attachment.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/compute_disk_resource_policies_attachment.go.tmpl' +examples: + - name: 'disk_resource_policy_attachment_basic' + primary_resource_id: 'attachment' + vars: + disk_name: 'my-disk' + policy_name: 'my-resource-policy' +parameters: + - name: 'disk' + type: ResourceRef + description: | + The name of the disk in which the resource policies are attached to. + url_param_only: true + required: true + resource: 'Disk' + imports: 'name' + - name: 'zone' + type: ResourceRef + description: 'A reference to the zone where the disk resides.' 
+ url_param_only: true + required: false + ignore_read: true + default_from_api: true + resource: 'Zone' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + The resource policy to be attached to the disk for scheduling snapshot + creation. Do not specify the self link. + required: true diff --git a/mmv1/products/compute/go_DiskType.yaml b/mmv1/products/compute/go_DiskType.yaml new file mode 100644 index 000000000000..ca11c8ebb5d0 --- /dev/null +++ b/mmv1/products/compute/go_DiskType.yaml @@ -0,0 +1,114 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'DiskType' +kind: 'compute#diskType' +description: | + Represents a DiskType resource. A DiskType resource represents the type + of disk to use, such as a pd-ssd, pd-balanced or pd-standard. To reference a disk + type, use the disk type's full or partial URL. +# TODO(nelsonjr): Temporarily make DiskType virtual so no tests gets +# triggered for create. 
Implement support for read only objects, and delete +# the virtual tag +# | readonly: true +exclude: true +readonly: true +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/diskTypes' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +collection_url_key: 'items' +custom_code: +parameters: + - name: 'zone' + type: ResourceRef + description: 'A reference to the zone where the disk type resides.' + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'defaultDiskSizeGb' + type: Integer + description: 'Server-defined default disk size in GB.' + output: true + - name: 'deprecated' + type: NestedObject + description: 'The deprecation status associated with this disk type.' + output: true + properties: + - name: 'deleted' + type: Time + description: | + An optional RFC3339 timestamp on or after which the deprecation state + of this resource will be changed to DELETED. + output: true + - name: 'deprecated' + type: Time + description: | + An optional RFC3339 timestamp on or after which the deprecation state + of this resource will be changed to DEPRECATED. + output: true + - name: 'obsolete' + type: Time + description: | + An optional RFC3339 timestamp on or after which the deprecation state + of this resource will be changed to OBSOLETE. + output: true + - name: 'replacement' + type: String + description: | + The URL of the suggested replacement for a deprecated resource. The + suggested replacement resource must be the same kind of resource as + the deprecated resource. + output: true + - name: 'state' + type: Enum + description: | + The deprecation state of this resource. This can be DEPRECATED, + OBSOLETE, or DELETED. 
Operations which create a new resource using a + DEPRECATED resource will return successfully, but with a warning + indicating the deprecated resource and recommending its replacement. + Operations which use OBSOLETE or DELETED resources will be rejected + and result in an error. + output: true + enum_values: + - 'DEPRECATED' + - 'OBSOLETE' + - 'DELETED' + - name: 'description' + type: String + description: 'An optional description of this resource.' + output: true + - name: 'id' + type: Integer + description: 'The unique identifier for the resource.' + output: true + - name: 'name' + type: String + description: 'Name of the resource.' + - name: 'validDiskSize' + type: String + description: | + An optional textual description of the valid disk size, such as + "10GB-10TB". + output: true diff --git a/mmv1/products/compute/go_ExternalVpnGateway.yaml b/mmv1/products/compute/go_ExternalVpnGateway.yaml new file mode 100644 index 000000000000..b31f58b110ff --- /dev/null +++ b/mmv1/products/compute/go_ExternalVpnGateway.yaml @@ -0,0 +1,123 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ExternalVpnGateway' +kind: 'compute#externalVpnGateway' +description: | + Represents a VPN gateway managed outside of GCP. 
+references: + guides: + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/externalVpnGateways' +docs: +base_url: 'projects/{{project}}/global/externalVpnGateways' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'external_vpn_gateway' + primary_resource_id: 'external_gateway' + vars: + ha_vpn_gateway_name: 'ha-vpn' + network_name: 'network-1' + external_gateway_name: 'external-gateway' + global_address_name: 'global-address' + router_name: 'ha-vpn-router1' + skip_vcr: true + - name: 'only_external_vpn_gateway_full' + primary_resource_id: 'external_gateway' + vars: + external_gateway_name: 'external-gateway' + skip_docs: true +parameters: +properties: + - name: 'description' + type: String + description: 'An optional description of this resource.' + - name: 'labels' + type: KeyValueLabels + description: 'Labels for the external VPN gateway resource.' + immutable: false + update_url: 'projects/{{project}}/global/externalVpnGateways/{{name}}/setLabels' + update_verb: 'POST' + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + output: true + update_url: 'projects/{{project}}/global/externalVpnGateways/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. 
Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + - name: 'redundancyType' + type: Enum + description: | + Indicates the redundancy type of this external VPN gateway + enum_values: + - 'FOUR_IPS_REDUNDANCY' + - 'SINGLE_IP_INTERNALLY_REDUNDANT' + - 'TWO_IPS_REDUNDANCY' + - name: 'interface' + type: Array + description: | + A list of interfaces on this external VPN gateway. + api_name: interfaces + item_type: + type: NestedObject + properties: + - name: 'id' + type: Integer + description: | + The numeric ID for this interface. Allowed values are based on the redundancy type + of this external VPN gateway + * `0 - SINGLE_IP_INTERNALLY_REDUNDANT` + * `0, 1 - TWO_IPS_REDUNDANCY` + * `0, 1, 2, 3 - FOUR_IPS_REDUNDANCY` + send_empty_value: true + - name: 'ipAddress' + type: String + description: | + IP address of the interface in the external VPN gateway. + Only IPv4 is supported. This IP address can be either from + your on-premise gateway or another Cloud provider's VPN gateway, + it cannot be an IP address from Google Compute Engine. diff --git a/mmv1/products/compute/go_Firewall.yaml b/mmv1/products/compute/go_Firewall.yaml new file mode 100644 index 000000000000..8d820debf8d2 --- /dev/null +++ b/mmv1/products/compute/go_Firewall.yaml @@ -0,0 +1,331 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Firewall' +kind: 'compute#firewall' +description: | + Each network has its own firewall controlling access to and from the + instances. + + All traffic to instances, even from other instances, is blocked by the + firewall unless firewall rules are created to allow it. + + The default network has automatically created firewall rules that are + shown in default firewall rules. No manually created network has + automatically created firewall rules except for a default "allow" rule for + outgoing traffic and a default "deny" for incoming traffic. For all + networks except the default network, you must create any firewall rules + you need. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/vpc/docs/firewalls' + api: 'https://cloud.google.com/compute/docs/reference/v1/firewalls' +docs: + optional_properties: '* `enable_logging` - (Optional, Deprecated) This field denotes whether to enable logging for a particular firewall rule. +If logging is enabled, logs will be exported to Stackdriver. 
Deprecated in favor of `log_config` +' +base_url: 'projects/{{project}}/global/firewalls' +has_self_link: true +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/firewall.tmpl' + constants: 'templates/terraform/constants/go/firewall.tmpl' +custom_diff: + - 'resourceComputeFirewallEnableLoggingCustomizeDiff' + - 'resourceComputeFirewallSourceFieldsCustomizeDiff' +schema_version: 1 +migrate_state: 'resourceComputeFirewallMigrateState' +examples: + - name: 'firewall_basic' + primary_resource_id: 'default' + vars: + firewall_name: 'test-firewall' + network_name: 'test-network' + - name: 'firewall_with_target_tags' + primary_resource_id: 'rules' + vars: + firewall_name: 'my-firewall-rule' + test_env_vars: + project: 'PROJECT_NAME' +parameters: +properties: + - name: 'allow' + type: Array + description: | + The list of ALLOW rules specified by this firewall. Each rule + specifies a protocol and port-range tuple that describes a permitted + connection. + api_name: allowed + is_set: true + exactly_one_of: + - 'allow' + - 'deny' + set_hash_func: 'resourceComputeFirewallRuleHash' + item_type: + type: NestedObject + properties: + - name: 'protocol' + type: String + description: | + The IP protocol to which this rule applies. The protocol type is + required when creating a firewall rule. This value can either be + one of the following well known protocol strings (tcp, udp, + icmp, esp, ah, sctp, ipip, all), or the IP protocol number. 
+ api_name: IPProtocol + required: true + diff_suppress_func: 'tpgresource.CaseDiffSuppress' + - name: 'ports' + type: Array + description: | + An optional list of ports to which this rule applies. This field + is only applicable for UDP or TCP protocol. Each entry must be + either an integer or a range. If not specified, this rule + applies to connections through any port. + + Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. + item_type: + type: String + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'deny' + type: Array + description: | + The list of DENY rules specified by this firewall. Each rule specifies + a protocol and port-range tuple that describes a denied connection. + api_name: denied + is_set: true + exactly_one_of: + - 'allow' + - 'deny' + set_hash_func: 'resourceComputeFirewallRuleHash' + item_type: + type: NestedObject + properties: + - name: 'protocol' + type: String + description: | + The IP protocol to which this rule applies. The protocol type is + required when creating a firewall rule. This value can either be + one of the following well known protocol strings (tcp, udp, + icmp, esp, ah, sctp, ipip, all), or the IP protocol number. + api_name: IPProtocol + required: true + diff_suppress_func: 'tpgresource.CaseDiffSuppress' + - name: 'ports' + type: Array + description: | + An optional list of ports to which this rule applies. This field + is only applicable for UDP or TCP protocol. Each entry must be + either an integer or a range. If not specified, this rule + applies to connections through any port. + + Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. + item_type: + type: String + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. 
+ - name: 'destinationRanges' + type: Array + description: | + If destination ranges are specified, the firewall will apply only to + traffic that has destination IP address in these ranges. These ranges + must be expressed in CIDR format. IPv4 or IPv6 ranges are supported. + is_set: true + default_from_api: true + item_type: + type: String + - name: 'direction' + type: Enum + description: | + Direction of traffic to which this firewall applies; default is + INGRESS. Note: For INGRESS traffic, one of `source_ranges`, + `source_tags` or `source_service_accounts` is required. + immutable: true + default_from_api: true + enum_values: + - 'INGRESS' + - 'EGRESS' + - name: 'disabled' + type: Boolean + description: | + Denotes whether the firewall rule is disabled, i.e not applied to the + network it is associated with. When set to true, the firewall rule is + not enforced and the network behaves as if it did not exist. If this + is unspecified, the firewall rule will be enabled. + send_empty_value: true + - name: 'logConfig' + type: NestedObject + description: | + This field denotes the logging options for a particular firewall rule. + If defined, logging is enabled, and logs will be exported to Cloud Logging. + send_empty_value: true + diff_suppress_func: 'diffSuppressEnableLogging' + custom_flatten: 'templates/terraform/custom_flatten/go/firewall_log_config.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/firewall_log_config.go.tmpl' + properties: + - name: 'metadata' + type: Enum + description: | + This field denotes whether to include or exclude metadata for firewall logs. + required: true + enum_values: + - 'EXCLUDE_ALL_METADATA' + - 'INCLUDE_ALL_METADATA' + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. 
Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + validation: + function: 'verify.ValidateGCEName' + - name: 'network' + type: ResourceRef + description: | + The name or self_link of the network to attach this firewall to. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'priority' + type: Integer + description: | + Priority for this rule. This is an integer between 0 and 65535, both + inclusive. When not specified, the value assumed is 1000. Relative + priorities determine precedence of conflicting rules. Lower value of + priority implies higher precedence (eg, a rule with priority 0 has + higher precedence than a rule with priority 1). DENY rules take + precedence over ALLOW rules having equal priority. + send_empty_value: true + validation: + function: 'validation.IntBetween(0, 65535)' + default_value: 1000 + - name: 'sourceRanges' + type: Array + description: | + If source ranges are specified, the firewall will apply only to + traffic that has source IP address in these ranges. These ranges must + be expressed in CIDR format. One or both of sourceRanges and + sourceTags may be set. If both properties are set, the firewall will + apply to traffic that has source IP address within sourceRanges OR the + source IP that belongs to a tag listed in the sourceTags property. The + connection does not need to match both properties for the firewall to + apply. IPv4 or IPv6 ranges are supported. For INGRESS traffic, one of + `source_ranges`, `source_tags` or `source_service_accounts` is required. 
+ is_set: true + diff_suppress_func: 'diffSuppressSourceRanges' + item_type: + type: String + - name: 'sourceServiceAccounts' + type: Array + description: | + If source service accounts are specified, the firewall will apply only + to traffic originating from an instance with a service account in this + list. Source service accounts cannot be used to control traffic to an + instance's external IP address because service accounts are associated + with an instance, not an IP address. sourceRanges can be set at the + same time as sourceServiceAccounts. If both are set, the firewall will + apply to traffic that has source IP address within sourceRanges OR the + source IP belongs to an instance with service account listed in + sourceServiceAccount. The connection does not need to match both + properties for the firewall to apply. sourceServiceAccounts cannot be + used at the same time as sourceTags or targetTags. For INGRESS traffic, + one of `source_ranges`, `source_tags` or `source_service_accounts` is required. + is_set: true + conflicts: + - source_tags + - target_tags + item_type: + type: String + max_size: 10 + - name: 'sourceTags' + type: Array + description: | + If source tags are specified, the firewall will apply only to traffic + with source IP that belongs to a tag listed in source tags. Source + tags cannot be used to control traffic to an instance's external IP + address. Because tags are associated with an instance, not an IP + address. One or both of sourceRanges and sourceTags may be set. If + both properties are set, the firewall will apply to traffic that has + source IP address within sourceRanges OR the source IP that belongs to + a tag listed in the sourceTags property. The connection does not need + to match both properties for the firewall to apply. For INGRESS traffic, + one of `source_ranges`, `source_tags` or `source_service_accounts` is required. 
+ is_set: true + conflicts: + - source_service_accounts + - target_service_accounts + item_type: + type: String + - name: 'targetServiceAccounts' + type: Array + description: | + A list of service accounts indicating sets of instances located in the + network that may make network connections as specified in allowed[]. + targetServiceAccounts cannot be used at the same time as targetTags or + sourceTags. If neither targetServiceAccounts nor targetTags are + specified, the firewall rule applies to all instances on the specified + network. + is_set: true + conflicts: + - source_tags + - target_tags + item_type: + type: String + max_size: 10 + - name: 'targetTags' + type: Array + description: | + A list of instance tags indicating sets of instances located in the + network that may make network connections as specified in allowed[]. + If no targetTags are specified, the firewall rule applies to all + instances on the specified network. + is_set: true + conflicts: + - source_service_accounts + - target_service_accounts + item_type: + type: String diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml new file mode 100644 index 000000000000..ef174df9b6f4 --- /dev/null +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -0,0 +1,652 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ForwardingRule' +kind: 'compute#forwardingRule' +description: | + A ForwardingRule resource. A ForwardingRule resource specifies which pool + of target virtual machines to forward a packet to if it matches the given + [IPAddress, IPProtocol, portRange] tuple. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules' + api: 'https://cloud.google.com/compute/docs/reference/v1/forwardingRules' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/forwardingRules' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + constants: 'templates/terraform/constants/go/compute_forwarding_rule.go.tmpl' + post_create: 'templates/terraform/post_create/go/labels.tmpl' +custom_diff: + - 'forwardingRuleCustomizeDiff' + - 'tpgresource.SetLabelsDiff' +legacy_long_form_project: true +examples: + - name: 'internal_http_lb_with_mig_backend' + primary_resource_id: 'google_compute_forwarding_rule' + min_version: 'beta' + vars: + ilb_network_name: 'l7-ilb-network' + proxy_subnet_name: 'l7-ilb-proxy-subnet' + backend_subnet_name: 'l7-ilb-subnet' + forwarding_rule_name: 'l7-ilb-forwarding-rule' + target_http_proxy_name: 'l7-ilb-target-http-proxy' + regional_url_map_name: 'l7-ilb-regional-url-map' + backend_service_name: 'l7-ilb-backend-subnet' + mig_template_name: 'l7-ilb-mig-template' + hc_name: 'l7-ilb-hc' + mig_name: 'l7-ilb-mig1' + fw_allow_iap_hc_name: 'l7-ilb-fw-allow-iap-hc' + fw_allow_ilb_to_backends_name: 
'l7-ilb-fw-allow-ilb-to-backends' + vm_test_name: 'l7-ilb-test-vm' + ignore_read_extra: + - 'port_range' + - 'target' + - name: 'internal_tcp_udp_lb_with_mig_backend' + primary_resource_id: 'google_compute_forwarding_rule' + min_version: 'beta' + vars: + ilb_network_name: 'l4-ilb-network' + backend_subnet_name: 'l4-ilb-subnet' + forwarding_rule_name: 'l4-ilb-forwarding-rule' + backend_service_name: 'l4-ilb-backend-subnet' + mig_template_name: 'l4-ilb-mig-template' + hc_name: 'l4-ilb-hc' + mig_name: 'l4-ilb-mig1' + fw_allow_hc_name: 'l4-ilb-fw-allow-hc' + fw_allow_ilb_to_backends_name: 'l4-ilb-fw-allow-ilb-to-backends' + fw_allow_ilb_ssh_name: 'l4-ilb-fw-ssh' + vm_test_name: 'l4-ilb-test-vm' + - name: 'forwarding_rule_externallb' + primary_resource_id: 'default' + min_version: 'beta' + vars: + forwarding_rule_name: 'website-forwarding-rule' + backend_name: 'website-backend' + network_name: 'website-net' + ignore_read_extra: + - 'port_range' + - name: 'forwarding_rule_global_internallb' + primary_resource_id: 'default' + vars: + forwarding_rule_name: 'website-forwarding-rule' + backend_name: 'website-backend' + network_name: 'website-net' + - name: 'forwarding_rule_basic' + primary_resource_id: 'default' + vars: + forwarding_rule_name: 'website-forwarding-rule' + target_pool_name: 'website-target-pool' + ignore_read_extra: + - 'port_range' + - 'target' + - name: 'forwarding_rule_l3_default' + primary_resource_id: 'fwd_rule' + min_version: 'beta' + vars: + forwarding_rule_name: 'l3-forwarding-rule' + service_name: 'service' + health_check_name: 'health-check' + - name: 'forwarding_rule_internallb' + primary_resource_id: 'default' + vars: + forwarding_rule_name: 'website-forwarding-rule' + backend_name: 'website-backend' + network_name: 'website-net' + ignore_read_extra: + - 'port_range' + - 'target' + - name: 'forwarding_rule_http_lb' + primary_resource_id: 'default' + min_version: 'beta' + vars: + forwarding_rule_name: 'website-forwarding-rule' + 
region_target_http_proxy_name: 'website-proxy' + region_url_map_name: 'website-map' + region_backend_service_name: 'website-backend' + region_health_check_name: 'website-hc' + rigm_name: 'website-rigm' + network_name: 'website-net' + fw_name: 'website-fw' + ignore_read_extra: + - 'port_range' + - 'target' + - name: 'forwarding_rule_regional_http_xlb' + primary_resource_id: 'default' + min_version: 'beta' + vars: + forwarding_rule_name: 'website-forwarding-rule' + region_target_http_proxy_name: 'website-proxy' + region_url_map_name: 'website-map' + region_backend_service_name: 'website-backend' + region_health_check_name: 'website-hc' + rigm_name: 'website-rigm' + network_name: 'website-net' + fw_name: 'website-fw' + ip_name: 'website-ip' + ignore_read_extra: + - 'port_range' + - 'target' + - 'ip_address' + - name: 'forwarding_rule_vpc_psc' + primary_resource_id: 'default' + vars: + forwarding_rule_name: 'psc-endpoint' + consumer_network_name: 'consumer-net' + ip_name: 'website-ip' + producer_network_name: 'producer-net' + producer_psc_network_name: 'producer-psc-net' + service_attachment_name: 'producer-service' + producer_forwarding_rule_name: 'producer-forwarding-rule' + producer_backend_name: 'producer-service-backend' + producer_healthcheck_name: 'producer-service-health-check' + ignore_read_extra: + - 'port_range' + - 'target' + - 'ip_address' + - name: 'forwarding_rule_vpc_psc_no_automate_dns' + primary_resource_id: 'default' + vars: + forwarding_rule_name: 'psc-endpoint' + consumer_network_name: 'consumer-net' + ip_name: 'website-ip' + producer_network_name: 'producer-net' + producer_psc_network_name: 'producer-psc-net' + service_attachment_name: 'producer-service' + producer_forwarding_rule_name: 'producer-forwarding-rule' + producer_backend_name: 'producer-service-backend' + producer_healthcheck_name: 'producer-service-health-check' + ignore_read_extra: + - 'port_range' + - 'target' + - 'ip_address' + - name: 'forwarding_rule_regional_steering' + 
primary_resource_id: 'steering' + vars: + forwarding_rule_name: 'steering-rule' + ip_name: 'website-ip' + backend_name: 'service-backend' + external_forwarding_rule_name: 'external-forwarding-rule' + - name: 'forwarding_rule_internallb_ipv6' + primary_resource_id: 'default' + vars: + forwarding_rule_name: 'ilb-ipv6-forwarding-rule' + backend_name: 'ilb-ipv6-backend' + network_name: 'net-ipv6' + subnet_name: 'subnet-internal-ipv6' + ignore_read_extra: + - 'port_range' + - 'target' +virtual_fields: + - name: 'recreate_closed_psc' + description: + This is used in PSC consumer ForwardingRule to make terraform recreate the ForwardingRule when the status is closed + type: Boolean + default_value: false +parameters: + - name: 'region' + type: ResourceRef + description: | + A reference to the region where the regional forwarding rule resides. + + This field is not applicable to global forwarding rules. + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'isMirroringCollector' + type: Boolean + description: | + Indicates whether or not this load balancer can be used as a collector for + packet mirroring. To prevent mirroring loops, instances behind this + load balancer will not have their traffic mirrored even if a + `PacketMirroring` rule applies to them. + + This can only be set to true for load balancers that have their + `loadBalancingScheme` set to `INTERNAL`. + - name: 'pscConnectionId' + type: String + description: 'The PSC connection id of the PSC Forwarding Rule.' + output: true + - name: 'pscConnectionStatus' + type: String + description: + 'The PSC connection status of the PSC Forwarding Rule. 
Possible values: + `STATUS_UNSPECIFIED`, `PENDING`, `ACCEPTED`, `REJECTED`, `CLOSED`' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + # This is a multi-resource resource reference (Address, GlobalAddress) + - name: 'IPAddress' + type: String + description: | + IP address for which this forwarding rule accepts traffic. When a client + sends traffic to this IP address, the forwarding rule directs the traffic + to the referenced `target` or `backendService`. + + While creating a forwarding rule, specifying an `IPAddress` is + required under the following circumstances: + + * When the `target` is set to `targetGrpcProxy` and + `validateForProxyless` is set to `true`, the + `IPAddress` should be set to `0.0.0.0`. + * When the `target` is a Private Service Connect Google APIs + bundle, you must specify an `IPAddress`. + + + Otherwise, you can optionally specify an IP address that references an + existing static (reserved) IP address resource. When omitted, Google Cloud + assigns an ephemeral IP address. + + Use one of the following formats to specify an IP address while creating a + forwarding rule: + + * IP address number, as in `100.1.2.3` + * IPv6 address range, as in `2600:1234::/96` + * Full resource URL, as in + `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name` + * Partial URL or by name, as in: + * `projects/project_id/regions/region/addresses/address-name` + * `regions/region/addresses/address-name` + * `global/addresses/address-name` + * `address-name` + + + The forwarding rule's `target` or `backendService`, + and in most cases, also the `loadBalancingScheme`, determine the + type of IP address that you can use. For detailed information, see + [IP address + specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). 
+
+        When reading an `IPAddress`, the API always returns the IP
+        address number.
+      default_from_api: true
+      diff_suppress_func: 'tpgresource.InternalIpDiffSuppress'
+  - name: 'IPProtocol'
+    type: Enum
+    description: |
+      The IP protocol to which this rule applies.
+
+      For protocol forwarding, valid
+      options are `TCP`, `UDP`, `ESP`,
+      `AH`, `SCTP`, `ICMP` and
+      `L3_DEFAULT`.
+
+      The valid IP protocols are different for different load balancing products
+      as described in [Load balancing
+      features](https://cloud.google.com/load-balancing/docs/features#protocols_from_the_load_balancer_to_the_backends).
+
+      A Forwarding Rule with protocol L3_DEFAULT can attach with target instance or
+      backend service with UNSPECIFIED protocol.
+      A forwarding rule with "L3_DEFAULT" IPProtocol cannot be attached to a backend service with TCP or UDP.
+    default_from_api: true
+    diff_suppress_func: 'tpgresource.CaseDiffSuppress'
+    enum_values:
+      - 'TCP'
+      - 'UDP'
+      - 'ESP'
+      - 'AH'
+      - 'SCTP'
+      - 'ICMP'
+      - 'L3_DEFAULT'
+  - name: 'backendService'
+    type: ResourceRef
+    description: |
+      Identifies the backend service to which the forwarding rule sends traffic.
+
+      Required for Internal TCP/UDP Load Balancing and Network Load Balancing;
+      must be omitted for all other load balancer types.
+    custom_expand: 'templates/terraform/custom_expand/go/self_link_from_name.tmpl'
+    resource: 'BackendService'
+    imports: 'selfLink'
+  - name: 'loadBalancingScheme'
+    type: Enum
+    description: |
+      Specifies the forwarding rule type.
+
+      For more information about forwarding rules, refer to
+      [Forwarding rule concepts](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts).
+    default_value: EXTERNAL
+    enum_values:
+      - 'EXTERNAL'
+      - 'EXTERNAL_MANAGED'
+      - 'INTERNAL'
+      - 'INTERNAL_MANAGED'
+  - name: 'name'
+    type: String
+    description: |
+      Name of the resource; provided by the client when the resource is created.
+ The name must be 1-63 characters long, and comply with + [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). + + Specifically, the name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + character must be a lowercase letter, and all following characters must + be a dash, lowercase letter, or digit, except the last character, which + cannot be a dash. + + For Private Service Connect forwarding rules that forward traffic to Google + APIs, the forwarding rule name must be a 1-20 characters string with + lowercase letters and numbers and must start with a letter. + required: true + - name: 'network' + type: ResourceRef + description: | + This field is not used for external load balancing. + + For Internal TCP/UDP Load Balancing, this field identifies the network that + the load balanced IP should belong to for this Forwarding Rule. + If the subnetwork is specified, the network of the subnetwork will be used. + If neither subnetwork nor this field is specified, the default network will + be used. + + For Private Service Connect forwarding rules that forward traffic to Google + APIs, a network must be provided. + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'portRange' + type: String + description: | + The `ports`, `portRange`, and `allPorts` fields are mutually exclusive. + Only packets addressed to ports in the specified range will be forwarded + to the backends configured with this forwarding rule. + + The `portRange` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, or SCTP, + and + * It's applicable only to the following products: external passthrough + Network Load Balancers, internal and external proxy Network Load + Balancers, internal and external Application Load Balancers, external + protocol forwarding, and Classic VPN. 
+ * Some products have restrictions on what ports can be used. See + [port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications) + for details. + + For external forwarding rules, two or more forwarding rules cannot use the + same `[IPAddress, IPProtocol]` pair, and cannot have overlapping + `portRange`s. + + For internal forwarding rules within the same VPC network, two or more + forwarding rules cannot use the same `[IPAddress, IPProtocol]` pair, and + cannot have overlapping `portRange`s. + + @pattern: \d+(?:-\d+)? + default_from_api: true + diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' + - name: 'ports' + type: Array + description: | + The `ports`, `portRange`, and `allPorts` fields are mutually exclusive. + Only packets addressed to ports in the specified range will be forwarded + to the backends configured with this forwarding rule. + + The `ports` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, or SCTP, + and + * It's applicable only to the following products: internal passthrough + Network Load Balancers, backend service-based external passthrough Network + Load Balancers, and internal protocol forwarding. + * You can specify a list of up to five ports by number, separated by + commas. The ports can be contiguous or discontiguous. + + For external forwarding rules, two or more forwarding rules cannot use the + same `[IPAddress, IPProtocol]` pair if they share at least one port + number. + + For internal forwarding rules within the same VPC network, two or more + forwarding rules cannot use the same `[IPAddress, IPProtocol]` pair if + they share at least one port number. + + @pattern: \d+(?:-\d+)? 
+ is_set: true + custom_expand: 'templates/terraform/custom_expand/go/set_to_list.tmpl' + item_type: + type: String + max_size: 5 + - name: 'subnetwork' + type: ResourceRef + description: | + This field identifies the subnetwork that the load balanced IP should + belong to for this Forwarding Rule, used in internal load balancing and + network load balancing with IPv6. + + If the network specified is in auto subnet mode, this field is optional. + However, a subnetwork must be specified if the network is in custom subnet + mode or when creating external forwarding rule with IPv6. + # This is a multi-resource resource reference (TargetHttp(s)Proxy, + # TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, + # TargetInstance) + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'target' + type: String + description: | + The URL of the target resource to receive the matched traffic. For + regional forwarding rules, this target must be in the same region as the + forwarding rule. For global forwarding rules, this target must be a global + load balancing resource. + + The forwarded traffic must be of a type appropriate to the target object. + * For load balancers, see the "Target" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). + * For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: + * `vpc-sc` - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). + * `all-apis` - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). + + + For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. 
+ update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setTarget' + update_verb: 'POST' + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + custom_expand: 'templates/terraform/custom_expand/go/self_link_from_name.tmpl' + - name: 'allowGlobalAccess' + type: Boolean + description: | + This field is used along with the `backend_service` field for + internal load balancing or with the `target` field for internal + TargetInstance. + + If the field is set to `TRUE`, clients can access ILB from all + regions. + + Otherwise only allows access from clients in the same region as the + internal load balancer. + send_empty_value: true + update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}' + update_verb: 'PATCH' + - name: 'labels' + type: KeyValueLabels + description: | + Labels to apply to this forwarding rule. A list of key->value pairs. + immutable: false + update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels' + update_verb: 'POST' + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + output: true + update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'allPorts' + type: Boolean + description: | + The `ports`, `portRange`, and `allPorts` fields are mutually exclusive. + Only packets addressed to ports in the specified range will be forwarded + to the backends configured with this forwarding rule. + + The `allPorts` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, SCTP, or + L3_DEFAULT. + * It's applicable only to the following products: internal passthrough + Network Load Balancers, backend service-based external passthrough Network + Load Balancers, and internal and external protocol forwarding. 
+ * Set this field to true to allow packets addressed to any port or packets + lacking destination port information (for example, UDP fragments after the + first fragment) to be forwarded to the backends configured with this + forwarding rule. The L3_DEFAULT protocol requires `allPorts` be set to + true. + - name: 'networkTier' + type: Enum + description: | + This signifies the networking tier used for configuring + this load balancer and can only take the following values: + `PREMIUM`, `STANDARD`. + + For regional ForwardingRule, the valid values are `PREMIUM` and + `STANDARD`. For GlobalForwardingRule, the valid value is + `PREMIUM`. + + If this field is not specified, it is assumed to be `PREMIUM`. + If `IPAddress` is specified, this value must be equal to the + networkTier of the Address. + immutable: true + default_from_api: true + enum_values: + - 'PREMIUM' + - 'STANDARD' + - name: 'serviceDirectoryRegistrations' + type: Array + description: | + Service Directory resources to register this forwarding rule with. + + Currently, only supports a single Service Directory resource. + immutable: true + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'namespace' + type: String + description: | + Service Directory namespace to register the forwarding rule under. + immutable: true + default_from_api: true + - name: 'service' + type: String + description: | + Service Directory service to register the forwarding rule under. + immutable: true + min_size: 0 + max_size: 1 + - name: 'serviceLabel' + type: String + description: | + An optional prefix to the service name for this Forwarding Rule. + If specified, will be the first label of the fully qualified service + name. + + The label must be 1-63 characters long, and comply with RFC1035. 
+ Specifically, the label must be 1-63 characters long and match the + regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + character must be a lowercase letter, and all following characters + must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + + This field is only used for INTERNAL load balancing. + validation: + function: 'verify.ValidateGCEName' + - name: 'serviceName' + type: String + description: | + The internal fully qualified service name for this Forwarding Rule. + + This field is only used for INTERNAL load balancing. + output: true + - name: 'sourceIpRanges' + type: Array + description: + If not empty, this Forwarding Rule will only forward the traffic when the + source IP address matches one of the IP addresses or CIDR ranges set here. + Note that a Forwarding Rule can only have up to 64 source IP ranges, and + this field can only be used with a regional Forwarding Rule whose scheme + is EXTERNAL. Each sourceIpRange entry should be either an IP address (for + example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). + immutable: true + item_type: + type: String + - name: 'baseForwardingRule' + type: String + description: + '[Output Only] The URL for the corresponding base Forwarding Rule. By base + Forwarding Rule, we mean the Forwarding Rule that has the same IP address, + protocol, and port settings with the current Forwarding Rule, but without + sourceIPRanges specified. Always empty if the current Forwarding Rule does + not have sourceIPRanges specified.' + output: true + - name: 'allowPscGlobalAccess' + type: Boolean + description: + This is used in PSC consumer ForwardingRule to control whether the PSC + endpoint can be accessed from another region. 
+ send_empty_value: true + update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}' + update_verb: 'PATCH' + update_id: 'allowPscGlobalAccess' + fingerprint_name: 'fingerprint' + - name: 'noAutomateDnsZone' + type: Boolean + description: + This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. + Non-PSC forwarding rules do not use this field. + immutable: true + ignore_read: true + send_empty_value: true + - name: 'ipVersion' + type: Enum + description: | + The IP address version that will be used by this forwarding rule. + Valid options are IPV4 and IPV6. + + If not set, the IPv4 address will be used by default. + immutable: true + default_from_api: true + enum_values: + - 'IPV4' + - 'IPV6' diff --git a/mmv1/products/compute/go_GlobalAddress.yaml b/mmv1/products/compute/go_GlobalAddress.yaml new file mode 100644 index 000000000000..2ae8cfaa6a10 --- /dev/null +++ b/mmv1/products/compute/go_GlobalAddress.yaml @@ -0,0 +1,155 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'GlobalAddress' +kind: 'compute#address' +description: | + Represents a Global Address resource. Global addresses are used for + HTTP(S) load balancing. 
+references: + guides: + 'Reserving a Static External IP Address': 'https://cloud.google.com/compute/docs/ip-addresses/reserve-static-external-ip-address' + api: 'https://cloud.google.com/compute/docs/reference/v1/globalAddresses' +docs: +base_url: 'projects/{{project}}/global/addresses' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + pre_create: 'templates/terraform/pre_create/go/compute_global_address.go.tmpl' + post_create: 'templates/terraform/post_create/go/labels.tmpl' +examples: + - name: 'global_address_basic' + primary_resource_id: 'default' + vars: + global_address_name: 'global-appserver-ip' + - name: 'global_address_private_services_connect' + primary_resource_id: 'default' + min_version: 'beta' + vars: + global_address_name: 'global-psconnect-ip' + network_name: 'my-network-name' +parameters: +properties: + - name: 'address' + type: String + description: | + The IP address or beginning of the address range represented by this + resource. This can be supplied as an input to reserve a specific + address or omitted to allow GCP to choose a valid one for you. + default_from_api: true + - name: 'creationTimestamp' + type: Time + description: | + Creation timestamp in RFC3339 text format. + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. 
Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + - name: 'labels' + type: KeyValueLabels + description: | + Labels to apply to this address. A list of key->value pairs. + min_version: 'beta' + immutable: false + update_url: 'projects/{{project}}/global/addresses/{{name}}/setLabels' + update_verb: 'POST' + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + min_version: 'beta' + output: true + update_url: 'projects/{{project}}/global/addresses/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'ipVersion' + type: Enum + description: | + The IP Version that will be used by this address. The default value is `IPV4`. + diff_suppress_func: 'tpgresource.EmptyOrDefaultStringSuppress("IPV4")' + enum_values: + - 'IPV4' + - 'IPV6' + - name: 'prefixLength' + type: Integer + description: | + The prefix length of the IP range. If not present, it means the + address field is a single IP address. + + This field is not applicable to addresses with addressType=INTERNAL + when purpose=PRIVATE_SERVICE_CONNECT + default_from_api: true + - name: 'addressType' + type: Enum + description: | + The type of the address to reserve. + + * EXTERNAL indicates public/external single IP address. + * INTERNAL indicates internal IP ranges belonging to some network. + diff_suppress_func: 'tpgresource.EmptyOrDefaultStringSuppress("EXTERNAL")' + default_value: EXTERNAL + enum_values: + - 'EXTERNAL' + - 'INTERNAL' + - name: 'purpose' + type: String + description: | + The purpose of the resource. 
Possible values include: + + * VPC_PEERING - for peer networks + + * PRIVATE_SERVICE_CONNECT - for ([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) only) Private Service Connect networks + - name: 'network' + type: ResourceRef + description: | + The URL of the network in which to reserve the IP range. The IP range + must be in RFC1918 space. The network cannot be deleted if there are + any reserved IP ranges referring to it. + + This should only be set when using an Internal address. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_GlobalForwardingRule.yaml b/mmv1/products/compute/go_GlobalForwardingRule.yaml new file mode 100644 index 000000000000..77c4f47f2e7a --- /dev/null +++ b/mmv1/products/compute/go_GlobalForwardingRule.yaml @@ -0,0 +1,524 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'GlobalForwardingRule' +kind: 'compute#forwardingRule' +description: | + Represents a GlobalForwardingRule resource. Global forwarding rules are + used to forward traffic to the correct load balancer for HTTP load + balancing. Global forwarding rules can only be used for HTTP load + balancing. 
+ + For more information, see +docs: +base_url: 'projects/{{project}}/global/forwardingRules' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + post_create: 'templates/terraform/post_create/go/labels.tmpl' +legacy_long_form_project: true +examples: + - name: 'external_ssl_proxy_lb_mig_backend' + primary_resource_id: 'default' + vars: + ssl_proxy_xlb_network: 'ssl-proxy-xlb-network' + ssl_proxy_xlb_subnet: 'ssl-proxy-xlb-subnet' + ssl_proxy_xlb_ip: 'ssl-proxy-xlb-ip' + default_cert: 'default-cert' + test_proxy: 'test-proxy' + ssl_proxy_xlb_forwarding_rule: 'ssl-proxy-xlb-forwarding-rule' + ssl_proxy_xlb_backend_service: 'ssl-proxy-xlb-backend-service' + ssl_proxy_health_check: 'ssl-proxy-health-check' + ssl_proxy_xlb_mig_template: 'ssl-proxy-xlb-mig-template' + ssl_proxy_xlb_mig1: 'ssl-proxy-xlb-mig1' + ssl_proxy_xlb_fw_allow_hc: 'ssl-proxy-xlb-fw-allow-hc' + ignore_read_extra: + - 'port_range' + - 'target' + - 'ip_address' + skip_test: true + - name: 'external_tcp_proxy_lb_mig_backend' + primary_resource_id: 'default' + min_version: 'beta' + vars: + tcp_proxy_xlb_network: 'tcp-proxy-xlb-network' + tcp_proxy_xlb_subnet: 'tcp-proxy-xlb-subnet' + tcp_proxy_xlb_ip: 'tcp-proxy-xlb-ip' + tcp_proxy_xlb_forwarding_rule: 'tcp-proxy-xlb-forwarding-rule' + test_proxy_health_check: 'test-proxy-health-check' + tcp_proxy_xlb_backend_service: 'tcp-proxy-xlb-backend-service' + tcp_proxy_health_check: 'tcp-proxy-health-check' + tcp_proxy_xlb_mig_template: 'tcp-proxy-xlb-mig-template' + tcp_proxy_xlb_mig1: 'tcp-proxy-xlb-mig1' + tcp_proxy_xlb_fw_allow_hc: 'tcp-proxy-xlb-fw-allow-hc' + ignore_read_extra: 
+ - 'port_range' + - 'target' + - 'ip_address' + - name: 'external_http_lb_mig_backend_custom_header' + primary_resource_id: 'default' + min_version: 'beta' + vars: + xlb_network_name: 'l7-xlb-network' + backend_subnet_name: 'l7-xlb-subnet' + address_name: 'l7-xlb-static-ip' + forwarding_rule_name: 'l7-xlb-forwarding-rule' + target_http_proxy_name: 'l7-xlb-target-http-proxy' + url_map_name: 'l7-xlb-url-map' + backend_service_name: 'l7-xlb-backend-service' + mig_template_name: 'l7-xlb-mig-template' + hc_name: 'l7-xlb-hc' + mig_name: 'l7-xlb-mig1' + fw_allow_hc_name: 'l7-xlb-fw-allow-hc' + ignore_read_extra: + - 'port_range' + - 'target' + - 'ip_address' + - name: 'global_forwarding_rule_http' + primary_resource_id: 'default' + vars: + forwarding_rule_name: 'global-rule' + http_proxy_name: 'target-proxy' + backend_service_name: 'backend' + ignore_read_extra: + - 'port_range' + - 'target' + - name: 'global_forwarding_rule_internal' + primary_resource_id: 'default' + min_version: 'beta' + vars: + forwarding_rule_name: 'global-rule' + http_proxy_name: 'target-proxy' + backend_service_name: 'backend' + igm_name: 'igm-internal' + ignore_read_extra: + - 'port_range' + - 'target' + - name: 'global_forwarding_rule_external_managed' + primary_resource_id: 'default' + vars: + forwarding_rule_name: 'global-rule' + http_proxy_name: 'target-proxy' + backend_service_name: 'backend' + ignore_read_extra: + - 'port_range' + - 'target' + - name: 'global_forwarding_rule_hybrid' + primary_resource_id: 'default' + vars: + forwarding_rule_name: 'global-rule' + http_proxy_name: 'target-proxy' + network_name: 'my-network' + internal_network_name: 'my-internal-network' + subnetwork_name: 'my-subnetwork' + default_backend_service_name: 'backend-default' + hybrid_backend_service_name: 'backend-hybrid' + internal_backend_service_name: 'backend-internal' + default_neg_name: 'default-neg' + hybrid_neg_name: 'hybrid-neg' + internal_neg_name: 'internal-neg' + health_check_name: 'health-check' + 
ignore_read_extra: + - 'port_range' + - 'target' + - name: 'global_internal_http_lb_with_mig_backend' + primary_resource_id: 'google_compute_forwarding_rule' + min_version: 'beta' + vars: + gilb_network_name: 'l7-gilb-network' + proxy_subnet_name: 'l7-gilb-proxy-subnet' + backend_subnet_name: 'l7-gilb-subnet' + forwarding_rule_name: 'l7-gilb-forwarding-rule' + target_http_proxy_name: 'l7-gilb-target-http-proxy' + url_map_name: 'l7-gilb-url-map' + backend_service_name: 'l7-gilb-backend-subnet' + mig_template_name: 'l7-gilb-mig-template' + hc_name: 'l7-gilb-hc' + mig_name: 'l7-gilb-mig1' + fw_allow_iap_hc_name: 'l7-gilb-fw-allow-iap-hc' + fw_allow_gilb_to_backends_name: 'l7-gilb-fw-allow-gilb-to-backends' + vm_test_name: 'l7-gilb-test-vm' + ignore_read_extra: + - 'port_range' + - 'target' + - name: 'private_service_connect_google_apis' + primary_resource_id: 'default' + min_version: 'beta' + vars: + network_name: 'my-network' + subnetwork_name: 'my-subnetwork' + global_address_name: 'global-psconnect-ip' + forwarding_rule_name: 'globalrule' + test_env_vars: + project: 'PROJECT_NAME' + ignore_read_extra: + - 'ip_address' + - name: 'private_service_connect_google_apis_no_automate_dns' + primary_resource_id: 'default' + min_version: 'beta' + vars: + network_name: 'my-network' + subnetwork_name: 'my-subnetwork' + global_address_name: 'global-psconnect-ip' + forwarding_rule_name: 'globalrule' + test_env_vars: + project: 'PROJECT_NAME' + ignore_read_extra: + - 'ip_address' +parameters: +properties: + - name: 'pscConnectionId' + type: String + description: 'The PSC connection id of the PSC Forwarding Rule.' + output: true + - name: 'pscConnectionStatus' + type: String + description: + 'The PSC connection status of the PSC Forwarding Rule. Possible values: + `STATUS_UNSPECIFIED`, `PENDING`, `ACCEPTED`, `REJECTED`, `CLOSED`' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. 
Provide this property when + you create the resource. + # This is a multi-resource resource reference (Address, GlobalAddress) + - name: 'IPAddress' + type: String + description: | + IP address for which this forwarding rule accepts traffic. When a client + sends traffic to this IP address, the forwarding rule directs the traffic + to the referenced `target`. + + While creating a forwarding rule, specifying an `IPAddress` is + required under the following circumstances: + + * When the `target` is set to `targetGrpcProxy` and + `validateForProxyless` is set to `true`, the + `IPAddress` should be set to `0.0.0.0`. + * When the `target` is a Private Service Connect Google APIs + bundle, you must specify an `IPAddress`. + + + Otherwise, you can optionally specify an IP address that references an + existing static (reserved) IP address resource. When omitted, Google Cloud + assigns an ephemeral IP address. + + Use one of the following formats to specify an IP address while creating a + forwarding rule: + + * IP address number, as in `100.1.2.3` + * IPv6 address range, as in `2600:1234::/96` + * Full resource URL, as in + `https://www.googleapis.com/compute/v1/projects/project_id/regions/region/addresses/address-name` + * Partial URL or by name, as in: + * `projects/project_id/regions/region/addresses/address-name` + * `regions/region/addresses/address-name` + * `global/addresses/address-name` + * `address-name` + + + The forwarding rule's `target`, + and in most cases, also the `loadBalancingScheme`, determine the + type of IP address that you can use. For detailed information, see + [IP address + specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). + + When reading an `IPAddress`, the API always returns the IP + address number. + default_from_api: true + diff_suppress_func: 'tpgresource.InternalIpDiffSuppress' + - name: 'IPProtocol' + type: Enum + description: | + The IP protocol to which this rule applies. 
+ + For protocol forwarding, valid + options are `TCP`, `UDP`, `ESP`, + `AH`, `SCTP`, `ICMP` and + `L3_DEFAULT`. + + The valid IP protocols are different for different load balancing products + as described in [Load balancing + features](https://cloud.google.com/load-balancing/docs/features#protocols_from_the_load_balancer_to_the_backends). + default_from_api: true + diff_suppress_func: 'tpgresource.CaseDiffSuppress' + enum_values: + - 'TCP' + - 'UDP' + - 'ESP' + - 'AH' + - 'SCTP' + - 'ICMP' + - name: 'ipVersion' + type: Enum + description: | + The IP Version that will be used by this global forwarding rule. + enum_values: + - 'IPV4' + - 'IPV6' + - name: 'labels' + type: KeyValueLabels + description: | + Labels to apply to this forwarding rule. A list of key->value pairs. + immutable: false + update_url: 'projects/{{project}}/global/forwardingRules/{{name}}/setLabels' + update_verb: 'POST' + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + output: true + update_url: 'projects/{{project}}/global/forwardingRules/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'loadBalancingScheme' + type: Enum + description: | + Specifies the forwarding rule type. + + For more information about forwarding rules, refer to + [Forwarding rule concepts](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts). + default_value: EXTERNAL + enum_values: + - 'EXTERNAL' + - 'EXTERNAL_MANAGED' + - 'INTERNAL_MANAGED' + - 'INTERNAL_SELF_MANAGED' + - name: 'metadataFilters' + type: Array + description: | + Opaque filter criteria used by Loadbalancer to restrict routing + configuration to a limited set xDS compliant clients. In their xDS + requests to Loadbalancer, xDS clients present node metadata. If a + match takes place, the relevant routing configuration is made available + to those proxies. 
+ + For each metadataFilter in this list, if its filterMatchCriteria is set + to MATCH_ANY, at least one of the filterLabels must match the + corresponding label provided in the metadata. If its filterMatchCriteria + is set to MATCH_ALL, then all of its filterLabels must match with + corresponding labels in the provided metadata. + + metadataFilters specified here can be overridden by those specified in + the UrlMap that this ForwardingRule references. + + metadataFilters only applies to Loadbalancers that have their + loadBalancingScheme set to INTERNAL_SELF_MANAGED. + item_type: + type: NestedObject + properties: + - name: 'filterMatchCriteria' + type: Enum + description: | + Specifies how individual filterLabel matches within the list of + filterLabels contribute towards the overall metadataFilter match. + + MATCH_ANY - At least one of the filterLabels must have a matching + label in the provided metadata. + MATCH_ALL - All filterLabels must have matching labels in the + provided metadata. + required: true + enum_values: + - 'MATCH_ANY' + - 'MATCH_ALL' + - name: 'filterLabels' + type: Array + description: | + The list of label value pairs that must match labels in the + provided metadata based on filterMatchCriteria + + This list must not be empty and can have at the most 64 entries. + required: true + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + Name of the metadata label. The length must be between + 1 and 1024 characters, inclusive. + required: true + - name: 'value' + type: String + description: | + The value that the label must match. The value has a maximum + length of 1024 characters. + required: true + min_size: 1 + max_size: 64 + - name: 'name' + type: String + description: | + Name of the resource; provided by the client when the resource is created. + The name must be 1-63 characters long, and comply with + [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). 
+ + Specifically, the name must be 1-63 characters long and match the regular + expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + character must be a lowercase letter, and all following characters must + be a dash, lowercase letter, or digit, except the last character, which + cannot be a dash. + + For Private Service Connect forwarding rules that forward traffic to Google + APIs, the forwarding rule name must be a 1-20 characters string with + lowercase letters and numbers and must start with a letter. + required: true + - name: 'network' + type: ResourceRef + description: | + This field is not used for external load balancing. + + For Internal TCP/UDP Load Balancing, this field identifies the network that + the load balanced IP should belong to for this Forwarding Rule. + If the subnetwork is specified, the network of the subnetwork will be used. + If neither subnetwork nor this field is specified, the default network will + be used. + + For Private Service Connect forwarding rules that forward traffic to Google + APIs, a network must be provided. + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'portRange' + type: String + description: | + The `portRange` field has the following limitations: + * It requires that the forwarding rule `IPProtocol` be TCP, UDP, or SCTP, + and + * It's applicable only to the following products: external passthrough + Network Load Balancers, internal and external proxy Network Load + Balancers, internal and external Application Load Balancers, external + protocol forwarding, and Classic VPN. + * Some products have restrictions on what ports can be used. See + [port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications) + for details. 
+ + For external forwarding rules, two or more forwarding rules cannot use the + same `[IPAddress, IPProtocol]` pair, and cannot have overlapping + `portRange`s. + + For internal forwarding rules within the same VPC network, two or more + forwarding rules cannot use the same `[IPAddress, IPProtocol]` pair, and + cannot have overlapping `portRange`s. + + @pattern: \d+(?:-\d+)? + diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' + - name: 'subnetwork' + type: ResourceRef + description: | + This field identifies the subnetwork that the load balanced IP should + belong to for this Forwarding Rule, used in internal load balancing and + network load balancing with IPv6. + + If the network specified is in auto subnet mode, this field is optional. + However, a subnetwork must be specified if the network is in custom subnet + mode or when creating external forwarding rule with IPv6. + # This is a multi-resource resource reference (TargetHttp(s)Proxy, + # TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, + # TargetInstance) + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'target' + type: String + description: | + The URL of the target resource to receive the matched traffic. For + regional forwarding rules, this target must be in the same region as the + forwarding rule. For global forwarding rules, this target must be a global + load balancing resource. + + The forwarded traffic must be of a type appropriate to the target object. + * For load balancers, see the "Target" column in [Port specifications](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#ip_address_specifications). 
+ * For Private Service Connect forwarding rules that forward traffic to Google APIs, provide the name of a supported Google API bundle: + * `vpc-sc` - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). + * `all-apis` - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). + + + For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. + required: true + update_url: 'projects/{{project}}/global/forwardingRules/{{name}}/setTarget' + update_verb: 'POST' + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'serviceDirectoryRegistrations' + type: Array + description: | + Service Directory resources to register this forwarding rule with. + + Currently, only supports a single Service Directory resource. + immutable: true + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'namespace' + type: String + description: | + Service Directory namespace to register the forwarding rule under. + immutable: true + default_from_api: true + - name: 'serviceDirectoryRegion' + type: String + description: | + [Optional] Service Directory region to register this global forwarding rule under. + Default to "us-central1". Only used for PSC for Google APIs. All PSC for + Google APIs Forwarding Rules on the same network should use the same Service + Directory region. + immutable: true + min_size: 0 + max_size: 1 + - name: 'sourceIpRanges' + type: Array + description: If not empty, this Forwarding Rule will only forward the traffic when the source IP address matches one of the IP addresses or CIDR ranges set here. Note that a Forwarding Rule can only have up to 64 source IP ranges, and this field can only be used with a regional Forwarding Rule whose scheme is EXTERNAL. 
Each sourceIpRange entry should be either an IP address (for example, 1.2.3.4) or a CIDR range (for example, 1.2.3.0/24). + immutable: true + item_type: + type: String + - name: 'baseForwardingRule' + type: String + description: '[Output Only] The URL for the corresponding base Forwarding Rule. By base Forwarding Rule, we mean the Forwarding Rule that has the same IP address, protocol, and port settings with the current Forwarding Rule, but without sourceIPRanges specified. Always empty if the current Forwarding Rule does not have sourceIPRanges specified.' + output: true + - name: 'allowPscGlobalAccess' + type: Boolean + description: This is used in PSC consumer ForwardingRule to control whether the PSC endpoint can be accessed from another region. + min_version: 'beta' + - name: 'noAutomateDnsZone' + type: Boolean + description: + This is used in PSC consumer ForwardingRule to control whether it should try to auto-generate a DNS zone or not. + Non-PSC forwarding rules do not use this field. + immutable: true + ignore_read: true + send_empty_value: true diff --git a/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml new file mode 100644 index 000000000000..81d152748bad --- /dev/null +++ b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml @@ -0,0 +1,105 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'GlobalNetworkEndpoint' +kind: 'compute#networkEndpoint' +description: | + A Global Network endpoint represents an IP address and port combination that exists outside of GCP. + **NOTE**: Global network endpoints cannot be created outside of a + global network endpoint group. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups' +docs: +id_format: '{{project}}/{{global_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}' +base_url: 'projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}' +self_link: 'projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/listNetworkEndpoints' +create_url: 'projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/attachNetworkEndpoints' +read_verb: 'POST' +delete_url: 'projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/detachNetworkEndpoints' +delete_verb: 'POST' +immutable: true +mutex: networkEndpoint/{{project}}/{{global_network_endpoint_group}} +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - ipAddress + - fqdn + - port +nested_query: + keys: + - items + is_list_of_ids: false + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/compute_global_network_endpoint.go.tmpl' + decoder: 'templates/terraform/decoders/go/network_endpoint.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/compute_global_network_endpoint.go.tmpl' + custom_import: 
'templates/terraform/custom_import/go/compute_global_network_endpoint.go.tmpl' +exclude_tgc: true +examples: + - name: 'global_network_endpoint' + primary_resource_id: 'default-endpoint' + vars: + neg_name: 'my-lb-neg' + skip_test: true +parameters: + - name: 'globalNetworkEndpointGroup' + type: ResourceRef + description: | + The global network endpoint group this endpoint is part of. + url_param_only: true + required: true + ignore_read: true + diff_suppress_func: 'tpgresource.CompareResourceNames' + resource: 'GlobalNetworkEndpointGroup' + imports: 'name' +properties: + - name: 'port' + type: Integer + description: | + Port number of the external endpoint. + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/float64_to_int.go.tmpl' + validation: + function: 'validation.IntAtLeast(1)' + - name: 'ipAddress' + type: String + description: | + IPv4 address external endpoint. + - name: 'fqdn' + type: String + description: | + Fully qualified domain name of network endpoint. + This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT. + at_least_one_of: + - 'fqdn' + - 'ip_address' diff --git a/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml b/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml new file mode 100644 index 000000000000..2e8f0b57b0b8 --- /dev/null +++ b/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml @@ -0,0 +1,93 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'GlobalNetworkEndpointGroup' +kind: 'compute#networkEndpointGroup' +description: | + A global network endpoint group contains endpoints that reside outside of Google Cloud. + Currently a global network endpoint group can only support a single endpoint. + + Recreating a global network endpoint group that's in use by another resource will give a + `resourceInUseByAnotherResource` error. Use `lifecycle.create_before_destroy` + to avoid this type of error. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/internet-neg-concepts' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups' +docs: +base_url: 'projects/{{project}}/global/networkEndpointGroups' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'global_network_endpoint_group' + primary_resource_id: 'neg' + vars: + neg_name: 'my-lb-neg' + - name: 'global_network_endpoint_group_ip_address' + primary_resource_id: 'neg' + vars: + neg_name: 'my-lb-neg' +parameters: +properties: + - name: 'name' + type: String + description: | + Name of the resource; provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. 
Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + validation: + function: 'verify.ValidateGCEName' + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + - name: 'networkEndpointType' + type: Enum + description: | + Type of network endpoints in this network endpoint group. + required: true + enum_values: + - 'INTERNET_IP_PORT' + - 'INTERNET_FQDN_PORT' + - name: 'defaultPort' + type: Integer + description: | + The default port used if the port number is not specified in the + network endpoint. diff --git a/mmv1/products/compute/go_HaVpnGateway.yaml b/mmv1/products/compute/go_HaVpnGateway.yaml new file mode 100644 index 000000000000..c62e2447443f --- /dev/null +++ b/mmv1/products/compute/go_HaVpnGateway.yaml @@ -0,0 +1,162 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'HaVpnGateway' +kind: 'compute#vpnGateway' +description: | + Represents a VPN gateway running in GCP. This virtual device is managed + by Google, but used only by you. 
This type of VPN Gateway allows for the creation + of VPN solutions with higher availability than classic Target VPN Gateways. +references: + guides: + 'Choosing a VPN': 'https://cloud.google.com/vpn/docs/how-to/choosing-a-vpn' + 'Cloud VPN Overview': 'https://cloud.google.com/vpn/docs/concepts/overview' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/vpnGateways' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/vpnGateways' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'ha_vpn_gateway_basic' + primary_resource_id: 'ha_gateway1' + vars: + ha_vpn_gateway1_name: 'ha-vpn-1' + network1_name: 'network1' + - name: 'ha_vpn_gateway_ipv6' + primary_resource_id: 'ha_gateway1' + vars: + ha_vpn_gateway1_name: 'ha-vpn-1' + network1_name: 'network1' + - name: 'ha_vpn_gateway_gcp_to_gcp' + primary_resource_id: 'ha_gateway1' + vars: + ha_vpn_gateway1_name: 'ha-vpn-1' + network1_name: 'network1' + router1_name: 'ha-vpn-router1' + ha_vpn_gateway2_name: 'ha-vpn-2' + network2_name: 'network2' + router2_name: 'ha-vpn-router2' + skip_test: true + skip_docs: true + skip_vcr: true + - name: 'compute_ha_vpn_gateway_encrypted_interconnect' + primary_resource_id: 'vpn-gateway' + vars: + ha_vpn_gateway_name: 'test-ha-vpngw' + interconnect_attachment1_name: 'test-interconnect-attachment1' + interconnect_attachment2_name: 'test-interconnect-attachment2' + address1_name: 'test-address1' + address2_name: 'test-address2' + router_name: 'test-router' + network_name: 'test-network' + skip_test: true +parameters: + - name: 'region' + type: ResourceRef + 
description: | + The region this gateway should sit in. + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'description' + type: String + description: 'An optional description of this resource.' + immutable: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + validation: + function: 'verify.ValidateGCEName' + - name: 'network' + type: ResourceRef + description: | + The network this VPN gateway is accepting traffic for. + required: true + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'stackType' + type: Enum + description: | + The stack type for this VPN gateway to identify the IP protocols that are enabled. + If not specified, IPV4_ONLY will be used. + immutable: true + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: IPV4_ONLY + enum_values: + - 'IPV4_ONLY' + - 'IPV4_IPV6' + - name: 'vpnInterfaces' + type: Array + description: | + A list of interfaces on this VPN gateway. + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'id' + type: Integer + description: 'The numeric ID of this VPN gateway interface.' 
+ - name: 'ipAddress' + type: String + description: 'The external IP address for this VPN gateway interface.' + output: true + - name: 'interconnectAttachment' + type: ResourceRef + description: | + URL of the interconnect attachment resource. When the value + of this field is present, the VPN Gateway will be used for + IPsec-encrypted Cloud Interconnect; all Egress or Ingress + traffic for this VPN Gateway interface will go through the + specified interconnect attachment resource. + + Not currently available publicly. + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'InterconnectAttachment' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_HealthCheck.yaml b/mmv1/products/compute/go_HealthCheck.yaml new file mode 100644 index 000000000000..aa04399b16e3 --- /dev/null +++ b/mmv1/products/compute/go_HealthCheck.yaml @@ -0,0 +1,848 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'HealthCheck' +kind: 'compute#healthCheck' +description: | + Health Checks determine whether instances are responsive and able to do work. + They are an important part of a comprehensive load balancing configuration, + as they enable monitoring instances behind load balancers. + + Health Checks poll instances at a specified interval. 
Instances that + do not respond successfully to some number of probes in a row are marked + as unhealthy. No new connections are sent to unhealthy instances, + though existing connections will continue. The health check will + continue to poll unhealthy instances. If an instance later responds + successfully to some number of consecutive probes, it is marked + healthy again and can receive new connections. + + ~>**NOTE**: Legacy HTTP(S) health checks must be used for target pool-based network + load balancers. See the [official guide](https://cloud.google.com/load-balancing/docs/health-check-concepts#selecting_hc) + for choosing a type of health check. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/health-checks' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks' +docs: +base_url: 'projects/{{project}}/global/healthChecks' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + constants: 'templates/terraform/constants/go/health_check.tmpl' + encoder: 'templates/terraform/encoders/go/health_check_type.tmpl' +custom_diff: + - 'healthCheckCustomizeDiff' +examples: + - name: 'health_check_tcp' + primary_resource_id: 'tcp-health-check' + vars: + health_check_name: 'tcp-health-check' + - name: 'health_check_tcp_full' + primary_resource_id: 'tcp-health-check' + vars: + health_check_name: 'tcp-health-check' + - name: 'health_check_ssl' + primary_resource_id: 'ssl-health-check' + vars: + health_check_name: 'ssl-health-check' + - name: 'health_check_ssl_full' + primary_resource_id: 'ssl-health-check' + vars: + health_check_name: 
'ssl-health-check' + - name: 'health_check_http' + primary_resource_id: 'http-health-check' + vars: + health_check_name: 'http-health-check' + - name: 'health_check_http_full' + primary_resource_id: 'http-health-check' + vars: + health_check_name: 'http-health-check' + - name: 'health_check_https' + primary_resource_id: 'https-health-check' + vars: + health_check_name: 'https-health-check' + - name: 'health_check_https_full' + primary_resource_id: 'https-health-check' + vars: + health_check_name: 'https-health-check' + - name: 'health_check_http2' + primary_resource_id: 'http2-health-check' + vars: + health_check_name: 'http2-health-check' + - name: 'health_check_http2_full' + primary_resource_id: 'http2-health-check' + vars: + health_check_name: 'http2-health-check' + - name: 'health_check_grpc' + primary_resource_id: 'grpc-health-check' + vars: + health_check_name: 'grpc-health-check' + - name: 'health_check_grpc_full' + primary_resource_id: 'grpc-health-check' + vars: + health_check_name: 'grpc-health-check' + - name: 'health_check_with_logging' + primary_resource_id: 'health-check-with-logging' + min_version: 'beta' + vars: + health_check_name: 'tcp-health-check' +parameters: +properties: + - name: 'checkIntervalSec' + type: Integer + description: | + How often (in seconds) to send a health check. The default value is 5 + seconds. + default_value: 5 + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + send_empty_value: true + - name: 'healthyThreshold' + type: Integer + description: | + A so-far unhealthy instance will be marked healthy after this many + consecutive successes. The default value is 2. + default_value: 2 + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. 
The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the + last character, which cannot be a dash. + required: true + immutable: true + - name: 'timeoutSec' + type: Integer + description: | + How long (in seconds) to wait before claiming failure. + The default value is 5 seconds. It is invalid for timeoutSec to have + greater value than checkIntervalSec. + default_value: 5 + - name: 'unhealthyThreshold' + type: Integer + description: | + A so-far healthy instance will be marked unhealthy after this many + consecutive failures. The default value is 2. + default_value: 2 + - name: 'type' + type: Enum + description: |- + The type of the health check. One of HTTP, HTTPS, TCP, or SSL. + output: true + enum_values: + - 'TCP' + - 'SSL' + - 'HTTP' + - 'HTTPS' + - 'HTTP2' + - name: 'httpHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'host' + type: String + description: | + The value of the host header in the HTTP health check request. + If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + - name: 'requestPath' + type: String + description: | + The request path of the HTTP health check request. + The default value is /. 
+ at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + default_value: / + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the HTTP health check request. + The default value is 80. + at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. 
+ at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. + + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, HTTP health check follows behavior specified in `port` and + `portName` fields. + at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'httpsHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'host' + type: String + description: | + The value of the host header in the HTTPS health check request. + If left empty (default value), the public IP on behalf of which this health + check is performed will be used. 
+ at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + - name: 'requestPath' + type: String + description: | + The request path of the HTTPS health check request. + The default value is /. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + default_value: / + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the HTTPS health check request. + The default value is 443. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. 
+ at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. + + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, HTTPS health check follows behavior specified in `port` and + `portName` fields. 
+ at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'tcpHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'request' + type: String + description: | + The application data to send once the TCP connection has been + established (default value is empty). If both request and response are + empty, the connection establishment alone will indicate health. The request + data can only be ASCII. + at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the TCP health check request. + The default value is 443. 
+ at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. + at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. + + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, TCP health check follows behavior specified in `port` and + `portName` fields. 
+ at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'sslHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'request' + type: String + description: | + The application data to send once the SSL connection has been + established (default value is empty). If both request and response are + empty, the connection establishment alone will indicate health. The request + data can only be ASCII. + at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the SSL health check request. + The default value is 443. 
+ at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. + at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. + + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, SSL health check follows behavior specified in `port` and + `portName` fields. 
+ at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'http2HealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'host' + type: String + description: | + The value of the host header in the HTTP2 health check request. + If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + - name: 'requestPath' + type: String + description: | + The request path of the HTTP2 health check request. + The default value is /. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + default_value: / + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. 
+ at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the HTTP2 health check request. + The default value is 443. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. 
+ + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, HTTP2 health check follows behavior specified in `port` and + `portName` fields. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'grpcHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'port' + type: Integer + description: | + The port number for the health check request. + Must be specified if portName and portSpecification are not set + or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. + at_least_one_of: + - 'grpc_health_check.0.port' + - 'grpc_health_check.0.port_name' + - 'grpc_health_check.0.port_specification' + - 'grpc_health_check.0.grpc_service_name' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'grpc_health_check.0.port' + - 'grpc_health_check.0.port_name' + - 'grpc_health_check.0.port_specification' + - 'grpc_health_check.0.grpc_service_name' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. 
+ + * `USE_NAMED_PORT`: The `portName` is used for health checking. + + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, gRPC health check follows behavior specified in `port` and + `portName` fields. + at_least_one_of: + - 'grpc_health_check.0.port' + - 'grpc_health_check.0.port_name' + - 'grpc_health_check.0.port_specification' + - 'grpc_health_check.0.grpc_service_name' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'grpcServiceName' + type: String + description: | + The gRPC service name for the health check. + The value of grpcServiceName has the following meanings by convention: + - Empty serviceName means the overall status of all services at the backend. + - Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. + The grpcServiceName can only be ASCII. + at_least_one_of: + - 'grpc_health_check.0.port' + - 'grpc_health_check.0.port_name' + - 'grpc_health_check.0.port_specification' + - 'grpc_health_check.0.grpc_service_name' + - name: 'logConfig' + type: NestedObject + description: | + Configure logging on this health check. + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/health_check_log_config.go.tmpl' + properties: + - name: 'enable' + type: Boolean + description: | + Indicates whether or not to export logs. This is false by default, + which means no health check logging will be done. + default_value: false diff --git a/mmv1/products/compute/go_HttpHealthCheck.yaml b/mmv1/products/compute/go_HttpHealthCheck.yaml new file mode 100644 index 000000000000..f24acd5ea12c --- /dev/null +++ b/mmv1/products/compute/go_HttpHealthCheck.yaml @@ -0,0 +1,124 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'HttpHealthCheck' +kind: 'compute#httpHealthCheck' +description: | + An HttpHealthCheck resource. This resource defines a template for how + individual VMs should be checked for health, via HTTP. + + + ~> **Note:** google_compute_http_health_check is a legacy health check. + The newer [google_compute_health_check](/docs/providers/google/r/compute_health_check.html) + should be preferred for all uses except + [Network Load Balancers](https://cloud.google.com/compute/docs/load-balancing/network/) + which still require the legacy version. 
+references: + guides: + 'Adding Health Checks': 'https://cloud.google.com/compute/docs/load-balancing/health-checks#legacy_health_checks' + api: 'https://cloud.google.com/compute/docs/reference/v1/httpHealthChecks' +docs: +base_url: 'projects/{{project}}/global/httpHealthChecks' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'http_health_check_basic' + primary_resource_id: 'default' + vars: + http_health_check_name: 'authentication-health-check' +parameters: +properties: + - name: 'checkIntervalSec' + type: Integer + description: | + How often (in seconds) to send a health check. The default value is 5 + seconds. + default_value: 5 + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + - name: 'healthyThreshold' + type: Integer + description: | + A so-far unhealthy instance will be marked healthy after this many + consecutive successes. The default value is 2. + default_value: 2 + - name: 'host' + type: String + description: | + The value of the host header in the HTTP health check request. If + left empty (default value), the public IP on behalf of which this + health check is performed will be used. + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. 
Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the + last character, which cannot be a dash. + required: true + immutable: true + - name: 'port' + type: Integer + description: | + The TCP port number for the HTTP health check request. + The default value is 80. + default_value: 80 + - name: 'requestPath' + type: String + description: | + The request path of the HTTP health check request. + The default value is /. + default_value: / + - name: 'timeoutSec' + type: Integer + description: | + How long (in seconds) to wait before claiming failure. + The default value is 5 seconds. It is invalid for timeoutSec to have + greater value than checkIntervalSec. + default_value: 5 + - name: 'unhealthyThreshold' + type: Integer + description: | + A so-far healthy instance will be marked unhealthy after this many + consecutive failures. The default value is 2. + default_value: 2 diff --git a/mmv1/products/compute/go_HttpsHealthCheck.yaml b/mmv1/products/compute/go_HttpsHealthCheck.yaml new file mode 100644 index 000000000000..f08af7f264dd --- /dev/null +++ b/mmv1/products/compute/go_HttpsHealthCheck.yaml @@ -0,0 +1,124 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'HttpsHealthCheck' +kind: 'compute#httpsHealthCheck' +description: | + An HttpsHealthCheck resource. This resource defines a template for how + individual VMs should be checked for health, via HTTPS. + + + ~> **Note:** google_compute_https_health_check is a legacy health check. + The newer [google_compute_health_check](/docs/providers/google/r/compute_health_check.html) + should be preferred for all uses except + [Network Load Balancers](https://cloud.google.com/compute/docs/load-balancing/network/) + which still require the legacy version. +references: + guides: + 'Adding Health Checks': 'https://cloud.google.com/compute/docs/load-balancing/health-checks#legacy_health_checks' + api: 'https://cloud.google.com/compute/docs/reference/v1/httpsHealthChecks' +docs: +base_url: 'projects/{{project}}/global/httpsHealthChecks' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'https_health_check_basic' + primary_resource_id: 'default' + vars: + https_health_check_name: 'authentication-health-check' +parameters: +properties: + - name: 'checkIntervalSec' + type: Integer + description: | + How often (in seconds) to send a health check. The default value is 5 + seconds. + default_value: 5 + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. 
+ - name: 'healthyThreshold' + type: Integer + description: | + A so-far unhealthy instance will be marked healthy after this many + consecutive successes. The default value is 2. + default_value: 2 + - name: 'host' + type: String + description: | + The value of the host header in the HTTPS health check request. If + left empty (default value), the public IP on behalf of which this + health check is performed will be used. + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the + last character, which cannot be a dash. + required: true + immutable: true + - name: 'port' + type: Integer + description: | + The TCP port number for the HTTPS health check request. + The default value is 443. + default_value: 443 + - name: 'requestPath' + type: String + description: | + The request path of the HTTPS health check request. + The default value is /. + default_value: / + - name: 'timeoutSec' + type: Integer + description: | + How long (in seconds) to wait before claiming failure. + The default value is 5 seconds. It is invalid for timeoutSec to have + greater value than checkIntervalSec. + default_value: 5 + - name: 'unhealthyThreshold' + type: Integer + description: | + A so-far healthy instance will be marked unhealthy after this many + consecutive failures. The default value is 2. + default_value: 2 diff --git a/mmv1/products/compute/go_Image.yaml b/mmv1/products/compute/go_Image.yaml new file mode 100644 index 000000000000..40505e568024 --- /dev/null +++ b/mmv1/products/compute/go_Image.yaml @@ -0,0 +1,275 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Image' +kind: 'compute#image' +description: | + Represents an Image resource. + + Google Compute Engine uses operating system images to create the root + persistent disks for your instances. You specify an image when you create + an instance. Images contain a boot loader, an operating system, and a + root file system. Linux operating system images are also capable of + running containers on Compute Engine. + + Images can be either public or custom. + + Public images are provided and maintained by Google, open-source + communities, and third-party vendors. By default, all projects have + access to these images and can use them to create instances. Custom + images are available only to your project. You can create a custom image + from root persistent disks and other images. Then, use the custom image + to create an instance. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/images' + api: 'https://cloud.google.com/compute/docs/reference/v1/images' +docs: +base_url: 'projects/{{project}}/global/images' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + allowed_iam_role: 'roles/compute.imageUser' + parent_resource_attribute: 'image' + iam_conditions_request_type: 'QUERY_PARAM' +custom_code: +examples: + - name: 'image_basic' + primary_resource_id: 'example' + primary_resource_name: 'fmt.Sprintf("tf-test-example-image%s", context["random_suffix"])' + vars: + image_name: 'example-image' + - name: 'image_guest_os' + primary_resource_id: 'example' + vars: + image_name: 'example-image' + - name: 'image_basic_storage_location' + primary_resource_id: 'example' + vars: + image_name: 'example-sl-image' + primary_resource_name: 'fmt.Sprintf("tf-test-sl-example-image%s", context["random_suffix"])' +parameters: +properties: + - name: 'archiveSizeBytes' + type: Integer + description: | + Size of the image tar.gz archive stored in Google Cloud Storage (in + bytes). + output: true + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + - name: 'storageLocations' + type: Array + description: | + Cloud Storage bucket storage location of the image + (regional or multi-regional). 
+ Reference link: https://cloud.google.com/compute/docs/reference/rest/v1/images + default_from_api: true + item_type: + type: String + - name: 'diskSizeGb' + type: Integer + description: | + Size of the image when restored onto a persistent disk (in GB). + # TODO(alexstephen): Build family support. + # Families use a different API + default_from_api: true + - name: 'family' + type: String + description: | + The name of the image family to which this image belongs. You can + create disks by specifying an image family instead of a specific + image name. The image family always returns its latest image that is + not deprecated. The name of the image family must comply with + RFC1035. + - name: 'guestOsFeatures' + type: Array + description: | + A list of features to enable on the guest operating system. + Applicable only for bootable images. + is_set: true + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'type' + type: Enum + description: | + The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. + required: true + enum_values: + - 'MULTI_IP_SUBNET' + - 'SECURE_BOOT' + - 'SEV_CAPABLE' + - 'UEFI_COMPATIBLE' + - 'VIRTIO_SCSI_MULTIQUEUE' + - 'WINDOWS' + - 'GVNIC' + - 'SEV_LIVE_MIGRATABLE' + - 'SEV_SNP_CAPABLE' + - 'SUSPEND_RESUME_COMPATIBLE' + - 'TDX_CAPABLE' + - 'SEV_LIVE_MIGRATABLE_V2' + - name: 'imageEncryptionKey' + type: NestedObject + description: | + Encrypts the image using a customer-supplied encryption key. + + After you encrypt an image with a customer-supplied key, you must + provide the same key if you use the image later (e.g. to create a + disk from the image) + properties: + - name: 'kmsKeySelfLink' + type: String + description: | + The self link of the encryption key that is stored in Google Cloud + KMS. 
+ api_name: kmsKeyName + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + custom_flatten: 'templates/terraform/custom_flatten/go/image_kms_key_name.go.tmpl' + - name: 'kmsKeyServiceAccount' + type: String + description: | + The service account being used for the encryption request for the + given KMS key. If absent, the Compute Engine default service + account is used. + - name: 'labels' + type: KeyValueLabels + description: Labels to apply to this Image. + immutable: false + update_url: 'projects/{{project}}/global/images/{{name}}/setLabels' + update_verb: 'POST' + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + output: true + update_url: 'projects/{{project}}/global/images/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'licenses' + type: Array + description: Any applicable license URI. + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'license' + type: ResourceRef + description: 'An applicable license URI' + resource: 'License' + imports: 'selfLink' + - name: 'name' + type: String + description: | + Name of the resource; provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the + last character, which cannot be a dash. + required: true + - name: 'rawDisk' + type: NestedObject + description: The parameters of the raw disk image. + ignore_read: true + properties: + - name: 'containerType' + type: Enum + description: | + The format used to encode and transmit the block device, which + should be TAR. 
This is just a container and transmission format + and not a runtime format. Provided by the client when the disk + image is created. + default_value: TAR + enum_values: + - 'TAR' + - name: 'sha1' + type: String + description: | + An optional SHA1 checksum of the disk image before unpackaging. + This is provided by the client when the disk image is created. + # TODO(alexstephen): Figure out cross-module ResourceRefs + api_name: sha1Checksum + - name: 'source' + type: String + description: | + The full Google Cloud Storage URL where disk storage is stored + You must provide either this property or the sourceDisk property + but not both. + required: true + - name: 'sourceDisk' + type: ResourceRef + description: | + The source disk to create this image based on. + You must provide either this property or the + rawDisk.source property but not both to create an image. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Disk' + imports: 'selfLink' + - name: 'sourceImage' + type: ResourceRef + description: | + URL of the source image used to create this image. In order to create an image, you must provide the full or partial + URL of one of the following: + + * The selfLink URL + * This property + * The rawDisk.source URL + * The sourceDisk URL + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Image' + imports: 'selfLink' + - name: 'sourceSnapshot' + type: ResourceRef + description: | + URL of the source snapshot used to create this image. 
+ + In order to create an image, you must provide the full or partial URL of one of the following: + + * The selfLink URL + * This property + * The sourceImage URL + * The rawDisk.source URL + * The sourceDisk URL + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Snapshot' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_Instance.yaml b/mmv1/products/compute/go_Instance.yaml new file mode 100644 index 000000000000..874134de2d9d --- /dev/null +++ b/mmv1/products/compute/go_Instance.yaml @@ -0,0 +1,659 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Instance' +kind: 'compute#instance' +description: | + An instance is a virtual machine (VM) hosted on Google's infrastructure. 
+exclude_resource: true +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/instances' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + allowed_iam_role: 'roles/compute.osLogin' + parent_resource_attribute: 'instance_name' + iam_conditions_request_type: 'QUERY_PARAM' +custom_code: +examples: + - name: 'instance_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-my-instance%s", context["random_suffix"])' + vars: + instance_name: 'my-instance' +parameters: + - name: 'zone' + type: ResourceRef + description: 'A reference to the zone where the machine resides.' + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' +properties: + - name: 'canIpForward' + type: Boolean + description: | + Allows this instance to send and receive packets with non-matching + destination or source IPs. This is required if you plan to use this + instance to forward routes. + - name: 'cpuPlatform' + type: String + description: The CPU platform used by this instance. + output: true + - name: 'creationTimestamp' + type: String + description: Creation timestamp in RFC3339 text format. + output: true + - name: 'deletionProtection' + type: Boolean + description: Whether the resource should be protected against deletion. + # The code for this update is custom because MM doesn't support + # sending empty bodies + the new option as a request parameter. 
+ update_url: '/projects/{{project}}/zones/{{zone}}/instances/{resourceId}/setDeletionProtection' + update_verb: 'POST' + - name: 'disks' + type: Array + description: | + An array of disks that are associated with the instances that are + created from this template. + immutable: true + item_type: + type: NestedObject + properties: + - name: 'autoDelete' + type: Boolean + description: | + Specifies whether the disk will be auto-deleted when the + instance is deleted (but not when the disk is detached from + the instance). + + Tip: Disks should be set to autoDelete=true + so that leftover disks are not left behind on machine + deletion. + - name: 'boot' + type: Boolean + description: | + Indicates that this is a boot disk. The virtual machine will + use the first partition of the disk for its root filesystem. + - name: 'deviceName' + type: String + description: | + Specifies a unique device name of your choice that is + reflected into the /dev/disk/by-id/google-* tree of a Linux + operating system running within the instance. This name can + be used to reference the device for mounting, resizing, and + so on, from within the instance. + - name: 'diskEncryptionKey' + type: NestedObject + description: | + Encrypts or decrypts a disk using a customer-supplied + encryption key. + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption key, + encoded in RFC 4648 base64 to either encrypt or decrypt + this resource. + - name: 'rsaEncryptedKey' + type: String + description: | + Specifies an RFC 4648 base64 encoded, RSA-wrapped + 2048-bit customer-supplied encryption key to either + encrypt or decrypt this resource. + - name: 'sha256' + type: String + description: | + The RFC 4648 base64 encoded SHA-256 hash of the + customer-supplied encryption key that protects this + resource. 
+ output: true + - name: 'index' + type: Integer + description: | + Assigns a zero-based index to this disk, where 0 is + reserved for the boot disk. For example, if you have many + disks attached to an instance, each disk would have a + unique index number. If not specified, the server will + choose an appropriate value. + - name: 'initializeParams' + type: NestedObject + description: | + Specifies the parameters for a new disk that will be + created alongside the new instance. Use initialization + parameters to create boot disks or local SSDs attached to + the new instance. + immutable: true + properties: + - name: 'diskName' + type: String + description: | + Specifies the disk name. If not specified, the default + is to use the name of the instance. + - name: 'diskSizeGb' + type: Integer + description: Specifies the size of the disk in base-2 GB. + # diskStorageType - deprecated + - name: 'diskType' + type: ResourceRef + description: | + Reference to a disk type. + Specifies the disk type to use to create the instance. + If not specified, the default is pd-standard. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'DiskType' + imports: 'selfLink' + - name: 'sourceImage' + type: String + description: | + The source image to create this disk. When creating a + new instance, one of initializeParams.sourceImage or + disks.source is required. To create a disk with one of + the public operating system images, specify the image + by its family name. + - name: 'provisionedIops' + type: Integer + description: | + Indicates how many IOPS to provision for the disk. This + sets the number of I/O operations per second that the + disk can handle. Note: Updating currently is only supported for + hyperdisk skus via disk update api/gcloud without the need to + delete and recreate the disk, hyperdisk allows for an update of + IOPS every 4 hours. 
To update your hyperdisk more frequently, + you'll need to manually delete and recreate it. + - name: 'provisionedThroughput' + type: Integer + description: | + Indicates how much throughput to provision for the disk. + This sets the number of throughput mb per second that + the disk can handle. Note: Updating currently is only supported + for hyperdisk skus via disk update api/gcloud without the need + to delete and recreate the disk, hyperdisk allows for an update + of throughput every 4 hours. To update your hyperdisk more + frequently, you'll need to manually delete and recreate it. + - name: 'enableConfidentialCompute' + type: Boolean + description: | + Whether this disk is using confidential compute mode. + Note: Only supported on hyperdisk skus, disk_encryption_key + is required when setting to true. + - name: 'sourceImageEncryptionKey' + type: NestedObject + description: | + The customer-supplied encryption key of the source + image. Required if the source image is protected by a + customer-supplied encryption key. + + Instance templates do not store customer-supplied + encryption keys, so you cannot create disks for + instances in a managed instance group if the source + images are encrypted with your own keys. + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption + key, encoded in RFC 4648 base64 to either encrypt + or decrypt this resource. + - name: 'sha256' + type: String + description: | + The RFC 4648 base64 encoded SHA-256 hash of the + customer-supplied encryption key that protects this + resource. + output: true + - name: 'interface' + type: Enum + description: | + Specifies the disk interface to use for attaching this + disk, which is either SCSI or NVME. The default is SCSI. + Persistent disks must always use SCSI and the request will + fail if you attempt to attach a persistent disk in any + other format than SCSI. 
+ enum_values: + - 'SCSI' + - 'NVME' + - name: 'mode' + type: Enum + description: | + The mode in which to attach this disk, either READ_WRITE or + READ_ONLY. If not specified, the default is to attach the + disk in READ_WRITE mode. + enum_values: + - 'READ_WRITE' + - 'READ_ONLY' + - name: 'source' + type: ResourceRef + description: | + Reference to a disk. When creating a new instance, + one of initializeParams.sourceImage or disks.source is required. + + If desired, you can also attach existing non-root + persistent disks using this property. This field is only + applicable for persistent disks. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Disk' + imports: 'selfLink' + - name: 'type' + type: Enum + description: | + Specifies the type of the disk, either SCRATCH or + PERSISTENT. If not specified, the default is PERSISTENT. + enum_values: + - 'SCRATCH' + - 'PERSISTENT' + - name: 'licenses' + type: Array + description: 'Any applicable publicly visible licenses.' + output: true + item_type: + type: String + - name: 'guestAccelerators' + type: Array + description: | + List of the type and count of accelerator cards attached to the + instance + item_type: + type: NestedObject + properties: + - name: 'acceleratorCount' + type: Integer + description: | + The number of the guest accelerator cards exposed to this + instance. + # TODO(alexstephen): Change to ResourceRef once AcceleratorType is + # created. + - name: 'acceleratorType' + type: String + description: | + Full or partial URL of the accelerator type resource to expose + to this instance. + - name: 'hostname' + type: String + description: | + The hostname of the instance to be created. The specified hostname + must be RFC1035 compliant. If hostname is not specified, the default + hostname is [INSTANCE_NAME].c.[PROJECT_ID].internal when using the + global DNS, and [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when + using zonal DNS. 
+ - name: 'id' + type: Integer + description: | + The unique identifier for the resource. This identifier is defined by + the server. + output: true + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + output: true + update_url: 'projects/{{project}}/zones/{{zone}}/instances/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'labels' + type: KeyValueLabels + description: | + Labels to apply to this instance. A list of key->value pairs. + immutable: false + update_url: 'projects/{{project}}/zones/{{zone}}/instances/{{name}}/setLabels' + update_verb: 'POST' + - name: 'metadata' + type: KeyValuePairs + description: | + The metadata key/value pairs to assign to instances that are + created from this template. These pairs can consist of custom + metadata or predefined keys. + - name: 'machineType' + type: ResourceRef + description: 'A reference to a machine type which defines VM kind.' + update_url: 'projects/{{project}}/zones/{{zone}}/instances/{{name}}/setMachineType' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'MachineType' + imports: 'selfLink' + - name: 'minCpuPlatform' + type: String + description: | + Specifies a minimum CPU platform for the VM instance. Applicable + values are the friendly names of CPU platforms + - name: 'name' + type: String + description: | + The name of the resource, provided by the client when initially + creating the resource. The resource name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a + lowercase letter, and all following characters must be a dash, + lowercase letter, or digit, except the last character, which cannot + be a dash. 
+ - name: 'networkInterfaces' + type: Array + description: | + An array of configurations for this interface. This specifies + how this interface is configured to interact with other + network services, such as connecting to the internet. Only + one network interface is supported per instance. + item_type: + type: NestedObject + properties: + - name: 'accessConfigs' + type: Array + description: | + An array of configurations for this interface. Currently, only + one access config, ONE_TO_ONE_NAT, is supported. If there are no + accessConfigs specified, then this instance will have no + external internet access. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The name of this access configuration. The + default and recommended name is External NAT but you can + use any arbitrary string you would like. For example, My + external IP or Network Access. + required: true + - name: 'natIP' + type: ResourceRef + description: | + Reference to an address. + An external IP address associated with this instance. + Specify an unused static external IP address available to + the project or leave this field undefined to use an IP + from a shared ephemeral IP address pool. If you specify a + static external IP address, it must live in the same + region as the zone of the instance. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Address' + imports: 'address' + - name: 'type' + type: Enum + description: | + The type of configuration. The default and only option is + ONE_TO_ONE_NAT. + required: true + enum_values: + - 'ONE_TO_ONE_NAT' + - name: 'setPublicPtr' + type: Boolean + description: | + Specifies whether a public DNS PTR record should be + created to map the external IP address of the instance + to a DNS domain name. + - name: 'publicPtrDomainName' + type: String + description: | + The DNS domain name for the public PTR record. 
You can + set this field only if the setPublicPtr field is + enabled. + - name: 'networkTier' + type: Enum + description: | + This signifies the networking tier used for configuring + this access configuration. If an AccessConfig is + specified without a valid external IP address, an + ephemeral IP will be created with this networkTier. If an + AccessConfig with a valid external IP address is + specified, it must match that of the networkTier + associated with the Address resource owning that IP. + enum_values: + - 'PREMIUM' + - 'STANDARD' + - name: 'aliasIpRanges' + type: Array + description: | + An array of alias IP ranges for this network interface. Can + only be specified for network interfaces on subnet-mode + networks. + item_type: + type: NestedObject + properties: + - name: 'ipCidrRange' + type: String + description: | + The IP CIDR range represented by this alias IP range. + This IP CIDR range must belong to the specified + subnetwork and cannot contain IP addresses reserved by + system or used by other network interfaces. This range + may be a single IP address (e.g. 10.2.3.4), a netmask + (e.g. /24) or a CIDR format string (e.g. 10.1.2.0/24). + - name: 'subnetworkRangeName' + type: String + description: | + Optional subnetwork secondary range name specifying + the secondary range from which to allocate the IP + CIDR range for this alias IP range. If left + unspecified, the primary range of the subnetwork will + be used. + - name: 'internalIpv6PrefixLength' + type: String + description: | + The prefix length of the primary internal IPv6 range. + - name: 'ipv6Address' + type: String + description: | + An IPv6 internal network address for this network interface. + If not specified, Google Cloud will automatically assign an + internal IPv6 address from the instance's subnetwork. + - name: 'name' + type: String + description: | + The name of the network interface, generated by the + server. 
For network devices, these are eth0, eth1, etc + output: true + - name: 'network' + type: ResourceRef + description: | + Specifies the title of an existing network. When creating + an instance, if neither the network nor the subnetwork is specified, + the default network global/networks/default is used; if the network + is not specified but the subnetwork is specified, the network is + inferred. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'networkIP' + type: String + description: | + An IPv4 internal network address to assign to the + instance for this network interface. If not specified + by the user, an unused internal IP is assigned by the + system. + - name: 'subnetwork' + type: ResourceRef + description: | + Reference to a VPC network. + If the network resource is in legacy mode, do not + provide this property. If the network is in auto + subnet mode, providing the subnetwork is optional. If + the network is in custom subnet mode, then this field + should be specified. + # networkInterfaces.kind is not necessary for convergence. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'networkAttachment' + type: ResourceRef + description: | + The URL of the network attachment that this interface should connect to in the following format: + projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. + min_version: 'beta' + resource: 'networkAttachment' + imports: 'selfLink' + - name: 'scheduling' + type: NestedObject + description: Sets the scheduling options for this instance. + properties: + - name: 'automaticRestart' + type: Boolean + description: | + Specifies whether the instance should be automatically restarted + if it is terminated by Compute Engine (not terminated by a user). 
+ You can only set the automatic restart option for standard + instances. Preemptible instances cannot be automatically + restarted. + - name: 'onHostMaintenance' + type: String + description: | + Defines the maintenance behavior for this instance. For standard + instances, the default behavior is MIGRATE. For preemptible + instances, the default and only possible behavior is TERMINATE. + For more information, see Setting Instance Scheduling Options. + - name: 'preemptible' + type: Boolean + description: | + Defines whether the instance is preemptible. This can only be set + during instance creation, it cannot be set or changed after the + instance has been created. + - name: 'serviceAccounts' + type: Array + description: | + A list of service accounts, with their specified scopes, authorized + for this instance. Only one service account per VM instance is + supported. + item_type: + type: NestedObject + properties: + - name: 'email' + type: String + description: Email address of the service account. + - name: 'scopes' + type: Array + description: | + The list of scopes to be made available for this service + account. + item_type: + type: String + - name: 'shieldedInstanceConfig' + type: NestedObject + description: + Configuration for various parameters related to shielded instances. + # The code for this update method is custom because MM does not support + # sending just the nested properties + update_url: 'projects/{{project}}/instances/{{name}}/updateShieldedInstanceConfig' + update_verb: 'PATCH' + properties: + - name: 'enableSecureBoot' + type: Boolean + description: Defines whether the instance has Secure Boot enabled. 
+ update_url: 'projects/{{project}}/instances/{{name}}/updateShieldedInstanceConfig' + update_verb: 'PATCH' + - name: 'enableVtpm' + type: Boolean + description: Defines whether the instance has the vTPM enabled + update_url: 'projects/{{project}}/instances/{{name}}/updateShieldedInstanceConfig' + update_verb: 'PATCH' + - name: 'enableIntegrityMonitoring' + type: Boolean + description: + Defines whether the instance has integrity monitoring enabled. + update_url: 'projects/{{project}}/instances/{{name}}/updateShieldedInstanceConfig' + update_verb: 'PATCH' + - name: 'confidentialInstanceConfig' + type: NestedObject + description: + 'Configuration for confidential computing (requires setting the machine + type to any of the n2d-* types and a boot disk of type pd-ssd).' + properties: + - name: 'enableConfidentialCompute' + type: Boolean + description: Enables confidential computing with AMD SEV. + at_least_one_of: + - 'confidential_instance_config.0.enable_confidential_compute' + - 'confidential_instance_config.0.confidential_instance_type' + - name: 'confidentialInstanceType' + type: Enum + description: | + The confidential computing technology the instance uses. + SEV is an AMD feature. One of the following values: SEV, SEV_SNP. + If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required. + min_version: 'beta' + at_least_one_of: + - 'confidential_instance_config.0.enable_confidential_compute' + - 'confidential_instance_config.0.confidential_instance_type' + enum_values: + - 'SEV' + - 'SEV_SNP' + - name: 'status' + type: Enum + description: | + The status of the instance. One of the following values: + PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, + and TERMINATED. + + As a user, use RUNNING to keep a machine "on" and TERMINATED to + turn a machine off + # GCP API shows this as output: true. 
+ # This is incorrect because you can make actions on the Instance (start, stop) + # In an idempotent world, the best way to express these actions is to + # change the status value. + enum_values: + - 'PROVISIONING' + - 'STAGING' + - 'RUNNING' + - 'STOPPING' + - 'SUSPENDING' + - 'SUSPENDED' + - 'TERMINATED' + - name: 'statusMessage' + type: String + description: An optional, human-readable explanation of the status. + output: true + - name: 'tags' + type: NestedObject + description: | + A list of tags to apply to this instance. Tags are used to identify + valid sources or targets for network firewalls and are specified by + the client during instance creation. The tags can be later modified + by the setTags method. Each tag within the list must comply with + RFC1035. + properties: + - name: 'fingerprint' + type: String + description: | + Specifies a fingerprint for this request, which is essentially a + hash of the metadata's contents and used for optimistic locking. + The fingerprint is initially generated by Compute Engine and + changes after every request to modify or update metadata. You + must always provide an up-to-date fingerprint hash in order to + update or change metadata. + - name: 'items' + type: Array + description: | + An array of tags. Each tag must be 1-63 characters long, and + comply with RFC1035. + item_type: + type: String diff --git a/mmv1/products/compute/go_InstanceGroup.yaml b/mmv1/products/compute/go_InstanceGroup.yaml new file mode 100644 index 000000000000..634f78fecdeb --- /dev/null +++ b/mmv1/products/compute/go_InstanceGroup.yaml @@ -0,0 +1,121 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'InstanceGroup' +kind: 'compute#instanceGroup' +description: | + Represents an Instance Group resource. Instance groups are self-managed + and can contain identical or different instances. Instance groups do not + use an instance template. Unlike managed instance groups, you must create + and add instances to an instance group manually. +exclude: true +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +parameters: + - name: 'zone' + type: ResourceRef + description: 'A reference to the zone where the instance group resides.' + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + # 'fingerprint' not applicable to state convergence. 
+ - name: 'id' + type: Integer + description: 'A unique identifier for this instance group.' + output: true + - name: 'name' + type: String + description: | + The name of the instance group. + The name must be 1-63 characters long, and comply with RFC1035. + - name: 'namedPorts' + type: Array + description: | + Assigns a name to a port number. + For example: {name: "http", port: 80}. + + This allows the system to reference ports by the assigned name + instead of a port number. Named ports can also contain multiple + ports. + + For example: [{name: "http", port: 80},{name: "http", port: 8080}] + + Named ports apply to all instances in this instance group. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The name for this named port. + The name must be 1-63 characters long, and comply with RFC1035. + - name: 'port' + type: Integer + description: | + The port number, which can be a value between 1 and 65535. + - name: 'network' + type: ResourceRef + description: | + The network to which all instances in the instance group belong. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'region' + type: ResourceRef + description: | + The region where the instance group is located + (for regional resources). + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'selfLink' + - name: 'subnetwork' + type: ResourceRef + description: | + The subnetwork to which all instances in the instance group belong. 
+ custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Subnetwork' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_InstanceGroupManager.yaml b/mmv1/products/compute/go_InstanceGroupManager.yaml new file mode 100644 index 000000000000..7f560b060068 --- /dev/null +++ b/mmv1/products/compute/go_InstanceGroupManager.yaml @@ -0,0 +1,219 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'InstanceGroupManager' +kind: 'compute#instanceGroupManager' +description: | + Creates a managed instance group using the information that you specify in + the request. After the group is created, it schedules an action to create + instances in the group using the specified instance template. This + operation is marked as DONE when the group is created even if the + instances in the group have not yet been created. You must separately + verify the status of the individual instances. + + A managed instance group can have up to 1000 VM instances per group. 
+exclude: true +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/instanceGroupManagers' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +parameters: + - name: 'zone' + type: ResourceRef + description: 'The zone the managed instance group resides.' + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' +properties: + - name: 'baseInstanceName' + type: String + description: | + The base instance name to use for instances in this group. The value + must be 1-58 characters long. Instances are named by appending a + hyphen and a random four-character string to the base instance name. + The base instance name must comply with RFC1035. + required: true + - name: 'creationTimestamp' + type: Time + description: | + The creation timestamp for this managed instance group in RFC3339 + text format. + output: true + - name: 'currentActions' + type: NestedObject + description: | + The list of instance actions and the number of instances in this + managed instance group that are scheduled for each of those actions. + output: true + properties: + - name: 'abandoning' + type: Integer + description: | + The total number of instances in the managed instance group that + are scheduled to be abandoned. Abandoning an instance removes it + from the managed instance group without deleting it. + output: true + - name: 'creating' + type: Integer + description: | + The number of instances in the managed instance group that are + scheduled to be created or are currently being created. 
If the + group fails to create any of these instances, it tries again until + it creates the instance successfully. + + If you have disabled creation retries, this field will not be + populated; instead, the creatingWithoutRetries field will be + populated. + output: true + - name: 'creatingWithoutRetries' + type: Integer + description: | + The number of instances that the managed instance group will + attempt to create. The group attempts to create each instance only + once. If the group fails to create any of these instances, it + decreases the group's targetSize value accordingly. + output: true + - name: 'deleting' + type: Integer + description: | + The number of instances in the managed instance group that are + scheduled to be deleted or are currently being deleted. + output: true + - name: 'none' + type: Integer + description: | + The number of instances in the managed instance group that are + running and have no scheduled actions. + output: true + - name: 'recreating' + type: Integer + description: | + The number of instances in the managed instance group that are + scheduled to be recreated or are currently being recreated. + Recreating an instance deletes the existing root persistent disk + and creates a new disk from the image that is defined in the + instance template. + output: true + - name: 'refreshing' + type: Integer + description: | + The number of instances in the managed instance group that are + being reconfigured with properties that do not require a restart + or a recreate action. For example, setting or removing target + pools for the instance. + output: true + - name: 'restarting' + type: Integer + description: | + The number of instances in the managed instance group that are + scheduled to be restarted or are currently being restarted. + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. 
+ immutable: true + - name: 'id' + type: Integer + description: 'A unique identifier for this resource' + output: true + - name: 'instanceGroup' + type: ResourceRef + description: 'The instance group being managed' + output: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'InstanceGroup' + imports: 'selfLink' + - name: 'instanceTemplate' + type: ResourceRef + description: | + The instance template that is specified for this managed instance + group. The group uses this template to create all new instances in the + managed instance group. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'InstanceTemplate' + imports: 'selfLink' + - name: 'name' + type: String + description: | + The name of the managed instance group. The name must be 1-63 + characters long, and comply with RFC1035. + required: true + - name: 'namedPorts' + type: Array + description: + Named ports configured for the Instance Groups complementary to this + Instance Group Manager. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The name for this named port. The name must be 1-63 characters + long, and comply with RFC1035. + - name: 'port' + type: Integer + description: + The port number, which can be a value between 1 and 65535. + - name: 'region' + type: ResourceRef + description: | + The region this managed instance group resides + (for regional resources). + output: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'selfLink' + - name: 'targetPools' + type: Array + description: | + TargetPool resources to which instances in the instanceGroup field are + added. The target pools automatically apply to all of the instances in + the managed instance group. 
+ custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'targetPool' + type: ResourceRef + description: 'The targetPool to receive managed instances.' + resource: 'TargetPool' + imports: 'selfLink' + - name: 'targetSize' + type: Integer + description: | + The target number of running instances for this managed instance + group. Deleting or abandoning instances reduces this number. Resizing + the group changes this number. diff --git a/mmv1/products/compute/go_InstanceGroupMembership.yaml b/mmv1/products/compute/go_InstanceGroupMembership.yaml new file mode 100644 index 000000000000..d5592ed914cb --- /dev/null +++ b/mmv1/products/compute/go_InstanceGroupMembership.yaml @@ -0,0 +1,107 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'InstanceGroupMembership' +kind: 'compute#instanceGroup' +description: | + Represents the Instance membership to the Instance Group. + + -> **NOTE** You can use this resource instead of the `instances` field in the + `google_compute_instance_group`, however it's not recommended to use it alongside this field. + It might cause inconsistencies, as they can end up competing over control. + + -> **NOTE** This resource has been added to avoid a situation, where after + Instance is recreated, it's removed from Instance Group and it's needed to + perform `apply` twice. 
To avoid situations like this, please use this resource + with the lifecycle `replace_triggered_by` method, with the passed Instance's ID. +references: + guides: + 'Add instances': 'https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups/addInstances' + 'Remove instances': 'https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups/removeInstances' + 'List instances': 'https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups/listInstances' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroups' +docs: +id_format: '{{project}}/{{zone}}/{{instance_group}}/{{instance}}' +base_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{instance_group}}' +self_link: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{instance_group}}/listInstances' +create_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{instance_group}}/addInstances' +read_verb: 'POST' +delete_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{instance_group}}/removeInstances' +delete_verb: 'POST' +immutable: true +mutex: instanceGroups/{{project}}/zones/{{zone}}/{{instance_group}} +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - instance +nested_query: + keys: + - items + is_list_of_ids: false + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/compute_instance_group_membership.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/compute_instance_group_membership.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/compute_instance_group_membership.go.tmpl' +exclude_tgc: true +examples: + - name: 'instance_group_membership' + vars: + network_name: 
'network' + instance_group_name: 'instance-group' + instance_name: 'instance' + skip_test: true +parameters: + - name: 'zone' + type: ResourceRef + description: 'A reference to the zone where the instance group resides.' + url_param_only: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' + - name: 'instanceGroup' + type: ResourceRef + description: | + Represents an Instance Group resource name that the instance belongs to. + url_param_only: true + required: true + ignore_read: true + diff_suppress_func: 'tpgresource.CompareResourceNames' + resource: 'InstanceGroup' + imports: 'name' +properties: + - name: 'instance' + type: ResourceRef + description: 'An instance being added to the InstanceGroup' + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/full_to_relative_path.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Instance' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_InstanceGroupNamedPort.yaml b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml new file mode 100644 index 000000000000..2a896ad3aad0 --- /dev/null +++ b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml @@ -0,0 +1,105 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'InstanceGroupNamedPort' +description: | + Manage the named ports setting for a managed instance group without + managing the group as a whole. This resource is primarily intended for use + with GKE-generated groups that shouldn't otherwise be managed by other + tools. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/instance-groups/' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroup' +docs: +id_format: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/{{port}}/{{name}}' +base_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}' +self_link: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}' +create_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts' +delete_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts' +delete_verb: 'POST' +immutable: true +mutex: projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}} +import_format: + - 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/{{port}}/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - port + - name +nested_query: + keys: + - namedPorts + is_list_of_ids: false + modify_by_patch: true +custom_code: + encoder: 'templates/terraform/encoders/go/normalize_group.go.tmpl' +examples: + - name: 'instance_group_named_port_gke' + primary_resource_id: 'my_port' + vars: + network_name: 'container-network' + subnetwork_name: 'container-subnetwork' + gke_cluster_name: 'my-cluster' + deletion_protection: 'true' + test_vars_overrides: + 
'deletion_protection': 'false' + skip_vcr: true +parameters: + - name: 'group' + type: ResourceRef + description: | + The name of the instance group. + url_param_only: true + required: true + diff_suppress_func: 'tpgresource.CompareResourceNames' + resource: 'InstanceGroup' + imports: 'name' + - name: 'zone' + type: ResourceRef + description: | + The zone of the instance group. + url_param_only: true + required: false + ignore_read: true + default_from_api: true + resource: 'Zone' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + The name for this named port. The name must be 1-63 characters + long, and comply with RFC1035. + required: true + - name: 'port' + type: Integer + description: The port number, which can be a value between 1 and 65535. + required: true diff --git a/mmv1/products/compute/go_InstanceSettings.yaml b/mmv1/products/compute/go_InstanceSettings.yaml new file mode 100644 index 000000000000..4dcf97880ec1 --- /dev/null +++ b/mmv1/products/compute/go_InstanceSettings.yaml @@ -0,0 +1,80 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'InstanceSettings' +kind: 'compute#instanceSettings' +description: | + Represents an Instance Settings resource. 
Instance settings are centralized configuration parameters that allow users to configure the default values for specific VM parameters that are normally set using GCE instance API methods. +references: + guides: + 'Update Instance Settings': 'https://cloud.google.com/compute/docs/metadata/setting-custom-metadata#set-custom-project-zonal-metadata' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/instanceSettings' +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/instanceSettings' +self_link: 'projects/{{project}}/zones/{{zone}}/instanceSettings' +create_url: 'projects/{{project}}/zones/{{zone}}/instanceSettings?update_mask=*' +create_verb: 'PATCH' +update_url: 'projects/{{project}}/zones/{{zone}}/instanceSettings?update_mask=*' +update_verb: 'PATCH' +import_format: + - 'projects/{{project}}/zones/{{zone}}/instanceSettings' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: + custom_delete: 'templates/terraform/custom_delete/go/clear_instance_settings.go.tmpl' + test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl' +examples: + - name: 'instance_settings_basic' + primary_resource_id: 'gce_instance_settings' +parameters: + - name: 'zone' + type: ResourceRef + description: 'A reference to the zone where the machine resides.' + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' +properties: + - name: 'fingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. 
+ output: true + custom_expand: 'templates/terraform/custom_expand/go/compute_instance_settings_fingerprint.tmpl' + - name: 'metadata' + type: NestedObject + description: | + The metadata key/value pairs assigned to all the instances in the corresponding scope. + properties: + - name: 'items' + type: KeyValuePairs + description: | + A metadata key/value items map. The total size of all keys and values must be less than 512KB diff --git a/mmv1/products/compute/go_Interconnect.yaml b/mmv1/products/compute/go_Interconnect.yaml new file mode 100644 index 000000000000..0091df5f4c47 --- /dev/null +++ b/mmv1/products/compute/go_Interconnect.yaml @@ -0,0 +1,403 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Interconnect' +kind: 'compute#Interconnect' +description: | + Represents an Interconnect resource. The Interconnect resource is a dedicated connection between + Google's network and your on-premises network. 
+references: + guides: + 'Create a Dedicated Interconnect': 'https://cloud.google.com/network-connectivity/docs/interconnect/concepts/dedicated-overview' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/interconnects' +docs: +base_url: 'projects/{{project}}/global/interconnects' +self_link: 'projects/{{project}}/global/interconnects/{{name}}' +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 10000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +examples: + - name: 'compute_interconnect_basic' + primary_resource_id: 'example-interconnect' + vars: + interconnect_name: 'example-interconnect' + customer_name: 'example_customer' + deletion_protection: 'true' + test_vars_overrides: + 'deletion_protection': 'false' + skip_test: true + - name: 'compute_interconnect_basic_test' + primary_resource_id: 'example-interconnect' + vars: + interconnect_name: 'example-interconnect' + skip_docs: true +parameters: +properties: + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you create the resource. + - name: 'creationTimestamp' + type: Time + description: | + Creation timestamp in RFC3339 text format. + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is created. The name must be + 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters + long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first + character must be a lowercase letter, and all following characters must be a dash, + lowercase letter, or digit, except the last character, which cannot be a dash. 
+    required: true
+    immutable: true
+    validation:
+      regex: '^[a-z]([-a-z0-9]*[a-z0-9])?$'
+  - name: 'location'
+    type: ResourceRef
+    description: |
+      URL of the InterconnectLocation object that represents where this connection is to be provisioned.
+    required: true
+    immutable: true
+    resource: 'InterconnectLocations'
+    imports: 'selfLink'
+  - name: 'linkType'
+    type: Enum
+    description: |
+      Type of link requested. Note that this field indicates the speed of each of the links in the
+      bundle, not the speed of the entire bundle. Can take one of the following values:
+        - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics.
+        - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics.
+    required: true
+    immutable: true
+    enum_values:
+      - 'LINK_TYPE_ETHERNET_10G_LR'
+      - 'LINK_TYPE_ETHERNET_100G_LR'
+  - name: 'requestedLinkCount'
+    type: Integer
+    description: |
+      Target number of physical links in the link bundle, as requested by the customer.
+    required: true
+    immutable: true
+  - name: 'interconnectType'
+    type: Enum
+    description: |
+      Type of interconnect. Note that a value IT_PRIVATE has been deprecated in favor of DEDICATED.
+      Can take one of the following values:
+        - PARTNER: A partner-managed interconnection shared between customers through a partner.
+        - DEDICATED: A dedicated physical interconnection with the customer.
+    required: true
+    immutable: true
+    enum_values:
+      - 'DEDICATED'
+      - 'PARTNER'
+      - 'IT_PRIVATE'
+  - name: 'adminEnabled'
+    type: Boolean
+    description: |
+      Administrative status of the interconnect. When this is set to true, the Interconnect is
+      functional and can carry traffic. When set to false, no packets can be carried over the
+      interconnect and no BGP routes are exchanged over it. By default, the status is set to true.
+ send_empty_value: true + default_value: true + - name: 'nocContactEmail' + type: String + description: | + Email address to contact the customer NOC for operations and maintenance notifications + regarding this Interconnect. If specified, this will be used for notifications in addition to + all other forms described, such as Cloud Monitoring logs alerting and Cloud Notifications. + This field is required for users who sign up for Cloud Interconnect using workforce identity + federation. + - name: 'customerName' + type: String + description: | + Customer name, to put in the Letter of Authorization as the party authorized to request a + crossconnect. + required: true + immutable: true + - name: 'operationalStatus' + type: Enum + description: | + The current status of this Interconnect's functionality, which can take one of the following: + - OS_ACTIVE: A valid Interconnect, which is turned up and is ready to use. Attachments may + be provisioned on this Interconnect. + - OS_UNPROVISIONED: An Interconnect that has not completed turnup. No attachments may be + provisioned on this Interconnect. + - OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal maintenance. No + attachments may be provisioned or updated on this Interconnect. + output: true + enum_values: + - 'OS_ACTIVE' + - 'OS_UNPROVISIONED' + - 'OS_UNDER_MAINTENANCE' + - name: 'provisionedLinkCount' + type: Integer + description: | + Number of links actually provisioned in this interconnect. + output: true + - name: 'interconnectAttachments' + type: Array + description: | + A list of the URLs of all InterconnectAttachments configured to use this Interconnect. + output: true + item_type: + type: String + - name: 'peerIpAddress' + type: String + description: | + IP address configured on the customer side of the Interconnect link. + The customer should configure this IP address during turnup when prompted by Google NOC. + This can be used only for ping tests. 
+    output: true
+  - name: 'googleIpAddress'
+    type: String
+    description: |
+      IP address configured on the Google side of the Interconnect link.
+      This can be used only for ping tests.
+    output: true
+  - name: 'googleReferenceId'
+    type: String
+    description: |
+      Google reference ID to be used when raising support tickets with Google or otherwise to debug
+      backend connectivity issues.
+    output: true
+  - name: 'expectedOutages'
+    type: Array
+    description: A list of outages expected for this Interconnect.
+    output: true
+    item_type:
+      type: NestedObject
+      properties:
+        - name: 'name'
+          type: String
+          description: |
+            Unique identifier for this outage notification.
+          output: true
+        - name: 'description'
+          type: String
+          description: |
+            A description about the purpose of the outage.
+          output: true
+        - name: 'source'
+          type: Enum
+          description: |
+            The party that generated this notification. Note that the value of NSRC_GOOGLE has been
+            deprecated in favor of GOOGLE. Can take the following value:
+              - GOOGLE: this notification was generated by Google.
+          output: true
+          enum_values:
+            - 'GOOGLE'
+        - name: 'state'
+          type: Enum
+          description: |
+            State of this notification. Note that the versions of this enum prefixed with "NS_" have
+            been deprecated in favor of the unprefixed values. Can take one of the following values:
+              - ACTIVE: This outage notification is active. The event could be in the past, present,
+              or future. See startTime and endTime for scheduling.
+              - CANCELLED: The outage associated with this notification was cancelled before the
+              outage was due to start.
+              - COMPLETED: The outage associated with this notification is complete.
+          output: true
+          enum_values:
+            - 'ACTIVE'
+            - 'CANCELLED'
+            - 'COMPLETED'
+        - name: 'issueType'
+          type: Enum
+          description: |
+            Form this outage is expected to take. Note that the versions of this enum prefixed with
+            "IT_" have been deprecated in favor of the unprefixed values.
Can take one of the + following values: + - OUTAGE: The Interconnect may be completely out of service for some or all of the + specified window. + - PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole should remain + up, but with reduced bandwidth. + output: true + enum_values: + - 'OUTAGE' + - 'PARTIAL_OUTAGE' + - name: 'affectedCircuits' + type: Array + description: | + If issueType is IT_PARTIAL_OUTAGE, a list of the Google-side circuit IDs that will be + affected. + output: true + item_type: + type: String + - name: 'startTime' + type: String + description: | + Scheduled start time for the outage (milliseconds since Unix epoch). + output: true + - name: 'endTime' + type: String + description: | + Scheduled end time for the outage (milliseconds since Unix epoch). + output: true + - name: 'circuitInfos' + type: Array + description: A list of CircuitInfo objects, that describe the individual circuits in this LAG. + output: true + item_type: + type: NestedObject + properties: + - name: 'googleCircuitId' + type: String + description: | + Google-assigned unique ID for this circuit. Assigned at circuit turn-up. + output: true + - name: 'googleDemarcId' + type: String + description: | + Google-side demarc ID for this circuit. Assigned at circuit turn-up and provided by + Google to the customer in the LOA. + output: true + - name: 'customerDemarcId' + type: String + description: | + Customer-side demarc ID for this circuit. + output: true + - name: 'labels' + type: KeyValueLabels + description: | + Labels for this resource. These can only be added or modified by the setLabels + method. Each label key/value pair must comply with RFC1035. Label values may be empty. + immutable: false + - name: 'labelFingerprint' + type: Fingerprint + description: | + A fingerprint for the labels being applied to this Interconnect, which is essentially a hash + of the labels set used for optimistic locking. 
The fingerprint is initially generated by
+      Compute Engine and changes after every request to modify or update labels.
+      You must always provide an up-to-date fingerprint hash in order to update or change labels,
+      otherwise the request will fail with error 412 conditionNotMet.
+    output: true
+  - name: 'state'
+    type: Enum
+    description: |
+      The current state of Interconnect functionality, which can take one of the following values:
+        - ACTIVE: The Interconnect is valid, turned up and ready to use.
+        Attachments may be provisioned on this Interconnect.
+        - UNPROVISIONED: The Interconnect has not completed turnup. No attachments may be
+        provisioned on this Interconnect.
+        - UNDER_MAINTENANCE: The Interconnect is undergoing internal maintenance. No attachments may
+        be provisioned or updated on this Interconnect.
+    output: true
+    enum_values:
+      - 'ACTIVE'
+      - 'UNPROVISIONED'
+      - 'UNDER_MAINTENANCE'
+  - name: 'satisfiesPzs'
+    type: Boolean
+    description: Reserved for future use.
+    output: true
+  - name: 'macsec'
+    type: NestedObject
+    description: |
+      Configuration that enables Media Access Control security (MACsec) on the Cloud
+      Interconnect connection between Google and your on-premises router.
+    properties:
+      - name: 'preSharedKeys'
+        type: Array
+        description: |
+          A keychain placeholder describing a set of named key objects along with their
+          start times. A MACsec CKN/CAK is generated for each key in the key chain.
+          Google router automatically picks the key with the most recent startTime when establishing
+          or re-establishing a MACsec secure link.
+        required: true
+        item_type:
+          type: NestedObject
+          properties:
+            - name: 'name'
+              type: String
+              description: |
+                A name for this pre-shared key. The name must be 1-63 characters long, and
+                comply with RFC1035.
Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. + required: true + validation: + regex: '^[a-z]([-a-z0-9]*[a-z0-9])?$' + - name: 'startTime' + type: String + description: | + A RFC3339 timestamp on or after which the key is valid. startTime can be in the + future. If the keychain has a single key, startTime can be omitted. If the keychain + has multiple keys, startTime is mandatory for each key. The start times of keys must + be in increasing order. The start times of two consecutive keys must be at least 6 + hours apart. + - name: 'failOpen' + type: Boolean + description: | + If set to true, the Interconnect connection is configured with a should-secure + MACsec security policy, that allows the Google router to fallback to cleartext + traffic if the MKA session cannot be established. By default, the Interconnect + connection is configured with a must-secure security policy that drops all traffic + if the MKA session cannot be established with your router. + - name: 'macsecEnabled' + type: Boolean + description: | + Enable or disable MACsec on this Interconnect connection. + MACsec enablement fails if the MACsec object is not specified. + - name: 'remoteLocation' + type: String + description: | + Indicates that this is a Cross-Cloud Interconnect. This field specifies the location outside + of Google's network that the interconnect is connected to. + - name: 'requestedFeatures' + type: Array + description: | + interconnects.list of features requested for this Interconnect connection. Options: MACSEC ( + If specified then the connection is created on MACsec capable hardware ports. If not + specified, the default value is false, which allocates non-MACsec capable ports first if + available). 
+ item_type: + type: Enum + description: | + interconnects.list of features requested for this Interconnect connection + enum_values: + - 'MACSEC' + - name: 'availableFeatures' + type: Array + description: | + interconnects.list of features available for this Interconnect connection. Can take the value: + MACSEC. If present then the Interconnect connection is provisioned on MACsec capable hardware + ports. If not present then the Interconnect connection is provisioned on non-MACsec capable + ports and MACsec isn't supported and enabling MACsec fails). + output: true + item_type: + type: Enum + description: | + interconnects.list of features available for this Interconnect connection, + enum_values: + - 'MACSEC' diff --git a/mmv1/products/compute/go_InterconnectAttachment.yaml b/mmv1/products/compute/go_InterconnectAttachment.yaml new file mode 100644 index 000000000000..b991a7353f58 --- /dev/null +++ b/mmv1/products/compute/go_InterconnectAttachment.yaml @@ -0,0 +1,340 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'InterconnectAttachment' +kind: 'compute#interconnectAttachment' +description: | + Represents an InterconnectAttachment (VLAN attachment) resource. For more + information, see Creating VLAN Attachments. 
+docs: +base_url: 'projects/{{project}}/regions/{{region}}/interconnectAttachments' +has_self_link: true +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + constants: 'templates/terraform/constants/go/interconnect_attachment.go.tmpl' + post_create: 'templates/terraform/post_create/go/interconnect_attachment.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/interconnect_attachment.go.tmpl' +examples: + - name: 'interconnect_attachment_basic' + primary_resource_id: 'on_prem' + vars: + interconnect_attachment_name: 'on-prem-attachment' + router_name: 'router-1' + network_name: 'network-1' + - name: 'interconnect_attachment_dedicated' + primary_resource_id: 'on_prem' + vars: + interconnect_name: 'interconenct-1' + interconnect_attachment_name: 'on-prem-attachment' + router_name: 'router-1' + network_name: 'network-1' + skip_docs: true + - name: 'compute_interconnect_attachment_ipsec_encryption' + primary_resource_id: 'ipsec-encrypted-interconnect-attachment' + vars: + interconnect_attachment_name: 'test-interconnect-attachment' + address_name: 'test-address' + router_name: 'test-router' + network_name: 'test-network' +parameters: + - name: 'region' + type: ResourceRef + description: | + Region where the regional interconnect attachment resides. + required: false + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'selfLink' +properties: + - name: 'adminEnabled' + type: Boolean + description: | + Whether the VLAN attachment is enabled or disabled. 
When using + PARTNER type this will Pre-Activate the interconnect attachment + send_empty_value: true + default_value: true + - name: 'cloudRouterIpAddress' + type: String + description: | + IPv4 address + prefix length to be configured on Cloud Router + Interface for this interconnect attachment. + output: true + - name: 'customerRouterIpAddress' + type: String + description: | + IPv4 address + prefix length to be configured on the customer + router subinterface for this interconnect attachment. + output: true + - name: 'interconnect' + type: String + description: | + URL of the underlying Interconnect object that this attachment's + traffic will traverse through. Required if type is DEDICATED, must not + be set if type is PARTNER. + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'description' + type: String + description: | + An optional description of this resource. + - name: 'mtu' + type: String + description: | + Maximum Transmission Unit (MTU), in bytes, of packets passing through + this interconnect attachment. Currently, only 1440 and 1500 are allowed. If not specified, the value will default to 1440. + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/float64_to_int_to_string.go.tmpl' + - name: 'bandwidth' + type: Enum + description: | + Provisioned bandwidth capacity for the interconnect attachment. + For attachments of type DEDICATED, the user can set the bandwidth. + For attachments of type PARTNER, the Google Partner that is operating the interconnect must set the bandwidth. 
+ Output only for PARTNER type, mutable for PARTNER_PROVIDER and DEDICATED, + Defaults to BPS_10G + default_from_api: true + enum_values: + - 'BPS_50M' + - 'BPS_100M' + - 'BPS_200M' + - 'BPS_300M' + - 'BPS_400M' + - 'BPS_500M' + - 'BPS_1G' + - 'BPS_2G' + - 'BPS_5G' + - 'BPS_10G' + - 'BPS_20G' + - 'BPS_50G' + - name: 'edgeAvailabilityDomain' + type: String + description: | + Desired availability domain for the attachment. Only available for type + PARTNER, at creation time. For improved reliability, customers should + configure a pair of attachments with one per availability domain. The + selected availability domain will be provided to the Partner via the + pairing key so that the provisioned circuit will lie in the specified + domain. If not specified, the value will default to AVAILABILITY_DOMAIN_ANY. + immutable: true + default_from_api: true + - name: 'pairingKey' + type: String + description: | + [Output only for type PARTNER. Not present for DEDICATED]. The opaque + identifier of an PARTNER attachment used to initiate provisioning with + a selected partner. Of the form "XXXXX/region/domain" + output: true + - name: 'partnerAsn' + type: String + description: | + [Output only for type PARTNER. Not present for DEDICATED]. Optional + BGP ASN for the router that should be supplied by a layer 3 Partner if + they configured BGP on behalf of the customer. + output: true + - name: 'privateInterconnectInfo' + type: NestedObject + description: | + Information specific to an InterconnectAttachment. This property + is populated if the interconnect that this is attached to is of type DEDICATED. + output: true + properties: + - name: 'tag8021q' + type: Integer + description: | + 802.1q encapsulation tag to be used for traffic between + Google and the customer, going to and from this network and region. + output: true + - name: 'type' + type: Enum + description: | + The type of InterconnectAttachment you wish to create. Defaults to + DEDICATED. 
+ immutable: true + default_from_api: true + enum_values: + - 'DEDICATED' + - 'PARTNER' + - 'PARTNER_PROVIDER' + - name: 'state' + type: Enum + description: | + [Output Only] The current state of this attachment's functionality. + output: true + enum_values: + - 'ACTIVE' + - 'DEFUNCT' + - 'PARTNER_REQUEST_RECEIVED' + - 'PENDING_CUSTOMER' + - 'PENDING_PARTNER' + - 'STATE_UNSPECIFIED' + - name: 'googleReferenceId' + type: String + description: | + Google reference ID, to be used when raising support tickets with + Google or otherwise to debug backend connectivity issues. + output: true + - name: 'router' + type: ResourceRef + description: | + URL of the cloud router to be used for dynamic routing. This router must be in + the same region as this InterconnectAttachment. The InterconnectAttachment will + automatically connect the Interconnect to the network & region within which the + Cloud Router is configured. + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Router' + imports: 'selfLink' + - name: 'creationTimestamp' + type: Time + description: | + Creation timestamp in RFC3339 text format. + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a + lowercase letter, and all following characters must be a dash, lowercase + letter, or digit, except the last character, which cannot be a dash. 
+ required: true + immutable: true + validation: + regex: '^[a-z]([-a-z0-9]*[a-z0-9])?$' + - name: 'candidateSubnets' + type: Array + description: | + Up to 16 candidate prefixes that can be used to restrict the allocation + of cloudRouterIpAddress and customerRouterIpAddress for this attachment. + All prefixes must be within link-local address space (169.254.0.0/16) + and must be /29 or shorter (/28, /27, etc). Google will attempt to select + an unused /29 from the supplied candidate prefix(es). The request will + fail if all possible /29s are in use on Google's edge. If not supplied, + Google will randomly select an unused /29 from all of link-local space. + immutable: true + ignore_read: true + item_type: + type: String + - name: 'vlanTag8021q' + type: Integer + description: | + The IEEE 802.1Q VLAN tag for this attachment, in the range 2-4094. When + using PARTNER type this will be managed upstream. + immutable: true + default_from_api: true + - name: 'ipsecInternalAddresses' + type: Array + description: | + URL of addresses that have been reserved for the interconnect attachment, + Used only for interconnect attachment that has the encryption option as + IPSEC. + The addresses must be RFC 1918 IP address ranges. When creating HA VPN + gateway over the interconnect attachment, if the attachment is configured + to use an RFC 1918 IP address, then the VPN gateway's IP address will be + allocated from the IP address range specified here. + For example, if the HA VPN gateway's interface 0 is paired to this + interconnect attachment, then an RFC 1918 IP address for the VPN gateway + interface 0 will be allocated from the IP address specified for this + interconnect attachment. + If this field is not specified for interconnect attachment that has + encryption option as IPSEC, later on when creating HA VPN gateway on this + interconnect attachment, the HA VPN gateway's IP address will be + allocated from regional external IP address pool. 
+ immutable: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'ipsecInternalAddress' + type: ResourceRef + description: | + URL of an address that has been reserved for the interconnect + attachment. + resource: 'Address' + imports: 'selfLink' + - name: 'encryption' + type: Enum + description: | + Indicates the user-supplied encryption option of this interconnect + attachment. Can only be specified at attachment creation for PARTNER or + DEDICATED attachments. + * NONE - This is the default value, which means that the VLAN attachment + carries unencrypted traffic. VMs are able to send traffic to, or receive + traffic from, such a VLAN attachment. + * IPSEC - The VLAN attachment carries only encrypted traffic that is + encrypted by an IPsec device, such as an HA VPN gateway or third-party + IPsec VPN. VMs cannot directly send traffic to, or receive traffic from, + such a VLAN attachment. To use HA VPN over Cloud Interconnect, the VLAN + attachment must be created with this option. + immutable: true + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: NONE + enum_values: + - 'NONE' + - 'IPSEC' + - name: 'stackType' + type: Enum + description: | + The stack type for this interconnect attachment to identify whether the IPv6 + feature is enabled or not. If not specified, IPV4_ONLY will be used. + This field can be both set at interconnect attachments creation and update + interconnect attachment operations. + default_from_api: true + enum_values: + - 'IPV4_IPV6' + - 'IPV4_ONLY' + - name: 'cloudRouterIpv6Address' + type: String + description: | + IPv6 address + prefix length to be configured on Cloud Router + Interface for this interconnect attachment. + output: true + - name: 'customerRouterIpv6Address' + type: String + description: | + IPv6 address + prefix length to be configured on the customer + router subinterface for this interconnect attachment. 
+ output: true + - name: 'subnetLength' + type: Integer + description: | + Length of the IPv4 subnet mask. Allowed values: 29 (default), 30. The default value is 29, + except for Cross-Cloud Interconnect connections that use an InterconnectRemoteLocation with a + constraints.subnetLengthRange.min equal to 30. For example, connections that use an Azure + remote location fall into this category. In these cases, the default value is 30, and + requesting 29 returns an error. Where both 29 and 30 are allowed, 29 is preferred, because it + gives Google Cloud Support more debugging visibility. + immutable: true + ignore_read: true diff --git a/mmv1/products/compute/go_License.yaml b/mmv1/products/compute/go_License.yaml new file mode 100644 index 000000000000..7774d584f274 --- /dev/null +++ b/mmv1/products/compute/go_License.yaml @@ -0,0 +1,47 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'License' +kind: 'compute#license' +description: | + A License resource represents a software license. Licenses are used to + track software usage in images, persistent disks, snapshots, and virtual + machine instances. 
+# Used as a resource reference +exclude: true +readonly: true +docs: +base_url: '/projects/{{project}}/global/licenses' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +collection_url_key: 'items' +custom_code: +parameters: +properties: + - name: 'name' + type: String + description: | + Name of the resource. The name is 1-63 characters long + and complies with RFC1035. + output: true + - name: 'chargesUseFee' + type: Boolean + description: | + If true, the customer will be charged license fee for + running software that contains this license on an instance. + output: true diff --git a/mmv1/products/compute/go_MachineImage.yaml b/mmv1/products/compute/go_MachineImage.yaml new file mode 100644 index 000000000000..80ab7f070583 --- /dev/null +++ b/mmv1/products/compute/go_MachineImage.yaml @@ -0,0 +1,140 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'MachineImage' +kind: 'compute#machineImage' +description: | + Represents a Machine Image resource. Machine images store all the configuration, + metadata, permissions, and data from one or more disks required to create a + Virtual machine (VM) instance. 
+min_version: 'beta' +references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/machine-images' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/machineImages' +docs: +base_url: 'projects/{{project}}/global/machineImages' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + allowed_iam_role: 'roles/compute.admin' + parent_resource_attribute: 'machine_image' + iam_conditions_request_type: 'QUERY_PARAM' +custom_code: +examples: + - name: 'machine_image_basic' + primary_resource_id: 'image' + primary_resource_name: 'fmt.Sprintf("tf-test-my-image%s", context["random_suffix"])' + vars: + vm_name: 'my-vm' + image_name: 'my-image' + - name: 'compute_machine_image_kms' + primary_resource_id: 'image' + primary_resource_name: 'fmt.Sprintf("tf-test-my-image%s", context["random_suffix"])' + vars: + vm_name: 'my-vm' + image_name: 'my-image' + key_name: 'key' + keyring_name: 'keyring' + test_vars_overrides: + 'policyChanged': 'acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter")' +parameters: +properties: + - name: 'name' + type: String + description: 'Name of the resource.' + min_version: 'beta' + required: true + - name: 'description' + type: String + description: 'A text description of the resource.' + min_version: 'beta' + - name: 'sourceInstance' + type: ResourceRef + description: + 'The source instance used to create the machine image. You can provide + this as a partial or full URL to the resource.' 
+ min_version: 'beta' + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Instance' + imports: 'selfLink' + - name: 'storageLocations' + type: Array + description: | + The regional or multi-regional Cloud Storage bucket location where the machine image is stored. + min_version: 'beta' + output: true + item_type: + type: String + - name: 'guestFlush' + type: Boolean + description: | + Specify this to create an application consistent machine image by informing the OS to prepare for the snapshot process. + Currently only supported on Windows instances using the Volume Shadow Copy Service (VSS). + min_version: 'beta' + - name: 'machineImageEncryptionKey' + type: NestedObject + description: | + Encrypts the machine image using a customer-supplied encryption key. + + After you encrypt a machine image with a customer-supplied key, you must + provide the same key if you use the machine image later (e.g. to create a + instance from the image) + min_version: 'beta' + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + min_version: 'beta' + - name: 'sha256' + type: String + description: | + The RFC 4648 base64 encoded SHA-256 hash of the + customer-supplied encryption key that protects this resource. + min_version: 'beta' + output: true + - name: 'kmsKeyName' + type: String + description: | + The name of the encryption key that is stored in Google Cloud KMS. + min_version: 'beta' + diff_suppress_func: 'tpgresource.CompareCryptoKeyVersions' + - name: 'kmsKeyServiceAccount' + type: String + description: | + The service account used for the encryption request for the given KMS key. + If absent, the Compute Engine Service Agent service account is used. 
+ min_version: 'beta' diff --git a/mmv1/products/compute/go_MachineType.yaml b/mmv1/products/compute/go_MachineType.yaml new file mode 100644 index 000000000000..7df93a83dfcf --- /dev/null +++ b/mmv1/products/compute/go_MachineType.yaml @@ -0,0 +1,131 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'MachineType' +kind: 'compute#machineType' +description: | + Represents a MachineType resource. Machine types determine the virtualized + hardware specifications of your virtual machine instances, such as the + amount of memory or number of virtual CPUs. +exclude: true +readonly: true +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/machineTypes' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +collection_url_key: 'items' +custom_code: +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'deprecated' + type: NestedObject + description: 'The deprecation status associated with this machine type.' + output: true + properties: + - name: 'deleted' + type: Time + description: | + An optional RFC3339 timestamp on or after which the state of this + resource is intended to change to DELETED. This is only + informational and the status will not change unless the client + explicitly changes it. 
+ output: true + - name: 'deprecated' + type: Time + description: | + An optional RFC3339 timestamp on or after which the state of this + resource is intended to change to DEPRECATED. This is only + informational and the status will not change unless the client + explicitly changes it. + output: true + - name: 'obsolete' + type: Time + description: | + An optional RFC3339 timestamp on or after which the state of this + resource is intended to change to OBSOLETE. This is only + informational and the status will not change unless the client + explicitly changes it. + output: true + - name: 'replacement' + type: String + description: | + The URL of the suggested replacement for a deprecated resource. + The suggested replacement resource must be the same kind of + resource as the deprecated resource. + output: true + - name: 'state' + type: Enum + description: | + The deprecation state of this resource. This can be DEPRECATED, + OBSOLETE, or DELETED. Operations which create a new resource + using a DEPRECATED resource will return successfully, but with a + warning indicating the deprecated resource and recommending its + replacement. Operations which use OBSOLETE or DELETED resources + will be rejected and result in an error. + output: true + enum_values: + - 'DEPRECATED' + - 'OBSOLETE' + - 'DELETED' + - name: 'description' + type: String + description: 'An optional textual description of the resource.' + output: true + - name: 'guestCpus' + type: Integer + description: | + The number of virtual CPUs that are available to the instance. + output: true + - name: 'id' + type: Integer + description: 'The unique identifier for the resource.' + output: true + - name: 'isSharedCpu' + type: Boolean + description: | + Whether this machine type has a shared CPU. See Shared-core machine + types for more information. + output: true + - name: 'maximumPersistentDisks' + type: Integer + description: 'Maximum persistent disks allowed.' 
+ output: true + - name: 'maximumPersistentDisksSizeGb' + type: Integer + description: 'Maximum total persistent disks size (GB) allowed.' + output: true + - name: 'memoryMb' + type: Integer + description: | + The amount of physical memory available to the instance, defined in + MB. + output: true + - name: 'name' + type: String + description: 'Name of the resource.' + - name: 'zone' + type: ResourceRef + description: 'The zone the machine type is defined.' + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' diff --git a/mmv1/products/compute/go_ManagedSslCertificate.yaml b/mmv1/products/compute/go_ManagedSslCertificate.yaml new file mode 100644 index 000000000000..2dd937bc54e6 --- /dev/null +++ b/mmv1/products/compute/go_ManagedSslCertificate.yaml @@ -0,0 +1,147 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ManagedSslCertificate' +kind: 'compute#sslCertificate' +description: | + An SslCertificate resource, used for HTTPS load balancing. This resource + represents a certificate for which the certificate secrets are created and + managed by Google. + + For a resource where you provide the key, see the + SSL Certificate resource. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/ssl-certificates' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates' +docs: + warning: 'This resource should be used with extreme caution! Provisioning an SSL +certificate is complex. Ensure that you understand the lifecycle of a +certificate before attempting complex tasks like cert rotation automatically. +This resource will "return" as soon as the certificate object is created, +but post-creation the certificate object will go through a "provisioning" +process. The provisioning process can complete only when the domain name +for which the certificate is created points to a target pool which, itself, +points at the certificate. Depending on your DNS provider, this may take +some time, and migrating from self-managed certificates to Google-managed +certificates may entail some downtime while the certificate provisions. + +In conclusion: Be extremely cautious. +' +base_url: 'projects/{{project}}/global/sslCertificates' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'managed_ssl_certificate_basic' + primary_resource_id: 'default' + vars: + cert_name: 'test-cert' + proxy_name: 'test-proxy' + url_map_name: 'url-map' + backend_service_name: 'backend-service' + dns_zone_name: 'dnszone' + forwarding_rule_name: 'forwarding-rule' + http_health_check_name: 'http-health-check' + - name: 'managed_ssl_certificate_recreation' + primary_resource_id: 'cert' + external_providers: 
["random", "time"] + skip_vcr: true +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + - name: 'certificate_id' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + default_from_api: true + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + + + These are in the same namespace as the managed SSL certificates. + - name: 'managed' + type: NestedObject + description: | + Properties relevant to a managed certificate. These will be used if the + certificate is managed (as indicated by a value of `MANAGED` in `type`). + properties: + - name: 'domains' + type: Array + description: | + Domains for which a managed SSL certificate will be valid. Currently, + there can be up to 100 domains in this list. + required: true + diff_suppress_func: 'tpgresource.AbsoluteDomainSuppress' + item_type: + type: String + max_size: 100 + - name: 'type' + type: Enum + description: | + Enum field whose value is always `MANAGED` - used to signal to the API + which type this is. + default_value: MANAGED + enum_values: + - 'MANAGED' + - name: 'subjectAlternativeNames' + type: Array + description: | + Domains associated with the certificate via Subject Alternative Name. 
+ output: true + item_type: + type: String + - name: 'expireTime' + type: Time + description: | + Expire time of the certificate in RFC3339 text format. + output: true diff --git a/mmv1/products/compute/go_Network.yaml b/mmv1/products/compute/go_Network.yaml new file mode 100644 index 000000000000..28cd90a53fe6 --- /dev/null +++ b/mmv1/products/compute/go_Network.yaml @@ -0,0 +1,181 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Network' +kind: 'compute#network' +description: | + Manages a VPC network or legacy network resource on GCP. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/vpc/docs/vpc' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/networks' +docs: +base_url: 'projects/{{project}}/global/networks' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + encoder: 'templates/terraform/encoders/go/compute_network.go.tmpl' + update_encoder: 'templates/terraform/update_encoder/go/compute_network.go.tmpl' + decoder: 'templates/terraform/decoders/go/compute_network.go.tmpl' + post_create: 'templates/terraform/post_create/go/compute_network_delete_default_route.tmpl' +examples: + - name: 'network_basic' + primary_resource_id: 'vpc_network' + vars: + network_name: 'vpc-network' + - name: 'network_custom_mtu' + primary_resource_id: 'vpc_network' + vars: + network_name: 'vpc-network' + test_env_vars: + project: 'PROJECT_NAME' + - name: 'network_custom_firewall_enforcement_order' + primary_resource_id: 'vpc_network' + vars: + network_name: 'vpc-network' + test_env_vars: + project: 'PROJECT_NAME' +virtual_fields: + - name: 'delete_default_routes_on_create' + description: | + If set to `true`, default routes (`0.0.0.0/0`) will be deleted + immediately after network creation. Defaults to `false`. + type: Boolean + default_value: false +parameters: +properties: + - name: 'description' + type: String + description: | + An optional description of this resource. The resource must be + recreated to modify this field. + immutable: true + - name: 'gateway_ipv4' + type: String + description: | + The gateway address for default routing out of the network. This value + is selected by GCP. 
+ api_name: gatewayIPv4 + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + validation: + function: 'verify.ValidateGCEName' + - name: 'numericId' + type: String + description: | + The unique identifier for the resource. This identifier is defined by the server. + output: true + - name: 'autoCreateSubnetworks' + type: Boolean + description: | + When set to `true`, the network is created in "auto subnet mode" and + it will create a subnet for each region automatically across the + `10.128.0.0/9` address range. + + When set to `false`, the network is created in "custom subnet mode" so + the user can explicitly connect subnetwork resources. + immutable: true + send_empty_value: true + default_value: true + - name: 'routingConfig' + type: NestedObject + description: | + The network-level routing configuration for this network. Used by Cloud + Router to determine what type of network-wide routing behavior to + enforce. + update_url: 'projects/{{project}}/global/networks/{{name}}' + update_verb: 'PATCH' + flatten_object: true + properties: + - name: 'routingMode' + type: Enum + description: | + The network-wide routing mode to use. If set to `REGIONAL`, this + network's cloud routers will only advertise routes with subnetworks + of this network in the same region as the router. If set to `GLOBAL`, + this network's cloud routers will advertise routes with all + subnetworks of this network, across regions. 
+        required: false
+        default_from_api: true
+        update_url: 'projects/{{project}}/global/networks/{{name}}'
+        update_verb: 'PATCH'
+        enum_values:
+          - 'REGIONAL'
+          - 'GLOBAL'
+  - name: 'mtu'
+    type: Integer
+    description: |
+      Maximum Transmission Unit in bytes. The default value is 1460 bytes.
+      The minimum value for this field is 1300 and the maximum value is 8896 bytes (jumbo frames).
+      Note that packets larger than 1500 bytes (standard Ethernet) can be subject to TCP-MSS clamping or dropped
+      with an ICMP `Fragmentation-Needed` message if the packets are routed to the Internet or other VPCs
+      with varying MTUs.
+    immutable: true
+    default_from_api: true
+  - name: 'enableUlaInternalIpv6'
+    type: Boolean
+    description: |
+      Enable ULA internal ipv6 on this network. Enabling this feature will assign
+      a /48 from google defined ULA prefix fd20::/20.
+    immutable: true
+  - name: 'internalIpv6Range'
+    type: String
+    description: |
+      When enabling ula internal ipv6, caller optionally can specify the /48 range
+      they want from the google defined ULA prefix fd20::/20. The input must be a
+      valid /48 ULA IPv6 address and must be within the fd20::/20. Operation will
+      fail if the specified /48 is already in use by another resource.
+      If the field is not specified, then a /48 range will be randomly allocated from fd20::/20 and returned via this field.
+    immutable: true
+    default_from_api: true
+  - name: 'networkFirewallPolicyEnforcementOrder'
+    type: Enum
+    description: |
+      Set the order that Firewall Rules and Firewall Policies are evaluated.
+ update_url: 'projects/{{project}}/global/networks/{{name}}' + update_verb: 'PATCH' + default_value: AFTER_CLASSIC_FIREWALL + enum_values: + - 'BEFORE_CLASSIC_FIREWALL' + - 'AFTER_CLASSIC_FIREWALL' diff --git a/mmv1/products/compute/go_NetworkAttachment.yaml b/mmv1/products/compute/go_NetworkAttachment.yaml new file mode 100644 index 000000000000..51700c03672c --- /dev/null +++ b/mmv1/products/compute/go_NetworkAttachment.yaml @@ -0,0 +1,206 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'NetworkAttachment' +kind: 'compute#networkAttachment' +description: | + A network attachment is a resource that lets a producer Virtual Private Cloud (VPC) network initiate connections to a consumer VPC network through a Private Service Connect interface. 
+min_version: 'beta' +references: + guides: + 'Official Documentation': 'https://cloud.google.com/vpc/docs/about-network-attachments' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/networkAttachments' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/networkAttachments' +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'network_attachment_basic' + primary_resource_id: 'default' + vars: + resource_name: 'basic-network-attachment' + network_name: 'basic-network' + subnetwork_name: 'basic-subnetwork' + accepted_producer_project_name: 'prj-accepted' + rejected_producer_project_name: 'prj-rejected' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + - name: 'network_attachment_instance_usage' + primary_resource_id: 'default' + vars: + resource_name: 'basic-network-attachment' + network_name: 'basic-network' + subnetwork_name: 'basic-subnetwork' + instance_name: 'basic-instance' +parameters: + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + min_version: 'beta' + required: true + - name: 'region' + type: ResourceRef + description: | + URL of the region where the network attachment resides. This field applies only to the region resource. 
You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. + min_version: 'beta' + required: true + immutable: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'kind' + type: String + description: 'Type of the resource.' + min_version: 'beta' + output: true + - name: 'id' + type: String + description: 'The unique identifier for the resource type. The server generates this identifier.' + min_version: 'beta' + output: true + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + min_version: 'beta' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you create the resource. + min_version: 'beta' + - name: 'selfLink' + type: String + description: 'Server-defined URL for the resource.' + min_version: 'beta' + output: true + - name: 'selfLinkWithId' + type: String + description: | + Server-defined URL for this resource's resource id. + min_version: 'beta' + output: true + - name: 'connectionPreference' + type: Enum + description: | + The connection preference of service attachment. The value can be set to ACCEPT_AUTOMATIC. An ACCEPT_AUTOMATIC service attachment is one that always accepts the connection from consumer forwarding rules. + min_version: 'beta' + required: true + enum_values: + - 'ACCEPT_AUTOMATIC' + - 'ACCEPT_MANUAL' + - 'INVALID' + - name: 'connectionEndpoints' + type: Array + description: | + An array of connections for all the producers connected to this network attachment. + min_version: 'beta' + output: true + item_type: + type: NestedObject + properties: + - name: 'status' + type: String + description: | + The status of a connected endpoint to this network attachment. 
+ min_version: 'beta' + output: true + - name: 'projectIdOrNum' + type: String + description: | + The project id or number of the interface to which the IP was assigned. + min_version: 'beta' + output: true + - name: 'subnetwork' + type: String + description: | + The subnetwork used to assign the IP to the producer instance network interface. + min_version: 'beta' + output: true + - name: 'ipAddress' + type: String + description: | + The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless. + min_version: 'beta' + output: true + - name: 'secondaryIpCidrRanges' + type: String + description: | + Alias IP ranges from the same subnetwork. + min_version: 'beta' + output: true + - name: 'subnetworks' + type: Array + description: | + An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment. + min_version: 'beta' + required: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'subnet' + type: ResourceRef + description: | + A subnet that is provided to set this network attachment. + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'producerRejectLists' + type: Array + description: | + Projects that are not allowed to connect to this network attachment. The project can be specified using its id or number. + min_version: 'beta' + item_type: + type: String + - name: 'producerAcceptLists' + type: Array + description: | + Projects that are allowed to connect to this network attachment. The project can be specified using its id or number. + min_version: 'beta' + item_type: + type: String + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This + field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch. 
+ min_version: 'beta' + output: true + - name: 'network' + type: String + description: | + The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. + Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks. + min_version: 'beta' + output: true diff --git a/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml b/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml new file mode 100644 index 000000000000..26634af91311 --- /dev/null +++ b/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml @@ -0,0 +1,120 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'NetworkEdgeSecurityService' +kind: 'compute#networkEdgeSecurityService' +description: | + Google Cloud Armor network edge security service resource. 
+min_version: 'beta' +references: + guides: + 'Official Documentation': 'https://cloud.google.com/armor/docs/advanced-network-ddos' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/networkEdgeSecurityServices' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/networkEdgeSecurityServices' +self_link: 'projects/{{project}}/regions/{{region}}/networkEdgeSecurityServices/{{name}}' +create_url: 'projects/{{project}}/regions/{{region}}/networkEdgeSecurityServices?networkEdgeSecurityService={{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/regions/{{region}}/networkEdgeSecurityServices/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +skip_sweeper: true +examples: + - name: 'compute_network_edge_security_service_basic' + primary_resource_id: 'default' + vars: + resource_name: 'my-edge-security-service' + test_env_vars: + project_id: 'PROJECT_NAME' +parameters: + - name: 'region' + type: ResourceRef + description: | + The region of the gateway security policy. + min_version: 'beta' + url_param_only: true + required: false + immutable: true + resource: 'Region' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is created. + min_version: 'beta' + required: true + immutable: true + - name: 'description' + type: String + description: | + Free-text description of the resource. + min_version: 'beta' + - name: 'serviceId' + type: String + description: | + The unique identifier for the resource. This identifier is defined by the server. 
+ api_name: id + min_version: 'beta' + output: true + - name: 'creationTimestamp' + type: String + description: | + Creation timestamp in RFC3339 text format. + min_version: 'beta' + output: true + - name: 'selfLink' + type: String + description: | + Server-defined URL for the resource. + min_version: 'beta' + output: true + - name: 'selfLinkWithServiceId' + type: String + description: | + Server-defined URL for this resource with the resource id. + api_name: selfLinkWithId + min_version: 'beta' + output: true + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a NetworkEdgeSecurityService. + An up-to-date fingerprint must be provided in order to update the NetworkEdgeSecurityService, otherwise the request will fail with error 412 conditionNotMet. + min_version: 'beta' + output: true + - name: 'securityPolicy' + type: String + description: | + The resource URL for the network edge security service associated with this network edge security service. + min_version: 'beta' diff --git a/mmv1/products/compute/go_NetworkEndpoint.yaml b/mmv1/products/compute/go_NetworkEndpoint.yaml new file mode 100644 index 000000000000..4ed2e1b8ed11 --- /dev/null +++ b/mmv1/products/compute/go_NetworkEndpoint.yaml @@ -0,0 +1,127 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'NetworkEndpoint' +kind: 'compute#networkEndpoint' +description: | + A Network endpoint represents a IP address and port combination that is + part of a specific network endpoint group (NEG). NEGs are zonal + collections of these endpoints for GCP resources within a + single subnet. **NOTE**: Network endpoints cannot be created outside of a + network endpoint group. + + -> **NOTE** In case the Endpoint's Instance is recreated, it's needed to + perform `apply` twice. To avoid situations like this, please use this resource + with the lifecycle `replace_triggered_by` method, with the passed Instance's ID. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups' +docs: +id_format: '{{project}}/{{zone}}/{{network_endpoint_group}}/{{instance}}/{{ip_address}}/{{port}}' +base_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}' +self_link: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/listNetworkEndpoints' +create_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints' +read_verb: 'POST' +delete_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/detachNetworkEndpoints' +delete_verb: 'POST' +immutable: true +mutex: networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}} +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - instance + - ipAddress + - port 
+nested_query: + keys: + - items + is_list_of_ids: false + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/compute_network_endpoint.go.tmpl' + decoder: 'templates/terraform/decoders/go/unwrap_resource.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/compute_network_endpoint.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/compute_network_endpoint.go.tmpl' +exclude_tgc: true +examples: + - name: 'network_endpoint' + primary_resource_id: 'default-endpoint' + vars: + neg_name: 'my-lb-neg' + instance_name: 'endpoint-instance' + network_name: 'neg-network' + subnetwork_name: 'neg-subnetwork' + skip_test: true +parameters: + - name: 'zone' + type: ResourceRef + description: | + Zone where the containing network endpoint group is located. + url_param_only: true + required: false + ignore_read: true + default_from_api: true + resource: 'Zone' + imports: 'name' + - name: 'networkEndpointGroup' + type: ResourceRef + description: | + The network endpoint group this endpoint is part of. + url_param_only: true + required: true + ignore_read: true + diff_suppress_func: 'tpgresource.CompareResourceNames' + resource: 'NetworkEndpointGroup' + imports: 'name' +properties: + - name: 'instance' + type: ResourceRef + description: | + The name for a specific VM instance that the IP address belongs to. + This is required for network endpoints of type GCE_VM_IP_PORT. + The instance must be in the same zone of network endpoint group. + custom_expand: 'templates/terraform/custom_expand/go/resource_from_self_link.go.tmpl' + resource: 'Instance' + imports: 'name' + - name: 'port' + type: Integer + description: | + Port number of network endpoint. + **Note** `port` is required unless the Network Endpoint Group is created + with the type of `GCE_VM_IP` + custom_flatten: 'templates/terraform/custom_flatten/go/float64_to_int.go.tmpl' + - name: 'ipAddress' + type: String + description: | + IPv4 address of network endpoint. 
The IP address must belong + to a VM in GCE (either the primary IP or as part of an aliased IP + range). + required: true diff --git a/mmv1/products/compute/go_NetworkEndpointGroup.yaml b/mmv1/products/compute/go_NetworkEndpointGroup.yaml new file mode 100644 index 000000000000..433530c951d0 --- /dev/null +++ b/mmv1/products/compute/go_NetworkEndpointGroup.yaml @@ -0,0 +1,148 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'NetworkEndpointGroup' +kind: 'compute#networkEndpointGroup' +description: | + Network endpoint groups (NEGs) are zonal resources that represent + collections of IP address and port combinations for GCP resources within a + single subnet. Each IP address and port combination is called a network + endpoint. + + Network endpoint groups can be used as backends in backend services for + HTTP(S), TCP proxy, and SSL proxy load balancers. You cannot use NEGs as a + backend with internal load balancers. Because NEG backends allow you to + specify IP addresses and ports, you can distribute traffic in a granular + fashion among applications or containers running within VM instances. + + Recreating a network endpoint group that's in use by another resource will give a + `resourceInUseByAnotherResource` error. Use `lifecycle.create_before_destroy` + to avoid this type of error. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups' +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'network_endpoint_group' + primary_resource_id: 'neg' + vars: + neg_name: 'my-lb-neg' + network_name: 'neg-network' + subnetwork_name: 'neg-subnetwork' + - name: 'network_endpoint_group_non_gcp' + primary_resource_id: 'neg' + vars: + neg_name: 'my-lb-neg' + network_name: 'neg-network' +parameters: + - name: 'zone' + type: ResourceRef + description: | + Zone where the network endpoint group is located. + required: false + ignore_read: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + Name of the resource; provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. 
+ required: true + validation: + function: 'verify.ValidateGCEName' + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + - name: 'networkEndpointType' + type: Enum + description: | + Type of network endpoints in this network endpoint group. + NON_GCP_PRIVATE_IP_PORT is used for hybrid connectivity network + endpoint groups (see https://cloud.google.com/load-balancing/docs/hybrid). + Note that NON_GCP_PRIVATE_IP_PORT can only be used with Backend Services + that 1) have the following load balancing schemes: EXTERNAL, EXTERNAL_MANAGED, + INTERNAL_MANAGED, and INTERNAL_SELF_MANAGED and 2) support the RATE or + CONNECTION balancing modes. + + Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_IP_PORT, INTERNET_FQDN_PORT, SERVERLESS, and PRIVATE_SERVICE_CONNECT. + default_value: GCE_VM_IP_PORT + enum_values: + - 'GCE_VM_IP' + - 'GCE_VM_IP_PORT' + - 'NON_GCP_PRIVATE_IP_PORT' + - 'INTERNET_IP_PORT' + - 'INTERNET_FQDN_PORT' + - 'SERVERLESS' + - 'PRIVATE_SERVICE_CONNECT' + - name: 'size' + type: Integer + description: Number of network endpoints in the network endpoint group. + output: true + - name: 'network' + type: ResourceRef + description: | + The network to which all network endpoints in the NEG belong. + Uses "default" project network if unspecified. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'subnetwork' + type: ResourceRef + description: | + Optional subnetwork to which all network endpoints in the NEG belong. 
+ diff_suppress_func: 'tpgresource.CompareOptionalSubnet' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'defaultPort' + type: Integer + description: | + The default port used if the port number is not specified in the + network endpoint. diff --git a/mmv1/products/compute/go_NetworkEndpoints.yaml b/mmv1/products/compute/go_NetworkEndpoints.yaml new file mode 100644 index 000000000000..d8248ee4c88c --- /dev/null +++ b/mmv1/products/compute/go_NetworkEndpoints.yaml @@ -0,0 +1,138 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'NetworkEndpoints' +kind: 'compute#networkEndpoints' +description: | + A set of network endpoints belonging to a network endpoint group (NEG). A + single network endpoint represents a IP address and port combination that is + part of a specific network endpoint group (NEG). NEGs are zonal collections + of these endpoints for GCP resources within a single subnet. **NOTE**: + Network endpoints cannot be created outside of a network endpoint group. + + This resource is authoritative for a single NEG. Any endpoints not specified + by this resource will be deleted when the resource configuration is applied. + + -> **NOTE** In case the Endpoint's Instance is recreated, it's needed to + perform `apply` twice. 
To avoid situations like this, please use this resource + with the lifecycle `replace_triggered_by` method, with the passed Instance's ID. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/networkEndpointGroups' +docs: +id_format: '{{project}}/{{zone}}/{{network_endpoint_group}}/endpoints' +base_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}' +self_link: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/listNetworkEndpoints' +create_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints' +update_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/attachNetworkEndpoints' +update_verb: 'POST' +read_verb: 'POST' +delete_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/detachNetworkEndpoints' +delete_verb: 'POST' +mutex: networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}} +import_format: + - 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - networkEndpointGroup +custom_code: + constants: 'templates/terraform/constants/go/network_endpoints.go.tmpl' + encoder: 'templates/terraform/encoders/go/compute_network_endpoints.go.tmpl' + decoder: 'templates/terraform/decoders/go/network_endpoints.go.tmpl' + pre_create: 'templates/terraform/pre_create/go/network_endpoints.go.tmpl' + pre_update: 
'templates/terraform/pre_update/go/network_endpoints.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/compute_network_endpoints.go.tmpl' +exclude_tgc: true +examples: + - name: 'network_endpoints' + primary_resource_id: 'default-endpoints' + vars: + neg_name: 'my-lb-neg' + instance_name: 'endpoint-instance' + network_name: 'neg-network' + subnetwork_name: 'neg-subnetwork' + skip_test: true +parameters: + - name: 'zone' + type: ResourceRef + description: | + Zone where the containing network endpoint group is located. + url_param_only: true + required: false + ignore_read: true + default_from_api: true + resource: 'Zone' + imports: 'name' + - name: 'networkEndpointGroup' + type: ResourceRef + description: | + The network endpoint group these endpoints are part of. + url_param_only: true + required: true + ignore_read: true + diff_suppress_func: 'tpgresource.CompareResourceNames' + resource: 'NetworkEndpointGroup' + imports: 'name' +properties: + - name: 'networkEndpoints' + type: Array + description: | + The network endpoints to be added to the enclosing network endpoint group + (NEG). Each endpoint specifies an IP address and port, along with + additional information depending on the NEG type. + is_set: true + item_type: + type: NestedObject + properties: + - name: 'instance' + type: ResourceRef + description: | + The name for a specific VM instance that the IP address belongs to. + This is required for network endpoints of type GCE_VM_IP_PORT. + The instance must be in the same zone as the network endpoint group. + custom_expand: 'templates/terraform/custom_expand/go/resource_from_self_link.go.tmpl' + resource: 'Instance' + imports: 'name' + - name: 'port' + type: Integer + description: | + Port number of network endpoint. 
+ **Note** `port` is required unless the Network Endpoint Group is created + with the type of `GCE_VM_IP` + custom_flatten: 'templates/terraform/custom_flatten/go/float64_to_int.go.tmpl' + - name: 'ipAddress' + type: String + description: | + IPv4 address of network endpoint. The IP address must belong + to a VM in GCE (either the primary IP or as part of an aliased IP + range). + required: true diff --git a/mmv1/products/compute/go_NetworkFirewallPolicy.yaml b/mmv1/products/compute/go_NetworkFirewallPolicy.yaml new file mode 100644 index 000000000000..6cb137323ffd --- /dev/null +++ b/mmv1/products/compute/go_NetworkFirewallPolicy.yaml @@ -0,0 +1,81 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'NetworkFirewallPolicy' +description: "The Compute NetworkFirewallPolicy resource" +docs: +base_url: 'projects/{{project}}/global/firewallPolicies' +self_link: 'projects/{{project}}/global/firewallPolicies/{{name}}' +create_url: 'projects/{{project}}/global/firewallPolicies' +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +legacy_long_form_project: true +examples: + - name: 'network_firewall_policy_full' + primary_resource_id: 'policy' + vars: + policy_name: 'tf-test-policy' +parameters: +properties: + - name: 'creationTimestamp' + type: String + description: Creation timestamp in RFC3339 text format. + output: true + - name: 'name' + type: String + description: User-provided name of the Network firewall policy. The name should be unique in the project in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + required: true + immutable: true + - name: 'networkFirewallPolicyId' + type: String + description: The unique identifier for the resource. This identifier is defined by the server. + api_name: id + output: true + - name: 'description' + type: String + description: An optional description of this resource. Provide this property when you create the resource. 
+ - name: 'fingerprint' + type: Fingerprint + description: Fingerprint of the resource. This field is used internally during updates of this resource. + output: true + - name: 'selfLink' + type: String + description: Server-defined URL for the resource. + output: true + - name: 'selfLinkWithId' + type: String + description: Server-defined URL for this resource with the resource id. + output: true + - name: 'ruleTupleCount' + type: Integer + description: Total count of all firewall policy rule tuples. A firewall policy can not exceed a set number of tuples. + output: true diff --git a/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml new file mode 100644 index 000000000000..b1e00931aeb0 --- /dev/null +++ b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml @@ -0,0 +1,109 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'NetworkPeeringRoutesConfig' +description: | + Manage a network peering's route settings without managing the peering as + a whole. This resource is primarily intended for use with GCP-generated + peerings that shouldn't otherwise be managed by other tools. Deleting this + resource is a no-op and the peering will not be modified. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/vpc/docs/vpc-peering' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/networks/updatePeering' +docs: +id_format: 'projects/{{project}}/global/networks/{{network}}/networkPeerings/{{peering}}' +base_url: 'projects/{{project}}/global/networks/{{network}}' +self_link: 'projects/{{project}}/global/networks/{{network}}' +create_url: 'projects/{{project}}/global/networks/{{network}}/updatePeering' +create_verb: 'PATCH' +update_url: 'projects/{{project}}/global/networks/{{network}}/updatePeering' +update_verb: 'PATCH' +skip_delete: true +mutex: projects/{{project}}/global/networks/{{network}}/peerings +import_format: + - 'projects/{{project}}/global/networks/{{network}}/networkPeerings/{{peering}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - peering +nested_query: + keys: + - peerings + is_list_of_ids: false + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/network_peering_routes_config.go.tmpl' +examples: + - name: 'network_peering_routes_config_basic' + primary_resource_id: 'peering_primary_routes' + vars: + peering_primary_name: 'primary-peering' + peering_secondary_name: 'secondary-peering' + network_primary_name: 'primary-network' + network_secondary_name: 'secondary-network' + - name: 'network_peering_routes_config_gke' + primary_resource_id: 'peering_gke_routes' + vars: + network_name: 'container-network' + subnetwork_name: 'container-subnetwork' + gke_cluster_name: 'private-cluster' + deletion_protection: 'true' + test_vars_overrides: + 'deletion_protection': 'false' +parameters: + - name: 'network' + type: ResourceRef + 
description: | + The name of the primary network for the peering. + url_param_only: true + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + resource: 'Network' + imports: 'name' +properties: + - name: 'peering' + type: String + description: | + Name of the peering. + api_name: name + required: true + - name: 'exportCustomRoutes' + type: Boolean + description: | + Whether to export the custom routes to the peer network. + required: true + send_empty_value: true + - name: 'importCustomRoutes' + type: Boolean + description: | + Whether to import the custom routes to the peer network. + required: true + send_empty_value: true diff --git a/mmv1/products/compute/go_NodeGroup.yaml b/mmv1/products/compute/go_NodeGroup.yaml new file mode 100644 index 000000000000..3e46390a3e91 --- /dev/null +++ b/mmv1/products/compute/go_NodeGroup.yaml @@ -0,0 +1,213 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'NodeGroup' +kind: 'compute#NodeGroup' +description: | + Represents a NodeGroup resource to manage a group of sole-tenant nodes. 
+references: + guides: + 'Sole-Tenant Nodes': 'https://cloud.google.com/compute/docs/nodes/' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups' +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/nodeGroups' +has_self_link: true +create_url: 'projects/{{project}}/zones/{{zone}}/nodeGroups?initialNodeCount=PRE_CREATE_REPLACE_ME' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + pre_create: 'templates/terraform/pre_create/go/compute_node_group_url_replace.go.tmpl' +examples: + - name: 'node_group_basic' + primary_resource_id: 'nodes' + vars: + group_name: 'soletenant-group' + template_name: 'soletenant-tmpl' + - name: 'node_group_maintenance_interval' + primary_resource_id: 'nodes' + min_version: 'beta' + vars: + group_name: 'soletenant-group' + template_name: 'soletenant-tmpl' + - name: 'node_group_autoscaling_policy' + primary_resource_id: 'nodes' + vars: + group_name: 'soletenant-group' + template_name: 'soletenant-tmpl' + - name: 'node_group_share_settings' + primary_resource_id: 'nodes' + vars: + group_name: 'soletenant-group' + template_name: 'soletenant-tmpl' + guest_project_id: 'project-id' + guest_project_name: 'project-name' + test_env_vars: + org_id: 'ORG_ID' +parameters: + - name: 'zone' + type: ResourceRef + description: | + Zone where this node group is located + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'selfLink' +properties: + - 
name: 'creationTimestamp' + type: Time + description: | + Creation timestamp in RFC3339 text format. + output: true + - name: 'description' + type: String + description: | + An optional textual description of the resource. + - name: 'name' + type: String + description: | + Name of the resource. + - name: 'nodeTemplate' + type: ResourceRef + description: | + The URL of the node template to which this node group belongs. + required: true + update_url: 'projects/{{project}}/zones/{{zone}}/nodeGroups/{{name}}/setNodeTemplate' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'NodeTemplate' + imports: 'selfLink' + - name: 'size' + type: Integer + description: | + The total number of nodes in the node group. + output: true + - name: 'initialSize' + type: Integer + description: | + The initial number of nodes in the node group. One of `initial_size` or `autoscaling_policy` must be configured on resource creation. + url_param_only: true + - name: 'maintenancePolicy' + type: String + description: | + Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT. + default_value: DEFAULT + - name: 'maintenanceWindow' + type: NestedObject + description: | + contains properties for the timeframe of maintenance + properties: + - name: 'startTime' + type: String + description: | + instances.start time of the window. This must be in UTC format that resolves to one of 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, both 13:00-5 and 08:00 are valid. + required: true + - name: 'autoscalingPolicy' + type: NestedObject + description: | + If you use sole-tenant nodes for your workloads, you can use the node + group autoscaler to automatically manage the sizes of your node groups. + + One of `initial_size` or `autoscaling_policy` must be configured on resource creation. 
+ default_from_api: true + properties: + - name: 'mode' + type: Enum + description: | + The autoscaling mode. Set to one of the following: + - OFF: Disables the autoscaler. + - ON: Enables scaling in and scaling out. + - ONLY_SCALE_OUT: Enables only scaling out. + You must use this mode if your node groups are configured to + restart their hosted VMs on minimal servers. + required: true + default_from_api: true + enum_values: + - 'OFF' + - 'ON' + - 'ONLY_SCALE_OUT' + - name: 'minNodes' + type: Integer + description: | + Minimum size of the node group. Must be less + than or equal to max-nodes. The default value is 0. + default_from_api: true + - name: 'maxNodes' + type: Integer + description: | + Maximum size of the node group. Set to a value less than or equal + to 100 and greater than or equal to min-nodes. + required: true + default_from_api: true + - name: 'shareSettings' + type: NestedObject + description: | + Share settings for the node group. + default_from_api: true + properties: + - name: 'shareType' + type: Enum + description: | + Node group sharing type. + required: true + enum_values: + - 'ORGANIZATION' + - 'SPECIFIC_PROJECTS' + - 'LOCAL' + - name: 'projectMap' + type: Map + description: | + A map of project id and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. + key_name: 'id' + key_description: | + The project ID. + value_type: + type: NestedObject + properties: + - name: 'projectId' + type: String + description: | + The project id/number should be the same as the key of this project config in the project map. + required: true + - name: 'maintenanceInterval' + type: Enum + description: | + Specifies the frequency of planned maintenance events. Set to one of the following: + - AS_NEEDED: Hosts are eligible to receive infrastructure and hypervisor updates as they become available. + - RECURRENT: Hosts receive planned infrastructure and hypervisor updates on a periodic basis, but not more frequently than every 28 days. 
This minimizes the number of planned maintenance operations on individual hosts and reduces the frequency of disruptions, both live migrations and terminations, on individual VMs. + min_version: 'beta' + default_from_api: true + enum_values: + - 'AS_NEEDED' + - 'RECURRENT' diff --git a/mmv1/products/compute/go_NodeTemplate.yaml b/mmv1/products/compute/go_NodeTemplate.yaml new file mode 100644 index 000000000000..d14bff5502ae --- /dev/null +++ b/mmv1/products/compute/go_NodeTemplate.yaml @@ -0,0 +1,155 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'NodeTemplate' +kind: 'compute#nodeTemplate' +description: | + Represents a NodeTemplate resource. Node templates specify properties + for creating sole-tenant nodes, such as node type, vCPU and memory + requirements, node affinity labels, and region. 
+references: + guides: + 'Sole-Tenant Nodes': 'https://cloud.google.com/compute/docs/nodes/' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/nodeTemplates' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/nodeTemplates' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'node_template_basic' + primary_resource_id: 'template' + vars: + template_name: 'soletenant-tmpl' + - name: 'node_template_server_binding' + primary_resource_id: 'template' + vars: + template_name: 'soletenant-with-licenses' +parameters: + - name: 'region' + type: ResourceRef + description: | + Region where nodes using the node template will be created. + If it is not provided, the provider region is used. + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'selfLink' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional textual description of the resource.' + - name: 'name' + type: String + description: 'Name of the resource.' + - name: 'nodeAffinityLabels' + type: KeyValuePairs + description: | + Labels to use for node affinity, which will be used in + instance scheduling. + - name: 'nodeType' + type: String + description: | + Node type to use for nodes group that are created from this template. 
+ Only one of nodeTypeFlexibility and nodeType can be specified. + conflicts: + - node_type_flexibility + - name: 'nodeTypeFlexibility' + type: NestedObject + description: | + Flexible properties for the desired node type. Node groups that + use this node template will create nodes of a type that matches + these properties. Only one of nodeTypeFlexibility and nodeType can + be specified. + conflicts: + - node_type + properties: + - name: 'cpus' + type: String + description: | + Number of virtual CPUs to use. + at_least_one_of: + - 'node_type_flexibility.0.cpus' + - 'node_type_flexibility.0.memory' + - name: 'memory' + type: String + description: | + Physical memory available to the node, defined in MB. + at_least_one_of: + - 'node_type_flexibility.0.cpus' + - 'node_type_flexibility.0.memory' + - name: 'localSsd' + type: String + description: | + Use local SSD + output: true + - name: 'serverBinding' + type: NestedObject + description: | + The server binding policy for nodes using this template. Determines + where the nodes should restart following a maintenance event. + default_from_api: true + properties: + - name: 'type' + type: Enum + description: | + Type of server binding policy. If `RESTART_NODE_ON_ANY_SERVER`, + nodes using this template will restart on any physical server + following a maintenance event. + + If `RESTART_NODE_ON_MINIMAL_SERVER`, nodes using this template + will restart on the same physical server following a maintenance + event, instead of being live migrated to or restarted on a new + physical server. This option may be useful if you are using + software licenses tied to the underlying server characteristics + such as physical sockets or cores, to avoid the need for + additional licenses when maintenance occurs. However, VMs on such + nodes will experience outages while maintenance is applied. 
+ required: true + enum_values: + - 'RESTART_NODE_ON_ANY_SERVER' + - 'RESTART_NODE_ON_MINIMAL_SERVERS' + - name: 'cpuOvercommitType' + type: Enum + description: | + CPU overcommit. + default_value: NONE + enum_values: + - 'ENABLED' + - 'NONE' diff --git a/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml b/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml new file mode 100644 index 000000000000..7596cff9d73a --- /dev/null +++ b/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml @@ -0,0 +1,92 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationSecurityPolicy' +description: | + Organization security policies are used to control incoming/outgoing traffic. 
+min_version: 'beta' +references: + guides: + 'Creating a firewall policy': 'https://cloud.google.com/vpc/docs/using-firewall-policies#create-policy' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies' +docs: +id_format: 'locations/global/securityPolicies/{{policy_id}}' +base_url: 'locations/global/securityPolicies?parentId={{parent}}' +self_link: 'locations/global/securityPolicies/{{policy_id}}' +create_url: 'locations/global/securityPolicies?parentId={{parent}}' +update_verb: 'PATCH' +import_format: + - 'locations/global/securityPolicies/{{policy_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_create: 'templates/terraform/post_create/go/org_security_policy.go.tmpl' + post_update: 'templates/terraform/post_update/go/org_security_policy.go.tmpl' + test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl' +examples: + - name: 'organization_security_policy_basic' + primary_resource_id: 'policy' + min_version: 'beta' + test_env_vars: + org_id: 'ORG_ID' +parameters: + - name: 'parent' + type: String + description: | + The parent of this OrganizationSecurityPolicy in the Cloud Resource Hierarchy. + Format: organizations/{organization_id} or folders/{folder_id} + min_version: 'beta' + required: true + immutable: true +properties: + - name: 'displayName' + type: String + description: | + A textual name of the security policy. + min_version: 'beta' + required: true + immutable: true + - name: 'description' + type: String + description: | + A textual description for the organization security policy. + min_version: 'beta' + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. This field is used internally during + updates of this resource. + min_version: 'beta' + output: true + - name: 'policy_id' + type: String + description: | + The unique identifier for the resource. 
This identifier is defined by the server. + api_name: id + min_version: 'beta' + output: true + - name: 'type' + type: Enum + description: | + The type indicates the intended use of the security policy. + For organization security policies, the only supported type + is "FIREWALL". + min_version: 'beta' + immutable: true + default_value: FIREWALL + enum_values: + - 'FIREWALL' diff --git a/mmv1/products/compute/go_OrganizationSecurityPolicyAssociation.yaml b/mmv1/products/compute/go_OrganizationSecurityPolicyAssociation.yaml new file mode 100644 index 000000000000..60f6628b6ba4 --- /dev/null +++ b/mmv1/products/compute/go_OrganizationSecurityPolicyAssociation.yaml @@ -0,0 +1,75 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationSecurityPolicyAssociation' +description: | + An association for the OrganizationSecurityPolicy. 
+min_version: 'beta' +references: + guides: + 'Associating a policy with the organization or folder': 'https://cloud.google.com/vpc/docs/using-firewall-policies#associate' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies/addAssociation' +docs: +id_format: '{{policy_id}}/association/{{name}}' +base_url: '{{policy_id}}' +self_link: '{{policy_id}}/getAssociation?name={{name}}' +create_url: '{{policy_id}}/addAssociation' +delete_url: '{{policy_id}}/removeAssociation?name={{name}}' +delete_verb: 'POST' +immutable: true +import_format: + - '{{%policy_id}}/association/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + post_create: 'templates/terraform/post_create/go/org_security_policy_association.go.tmpl' + test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl' +read_error_transform: 'transformSecurityPolicyAssociationReadError' +examples: + - name: 'organization_security_policy_association_basic' + primary_resource_id: 'policy' + min_version: 'beta' + test_env_vars: + org_id: 'ORG_ID' +parameters: + - name: 'policyId' + type: String + description: | + The security policy ID of the association. + api_name: securityPolicyId + min_version: 'beta' + url_param_only: true + required: true +properties: + - name: 'name' + type: String + description: | + The name for an association. + min_version: 'beta' + required: true + - name: 'attachmentId' + type: String + description: | + The resource that the security policy is attached to. + min_version: 'beta' + required: true + - name: 'displayName' + type: String + description: | + The display name of the security policy of the association. 
+ min_version: 'beta' + output: true diff --git a/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml b/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml new file mode 100644 index 000000000000..39a78b1cd8e6 --- /dev/null +++ b/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml @@ -0,0 +1,201 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationSecurityPolicyRule' +description: | + A rule for the OrganizationSecurityPolicy. 
+min_version: 'beta'
+references:
+  guides:
+    'Creating firewall rules': 'https://cloud.google.com/vpc/docs/using-firewall-policies#create-rules'
+  api: 'https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule'
+docs:
+id_format: '{{policy_id}}/priority/{{priority}}'
+base_url: '{{policy_id}}'
+self_link: '{{policy_id}}/getRule?priority={{priority}}'
+create_url: '{{policy_id}}/addRule?priority={{priority}}'
+update_url: '{{policy_id}}/patchRule?priority={{priority}}'
+update_verb: 'POST'
+delete_url: '{{policy_id}}/removeRule?priority={{priority}}'
+delete_verb: 'POST'
+import_format:
+  - '{{%policy_id}}/priority/{{priority}}'
+timeouts:
+  insert_minutes: 20
+  update_minutes: 20
+  delete_minutes: 20
+custom_code:
+  post_create: 'templates/terraform/post_create/go/org_security_policy_rule.go.tmpl'
+  post_update: 'templates/terraform/post_create/go/org_security_policy_rule.go.tmpl'
+  test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl'
+examples:
+  - name: 'organization_security_policy_rule_basic'
+    primary_resource_id: 'policy'
+    min_version: 'beta'
+    test_env_vars:
+      org_id: 'ORG_ID'
+parameters:
+  - name: 'policyId'
+    type: String
+    description: |
+      The ID of the OrganizationSecurityPolicy this rule applies to.
+    min_version: 'beta'
+    url_param_only: true
+    required: true
+    immutable: true
+properties:
+  - name: 'description'
+    type: String
+    description: |
+      A description of the rule.
+    min_version: 'beta'
+  - name: 'priority'
+    type: Integer
+    description: |
+      An integer indicating the priority of a rule in the list. The priority must be a value
+      between 0 and 2147483647. Rules are evaluated from highest to lowest priority where 0 is the
+      highest priority and 2147483647 is the lowest priority.
+    min_version: 'beta'
+    required: true
+    immutable: true
+  - name: 'match'
+    type: NestedObject
+    description:
+      A match condition that incoming traffic is evaluated against. 
If it + evaluates to true, the corresponding 'action' is enforced. + min_version: 'beta' + required: true + properties: + - name: 'description' + type: String + description: | + A description of the rule. + min_version: 'beta' + - name: 'versionedExpr' + type: Enum + description: | + Preconfigured versioned expression. For organization security policy rules, + the only supported type is "FIREWALL". + min_version: 'beta' + default_value: FIREWALL + enum_values: + - 'FIREWALL' + - name: 'config' + type: NestedObject + description: The configuration options for matching the rule. + min_version: 'beta' + required: true + properties: + - name: 'srcIpRanges' + type: Array + description: | + Source IP address range in CIDR format. Required for + INGRESS rules. + min_version: 'beta' + exactly_one_of: + - 'match.0.config.0.src_ip_ranges' + - 'match.0.config.0.dest_ip_ranges' + item_type: + type: String + - name: 'destIpRanges' + type: Array + description: | + Destination IP address range in CIDR format. Required for + EGRESS rules. + min_version: 'beta' + exactly_one_of: + - 'match.0.config.0.src_ip_ranges' + - 'match.0.config.0.dest_ip_ranges' + item_type: + type: String + - name: 'layer4Config' + type: Array + description: | + Pairs of IP protocols and ports that the rule should match. + api_name: layer4Configs + min_version: 'beta' + required: true + item_type: + type: NestedObject + properties: + - name: 'ipProtocol' + type: String + description: | + The IP protocol to which this rule applies. The protocol + type is required when creating a firewall rule. + This value can either be one of the following well + known protocol strings (tcp, udp, icmp, esp, ah, ipip, sctp), + or the IP protocol number. + min_version: 'beta' + required: true + - name: 'ports' + type: Array + description: | + An optional list of ports to which this rule applies. This field + is only applicable for UDP or TCP protocol. Each entry must be + either an integer or a range. 
If not specified, this rule + applies to connections through any port. + + Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. + min_version: 'beta' + item_type: + type: String + - name: 'action' + type: String + description: | + The Action to perform when the client connection triggers the rule. Can currently be either + "allow", "deny" or "goto_next". + min_version: 'beta' + required: true + - name: 'preview' + type: Boolean + description: | + If set to true, the specified action is not enforced. + min_version: 'beta' + - name: 'direction' + type: Enum + description: | + The direction in which this rule applies. If unspecified an INGRESS rule is created. + min_version: 'beta' + enum_values: + - 'INGRESS' + - 'EGRESS' + - name: 'targetResources' + type: Array + description: | + A list of network resource URLs to which this rule applies. + This field allows you to control which network's VMs get + this rule. If this field is left blank, all VMs + within the organization will receive the rule. + min_version: 'beta' + item_type: + type: String + - name: 'enableLogging' + type: Boolean + description: | + Denotes whether to enable logging for a particular rule. + If logging is enabled, logs will be exported to the + configured export destination in Stackdriver. + min_version: 'beta' + send_empty_value: true + - name: 'targetServiceAccounts' + type: Array + description: | + A list of service accounts indicating the sets of + instances that are applied with this rule. + min_version: 'beta' + item_type: + type: String diff --git a/mmv1/products/compute/go_PacketMirroring.yaml b/mmv1/products/compute/go_PacketMirroring.yaml new file mode 100644 index 000000000000..04554d993989 --- /dev/null +++ b/mmv1/products/compute/go_PacketMirroring.yaml @@ -0,0 +1,202 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'PacketMirroring' +description: | + Packet Mirroring mirrors traffic to and from particular VM instances. + You can use the collected traffic to help you detect security threats + and monitor application performance. +references: + guides: + 'Using Packet Mirroring': 'https://cloud.google.com/vpc/docs/using-packet-mirroring#creating' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/packetMirrorings' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/packetMirrorings' +self_link: 'projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}' +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +examples: + - name: 'compute_packet_mirroring_full' + primary_resource_id: 'foobar' + vars: + instance_name: 'my-instance' + subnetwork_name: 'my-subnetwork' + service_name: 'my-service' + hc_name: 'my-healthcheck' + mirroring_name: 'my-mirroring' + ilb_rule_name: 'my-ilb' + network_name: 'my-network' +parameters: +properties: + - name: 'name' + type: String + description: The name of the packet mirroring rule + required: true + validation: + function: 'verify.ValidateGCEName' + - name: 'description' + type: String 
+ description: A human-readable description of the rule. + immutable: true + - name: 'region' + type: String + description: | + The Region in which the created address should reside. + If it is not provided, the provider region is used. + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'network' + type: NestedObject + description: | + Specifies the mirrored VPC network. Only packets in this network + will be mirrored. All mirrored VMs should have a NIC in the given + network. All mirrored subnetworks should belong to the given network. + required: true + immutable: true + properties: + - name: 'url' + type: ResourceRef + description: + The full self_link URL of the network where this rule is active. + required: true + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'priority' + type: Integer + description: | + Since only one rule can be active at a time, priority is + used to break ties in the case of two rules that apply to + the same instances. + required: false + default_from_api: true + - name: 'collectorIlb' + type: NestedObject + description: | + The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL) + that will be used as collector for mirrored traffic. The + specified forwarding rule must have is_mirroring_collector + set to true. + required: true + properties: + - name: 'url' + type: ResourceRef + description: The URL of the forwarding rule. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'ForwardingRule' + imports: 'selfLink' + - name: 'filter' + type: NestedObject + description: | + A filter for mirrored traffic. If unset, all traffic is mirrored. 
+ properties: + - name: 'ipProtocols' + type: Array + description: Possible IP protocols including tcp, udp, icmp and esp + api_name: IPProtocols + item_type: + type: String + - name: 'cidrRanges' + type: Array + description: | + IP CIDR ranges that apply as a filter on the source (ingress) or + destination (egress) IP in the IP header. Only IPv4 is supported. + item_type: + type: String + - name: 'direction' + type: Enum + description: Direction of traffic to mirror. + default_value: BOTH + enum_values: + - 'INGRESS' + - 'EGRESS' + - 'BOTH' + - name: 'mirroredResources' + type: NestedObject + description: | + A means of specifying which resources to mirror. + required: true + properties: + - name: 'subnetworks' + type: Array + description: | + All instances in one of these subnetworks will be mirrored. + at_least_one_of: + - 'mirrored_resources.0.subnetworks' + - 'mirrored_resources.0.instances' + - 'mirrored_resources.0.tags' + item_type: + description: + The subnetworks that should be mirrored. Specify at most 5. + type: NestedObject + properties: + - name: 'url' + type: ResourceRef + description: + The URL of the subnetwork where this rule should be active. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'instances' + type: Array + description: | + All the listed instances will be mirrored. Specify at most 50. + at_least_one_of: + - 'mirrored_resources.0.subnetworks' + - 'mirrored_resources.0.instances' + - 'mirrored_resources.0.tags' + item_type: + description: The instances that should be mirrored. + type: NestedObject + properties: + - name: 'url' + type: ResourceRef + description: + The URL of the instances where this rule should be active. 
+ required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Instance' + imports: 'selfLink' + - name: 'tags' + type: Array + description: | + All instances with these tags will be mirrored. + at_least_one_of: + - 'mirrored_resources.0.subnetworks' + - 'mirrored_resources.0.instances' + - 'mirrored_resources.0.tags' + item_type: + type: String diff --git a/mmv1/products/compute/go_PerInstanceConfig.yaml b/mmv1/products/compute/go_PerInstanceConfig.yaml new file mode 100644 index 000000000000..4db8981e8176 --- /dev/null +++ b/mmv1/products/compute/go_PerInstanceConfig.yaml @@ -0,0 +1,242 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'PerInstanceConfig' +description: | + A config defined for a single managed instance that belongs to an instance group manager. It preserves the instance name + across instance group manager operations and can define stateful disks or metadata that are unique to the instance. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/instance-groups/stateful-migs#per-instance_configs' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers' +docs: +id_format: '{{project}}/{{zone}}/{{instance_group_manager}}/{{name}}' +base_url: 'projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}' +self_link: 'projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/listPerInstanceConfigs' +create_url: 'projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/createInstances' +update_url: 'projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/updatePerInstanceConfigs' +update_verb: 'POST' +read_verb: 'POST' +delete_url: 'projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/deletePerInstanceConfigs' +delete_verb: 'POST' +mutex: instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}} +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - name +nested_query: + keys: + - items + is_list_of_ids: false + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/compute_per_instance_config.go.tmpl' + update_encoder: 'templates/terraform/update_encoder/go/compute_per_instance_config.go.tmpl' + post_update: 'templates/terraform/post_update/go/compute_per_instance_config.go.tmpl' + custom_delete: 'templates/terraform/custom_delete/go/per_instance_config.go.tmpl' +exclude_tgc: true +examples: + - name: 'stateful_igm' + primary_resource_id: 'stateful-instance' + vars: + template_name: 'my-template' + 
igm_name: 'my-igm'
+      disk_name: 'my-disk-name'
+    skip_test: true
+virtual_fields:
+  - name: 'minimal_action'
+    description: |
+      The minimal action to perform on the instance during an update.
+      Default is `NONE`. Possible values are:
+      * REPLACE
+      * RESTART
+      * REFRESH
+      * NONE
+    type: Enum
+    default_value: "NONE"
+  - name: 'most_disruptive_allowed_action'
+    description: |
+      The most disruptive action to perform on the instance during an update.
+      Default is `REPLACE`. Possible values are:
+      * REPLACE
+      * RESTART
+      * REFRESH
+      * NONE
+    type: Enum
+    default_value: "REPLACE"
+  - name: 'remove_instance_on_destroy'
+    description: |
+      When true, deleting this config will immediately remove the underlying instance.
+      When false, deleting this config will use the behavior as determined by remove_instance_state_on_destroy.
+    type: Boolean
+    default_value: false
+  - name: 'remove_instance_state_on_destroy'
+    description: |
+      When true, deleting this config will immediately remove any specified state from the underlying instance.
+      When false, deleting this config will *not* immediately remove any state from the underlying instance.
+      State will be removed on the next instance recreation or update.
+    type: Boolean
+    default_value: false
+parameters:
+  - name: 'zone'
+    type: ResourceRef
+    description: |
+      Zone where the containing instance group manager is located
+    url_param_only: true
+    required: false
+    immutable: true
+    ignore_read: true
+    default_from_api: true
+    resource: 'Zone'
+    imports: 'name'
+  - name: 'instanceGroupManager'
+    type: ResourceRef
+    description: |
+      The instance group manager this instance config is part of.
+    url_param_only: true
+    required: true
+    immutable: true
+    resource: 'InstanceGroupManager'
+    imports: 'name'
+properties:
+  - name: 'name'
+    type: String
+    description: |
+      The name for this per-instance config and its corresponding instance.
+ required: true + immutable: true + - name: 'preservedState' + type: NestedObject + description: 'The preserved state for this instance.' + properties: + - name: 'metadata' + type: KeyValuePairs + description: | + Preserved metadata defined for this instance. This is a list of key->value pairs. + - name: 'disk' + type: Array + description: | + Stateful disks for the instance. + api_name: disks + is_set: true + custom_flatten: 'templates/terraform/custom_flatten/go/preserved_state_disks.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/preserved_state_disks.go.tmpl' + item_type: + type: NestedObject + properties: + - name: 'deviceName' + type: String + description: | + A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. + required: true + - name: 'source' + type: String + description: | + The URI of an existing persistent disk to attach under the specified device-name in the format + `projects/project-id/zones/zone/disks/disk-name`. + required: true + - name: 'mode' + type: Enum + description: | + The mode of the disk. + default_value: READ_WRITE + enum_values: + - 'READ_ONLY' + - 'READ_WRITE' + - name: 'deleteRule' + type: Enum + description: | + A value that prescribes what should happen to the stateful disk when the VM instance is deleted. + The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. + `NEVER` - detach the disk when the VM is deleted, but do not delete the disk. + `ON_PERMANENT_INSTANCE_DELETION` will delete the stateful disk when the VM is permanently + deleted from the instance group. + default_value: NEVER + enum_values: + - 'NEVER' + - 'ON_PERMANENT_INSTANCE_DELETION' + - name: 'internalIp' + type: Map + description: | + Preserved internal IPs defined for this instance. This map is keyed with the name of the network interface. 
+ api_name: internalIPs + key_name: 'interface_name' + value_type: + type: NestedObject + properties: + - name: 'autoDelete' + type: Enum + description: | + These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. + default_value: NEVER + enum_values: + - 'NEVER' + - 'ON_PERMANENT_INSTANCE_DELETION' + - name: 'ipAddress' + type: NestedObject + description: | + Ip address representation + properties: + - name: 'address' + type: ResourceRef + description: | + The URL of the reservation for this IP address. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Address' + imports: 'selfLink' + - name: 'externalIp' + type: Map + description: | + Preserved external IPs defined for this instance. This map is keyed with the name of the network interface. + api_name: externalIPs + key_name: 'interface_name' + value_type: + type: NestedObject + properties: + - name: 'autoDelete' + type: Enum + description: | + These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. + default_value: NEVER + enum_values: + - 'NEVER' + - 'ON_PERMANENT_INSTANCE_DELETION' + - name: 'ipAddress' + type: NestedObject + description: | + Ip address representation + properties: + - name: 'address' + type: ResourceRef + description: | + The URL of the reservation for this IP address. 
+ custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Address' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_ProjectCloudArmorTier.yaml b/mmv1/products/compute/go_ProjectCloudArmorTier.yaml new file mode 100644 index 000000000000..049a8a4ffbe2 --- /dev/null +++ b/mmv1/products/compute/go_ProjectCloudArmorTier.yaml @@ -0,0 +1,73 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ProjectCloudArmorTier' +description: | + Sets the Cloud Armor tier of the project. 
+references: + guides: + 'Subscribing to Cloud Armor Enterprise': 'https://cloud.google.com/armor/docs/managed-protection-overview#subscribing_to_plus' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/projects/setCloudArmorTier' +docs: +id_format: 'projects/{{project}}' +base_url: 'projects/{{project}}' +create_url: 'projects/{{project}}/setCloudArmorTier' +update_url: 'projects/{{project}}/setCloudArmorTier' +update_verb: 'POST' + +read_query_params: '?fields=cloudArmorTier' +import_format: + - 'projects/{{project}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: + custom_delete: 'templates/terraform/custom_delete/go/only_remove_from_state.go.tmpl' +examples: + - name: 'compute_project_cloud_armor_tier_basic' + primary_resource_id: 'cloud_armor_tier_config' + skip_test: true + - name: 'compute_project_cloud_armor_tier_project_set' + primary_resource_id: 'cloud_armor_tier_config' + vars: + project_id: 'your_project_id' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_test: true +parameters: +properties: + - name: 'cloudArmorTier' + type: Enum + description: | + Managed protection tier to be set. + required: true + enum_values: + - 'CA_STANDARD' + - 'CA_ENTERPRISE_PAYGO' diff --git a/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml b/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml new file mode 100644 index 000000000000..309ca0401355 --- /dev/null +++ b/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml @@ -0,0 +1,77 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'PublicAdvertisedPrefix' +description: | + Represents a PublicAdvertisedPrefix for use with bring your own IP addresses (BYOIP). +references: + guides: + 'Using bring your own IP': 'https://cloud.google.com/vpc/docs/using-bring-your-own-ip' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/publicAdvertisedPrefixes' +docs: +base_url: 'projects/{{project}}/global/publicAdvertisedPrefixes' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +examples: + - name: 'public_advertised_prefixes_basic' + primary_resource_id: 'prefixes' + vars: + prefixes_name: 'my-prefix' + test_env_vars: + desc: 'PAP_DESCRIPTION' + skip_test: true +parameters: +properties: + - name: 'description' + type: String + description: An optional description of this resource. + - name: 'name' + type: String + description: | + Name of the resource. The name must be 1-63 characters long, and + comply with RFC1035. 
Specifically, the name must be 1-63 characters + long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + - name: 'dnsVerificationIp' + type: String + description: The IPv4 address to be used for reverse DNS verification. + required: true + - name: 'ipCidrRange' + type: String + description: + The IPv4 address range, in CIDR format, represented by this public + advertised prefix. + required: true diff --git a/mmv1/products/compute/go_PublicDelegatedPrefix.yaml b/mmv1/products/compute/go_PublicDelegatedPrefix.yaml new file mode 100644 index 000000000000..b9f6c6929e4b --- /dev/null +++ b/mmv1/products/compute/go_PublicDelegatedPrefix.yaml @@ -0,0 +1,88 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'PublicDelegatedPrefix' +description: | + Represents a PublicDelegatedPrefix for use with bring your own IP addresses (BYOIP). 
+references: + guides: + 'Using bring your own IP': 'https://cloud.google.com/vpc/docs/using-bring-your-own-ip' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/publicDelegatedPrefixes' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/publicDelegatedPrefixes' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +examples: + - name: 'public_delegated_prefixes_basic' + primary_resource_id: 'prefixes' + vars: + prefixes_name: 'my-prefix' + test_env_vars: + desc: 'PAP_DESCRIPTION' + skip_test: true +parameters: +properties: + - name: 'region' + type: String + description: 'A region where the prefix will reside.' + url_param_only: true + required: true + - name: 'description' + type: String + description: An optional description of this resource. + - name: 'isLiveMigration' + type: Boolean + description: If true, the prefix will be live migrated. + - name: 'name' + type: String + description: | + Name of the resource. The name must be 1-63 characters long, and + comply with RFC1035. Specifically, the name must be 1-63 characters + long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + - name: 'parentPrefix' + type: String + description: + The URL of parent prefix. Either PublicAdvertisedPrefix or + PublicDelegatedPrefix. 
+    required: true
+    diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName'
+  - name: 'ipCidrRange'
+    type: String
+    description:
+      The IPv4 address range, in CIDR format, represented by this public
+      delegated prefix.
+    required: true
diff --git a/mmv1/products/compute/go_Region.yaml b/mmv1/products/compute/go_Region.yaml
new file mode 100644
index 000000000000..da81ccb93b8c
--- /dev/null
+++ b/mmv1/products/compute/go_Region.yaml
@@ -0,0 +1,132 @@
+# Copyright 2024 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Warning: This is a temporary file, and should not be edited directly
+---
+name: 'Region'
+kind: 'compute#region'
+description: |
+  Represents a Region resource. A region is a specific geographical
+  location where you can run your resources. Each region has one or more
+  zones
+# Used as a resource reference
+exclude: true
+readonly: true
+docs:
+base_url: 'projects/{{project}}/regions'
+has_self_link: true
+timeouts:
+  insert_minutes: 20
+  update_minutes: 20
+  delete_minutes: 20
+collection_url_key: 'items'
+custom_code:
+parameters:
+properties:
+  - name: 'creationTimestamp'
+    type: Time
+    description: 'Creation timestamp in RFC3339 text format.'
+    output: true
+  - name: 'deprecated'
+    type: NestedObject
+    description: 'The deprecation state of this resource.'
+ output: true + properties: + - name: 'deleted' + type: Time + description: | + An optional RFC3339 timestamp on or after which the deprecation state + of this resource will be changed to DELETED. + - name: 'deprecated' + type: Time + description: | + An optional RFC3339 timestamp on or after which the deprecation state + of this resource will be changed to DEPRECATED. + output: true + - name: 'obsolete' + type: Time + description: | + An optional RFC3339 timestamp on or after which the deprecation state + of this resource will be changed to OBSOLETE. + output: true + - name: 'replacement' + type: String + description: | + The URL of the suggested replacement for a deprecated resource. The + suggested replacement resource must be the same kind of resource as + the deprecated resource. + output: true + - name: 'state' + type: Enum + description: | + The deprecation state of this resource. This can be DEPRECATED, + OBSOLETE, or DELETED. Operations which create a new resource using a + DEPRECATED resource will return successfully, but with a warning + indicating the deprecated resource and recommending its replacement. + Operations which use OBSOLETE or DELETED resources will be rejected + and result in an error. + output: true + enum_values: + - 'DEPRECATED' + - 'OBSOLETE' + - 'DELETED' + - name: 'description' + type: String + description: 'An optional description of this resource.' + output: true + - name: 'id' + type: Integer + description: 'The unique identifier for the resource.' + output: true + - name: 'name' + type: String + description: 'Name of the resource.' + - name: 'quotas' + type: Array + description: 'Quotas assigned to this region.' + output: true + item_type: + type: NestedObject + properties: + - name: 'metric' + type: String + description: 'Name of the quota metric.' + output: true + - name: 'limit' + type: Double + description: 'Quota limit for this metric.' 
+ output: true + - name: 'usage' + type: Double + description: 'Current usage of this metric.' + output: true + - name: 'owner' + type: String + description: + 'Owning resource. This is the resource on which this quota is + applied.' + output: true + - name: 'status' + type: Enum + description: | + Status of the region, either UP or DOWN. + output: true + enum_values: + - 'UP' + - 'DOWN' + - name: 'zones' + type: Array + description: 'List of zones within the region' + output: true + item_type: + type: String diff --git a/mmv1/products/compute/go_RegionAutoscaler.yaml b/mmv1/products/compute/go_RegionAutoscaler.yaml new file mode 100644 index 000000000000..0bc0b9cfcb30 --- /dev/null +++ b/mmv1/products/compute/go_RegionAutoscaler.yaml @@ -0,0 +1,403 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionAutoscaler' +kind: 'compute#autoscaler' +description: | + Represents an Autoscaler resource. + + Autoscalers allow you to automatically scale virtual machine instances in + managed instance groups according to an autoscaling policy that you + define. 
+references: + guides: + 'Autoscaling Groups of Instances': 'https://cloud.google.com/compute/docs/autoscaler/' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionAutoscalers' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/autoscalers' +has_self_link: true +update_url: 'projects/{{project}}/regions/{{region}}/autoscalers?autoscaler={{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'region_autoscaler_basic' + primary_resource_id: 'foobar' + vars: + region_autoscaler_name: 'my-region-autoscaler' + instance_template_name: 'my-instance-template' + target_pool_name: 'my-target-pool' + rigm_name: 'my-region-igm' +parameters: + - name: 'region' + type: ResourceRef + description: | + URL of the region where the instance group resides. + required: false + immutable: true + ignore_read: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'name' + type: String + description: | + Name of the resource. The name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. 
+ required: true + immutable: true + validation: + function: 'verify.ValidateGCEName' + - name: 'description' + type: String + description: | + An optional description of this resource. + - name: 'autoscalingPolicy' + type: NestedObject + description: | + The configuration parameters for the autoscaling algorithm. You can + define one or more of the policies for an autoscaler: cpuUtilization, + customMetricUtilizations, and loadBalancingUtilization. + + If none of these are specified, the default will be to autoscale based + on cpuUtilization to 0.6 or 60%. + required: true + properties: + - name: 'minReplicas' + type: Integer + description: | + The minimum number of replicas that the autoscaler can scale down + to. This cannot be less than 0. If not provided, autoscaler will + choose a default value depending on maximum number of instances + allowed. + api_name: minNumReplicas + required: true + send_empty_value: true + - name: 'maxReplicas' + type: Integer + description: | + The maximum number of instances that the autoscaler can scale up + to. This is required when creating or updating an autoscaler. The + maximum number of replicas should not be lower than minimal number + of replicas. + api_name: maxNumReplicas + required: true + - name: 'cooldownPeriod' + type: Integer + description: | + The number of seconds that the autoscaler should wait before it + starts collecting information from a new instance. This prevents + the autoscaler from collecting information when the instance is + initializing, during which the collected usage would not be + reliable. The default time autoscaler waits is 60 seconds. + + Virtual machine initialization times might vary because of + numerous factors. We recommend that you test how long an + instance may take to initialize. To do this, create an instance + and time the startup process. + api_name: coolDownPeriodSec + default_value: 60 + - name: 'mode' + type: String + description: | + Defines operating mode for this policy. 
+ default_value: ON + - name: 'scaleDownControl' + type: NestedObject + description: | + Defines scale down controls to reduce the risk of response latency + and outages due to abrupt scale-in events + min_version: 'beta' + properties: + - name: 'maxScaledDownReplicas' + type: NestedObject + at_least_one_of: + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas' + - 'autoscaling_policy.0.scale_down_control.0.time_window_sec' + properties: + - name: 'fixed' + type: Integer + description: | + Specifies a fixed number of VM instances. This must be a positive + integer. + at_least_one_of: + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas.0.fixed' + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas.0.percent' + - name: 'percent' + type: Integer + description: | + Specifies a percentage of instances between 0 to 100%, inclusive. + For example, specify 80 for 80%. + at_least_one_of: + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas.0.fixed' + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas.0.percent' + - name: 'timeWindowSec' + type: Integer + description: | + How long back autoscaling should look when computing recommendations + to include directives regarding slower scale down, as described above. + at_least_one_of: + - 'autoscaling_policy.0.scale_down_control.0.max_scaled_down_replicas' + - 'autoscaling_policy.0.scale_down_control.0.time_window_sec' + - name: 'scaleInControl' + type: NestedObject + description: | + Defines scale in controls to reduce the risk of response latency + and outages due to abrupt scale-in events + properties: + - name: 'maxScaledInReplicas' + type: NestedObject + at_least_one_of: + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas' + - 'autoscaling_policy.0.scale_in_control.0.time_window_sec' + properties: + - name: 'fixed' + type: Integer + description: | + Specifies a fixed number of VM instances. 
This must be a positive + integer. + at_least_one_of: + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.fixed' + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.percent' + - name: 'percent' + type: Integer + description: | + Specifies a percentage of instances between 0 to 100%, inclusive. + For example, specify 80 for 80%. + at_least_one_of: + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.fixed' + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas.0.percent' + - name: 'timeWindowSec' + type: Integer + description: | + How long back autoscaling should look when computing recommendations + to include directives regarding slower scale down, as described above. + at_least_one_of: + - 'autoscaling_policy.0.scale_in_control.0.max_scaled_in_replicas' + - 'autoscaling_policy.0.scale_in_control.0.time_window_sec' + - name: 'cpuUtilization' + type: NestedObject + description: | + Defines the CPU utilization policy that allows the autoscaler to + scale based on the average CPU utilization of a managed instance + group. + default_from_api: true + properties: + - name: 'target' + type: Double + description: | + The target CPU utilization that the autoscaler should maintain. + Must be a float value in the range (0, 1]. If not specified, the + default is 0.6. + + If the CPU level is below the target utilization, the autoscaler + scales down the number of instances until it reaches the minimum + number of instances you specified or until the average CPU of + your instances reaches the target utilization. + + If the average CPU is above the target utilization, the autoscaler + scales up until it reaches the maximum number of instances you + specified or until the average utilization reaches the target + utilization. + api_name: utilizationTarget + required: true + - name: 'predictiveMethod' + type: String + description: | + Indicates whether predictive autoscaling based on CPU metric is enabled. 
Valid values are: + + - NONE (default). No predictive method is used. The autoscaler scales the group to meet current demand based on real-time metrics. + + - OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: NONE + - name: 'metric' + type: Array + description: | + Configuration parameters of autoscaling based on a custom metric. + api_name: customMetricUtilizations + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The identifier (type) of the Stackdriver Monitoring metric. + The metric cannot have negative values. + + The metric must have a value type of INT64 or DOUBLE. + api_name: metric + required: true + - name: 'singleInstanceAssignment' + type: Double + description: | + If scaling is based on a per-group metric value that represents the + total amount of work to be done or resource usage, set this value to + an amount assigned for a single instance of the scaled group. + The autoscaler will keep the number of instances proportional to the + value of this metric, the metric itself should not change value due + to group resizing. + + For example, a good metric to use with the target is + `pubsub.googleapis.com/subscription/num_undelivered_messages` + or a custom metric exporting the total number of requests coming to + your instances. + + A bad example would be a metric exporting an average or median + latency, since this value can't include a chunk assignable to a + single instance, it could be better used with utilization_target + instead. + - name: 'target' + type: Double + description: | + The target value of the metric that autoscaler should + maintain. This must be a positive value. 
A utilization + metric scales number of virtual machines handling requests + to increase or decrease proportionally to the metric. + + For example, a good metric to use as a utilizationTarget is + www.googleapis.com/compute/instance/network/received_bytes_count. + The autoscaler will work to keep this value constant for each + of the instances. + api_name: utilizationTarget + - name: 'type' + type: Enum + description: | + Defines how target utilization value is expressed for a + Stackdriver Monitoring metric. + api_name: utilizationTargetType + enum_values: + - 'GAUGE' + - 'DELTA_PER_SECOND' + - 'DELTA_PER_MINUTE' + - name: 'filter' + type: String + description: | + A filter string to be used as the filter string for + a Stackdriver Monitoring TimeSeries.list API call. + This filter is used to select a specific TimeSeries for + the purpose of autoscaling and to determine whether the metric + is exporting per-instance or per-group data. + + You can only use the AND operator for joining selectors. + You can only use direct equality comparison operator (=) without + any functions for each selector. + You can specify the metric in both the filter string and in the + metric field. However, if specified in both places, the metric must + be identical. + + The monitored resource type determines what kind of values are + expected for the metric. If it is a gce_instance, the autoscaler + expects the metric to include a separate TimeSeries for each + instance in a group. In such a case, you cannot filter on resource + labels. + + If the resource type is any other value, the autoscaler expects + this metric to contain values that apply to the entire autoscaled + instance group and resource label filtering can be performed to + point autoscaler at the correct TimeSeries to scale upon. + This is called a per-group metric for the purpose of autoscaling. + + If not specified, the type defaults to gce_instance. 
+ + You should provide a filter that is selective enough to pick just + one TimeSeries for the autoscaled group or for each of the instances + (if you are using gce_instance resource type). If multiple + TimeSeries are returned upon the query execution, the autoscaler + will sum their respective values to obtain its scaling value. + - name: 'loadBalancingUtilization' + type: NestedObject + description: | + Configuration parameters of autoscaling based on a load balancer. + properties: + - name: 'target' + type: Double + description: | + Fraction of backend capacity utilization (set in HTTP(s) load + balancing configuration) that autoscaler should maintain. Must + be a positive float value. If not defined, the default is 0.8. + api_name: utilizationTarget + required: true + - name: 'scalingSchedules' + type: Map + description: | + Scaling schedules defined for an autoscaler. Multiple schedules can be set on an autoscaler and they can overlap. + key_name: 'name' + key_description: | + A name for the schedule. + value_type: + type: NestedObject + properties: + - name: 'minRequiredReplicas' + type: Integer + description: | + Minimum number of VM instances that autoscaler will recommend in time intervals starting according to schedule. + required: true + send_empty_value: true + - name: 'schedule' + type: String + description: | + The start timestamps of time intervals when this scaling schedule should provide a scaling signal. This field uses the extended cron format (with an optional year field). + required: true + - name: 'timeZone' + type: String + description: | + The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. + default_value: UTC + - name: 'durationSec' + type: Integer + description: | + The duration of time intervals (in seconds) for which this scaling schedule will be running. The minimum allowed value is 300. 
+ required: true + - name: 'disabled' + type: Boolean + description: | + A boolean value that specifies if a scaling schedule can influence autoscaler recommendations. If set to true, then a scaling schedule has no effect. + default_value: false + - name: 'description' + type: String + description: | + A description of a scaling schedule. + - name: 'target' + type: String + description: | + URL of the managed instance group that this autoscaler will scale. + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' diff --git a/mmv1/products/compute/go_RegionBackendService.yaml b/mmv1/products/compute/go_RegionBackendService.yaml new file mode 100644 index 000000000000..13700c4f64c0 --- /dev/null +++ b/mmv1/products/compute/go_RegionBackendService.yaml @@ -0,0 +1,1279 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionBackendService' +kind: 'compute#backendService' +description: | + A Region Backend Service defines a regionally-scoped group of virtual + machines that will serve traffic for load balancing. 
+references: + guides: + 'Internal TCP/UDP Load Balancing': 'https://cloud.google.com/compute/docs/load-balancing/internal/' + api: 'https://cloud.google.com/compute/docs/reference/latest/regionBackendServices' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/backendServices' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + allowed_iam_role: 'roles/compute.admin' + parent_resource_attribute: 'name' + iam_conditions_request_type: 'QUERY_PARAM' + min_version: 'beta' +custom_code: + constants: 'templates/terraform/constants/go/region_backend_service.go.tmpl' + encoder: 'templates/terraform/encoders/go/region_backend_service.go.tmpl' + decoder: 'templates/terraform/decoders/go/region_backend_service.go.tmpl' + post_create: 'templates/terraform/post_create/go/compute_region_backend_service_security_policy.go.tmpl' +custom_diff: + - 'customDiffRegionBackendService' +schema_version: 1 +migrate_state: 'tpgresource.MigrateStateNoop' +examples: + - name: 'region_backend_service_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-region-service%s", context["random_suffix"])' + vars: + region_backend_service_name: 'region-service' + health_check_name: 'rbs-health-check' + - name: 'region_backend_service_external_iap' + primary_resource_id: 'default' + vars: + region_backend_service_name: 'tf-test-region-service-external' + - name: 'region_backend_service_cache' + primary_resource_id: 'default' + min_version: 'beta' + vars: + region_backend_service_name: 'region-service' + health_check_name: 'rbs-health-check' + - name: 'region_backend_service_ilb_round_robin' 
+ primary_resource_id: 'default' + vars: + region_backend_service_name: 'region-service' + health_check_name: 'rbs-health-check' + - name: 'region_backend_service_external' + primary_resource_id: 'default' + min_version: 'beta' + vars: + region_backend_service_name: 'region-service' + health_check_name: 'rbs-health-check' + - name: 'region_backend_service_external_weighted' + primary_resource_id: 'default' + vars: + region_backend_service_name: 'region-service' + health_check_name: 'rbs-health-check' + - name: 'region_backend_service_ilb_ring_hash' + primary_resource_id: 'default' + vars: + region_backend_service_name: 'region-service' + health_check_name: 'rbs-health-check' + - name: 'region_backend_service_balancing_mode' + primary_resource_id: 'default' + vars: + region_backend_service_name: 'region-service' + rigm_name: 'rbs-rigm' + region_health_check_name: 'rbs-health-check' + network_name: 'rbs-net' + - name: 'region_backend_service_connection_tracking' + primary_resource_id: 'default' + min_version: 'beta' + vars: + region_backend_service_name: 'region-service' + health_check_name: 'rbs-health-check' +parameters: + - name: 'region' + type: ResourceRef + description: | + The Region in which the created backend service should reside. + If it is not provided, the provider region is used. + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'affinityCookieTtlSec' + type: Integer + description: | + Lifetime of cookies in seconds if session_affinity is + GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts + only until the end of the browser session (or equivalent). The + maximum allowed value for TTL is one day. + + When the load balancing scheme is INTERNAL, this field is not used. 
+ - name: 'backend' + type: Array + description: | + The set of backends that serve this RegionBackendService. + api_name: backends + is_set: true + set_hash_func: 'resourceGoogleComputeBackendServiceBackendHash' + item_type: + type: NestedObject + properties: + - name: 'balancingMode' + type: Enum + description: | + Specifies the balancing mode for this backend. + + See the [Backend Services Overview](https://cloud.google.com/load-balancing/docs/backend-service#balancing-mode) + for an explanation of load balancing modes. + + From version 6.0.0 default value will be UTILIZATION to match default GCP value. + default_value: CONNECTION + enum_values: + - 'UTILIZATION' + - 'RATE' + - 'CONNECTION' + - name: 'capacityScaler' + type: Double + description: | + A multiplier applied to the group's maximum servicing capacity + (based on UTILIZATION, RATE or CONNECTION). + + ~>**NOTE**: This field cannot be set for + INTERNAL region backend services (default loadBalancingScheme), + but is required for non-INTERNAL backend service. The total + capacity_scaler for all backends must be non-zero. + + A setting of 0 means the group is completely drained, offering + 0% of its available Capacity. Valid range is [0.0,1.0]. + send_empty_value: true + - name: 'description' + type: String + description: | + An optional description of this resource. + Provide this property when you create the resource. + - name: 'failover' + type: Boolean + description: | + This field designates whether this is a failover backend. More + than one failover backend can be configured for a given RegionBackendService. + default_from_api: true + - name: 'group' + type: String + description: | + The fully-qualified URL of an Instance Group or Network Endpoint + Group resource. In case of instance group this defines the list + of instances that serve traffic. Member virtual machine + instances from each instance group must live in the same zone as + the instance group itself. 
No two backends in a backend service + are allowed to use same Instance Group resource. + + For Network Endpoint Groups this defines list of endpoints. All + endpoints of Network Endpoint Group must be hosted on instances + located in the same zone as the Network Endpoint Group. + + Backend services cannot mix Instance Group and + Network Endpoint Group backends. + + When the `load_balancing_scheme` is INTERNAL, only instance groups + are supported. + + Note that you must specify an Instance Group or Network Endpoint + Group resource using the fully-qualified URL, rather than a + partial URL. + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + custom_flatten: 'templates/terraform/custom_flatten/go/guard_self_link.go.tmpl' + - name: 'maxConnections' + type: Integer + description: | + The max number of simultaneous connections for the group. Can + be used with either CONNECTION or UTILIZATION balancing modes. + Cannot be set for INTERNAL backend services. + + For CONNECTION mode, either maxConnections or one + of maxConnectionsPerInstance or maxConnectionsPerEndpoint, + as appropriate for group type, must be set. + - name: 'maxConnectionsPerInstance' + type: Integer + description: | + The max number of simultaneous connections that a single + backend instance can handle. Cannot be set for INTERNAL backend + services. + + This is used to calculate the capacity of the group. + Can be used in either CONNECTION or UTILIZATION balancing modes. + For CONNECTION mode, either maxConnections or + maxConnectionsPerInstance must be set. + - name: 'maxConnectionsPerEndpoint' + type: Integer + description: | + The max number of simultaneous connections that a single backend + network endpoint can handle. Cannot be set + for INTERNAL backend services. + + This is used to calculate the capacity of the group. Can be + used in either CONNECTION or UTILIZATION balancing modes. 
For + CONNECTION mode, either maxConnections or + maxConnectionsPerEndpoint must be set. + - name: 'maxRate' + type: Integer + description: | + The max requests per second (RPS) of the group. Cannot be set + for INTERNAL backend services. + + Can be used with either RATE or UTILIZATION balancing modes, + but required if RATE mode. Either maxRate or one + of maxRatePerInstance or maxRatePerEndpoint, as appropriate for + group type, must be set. + - name: 'maxRatePerInstance' + type: Double + description: | + The max requests per second (RPS) that a single backend + instance can handle. This is used to calculate the capacity of + the group. Can be used in either balancing mode. For RATE mode, + either maxRate or maxRatePerInstance must be set. Cannot be set + for INTERNAL backend services. + - name: 'maxRatePerEndpoint' + type: Double + description: | + The max requests per second (RPS) that a single backend network + endpoint can handle. This is used to calculate the capacity of + the group. Can be used in either balancing mode. For RATE mode, + either maxRate or maxRatePerEndpoint must be set. Cannot be set + for INTERNAL backend services. + - name: 'maxUtilization' + type: Double + description: | + Used when balancingMode is UTILIZATION. This ratio defines the + CPU utilization target for the group. Valid range is [0.0, 1.0]. + Cannot be set for INTERNAL backend services. + - name: 'circuitBreakers' + type: NestedObject + description: | + Settings controlling the volume of connections to a backend service. This field + is applicable only when the `load_balancing_scheme` is set to INTERNAL_MANAGED + and the `protocol` is set to HTTP, HTTPS, or HTTP2. + properties: + - name: 'connectTimeout' + type: NestedObject + description: | + The timeout for new network connections to hosts. 
+ min_version: 'beta' + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + properties: + - name: 'seconds' + type: Integer + description: | + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + required: true + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + - name: 'maxRequestsPerConnection' + type: Integer + description: | + Maximum requests for a single backend connection. This parameter + is respected by both the HTTP/1.1 and HTTP/2 implementations. If + not specified, there is no limit. Setting this parameter to 1 + will effectively disable keep alive. + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + - name: 'maxConnections' + type: Integer + description: | + The maximum number of connections to the backend cluster. + Defaults to 1024. + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + default_value: 1024 + - name: 'maxPendingRequests' + type: Integer + description: | + The maximum number of pending requests to the backend cluster. + Defaults to 1024. 
+ at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + default_value: 1024 + - name: 'maxRequests' + type: Integer + description: | + The maximum number of parallel requests to the backend cluster. + Defaults to 1024. + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + default_value: 1024 + - name: 'maxRetries' + type: Integer + description: | + The maximum number of parallel retries to the backend cluster. + Defaults to 3. + at_least_one_of: + - 'circuit_breakers.0.connect_timeout' + - 'circuit_breakers.0.max_requests_per_connection' + - 'circuit_breakers.0.max_connections' + - 'circuit_breakers.0.max_pending_requests' + - 'circuit_breakers.0.max_requests' + - 'circuit_breakers.0.max_retries' + default_value: 3 + - name: 'consistentHash' + type: NestedObject + description: | + Consistent Hash-based load balancing can be used to provide soft session + affinity based on HTTP headers, cookies or other properties. This load balancing + policy is applicable only for HTTP connections. The affinity to a particular + destination host will be lost when one or more hosts are added/removed from the + destination service. This field specifies parameters that control consistent + hashing. + This field only applies when all of the following are true - + * `load_balancing_scheme` is set to INTERNAL_MANAGED + * `protocol` is set to HTTP, HTTPS, or HTTP2 + * `locality_lb_policy` is set to MAGLEV or RING_HASH + properties: + - name: 'httpCookie' + type: NestedObject + description: | + Hash is based on HTTP Cookie. 
This field describes a HTTP cookie + that will be used as the hash key for the consistent hash load + balancer. If the cookie is not present, it will be generated. + This field is applicable if the sessionAffinity is set to HTTP_COOKIE. + at_least_one_of: + - 'consistent_hash.0.http_cookie' + - 'consistent_hash.0.http_header_name' + - 'consistent_hash.0.minimum_ring_size' + properties: + - name: 'ttl' + type: NestedObject + description: | + Lifetime of the cookie. + at_least_one_of: + - 'consistent_hash.0.http_cookie.0.ttl' + - 'consistent_hash.0.http_cookie.0.name' + - 'consistent_hash.0.http_cookie.0.path' + properties: + - name: 'seconds' + type: Integer + description: | + Span of time at a resolution of a second. + Must be from 0 to 315,576,000,000 inclusive. + required: true + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must + be from 0 to 999,999,999 inclusive. + - name: 'name' + type: String + description: | + Name of the cookie. + at_least_one_of: + - 'consistent_hash.0.http_cookie.0.ttl' + - 'consistent_hash.0.http_cookie.0.name' + - 'consistent_hash.0.http_cookie.0.path' + - name: 'path' + type: String + description: | + Path to set for the cookie. + at_least_one_of: + - 'consistent_hash.0.http_cookie.0.ttl' + - 'consistent_hash.0.http_cookie.0.name' + - 'consistent_hash.0.http_cookie.0.path' + - name: 'httpHeaderName' + type: String + description: | + The hash based on the value of the specified header field. + This field is applicable if the sessionAffinity is set to HEADER_FIELD. + at_least_one_of: + - 'consistent_hash.0.http_cookie' + - 'consistent_hash.0.http_header_name' + - 'consistent_hash.0.minimum_ring_size' + - name: 'minimumRingSize' + type: Integer + description: | + The minimum number of virtual nodes to use for the hash ring. 
+ Larger ring sizes result in more granular load + distributions. If the number of hosts in the load balancing pool + is larger than the ring size, each host will be assigned a single + virtual node. + Defaults to 1024. + at_least_one_of: + - 'consistent_hash.0.http_cookie' + - 'consistent_hash.0.http_header_name' + - 'consistent_hash.0.minimum_ring_size' + default_value: 1024 + - name: 'cdnPolicy' + type: NestedObject + description: 'Cloud CDN configuration for this BackendService.' + default_from_api: true + properties: + - name: 'cacheKeyPolicy' + type: NestedObject + description: 'The CacheKeyPolicy for this CdnPolicy.' + at_least_one_of: + - 'cdn_policy.0.cache_key_policy' + - 'cdn_policy.0.signed_url_cache_max_age_sec' + properties: + - name: 'includeHost' + type: Boolean + description: | + If true requests to different hosts will be cached separately. + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + - name: 'includeProtocol' + type: Boolean + description: | + If true, http and https requests will be cached separately. + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + - name: 'includeQueryString' + type: Boolean + description: | + If true, include query string parameters in the cache key + according to query_string_whitelist and + query_string_blacklist. 
If neither is set, the entire query + string will be included. + + If false, the query string will be excluded from the cache + key entirely. + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + - name: 'queryStringBlacklist' + type: Array + description: | + Names of query string parameters to exclude in cache keys. + + All other parameters will be included. Either specify + query_string_whitelist or query_string_blacklist, not both. + '&' and '=' will be percent encoded and not treated as + delimiters. + is_set: true + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + item_type: + type: String + - name: 'queryStringWhitelist' + type: Array + description: | + Names of query string parameters to include in cache keys. + + All other parameters will be excluded. Either specify + query_string_whitelist or query_string_blacklist, not both. + '&' and '=' will be percent encoded and not treated as + delimiters. 
+ is_set: true + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + item_type: + type: String + - name: 'includeNamedCookies' + type: Array + description: | + Names of cookies to include in cache keys. + send_empty_value: true + at_least_one_of: + - 'cdn_policy.0.cache_key_policy.0.include_host' + - 'cdn_policy.0.cache_key_policy.0.include_protocol' + - 'cdn_policy.0.cache_key_policy.0.include_query_string' + - 'cdn_policy.0.cache_key_policy.0.query_string_blacklist' + - 'cdn_policy.0.cache_key_policy.0.query_string_whitelist' + - 'cdn_policy.0.cache_key_policy.0.include_named_cookies' + item_type: + type: String + - name: 'signedUrlCacheMaxAgeSec' + type: Integer + description: | + Maximum number of seconds the response to a signed URL request + will be considered fresh, defaults to 1hr (3600s). After this + time period, the response will be revalidated before + being served. + + When serving responses to signed URL requests, Cloud CDN will + internally behave as though all responses from this backend had a + "Cache-Control: public, max-age=[TTL]" header, regardless of any + existing Cache-Control header. The actual headers served in + responses will not be altered. + at_least_one_of: + - 'cdn_policy.0.cache_key_policy' + - 'cdn_policy.0.signed_url_cache_max_age_sec' + default_value: 3600 + - name: 'defaultTtl' + type: Integer + description: | + Specifies the default TTL for cached content served by this origin for responses + that do not have an existing valid TTL (max-age or s-max-age). 
+ default_from_api: true + - name: 'maxTtl' + type: Integer + description: | + Specifies the maximum allowed TTL for cached content served by this origin. + default_from_api: true + - name: 'clientTtl' + type: Integer + description: | + Specifies a separate client (e.g. browser client) maximum TTL. This is used to clamp the max-age (or Expires) value sent to the client. + default_from_api: true + - name: 'negativeCaching' + type: Boolean + description: | + Negative caching allows per-status code TTLs to be set, in order to apply fine-grained caching for common errors or redirects. + default_from_api: true + send_empty_value: true + - name: 'negativeCachingPolicy' + type: Array + description: | + Sets a cache TTL for the specified HTTP status code. negativeCaching must be enabled to configure negativeCachingPolicy. + Omitting the policy and leaving negativeCaching enabled will use Cloud CDN's default cache TTLs. + item_type: + type: NestedObject + properties: + - name: 'code' + type: Integer + description: | + The HTTP status code to define a TTL against. Only HTTP status codes 300, 301, 308, 404, 405, 410, 421, 451 and 501 + can be specified as values, and you cannot specify a status code more than once. + - name: 'ttl' + type: Integer + description: | + The TTL (in seconds) for which to cache responses with the corresponding status code. The maximum allowed value is 1800s + (30 minutes), noting that infrequently accessed objects may be evicted from the cache before the defined TTL. + min_version: 'beta' + - name: 'cacheMode' + type: Enum + description: | + Specifies the cache setting for all responses from this backend. 
+ The possible values are: USE_ORIGIN_HEADERS, FORCE_CACHE_ALL and CACHE_ALL_STATIC + default_from_api: true + enum_values: + - 'USE_ORIGIN_HEADERS' + - 'FORCE_CACHE_ALL' + - 'CACHE_ALL_STATIC' + - name: 'serveWhileStale' + type: Integer + description: | + Serve existing content from the cache (if available) when revalidating content with the origin, or when an error is encountered when refreshing the cache. + + default_from_api: true + send_empty_value: true + - name: 'connectionDraining' + type: NestedObject + description: | + Settings for connection draining + flatten_object: true + properties: + - name: 'connection_draining_timeout_sec' + type: Integer + description: | + Time for which instance will be drained (not accept new + connections, but still work to finish started). + + From version 6.0.0 ConnectionDrainingTimeoutSec default value will be 300 to match default GCP value. + api_name: drainingTimeoutSec + send_empty_value: true + default_value: 0 + - name: 'creationTimestamp' + type: Time + description: | + Creation timestamp in RFC3339 text format. + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. + - name: 'failoverPolicy' + type: NestedObject + description: | + Policy for failovers. + properties: + - name: 'disableConnectionDrainOnFailover' + type: Boolean + description: | + On failover or failback, this field indicates whether connection drain + will be honored. Setting this to true has the following effect: connections + to the old active pool are not drained. Connections to the new active pool + use the timeout of 10 min (currently fixed). Setting to false has the + following effect: both old and new connections will have a drain timeout + of 10 min. + This can be set to true only if the protocol is TCP. + The default is false. 
+ default_from_api: true + at_least_one_of: + - 'failover_policy.0.disable_connection_drain_on_failover' + - 'failover_policy.0.drop_traffic_if_unhealthy' + - 'failover_policy.0.failover_ratio' + - name: 'dropTrafficIfUnhealthy' + type: Boolean + description: | + This option is used only when no healthy VMs are detected in the primary + and backup instance groups. When set to true, traffic is dropped. When + set to false, new connections are sent across all VMs in the primary group. + The default is false. + default_from_api: true + send_empty_value: true + at_least_one_of: + - 'failover_policy.0.disable_connection_drain_on_failover' + - 'failover_policy.0.drop_traffic_if_unhealthy' + - 'failover_policy.0.failover_ratio' + - name: 'failoverRatio' + type: Double + description: | + The value of the field must be in [0, 1]. If the ratio of the healthy + VMs in the primary backend is at or below this number, traffic arriving + at the load-balanced IP will be directed to the failover backend. + In case where 'failoverRatio' is not set or all the VMs in the backup + backend are unhealthy, the traffic will be directed back to the primary + backend in the "force" mode, where traffic will be spread to the healthy + VMs with the best effort, or to all VMs when no VM is healthy. + This field is only used with l4 load balancing. + at_least_one_of: + - 'failover_policy.0.disable_connection_drain_on_failover' + - 'failover_policy.0.drop_traffic_if_unhealthy' + - 'failover_policy.0.failover_ratio' + - name: 'enableCDN' + type: Boolean + description: | + If true, enable Cloud CDN for this RegionBackendService. + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. A hash of the contents stored in this + object. This field is used in optimistic locking. + output: true + - name: 'healthChecks' + type: Array + description: | + The set of URLs to HealthCheck resources for health checking + this RegionBackendService. 
Currently at most one health + check can be specified. + + A health check must be specified unless the backend service uses an internet + or serverless NEG as a backend. + is_set: true + set_hash_func: 'tpgresource.SelfLinkRelativePathHash' + custom_flatten: 'templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl' + item_type: + type: String + min_size: 1 + max_size: 1 + - name: 'generated_id' + type: Integer + description: + 'The unique identifier for the resource. This identifier is defined by the + server.' + api_name: id + output: true + - name: 'iap' + type: NestedObject + description: Settings for enabling Cloud Identity Aware Proxy + send_empty_value: true + properties: + - name: 'oauth2ClientId' + type: String + description: | + OAuth2 Client ID for IAP + required: true + - name: 'oauth2ClientSecret' + type: String + description: | + OAuth2 Client Secret for IAP + required: true + ignore_read: true + sensitive: true + send_empty_value: true + - name: 'oauth2ClientSecretSha256' + type: String + description: | + OAuth2 Client Secret SHA-256 for IAP + sensitive: true + output: true + - name: 'loadBalancingScheme' + type: Enum + description: | + Indicates what kind of load balancing this regional backend service + will be used for. A backend service created for one type of load + balancing cannot be used with the other(s). For more information, refer to + [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). + immutable: true + default_value: INTERNAL + enum_values: + - 'EXTERNAL' + - 'EXTERNAL_MANAGED' + - 'INTERNAL' + - 'INTERNAL_MANAGED' + - name: 'localityLbPolicy' + type: Enum + description: | + The load balancing algorithm used within the scope of the locality. + The possible values are: + + * `ROUND_ROBIN`: This is a simple policy in which each healthy backend + is selected in round robin order. 
+ + * `LEAST_REQUEST`: An O(1) algorithm which selects two random healthy + hosts and picks the host which has fewer active requests. + + * `RING_HASH`: The ring/modulo hash load balancer implements consistent + hashing to backends. The algorithm has the property that the + addition/removal of a host from a set of N hosts only affects + 1/N of the requests. + + * `RANDOM`: The load balancer selects a random healthy host. + + * `ORIGINAL_DESTINATION`: Backend host is selected based on the client + connection metadata, i.e., connections are opened + to the same address as the destination address of + the incoming connection before the connection + was redirected to the load balancer. + + * `MAGLEV`: used as a drop in replacement for the ring hash load balancer. + Maglev is not as stable as ring hash but has faster table lookup + build times and host selection times. For more information about + Maglev, refer to https://ai.google/research/pubs/pub44824 + + * `WEIGHTED_MAGLEV`: Per-instance weighted Load Balancing via health check + reported weights. If set, the Backend Service must + configure a non legacy HTTP-based Health Check, and + health check replies are expected to contain + non-standard HTTP response header field + X-Load-Balancing-Endpoint-Weight to specify the + per-instance weights. If set, Load Balancing is weight + based on the per-instance weights reported in the last + processed health check replies, as long as every + instance either reported a valid weight or had + UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains + equal-weight. + + + This field is applicable to either: + + * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, + and loadBalancingScheme set to INTERNAL_MANAGED. + * A global backend service with the load_balancing_scheme set to INTERNAL_SELF_MANAGED. + * A regional backend service with loadBalancingScheme set to EXTERNAL (External Network + Load Balancing). 
Only MAGLEV and WEIGHTED_MAGLEV values are possible for External + Network Load Balancing. The default is MAGLEV. + + + If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, + or RING_HASH, session affinity settings will not take effect. + + Only ROUND_ROBIN and RING_HASH are supported when the backend service is referenced + by a URL map that is bound to target gRPC proxy that has validate_for_proxyless + field set to true. + enum_values: + - 'ROUND_ROBIN' + - 'LEAST_REQUEST' + - 'RING_HASH' + - 'RANDOM' + - 'ORIGINAL_DESTINATION' + - 'MAGLEV' + - 'WEIGHTED_MAGLEV' + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'outlierDetection' + type: NestedObject + description: | + Settings controlling eviction of unhealthy hosts from the load balancing pool. + This field is applicable only when the `load_balancing_scheme` is set + to INTERNAL_MANAGED and the `protocol` is set to HTTP, HTTPS, or HTTP2. + + From version 6.0.0 outlierDetection default terraform values will be removed to match default GCP value. + Default values are enforce by GCP without providing them. + properties: + - name: 'baseEjectionTime' + type: NestedObject + description: | + The base time that a host is ejected for. The real time is equal to the base + time multiplied by the number of times the host has been ejected. Defaults to + 30000ms or 30s. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + properties: + - name: 'seconds' + type: Integer + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'consecutiveErrors' + type: Integer + description: | + Number of errors before a host is ejected from the connection pool. When the + backend host is accessed over HTTP, a 5xx return code qualifies as an error. + Defaults to 5. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 5 + - name: 'consecutiveGatewayFailure' + type: Integer + description: | + The number of consecutive gateway failures (502, 503, 504 status or connection + errors that are mapped to one of those status codes) before a consecutive + gateway failure ejection occurs. Defaults to 5. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 5 + - name: 'enforcingConsecutiveErrors' + type: Integer + description: | + The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive 5xx. This setting can be used to disable + ejection or to ramp it up slowly. Defaults to 100. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 100 + - name: 'enforcingConsecutiveGatewayFailure' + type: Integer + description: | + The percentage chance that a host will be actually ejected when an outlier + status is detected through consecutive gateway failures. This setting can be + used to disable ejection or to ramp it up slowly. Defaults to 0. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 0 + - name: 'enforcingSuccessRate' + type: Integer + description: | + The percentage chance that a host will be actually ejected when an outlier + status is detected through success rate statistics. This setting can be used to + disable ejection or to ramp it up slowly. Defaults to 100. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 100 + - name: 'interval' + type: NestedObject + description: | + Time interval between ejection sweep analysis. This can result in both new + ejections as well as hosts being returned to service. Defaults to 10 seconds. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + properties: + - name: 'seconds' + type: Integer + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'maxEjectionPercent' + type: Integer + description: | + Maximum percentage of hosts in the load balancing pool for the backend service + that can be ejected. 
Defaults to 10%. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 10 + - name: 'successRateMinimumHosts' + type: Integer + description: | + The number of hosts in a cluster that must have enough request volume to detect + success rate outliers. If the number of hosts is less than this setting, outlier + detection via success rate statistics is not performed for any host in the + cluster. Defaults to 5. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 5 + - name: 'successRateRequestVolume' + type: Integer + description: | + The minimum number of total requests that must be collected in one interval (as + defined by the interval duration above) to include this host in success rate + based outlier detection. If the volume is lower than this setting, outlier + detection via success rate statistics is not performed for that host. Defaults + to 100. 
+ at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 100 + - name: 'successRateStdevFactor' + type: Integer + description: | + This factor is used to determine the ejection threshold for success rate outlier + ejection. The ejection threshold is the difference between the mean success + rate, and the product of this factor and the standard deviation of the mean + success rate: mean - (stdev * success_rate_stdev_factor). This factor is divided + by a thousand to get a double. That is, if the desired factor is 1.9, the + runtime value should be 1900. Defaults to 1900. + at_least_one_of: + - 'outlier_detection.0.base_ejection_time' + - 'outlier_detection.0.consecutive_errors' + - 'outlier_detection.0.consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_consecutive_errors' + - 'outlier_detection.0.enforcing_consecutive_gateway_failure' + - 'outlier_detection.0.enforcing_success_rate' + - 'outlier_detection.0.interval' + - 'outlier_detection.0.max_ejection_percent' + - 'outlier_detection.0.success_rate_minimum_hosts' + - 'outlier_detection.0.success_rate_request_volume' + - 'outlier_detection.0.success_rate_stdev_factor' + default_value: 1900 + - name: 'portName' + type: String + description: | + A named port on a backend instance group representing the port for + communication to the backend VMs in that group. 
Required when the + loadBalancingScheme is EXTERNAL, EXTERNAL_MANAGED, INTERNAL_MANAGED, or INTERNAL_SELF_MANAGED + and the backends are instance groups. The named port must be defined on each + backend instance group. This parameter has no meaning if the backends are NEGs. API sets a + default of "http" if not given. + Must be omitted when the loadBalancingScheme is INTERNAL (Internal TCP/UDP Load Balancing). + default_from_api: true + - name: 'protocol' + type: Enum + description: | + The protocol this RegionBackendService uses to communicate with backends. + The default is HTTP. **NOTE**: HTTP2 is only valid for beta HTTP/2 load balancer + types and may result in errors if used with the GA API. + # This is removed to avoid breaking terraform, as default values cannot be + # unspecified. Providers should include this as needed via overrides + # default_value: :TCP + default_from_api: true + enum_values: + - 'HTTP' + - 'HTTPS' + - 'HTTP2' + - 'SSL' + - 'TCP' + - 'UDP' + - 'GRPC' + - 'UNSPECIFIED' + - name: 'securityPolicy' + type: String + description: | + The security policy associated with this backend service. + min_version: 'beta' + update_url: 'projects/{{project}}/regions/{{region}}/backendServices/{{name}}/setSecurityPolicy' + update_verb: 'POST' + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'sessionAffinity' + type: Enum + description: | + Type of session affinity to use. The default is NONE. Session affinity is + not applicable if the protocol is UDP. + default_from_api: true + enum_values: + - 'NONE' + - 'CLIENT_IP' + - 'CLIENT_IP_PORT_PROTO' + - 'CLIENT_IP_PROTO' + - 'GENERATED_COOKIE' + - 'HEADER_FIELD' + - 'HTTP_COOKIE' + - 'CLIENT_IP_NO_DESTINATION' + - name: 'connectionTrackingPolicy' + type: NestedObject + description: | + Connection Tracking configuration for this BackendService. + This is available only for Layer 4 Internal Load Balancing and + Network Load Balancing. 
+ min_version: 'beta' + properties: + - name: 'idleTimeoutSec' + type: Integer + description: | + Specifies how long to keep a Connection Tracking entry while there is + no matching traffic (in seconds). + + For L4 ILB the minimum(default) is 10 minutes and maximum is 16 hours. + + For NLB the minimum(default) is 60 seconds and the maximum is 16 hours. + default_from_api: true + - name: 'trackingMode' + type: Enum + description: | + Specifies the key used for connection tracking. There are two options: + `PER_CONNECTION`: The Connection Tracking is performed as per the + Connection Key (default Hash Method) for the specific protocol. + + `PER_SESSION`: The Connection Tracking is performed as per the + configured Session Affinity. It matches the configured Session Affinity. + default_value: PER_CONNECTION + enum_values: + - 'PER_CONNECTION' + - 'PER_SESSION' + - name: 'connectionPersistenceOnUnhealthyBackends' + type: Enum + description: | + Specifies connection persistence when backends are unhealthy. + + If set to `DEFAULT_FOR_PROTOCOL`, the existing connections persist on + unhealthy backends only for connection-oriented protocols (TCP and SCTP) + and only if the Tracking Mode is PER_CONNECTION (default tracking mode) + or the Session Affinity is configured for 5-tuple. They do not persist + for UDP. + + If set to `NEVER_PERSIST`, after a backend becomes unhealthy, the existing + connections on the unhealthy backend are never persisted on the unhealthy + backend. They are always diverted to newly selected healthy backends + (unless all backends are unhealthy). + + If set to `ALWAYS_PERSIST`, existing connections always persist on + unhealthy backends regardless of protocol and session affinity. It is + generally not recommended to use this mode overriding the default. 
+ default_value: DEFAULT_FOR_PROTOCOL + enum_values: + - 'DEFAULT_FOR_PROTOCOL' + - 'NEVER_PERSIST' + - 'ALWAYS_PERSIST' + - name: 'enableStrongAffinity' + type: Boolean + description: Enable Strong Session Affinity for Network Load Balancing. This option is not available publicly. + - name: 'timeoutSec' + type: Integer + description: | + The backend service timeout has a different meaning depending on the type of load balancer. + For more information see, [Backend service settings](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices). + The default is 30 seconds. + The full range of timeout values allowed goes from 1 through 2,147,483,647 seconds. + default_from_api: true + - name: 'logConfig' + type: NestedObject + description: | + This field denotes the logging options for the load balancer traffic served by this backend service. + If logging is enabled, logs will be exported to Stackdriver. + default_from_api: true + properties: + - name: 'enable' + type: Boolean + description: | + Whether to enable logging for the load balancer traffic served by this backend service. + send_empty_value: true + at_least_one_of: + - 'log_config.0.enable' + - 'log_config.0.sample_rate' + - name: 'sampleRate' + type: Double + description: | + This field can only be specified if logging is enabled for this backend service. The value of + the field must be in [0, 1]. This configures the sampling rate of requests to the load balancer + where 1.0 means all logged requests are reported and 0.0 means no logged requests are reported. + The default value is 1.0. + at_least_one_of: + - 'log_config.0.enable' + - 'log_config.0.sample_rate' + diff_suppress_func: 'suppressWhenDisabled' + default_value: 1.0 + - name: 'network' + type: ResourceRef + description: | + The URL of the network to which this backend service belongs. + This field can only be specified when the load balancing scheme is set to INTERNAL. 
+ custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'subsetting' + type: NestedObject + description: | + Subsetting configuration for this BackendService. Currently this is applicable only for Internal TCP/UDP load balancing and Internal HTTP(S) load balancing. + min_version: 'beta' + properties: + - name: 'policy' + type: Enum + description: | + The algorithm used for subsetting. + required: true + enum_values: + - 'CONSISTENT_HASH_SUBSETTING' diff --git a/mmv1/products/compute/go_RegionCommitment.yaml b/mmv1/products/compute/go_RegionCommitment.yaml new file mode 100644 index 000000000000..f40c115711cd --- /dev/null +++ b/mmv1/products/compute/go_RegionCommitment.yaml @@ -0,0 +1,200 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionCommitment' +kind: 'compute#commitment' +description: | + Represents a regional Commitment resource. + + Creating a commitment resource means that you are purchasing a committed + use contract with an explicit start and end time. You can create commitments + based on vCPUs and memory usage and receive discounted rates. 
+references: + guides: + 'Committed use discounts for Compute Engine': 'https://cloud.google.com/compute/docs/instances/committed-use-discounts-overview' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionCommitments' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/commitments' +has_self_link: true +skip_delete: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'compute_region_commitment_basic' + primary_resource_id: 'foobar' + vars: + region_commitment_name: 'my-region-commitment' + skip_test: true + - name: 'compute_region_commitment_full' + primary_resource_id: 'foobar' + vars: + region_commitment_name: 'my-full-commitment' + skip_test: true +parameters: + - name: 'region' + type: ResourceRef + description: | + URL of the region where this commitment may be used. + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'commitment_id' + type: Integer + description: 'Unique identifier for the resource.' + api_name: id + output: true + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'name' + type: String + description: | + Name of the resource. 
The name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + validation: + function: 'verify.ValidateGCEName' + - name: 'description' + type: String + description: | + An optional description of this resource. + - name: 'status' + type: Enum + description: | + Status of the commitment with regards to eventual expiration + (each commitment has an end date defined). + output: true + enum_values: + - 'NOT_YET_ACTIVE' + - 'ACTIVE' + - 'EXPIRED' + - name: 'statusMessage' + type: String + description: | + A human-readable explanation of the status. + output: true + - name: 'plan' + type: Enum + description: | + The plan for this commitment, which determines duration and discount rate. + The currently supported plans are TWELVE_MONTH (1 year), and THIRTY_SIX_MONTH (3 years). + required: true + enum_values: + - 'TWELVE_MONTH' + - 'THIRTY_SIX_MONTH' + - name: 'startTimestamp' + type: Time + description: 'Commitment start time in RFC3339 text format.' + output: true + - name: 'endTimestamp' + type: Time + description: 'Commitment end time in RFC3339 text format.' + output: true + - name: 'resources' + type: Array + description: | + A list of commitment amounts for particular resources. + Note that VCPU and MEMORY resource commitments must occur together. + item_type: + type: NestedObject + properties: + - name: 'type' + type: String + description: | + Type of resource for which this commitment applies. + Possible values are VCPU, MEMORY, LOCAL_SSD, and ACCELERATOR. + - name: 'amount' + type: String + description: | + The amount of the resource purchased (in a type-dependent unit, + such as bytes). For vCPUs, this can just be an integer. For memory, + this must be provided in MB. 
Memory must be a multiple of 256 MB, + with up to 6.5GB of memory for every vCPU. + - name: 'acceleratorType' + type: String + description: | + Name of the accelerator type resource. Applicable only when the type is ACCELERATOR. + - name: 'type' + type: String + description: | + The type of commitment, which affects the discount rate and the eligible resources. + The type could be one of the following values: `MEMORY_OPTIMIZED`, `ACCELERATOR_OPTIMIZED`, + `GENERAL_PURPOSE_N1`, `GENERAL_PURPOSE_N2`, `GENERAL_PURPOSE_N2D`, `GENERAL_PURPOSE_E2`, + `GENERAL_PURPOSE_T2D`, `GENERAL_PURPOSE_C3`, `COMPUTE_OPTIMIZED_C2`, `COMPUTE_OPTIMIZED_C2D` and + `GRAPHICS_OPTIMIZED_G2` + default_from_api: true + - name: 'category' + type: Enum + description: | + The category of the commitment. Category MACHINE specifies commitments composed of + machine resources such as VCPU or MEMORY, listed in resources. Category LICENSE + specifies commitments composed of software licenses, listed in licenseResources. + Note that only MACHINE commitments should have a Type specified. + default_from_api: true + enum_values: + - 'LICENSE' + - 'MACHINE' + - name: 'licenseResource' + type: NestedObject + description: | + The license specification required as part of a license commitment. + properties: + - name: 'license' + type: String + description: | + Any applicable license URI. + required: true + - name: 'amount' + type: String + description: | + The number of licenses purchased. + - name: 'coresPerLicense' + type: String + description: | + Specifies the core range of the instance for which this license applies. + - name: 'autoRenew' + type: Boolean + description: | + Specifies whether to enable automatic renewal for the commitment. + The default value is false if not specified. + If the field is set to true, the commitment will be automatically renewed for either + one or three years according to the terms of the existing commitment. 
+ default_from_api: true diff --git a/mmv1/products/compute/go_RegionDisk.yaml b/mmv1/products/compute/go_RegionDisk.yaml new file mode 100644 index 000000000000..eb9eb2ec6181 --- /dev/null +++ b/mmv1/products/compute/go_RegionDisk.yaml @@ -0,0 +1,369 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionDisk' +kind: 'compute#disk' +description: | + Persistent disks are durable storage devices that function similarly to + the physical disks in a desktop or a server. Compute Engine manages the + hardware behind these devices to ensure data redundancy and optimize + performance for you. Persistent disks are available as either standard + hard disk drives (HDD) or solid-state drives (SSD). + + Persistent disks are located independently from your virtual machine + instances, so you can detach or move persistent disks to keep your data + even after you delete your instances. Persistent disk performance scales + automatically with size, so you can resize your existing persistent disks + or add more persistent disks to an instance to meet your performance and + storage space requirements. + + Add a persistent disk to your instance when you need reliable and + affordable storage with consistent performance characteristics. 
+references: + guides: + 'Adding or Resizing Regional Persistent Disks': 'https://cloud.google.com/compute/docs/disks/regional-persistent-disk' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionDisks' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/disks' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + parent_resource_attribute: 'name' + base_url: 'projects/{{project}}/regions/{{region}}/disks/{{name}}' + import_format: + - 'projects/{{project}}/regions/{{region}}/disks/{{name}}' + - '{{name}}' +custom_code: + encoder: 'templates/terraform/encoders/go/disk.tmpl' + decoder: 'templates/terraform/decoders/go/disk.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/detach_disk.tmpl' +custom_diff: + - 'customdiff.ForceNewIfChange("size", IsDiskShrinkage)' + - 'hyperDiskIopsUpdateDiffSupress' + - 'tpgresource.SetLabelsDiff' +examples: + - name: 'region_disk_basic' + primary_resource_id: 'regiondisk' + primary_resource_name: 'fmt.Sprintf("tf-test-my-region-disk%s", context["random_suffix"])' + vars: + region_disk_name: 'my-region-disk' + disk_name: 'my-disk' + snapshot_name: 'my-snapshot' + - name: 'region_disk_async' + primary_resource_id: 'primary' + primary_resource_name: 'fmt.Sprintf("tf-test-my-region-disk%s", context["random_suffix"])' + vars: + region_disk_name: 'primary-region-disk' + secondary_region_disk_name: 'secondary-region-disk' + - name: 'region_disk_features' + primary_resource_id: 'regiondisk' + primary_resource_name: 'fmt.Sprintf("tf-test-my-region-disk%s", context["random_suffix"])' + vars: + region_disk_name: 
'my-region-features-disk' +parameters: + - name: 'region' + type: ResourceRef + description: 'A reference to the region where the disk resides.' + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' + - name: 'diskEncryptionKey' + type: NestedObject + description: | + Encrypts the disk using a customer-supplied encryption key. + + After you encrypt a disk with a customer-supplied key, you must + provide the same key if you use the disk later (e.g. to create a disk + snapshot or an image, or to attach the disk to a virtual machine). + + Customer-supplied encryption keys do not protect access to metadata of + the disk. + + If you do not provide an encryption key when creating the disk, then + the disk will be encrypted using an automatically generated key and + you do not need to provide a key to use the disk later. + immutable: true + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + sensitive: true + - name: 'sha256' + type: String + description: | + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + output: true + - name: 'kmsKeyName' + type: String + description: | + The name of the encryption key that is stored in Google Cloud KMS. + - name: 'snapshot' + type: ResourceRef + description: | + The source snapshot used to create this disk. You can provide this as + a partial or full URL to the resource. 
For example, the following are + valid values: + + * `https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot` + * `projects/project/global/snapshots/snapshot` + * `global/snapshots/snapshot` + * `snapshot` + api_name: sourceSnapshot + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Snapshot' + imports: 'selfLink' + - name: 'sourceSnapshotEncryptionKey' + type: NestedObject + description: | + The customer-supplied encryption key of the source snapshot. Required + if the source snapshot is protected by a customer-supplied encryption + key. + immutable: true + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + # TODO(chrisst) Change to ResourceRef once KMS is in Magic Modules + - name: 'kmsKeyName' + type: String + description: | + The name of the encryption key that is stored in Google Cloud KMS. + min_version: 'beta' + - name: 'sha256' + type: String + description: | + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + output: true + - name: 'sourceSnapshotId' + type: String + description: | + The unique ID of the snapshot used to create this disk. This value + identifies the exact snapshot that was used to create this persistent + disk. For example, if you created the persistent disk from a snapshot + that was later deleted and recreated under the same name, the source + snapshot ID would identify the exact version of the snapshot that was + used. + output: true +properties: + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. 
+ output: true + update_url: 'projects/{{project}}/regions/{{region}}/disks/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + - name: 'lastAttachTimestamp' + type: Time + description: 'Last attach timestamp in RFC3339 text format.' + output: true + - name: 'lastDetachTimestamp' + type: Time + description: 'Last detach timestamp in RFC3339 text format.' + output: true + - name: 'labels' + type: KeyValueLabels + description: | + Labels to apply to this disk. A list of key->value pairs. + immutable: false + update_url: 'projects/{{project}}/regions/{{region}}/disks/{{name}}/setLabels' + update_verb: 'POST' + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'size' + type: Integer + description: | + Size of the persistent disk, specified in GB. You can specify this + field when creating a persistent disk using the sourceImage or + sourceSnapshot parameter, or specify it alone to create an empty + persistent disk. + + If you specify this field along with sourceImage or sourceSnapshot, + the value of sizeGb must not be less than the size of the sourceImage + or the size of the snapshot. 
+ api_name: sizeGb + default_from_api: true + update_url: 'projects/{{project}}/regions/{{region}}/disks/{{name}}/resize' + update_verb: 'POST' + - name: 'users' + type: Array + description: | + Links to the users of the disk (attached instances) in form: + project/zones/zone/instances/instance + output: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'user' + type: ResourceRef + description: 'A reference to a user of this disk' + resource: 'Instance' + imports: 'selfLink' + - name: 'physicalBlockSizeBytes' + type: Integer + description: | + Physical block size of the persistent disk, in bytes. If not present + in a request, a default value is used. Currently supported sizes + are 4096 and 16384, other sizes may be added in the future. + If an unsupported value is requested, the error message will list + the supported values for the caller's project. + default_from_api: true + - name: 'replicaZones' + type: Array + description: 'URLs of the zones where the disk should be replicated to.' + required: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'zone' + type: ResourceRef + description: | + A reference to a zone where the disk should be replicated to. + resource: 'Zone' + imports: 'selfLink' + min_size: 2 + max_size: 2 + - name: 'type' + type: ResourceRef + description: | + URL of the disk type resource describing which disk type to use to + create the disk. Provide this when creating the disk. + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + default_value: pd-standard + resource: 'RegionDiskType' + imports: 'selfLink' + - name: 'interface' + type: String + description: | + Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. 
+ min_version: 'beta' + url_param_only: true + diff_suppress_func: 'tpgresource.AlwaysDiffSuppress' + default_value: SCSI + deprecation_message: '`interface` is deprecated and will be removed in a future major release. This field is no longer used and can be safely removed from your configurations; disk interfaces are automatically determined on attachment.' + - name: 'sourceDisk' + type: String + description: | + The source disk used to create this disk. You can provide this as a partial or full URL to the resource. + For example, the following are valid values: + + * https://www.googleapis.com/compute/v1/projects/{project}/zones/{zone}/disks/{disk} + * https://www.googleapis.com/compute/v1/projects/{project}/regions/{region}/disks/{disk} + * projects/{project}/zones/{zone}/disks/{disk} + * projects/{project}/regions/{region}/disks/{disk} + * zones/{zone}/disks/{disk} + * regions/{region}/disks/{disk} + diff_suppress_func: 'sourceDiskDiffSupress' + - name: 'sourceDiskId' + type: String + description: | + The ID value of the disk used to create this image. This value may + be used to determine whether the image was taken from the current + or a previous instance of a given disk name. + output: true + - name: 'asyncPrimaryDisk' + type: NestedObject + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + properties: + - name: 'disk' + type: String + description: | + Primary disk for asynchronous disk replication. + required: true + - name: 'guestOsFeatures' + type: Array + description: | + A list of features to enable on the guest operating system. + Applicable only for bootable disks. + is_set: true + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'type' + type: Enum + description: | + The type of supported feature. Read [Enabling guest operating system features](https://cloud.google.com/compute/docs/images/create-delete-deprecate-private-images#guest-os-features) to see a list of available options. 
+ required: true + enum_values: + - 'MULTI_IP_SUBNET' + - 'SECURE_BOOT' + - 'SEV_CAPABLE' + - 'UEFI_COMPATIBLE' + - 'VIRTIO_SCSI_MULTIQUEUE' + - 'WINDOWS' + - 'GVNIC' + - 'SEV_LIVE_MIGRATABLE' + - 'SEV_SNP_CAPABLE' + - 'SUSPEND_RESUME_COMPATIBLE' + - 'TDX_CAPABLE' + - name: 'licenses' + type: Array + description: Any applicable license URI. + immutable: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'license' + type: ResourceRef + description: 'An applicable license URI' + resource: 'License' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml b/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml new file mode 100644 index 000000000000..c658bb931ec6 --- /dev/null +++ b/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml @@ -0,0 +1,89 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionDiskResourcePolicyAttachment' +description: | + Adds existing resource policies to a disk. You can only add one policy + which will be applied to this disk for scheduling snapshot creation. + + ~> **Note:** This resource does not support zonal disks (`google_compute_disk`). 
For zonal disks, please refer to [`google_compute_disk_resource_policy_attachment`](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_disk_resource_policy_attachment) +docs: +id_format: '{{project}}/{{region}}/{{disk}}/{{name}}' +base_url: 'projects/{{project}}/regions/{{region}}/disks/{{disk}}' +self_link: 'projects/{{project}}/regions/{{region}}/disks/{{disk}}' +create_url: 'projects/{{project}}/regions/{{region}}/disks/{{disk}}/addResourcePolicies' +delete_url: 'projects/{{project}}/regions/{{region}}/disks/{{disk}}/removeResourcePolicies' +delete_verb: 'POST' +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - name +nested_query: + keys: + - resourcePolicies + is_list_of_ids: true + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/compute_region_disk_resource_policies_attachment.go.tmpl' + decoder: 'templates/terraform/decoders/go/compute_disk_resource_policies_attachment.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/compute_region_disk_resource_policies_attachment.go.tmpl' +examples: + - name: 'region_disk_resource_policy_attachment_basic' + primary_resource_id: 'attachment' + vars: + base_disk_name: 'my-base-disk' + snapshot_name: 'my-snapshot' + disk_name: 'my-disk' + policy_name: 'my-resource-policy' +parameters: + - name: 'disk' + type: ResourceRef + description: | + The name of the regional disk to which the resource policies are attached. + url_param_only: true + required: true + resource: 'Disk' + imports: 'name' + - name: 'region' + type: ResourceRef + description: 'A reference to the region where the disk resides.' 
+ url_param_only: true + required: false + default_from_api: true + resource: 'Region' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + The resource policy to be attached to the disk for scheduling snapshot + creation. Do not specify the self link. + required: true diff --git a/mmv1/products/compute/go_RegionDiskType.yaml b/mmv1/products/compute/go_RegionDiskType.yaml new file mode 100644 index 000000000000..ea29643f962a --- /dev/null +++ b/mmv1/products/compute/go_RegionDiskType.yaml @@ -0,0 +1,110 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionDiskType' +kind: 'compute#diskType' +description: | + Represents a regional DiskType resource. A DiskType resource represents + the type of disk to use, such as a pd-ssd, pd-balanced or pd-standard. To reference a + disk type, use the disk type's full or partial URL. +exclude: true +readonly: true +docs: +base_url: 'projects/{{project}}/regions/{{region}}/diskTypes' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +collection_url_key: 'items' +custom_code: +parameters: + - name: 'region' + type: ResourceRef + description: 'A reference to the region where the disk type resides.' 
+ required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'defaultDiskSizeGb' + type: Integer + description: 'Server-defined default disk size in GB.' + output: true + - name: 'deprecated' + type: NestedObject + description: 'The deprecation status associated with this disk type.' + output: true + properties: + - name: 'deleted' + type: Time + description: | + An optional RFC3339 timestamp on or after which the deprecation state + of this resource will be changed to DELETED. + output: true + - name: 'deprecated' + type: Time + description: | + An optional RFC3339 timestamp on or after which the deprecation state + of this resource will be changed to DEPRECATED. + output: true + - name: 'obsolete' + type: Time + description: | + An optional RFC3339 timestamp on or after which the deprecation state + of this resource will be changed to OBSOLETE. + output: true + - name: 'replacement' + type: String + description: | + The URL of the suggested replacement for a deprecated resource. The + suggested replacement resource must be the same kind of resource as + the deprecated resource. + output: true + - name: 'state' + type: Enum + description: | + The deprecation state of this resource. This can be DEPRECATED, + OBSOLETE, or DELETED. Operations which create a new resource using a + DEPRECATED resource will return successfully, but with a warning + indicating the deprecated resource and recommending its replacement. + Operations which use OBSOLETE or DELETED resources will be rejected + and result in an error. + output: true + enum_values: + - 'DEPRECATED' + - 'OBSOLETE' + - 'DELETED' + - name: 'description' + type: String + description: 'An optional description of this resource.' 
+ output: true + - name: 'id' + type: Integer + description: 'The unique identifier for the resource.' + output: true + - name: 'name' + type: String + description: 'Name of the resource.' + - name: 'validDiskSize' + type: String + description: | + An optional textual description of the valid disk size, such as + "10GB-10TB". + output: true diff --git a/mmv1/products/compute/go_RegionHealthCheck.yaml b/mmv1/products/compute/go_RegionHealthCheck.yaml new file mode 100644 index 000000000000..3249ecc45d3d --- /dev/null +++ b/mmv1/products/compute/go_RegionHealthCheck.yaml @@ -0,0 +1,857 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionHealthCheck' +kind: 'compute#healthCheck' +description: | + Health Checks determine whether instances are responsive and able to do work. + They are an important part of a comprehensive load balancing configuration, + as they enable monitoring instances behind load balancers. + + Health Checks poll instances at a specified interval. Instances that + do not respond successfully to some number of probes in a row are marked + as unhealthy. No new connections are sent to unhealthy instances, + though existing connections will continue. The health check will + continue to poll unhealthy instances. 
If an instance later responds + successfully to some number of consecutive probes, it is marked + healthy again and can receive new connections. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/health-checks' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionHealthChecks' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/healthChecks' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + encoder: 'templates/terraform/encoders/go/health_check_type.tmpl' +custom_diff: + - 'healthCheckCustomizeDiff' +examples: + - name: 'region_health_check_tcp' + primary_resource_id: 'tcp-region-health-check' + vars: + health_check_name: 'tcp-region-health-check' + - name: 'region_health_check_tcp_full' + primary_resource_id: 'tcp-region-health-check' + vars: + health_check_name: 'tcp-region-health-check' + - name: 'region_health_check_ssl' + primary_resource_id: 'ssl-region-health-check' + vars: + health_check_name: 'ssl-region-health-check' + - name: 'region_health_check_ssl_full' + primary_resource_id: 'ssl-region-health-check' + vars: + health_check_name: 'ssl-region-health-check' + - name: 'region_health_check_http' + primary_resource_id: 'http-region-health-check' + vars: + health_check_name: 'http-region-health-check' + - name: 'region_health_check_http_logs' + primary_resource_id: 'http-region-health-check' + min_version: 'beta' + vars: + health_check_name: 'http-region-health-check' + - name: 'region_health_check_http_full' + primary_resource_id: 'http-region-health-check' + vars: + health_check_name: 'http-region-health-check' + 
- name: 'region_health_check_https' + primary_resource_id: 'https-region-health-check' + vars: + health_check_name: 'https-region-health-check' + - name: 'region_health_check_https_full' + primary_resource_id: 'https-region-health-check' + vars: + health_check_name: 'https-region-health-check' + - name: 'region_health_check_http2' + primary_resource_id: 'http2-region-health-check' + vars: + health_check_name: 'http2-region-health-check' + - name: 'region_health_check_http2_full' + primary_resource_id: 'http2-region-health-check' + vars: + health_check_name: 'http2-region-health-check' + - name: 'region_health_check_grpc' + primary_resource_id: 'grpc-region-health-check' + vars: + health_check_name: 'grpc-region-health-check' + - name: 'region_health_check_grpc_full' + primary_resource_id: 'grpc-region-health-check' + vars: + health_check_name: 'grpc-region-health-check' +parameters: + - name: 'region' + type: ResourceRef + description: | + The Region in which the created health check should reside. + If it is not provided, the provider region is used. + required: false + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'selfLink' +properties: + - name: 'checkIntervalSec' + type: Integer + description: | + How often (in seconds) to send a health check. The default value is 5 + seconds. + default_value: 5 + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + send_empty_value: true + - name: 'healthyThreshold' + type: Integer + description: | + A so-far unhealthy instance will be marked healthy after this many + consecutive successes. 
The default value is 2. + default_value: 2 + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the + last character, which cannot be a dash. + required: true + immutable: true + - name: 'unhealthyThreshold' + type: Integer + description: | + A so-far healthy instance will be marked unhealthy after this many + consecutive failures. The default value is 2. + default_value: 2 + - name: 'timeoutSec' + type: Integer + description: | + How long (in seconds) to wait before claiming failure. + The default value is 5 seconds. It is invalid for timeoutSec to have + greater value than checkIntervalSec. + default_value: 5 + - name: 'type' + type: Enum + description: |- + The type of the health check. One of HTTP, HTTP2, HTTPS, TCP, or SSL. + output: true + enum_values: + - 'TCP' + - 'SSL' + - 'HTTP' + - 'HTTPS' + - 'HTTP2' + - name: 'httpHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'host' + type: String + description: | + The value of the host header in the HTTP health check request. + If left empty (default value), the public IP on behalf of which this health + check is performed will be used. 
+ at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + - name: 'requestPath' + type: String + description: | + The request path of the HTTP health check request. + The default value is /. + at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + default_value: / + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the HTTP health check request. + The default value is 80. + at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. 
+ at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. + at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. + + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, HTTP health check follows behavior specified in `port` and + `portName` fields. 
+ at_least_one_of: + - 'http_health_check.0.host' + - 'http_health_check.0.request_path' + - 'http_health_check.0.response' + - 'http_health_check.0.port' + - 'http_health_check.0.port_name' + - 'http_health_check.0.proxy_header' + - 'http_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'httpsHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'host' + type: String + description: | + The value of the host header in the HTTPS health check request. + If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + - name: 'requestPath' + type: String + description: | + The request path of the HTTPS health check request. + The default value is /. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + default_value: / + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. 
+ at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the HTTPS health check request. + The default value is 443. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. 
+ + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, HTTPS health check follows behavior specified in `port` and + `portName` fields. + at_least_one_of: + - 'https_health_check.0.host' + - 'https_health_check.0.request_path' + - 'https_health_check.0.response' + - 'https_health_check.0.port' + - 'https_health_check.0.port_name' + - 'https_health_check.0.proxy_header' + - 'https_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'tcpHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'request' + type: String + description: | + The application data to send once the TCP connection has been + established (default value is empty). If both request and response are + empty, the connection establishment alone will indicate health. The request + data can only be ASCII. + at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. 
+ at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the TCP health check request. + The default value is 80. + at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. + at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. + + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. 
+ + If not specified, TCP health check follows behavior specified in `port` and + `portName` fields. + at_least_one_of: + - 'tcp_health_check.0.request' + - 'tcp_health_check.0.response' + - 'tcp_health_check.0.port' + - 'tcp_health_check.0.port_name' + - 'tcp_health_check.0.proxy_header' + - 'tcp_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'sslHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'request' + type: String + description: | + The application data to send once the SSL connection has been + established (default value is empty). If both request and response are + empty, the connection establishment alone will indicate health. The request + data can only be ASCII. + at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. + at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the SSL health check request. + The default value is 443. 
+ at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. + at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. + + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, SSL health check follows behavior specified in `port` and + `portName` fields. 
+ at_least_one_of: + - 'ssl_health_check.0.request' + - 'ssl_health_check.0.response' + - 'ssl_health_check.0.port' + - 'ssl_health_check.0.port_name' + - 'ssl_health_check.0.proxy_header' + - 'ssl_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'http2HealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'host' + type: String + description: | + The value of the host header in the HTTP2 health check request. + If left empty (default value), the public IP on behalf of which this health + check is performed will be used. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + - name: 'requestPath' + type: String + description: | + The request path of the HTTP2 health check request. + The default value is /. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + default_value: / + - name: 'response' + type: String + description: | + The bytes to match against the beginning of the response data. If left empty + (the default value), any response will indicate health. The response data + can only be ASCII. 
+ at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + - name: 'port' + type: Integer + description: | + The TCP port number for the HTTP2 health check request. + The default value is 443. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to the + backend. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. + + * `USE_NAMED_PORT`: The `portName` is used for health checking. 
+ + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, HTTP2 health check follows behavior specified in `port` and + `portName` fields. + at_least_one_of: + - 'http2_health_check.0.host' + - 'http2_health_check.0.request_path' + - 'http2_health_check.0.response' + - 'http2_health_check.0.port' + - 'http2_health_check.0.port_name' + - 'http2_health_check.0.proxy_header' + - 'http2_health_check.0.port_specification' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'grpcHealthCheck' + type: NestedObject + exactly_one_of: + - 'http_health_check' + - 'https_health_check' + - 'http2_health_check' + - 'tcp_health_check' + - 'ssl_health_check' + - 'grpc_health_check' + diff_suppress_func: 'portDiffSuppress' + properties: + - name: 'port' + type: Integer + description: | + The port number for the health check request. + Must be specified if portName and portSpecification are not set + or if port_specification is USE_FIXED_PORT. Valid values are 1 through 65535. + at_least_one_of: + - 'grpc_health_check.0.port' + - 'grpc_health_check.0.port_name' + - 'grpc_health_check.0.port_specification' + - 'grpc_health_check.0.grpc_service_name' + - name: 'portName' + type: String + description: | + Port name as defined in InstanceGroup#NamedPort#name. If both port and + port_name are defined, port takes precedence. + at_least_one_of: + - 'grpc_health_check.0.port' + - 'grpc_health_check.0.port_name' + - 'grpc_health_check.0.port_specification' + - 'grpc_health_check.0.grpc_service_name' + - name: 'portSpecification' + type: Enum + description: | + Specifies how port is selected for health checking, can be one of the + following values: + + * `USE_FIXED_PORT`: The port number in `port` is used for health checking. 
+ + * `USE_NAMED_PORT`: The `portName` is used for health checking. + + * `USE_SERVING_PORT`: For NetworkEndpointGroup, the port specified for each + network endpoint is used for health checking. For other backends, the + port or named port specified in the Backend Service is used for health + checking. + + If not specified, gRPC health check follows behavior specified in `port` and + `portName` fields. + at_least_one_of: + - 'grpc_health_check.0.port' + - 'grpc_health_check.0.port_name' + - 'grpc_health_check.0.port_specification' + - 'grpc_health_check.0.grpc_service_name' + enum_values: + - 'USE_FIXED_PORT' + - 'USE_NAMED_PORT' + - 'USE_SERVING_PORT' + - name: 'grpcServiceName' + type: String + description: | + The gRPC service name for the health check. + The value of grpcServiceName has the following meanings by convention: + + * Empty serviceName means the overall status of all services at the backend. + * Non-empty serviceName means the health of that gRPC service, as defined by the owner of the service. + + The grpcServiceName can only be ASCII. + at_least_one_of: + - 'grpc_health_check.0.port' + - 'grpc_health_check.0.port_name' + - 'grpc_health_check.0.port_specification' + - 'grpc_health_check.0.grpc_service_name' + - name: 'logConfig' + type: NestedObject + description: | + Configure logging on this health check. + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/health_check_log_config.go.tmpl' + properties: + - name: 'enable' + type: Boolean + description: | + Indicates whether or not to export logs. This is false by default, + which means no health check logging will be done. + default_value: false diff --git a/mmv1/products/compute/go_RegionInstanceGroupManager.yaml b/mmv1/products/compute/go_RegionInstanceGroupManager.yaml new file mode 100644 index 000000000000..cfc36bb029fb --- /dev/null +++ b/mmv1/products/compute/go_RegionInstanceGroupManager.yaml @@ -0,0 +1,226 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionInstanceGroupManager' +kind: 'compute#instanceGroupManager' +description: | + Creates a managed instance group using the information that you specify in + the request. After the group is created, it schedules an action to create + instances in the group using the specified instance template. This + operation is marked as DONE when the group is created even if the + instances in the group have not yet been created. You must separately + verify the status of the individual instances. + + A managed instance group can have up to 1000 VM instances per group. +exclude: true +docs: +base_url: 'projects/{{project}}/regions/{{region}}/instanceGroupManagers' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +parameters: + - name: 'region' + type: ResourceRef + description: 'The region the managed instance group resides.' 
+ required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'baseInstanceName' + type: String + description: | + The base instance name to use for instances in this group. The value + must be 1-58 characters long. Instances are named by appending a + hyphen and a random four-character string to the base instance name. + The base instance name must comply with RFC1035. + required: true + - name: 'creationTimestamp' + type: Time + description: | + The creation timestamp for this managed instance group in RFC3339 + text format. + output: true + - name: 'currentActions' + type: NestedObject + description: | + The list of instance actions and the number of instances in this + managed instance group that are scheduled for each of those actions. + output: true + properties: + - name: 'abandoning' + type: Integer + description: | + The total number of instances in the managed instance group that + are scheduled to be abandoned. Abandoning an instance removes it + from the managed instance group without deleting it. + output: true + - name: 'creating' + type: Integer + description: | + The number of instances in the managed instance group that are + scheduled to be created or are currently being created. If the + group fails to create any of these instances, it tries again until + it creates the instance successfully. + + If you have disabled creation retries, this field will not be + populated; instead, the creatingWithoutRetries field will be + populated. + output: true + - name: 'creatingWithoutRetries' + type: Integer + description: | + The number of instances that the managed instance group will + attempt to create. The group attempts to create each instance only + once. If the group fails to create any of these instances, it + decreases the group's targetSize value accordingly. 
+        output: true
+      - name: 'deleting'
+        type: Integer
+        description: |
+          The number of instances in the managed instance group that are
+          scheduled to be deleted or are currently being deleted.
+        output: true
+      - name: 'none'
+        type: Integer
+        description: |
+          The number of instances in the managed instance group that are
+          running and have no scheduled actions.
+        output: true
+      - name: 'recreating'
+        type: Integer
+        description: |
+          The number of instances in the managed instance group that are
+          scheduled to be recreated or are currently being recreated.
+          Recreating an instance deletes the existing root persistent disk
+          and creates a new disk from the image that is defined in the
+          instance template.
+        output: true
+      - name: 'refreshing'
+        type: Integer
+        description: |
+          The number of instances in the managed instance group that are
+          being reconfigured with properties that do not require a restart
+          or a recreate action. For example, setting or removing target
+          pools for the instance.
+        output: true
+      - name: 'restarting'
+        type: Integer
+        description: |
+          The number of instances in the managed instance group that are
+          scheduled to be restarted or are currently being restarted.
+        output: true
+  - name: 'description'
+    type: String
+    description: |
+      An optional description of this resource. Provide this property when
+      you create the resource.
+    immutable: true
+  - name: 'id'
+    type: Integer
+    description: 'A unique identifier for this resource'
+    output: true
+  - name: 'instanceGroup'
+    type: ResourceRef
+    description: 'The instance group being managed'
+    output: true
+    custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl'
+    resource: 'InstanceGroup'
+    imports: 'selfLink'
+  - name: 'instanceTemplate'
+    type: ResourceRef
+    description: |
+      The instance template that is specified for this managed instance
+      group. The group uses this template to create all new instances in the
+      managed instance group.
+ required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'InstanceTemplate' + imports: 'selfLink' + - name: 'name' + type: String + description: | + The name of the managed instance group. The name must be 1-63 + characters long, and comply with RFC1035. + required: true + - name: 'namedPorts' + type: Array + description: + Named ports configured for the Instance Groups complementary to this + Instance Group Manager. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The name for this named port. The name must be 1-63 characters + long, and comply with RFC1035. + - name: 'port' + type: Integer + description: + The port number, which can be a value between 1 and 65535. + - name: 'targetPools' + type: Array + description: | + TargetPool resources to which instances in the instanceGroup field are + added. The target pools automatically apply to all of the instances in + the managed instance group. + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'targetPool' + type: ResourceRef + description: 'The targetPool to receive managed instances.' + resource: 'TargetPool' + imports: 'selfLink' + - name: 'targetSize' + type: Integer + description: | + The target number of running instances for this managed instance + group. Deleting or abandoning instances reduces this number. Resizing + the group changes this number. + - name: 'autoHealingPolicies' + type: Array + description: | + The autohealing policy for this managed instance group + item_type: + type: NestedObject + properties: + - name: 'healthCheck' + type: String + description: | + The URL for the health check that signals autohealing. 
+ - name: 'initialDelaySec' + type: Integer + description: | + The number of seconds that the managed instance group waits + before it applies autohealing policies to new instances or recently recreated instances diff --git a/mmv1/products/compute/go_RegionNetworkEndpoint.yaml b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml new file mode 100644 index 000000000000..a98401a6ddf5 --- /dev/null +++ b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml @@ -0,0 +1,148 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionNetworkEndpoint' +kind: 'compute#networkEndpoint' +description: | + A Region network endpoint represents a IP address/FQDN and port combination that is + part of a specific network endpoint group (NEG). + + ~> **NOTE**: Network endpoints cannot be created outside of a network endpoint group. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/' + 'Internet NEGs Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/internet-neg-concepts' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/regionNetworkEndpointGroups' +docs: +id_format: '{{project}}/{{region}}/{{region_network_endpoint_group}}/{{ip_address}}/{{fqdn}}/{{port}}' +base_url: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{region_network_endpoint_group}}' +self_link: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{region_network_endpoint_group}}/listNetworkEndpoints' +create_url: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{region_network_endpoint_group}}/attachNetworkEndpoints' +read_verb: 'POST' +delete_url: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{region_network_endpoint_group}}/detachNetworkEndpoints' +delete_verb: 'POST' +immutable: true +mutex: networkEndpoint/{{project}}/{{region}}/{{region_network_endpoint_group}} +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - ipAddress + - fqdn + - port +nested_query: + keys: + - items + is_list_of_ids: false + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/compute_region_network_endpoint.go.tmpl' + decoder: 'templates/terraform/decoders/go/network_endpoint.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/compute_region_network_endpoint.go.tmpl' +exclude_tgc: true +examples: + - name: 'region_network_endpoint_internet_ip_port' 
+ primary_resource_id: 'region-internet-ip-port-endpoint' + vars: + neg_name: 'ip-port-neg' + network_name: 'network' + - name: 'region_network_endpoint_internet_fqdn_port' + primary_resource_id: 'region-internet-fqdn-port-endpoint' + vars: + neg_name: 'fqdn-port-neg' + network_name: 'network' + - name: 'region_network_endpoint_portmap' + primary_resource_id: 'region_network_endpoint_portmap' + min_version: 'beta' + vars: + network_name: 'network' + subnetwork_name: 'subnetwork' + instance_name: 'instance' + neg_name: 'portmap-neg' + skip_test: true +parameters: + - name: 'region' + type: ResourceRef + description: | + Region where the containing network endpoint group is located. + url_param_only: true + required: false + ignore_read: true + default_from_api: true + resource: 'Region' + imports: 'name' + - name: 'regionNetworkEndpointGroup' + type: ResourceRef + description: | + The network endpoint group this endpoint is part of. + url_param_only: true + required: true + ignore_read: true + diff_suppress_func: 'tpgresource.CompareResourceNames' + resource: 'RegionNetworkEndpointGroup' + imports: 'name' +properties: + - name: 'port' + type: Integer + description: | + Port number of network endpoint. + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/float64_to_int.go.tmpl' + validation: + function: 'validation.IntAtLeast(1)' + - name: 'ipAddress' + type: String + description: | + IPv4 address external endpoint. + + This can only be specified when network_endpoint_type of the NEG is INTERNET_IP_PORT. + - name: 'fqdn' + type: String + description: | + Fully qualified domain name of network endpoint. + + This can only be specified when network_endpoint_type of the NEG is INTERNET_FQDN_PORT. + at_least_one_of: + - 'fqdn' + - 'ip_address' + - name: 'clientDestinationPort' + type: Integer + description: | + Client destination port for the `GCE_VM_IP_PORTMAP` NEG. 
+ min_version: 'beta' + custom_flatten: 'templates/terraform/custom_flatten/go/float64_to_int.go.tmpl' + - name: 'instance' + type: ResourceRef + description: | + The name for a specific VM instance that the IP address belongs to. + This is required for network endpoints of type GCE_VM_IP_PORTMAP. + min_version: 'beta' + resource: 'Instance' + imports: 'name' diff --git a/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml new file mode 100644 index 000000000000..ba12edad1066 --- /dev/null +++ b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml @@ -0,0 +1,320 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionNetworkEndpointGroup' +kind: 'compute#networkEndpointGroup' +description: | + A regional NEG that can support Serverless Products, proxying traffic to + external backends and providing traffic to the PSC port mapping endpoints. + + Recreating a region network endpoint group that's in use by another resource will give a + `resourceInUseByAnotherResource` error. Use `lifecycle.create_before_destroy` + to avoid this type of error. 
+references: + guides: + 'Serverless NEGs Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/serverless-neg-concepts' + 'Internet NEGs Official Documentation': 'https://cloud.google.com/load-balancing/docs/negs/internet-neg-concepts' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/regionNetworkEndpointGroups' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'region_network_endpoint_group_functions' + primary_resource_id: 'function_neg' + vars: + neg_name: 'function-neg' + function_name: 'function-neg' + bucket_name: 'cloudfunctions-function-example-bucket' + zip_path: 'path/to/index.zip' + test_vars_overrides: + 'zip_path': 'acctest.CreateZIPArchiveForCloudFunctionSource(t, "./test-fixtures/http_trigger.js")' + - name: 'region_network_endpoint_group_cloudrun' + primary_resource_id: 'cloudrun_neg' + vars: + neg_name: 'cloudrun-neg' + - name: 'region_network_endpoint_group_appengine' + primary_resource_id: 'appengine_neg' + vars: + neg_name: 'appengine-neg' + - name: 'region_network_endpoint_group_appengine_empty' + primary_resource_id: 'appengine_neg' + vars: + neg_name: 'appengine-neg' + - name: 'region_network_endpoint_group_psc' + primary_resource_id: 'psc_neg' + vars: + neg_name: 'psc-neg' + - name: 'region_network_endpoint_group_psc_service_attachment' + primary_resource_id: 'psc_neg_service_attachment' + vars: + neg_name: 'psc-neg' + network_name: 'psc-network' + subnetwork_name: 'psc-subnetwork' + psc_subnetwork_name: 
'psc-subnetwork-nat' + backend_service_name: 'psc-backend' + forwarding_rule_name: 'psc-forwarding-rule' + service_attachment_name: 'psc-service-attachment' + health_check_name: 'psc-healthcheck' + - name: 'region_network_endpoint_group_internet_ip_port' + primary_resource_id: 'region_network_endpoint_group_internet_ip_port' + vars: + neg_name: 'ip-port-neg' + network_name: 'network' + - name: 'region_network_endpoint_group_internet_fqdn_port' + primary_resource_id: 'region_network_endpoint_group_internet_fqdn_port' + vars: + neg_name: 'ip-port-neg' + network_name: 'network' + - name: 'region_network_endpoint_group_portmap' + primary_resource_id: 'region_network_endpoint_group_portmap' + min_version: 'beta' + vars: + network_name: 'network' + subnetwork_name: 'subnetwork' + neg_name: 'portmap-neg' +parameters: + - name: 'region' + type: ResourceRef + description: | + A reference to the region where the regional NEGs reside. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + Name of the resource; provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + validation: + function: 'verify.ValidateGCEName' + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + - name: 'networkEndpointType' + type: Enum + description: | + Type of network endpoints in this network endpoint group. Defaults to SERVERLESS. 
+ default_value: SERVERLESS + enum_values: + - 'SERVERLESS' + - 'PRIVATE_SERVICE_CONNECT' + - 'INTERNET_IP_PORT' + - 'INTERNET_FQDN_PORT' + - 'GCE_VM_IP_PORTMAP' + - name: 'pscTargetService' + type: String + description: | + This field is only used for PSC and INTERNET NEGs. + + The target service url used to set up private service connection to + a Google API or a PSC Producer Service Attachment. + - name: 'network' + type: ResourceRef + description: | + This field is only used for PSC and INTERNET NEGs. + + The URL of the network to which all network endpoints in the NEG belong. Uses + "default" project network if unspecified. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'subnetwork' + type: ResourceRef + description: | + This field is only used for PSC NEGs. + + Optional URL of the subnetwork to which all network endpoints in the NEG belong. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'cloudRun' + type: NestedObject + description: | + This field is only used for SERVERLESS NEGs. + + Only one of cloud_run, app_engine, cloud_function or serverless_deployment may be set. + conflicts: + - cloud_function + - app_engine + - serverless_deployment + properties: + - name: 'service' + type: String + description: | + Cloud Run service is the main resource of Cloud Run. + The service must be 1-63 characters long, and comply with RFC1035. + Example value: "run-service". + at_least_one_of: + - 'cloud_run.0.service' + - 'cloud_run.0.url_mask' + - name: 'tag' + type: String + description: | + Cloud Run tag represents the "named-revision" to provide + additional fine-grained traffic routing information. + The tag must be 1-63 characters long, and comply with RFC1035. + Example value: "revision-0010". 
+ - name: 'urlMask' + type: String + description: | + A template to parse service and tag fields from a request URL. + URL mask allows for routing to multiple Run services without having + to create multiple network endpoint groups and backend services. + + For example, request URLs "foo1.domain.com/bar1" and "foo1.domain.com/bar2" + an be backed by the same Serverless Network Endpoint Group (NEG) with + URL mask ".domain.com/". The URL mask will parse them to { service="bar1", tag="foo1" } + and { service="bar2", tag="foo2" } respectively. + at_least_one_of: + - 'cloud_run.0.service' + - 'cloud_run.0.url_mask' + - name: 'appEngine' + type: NestedObject + description: | + This field is only used for SERVERLESS NEGs. + + Only one of cloud_run, app_engine, cloud_function or serverless_deployment may be set. + send_empty_value: true + allow_empty_object: true + conflicts: + - cloud_run + - cloud_function + - serverless_deployment + properties: + - name: 'service' + type: String + description: | + Optional serving service. + The service name must be 1-63 characters long, and comply with RFC1035. + Example value: "default", "my-service". + - name: 'version' + type: String + description: | + Optional serving version. + The version must be 1-63 characters long, and comply with RFC1035. + Example value: "v1", "v2". + - name: 'urlMask' + type: String + description: | + A template to parse service and version fields from a request URL. + URL mask allows for routing to multiple App Engine services without + having to create multiple Network Endpoint Groups and backend services. + + For example, the request URLs "foo1-dot-appname.appspot.com/v1" and + "foo1-dot-appname.appspot.com/v2" can be backed by the same Serverless NEG with + URL mask "-dot-appname.appspot.com/". The URL mask will parse + them to { service = "foo1", version = "v1" } and { service = "foo1", version = "v2" } respectively. 
+ - name: 'cloudFunction' + type: NestedObject + description: | + This field is only used for SERVERLESS NEGs. + + Only one of cloud_run, app_engine, cloud_function or serverless_deployment may be set. + conflicts: + - cloud_run + - app_engine + - serverless_deployment + properties: + - name: 'function' + type: String + description: | + A user-defined name of the Cloud Function. + The function name is case-sensitive and must be 1-63 characters long. + Example value: "func1". + at_least_one_of: + - 'cloud_function.0.function' + - 'cloud_function.0.url_mask' + - name: 'urlMask' + type: String + description: | + A template to parse function field from a request URL. URL mask allows + for routing to multiple Cloud Functions without having to create + multiple Network Endpoint Groups and backend services. + + For example, request URLs "mydomain.com/function1" and "mydomain.com/function2" + can be backed by the same Serverless NEG with URL mask "/". The URL mask + will parse them to { function = "function1" } and { function = "function2" } respectively. + at_least_one_of: + - 'cloud_function.0.function' + - 'cloud_function.0.url_mask' + - name: 'serverlessDeployment' + type: NestedObject + description: | + This field is only used for SERVERLESS NEGs. + + Only one of cloudRun, appEngine, cloudFunction or serverlessDeployment may be set. + min_version: 'beta' + send_empty_value: true + allow_empty_object: true + conflicts: + - cloud_run + - cloud_function + - app_engine + properties: + - name: 'platform' + type: String + description: | + The platform of the NEG backend target(s). Possible values: + API Gateway: apigateway.googleapis.com + required: true + - name: 'resource' + type: String + description: | + The user-defined name of the workload/instance. This value must be provided explicitly or in the urlMask. 
+ The resource identified by this value is platform-specific and is as follows: API Gateway: The gateway ID, App Engine: The service name, + Cloud Functions: The function name, Cloud Run: The service name + - name: 'version' + type: String + description: | + The optional resource version. The version identified by this value is platform-specific and is follows: + API Gateway: Unused, App Engine: The service version, Cloud Functions: Unused, Cloud Run: The service tag + - name: 'urlMask' + type: String + description: | + A template to parse platform-specific fields from a request URL. URL mask allows for routing to multiple resources + on the same serverless platform without having to create multiple Network Endpoint Groups and backend resources. + The fields parsed by this template are platform-specific and are as follows: API Gateway: The gateway ID, + App Engine: The service and version, Cloud Functions: The function name, Cloud Run: The service and tag + required: false diff --git a/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml b/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml new file mode 100644 index 000000000000..75b0f374ec67 --- /dev/null +++ b/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml @@ -0,0 +1,87 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionNetworkFirewallPolicy' +description: "The Compute NetworkFirewallPolicy resource" +docs: +base_url: 'projects/{{project}}/regions/{{region}}/firewallPolicies' +self_link: 'projects/{{project}}/regions/{{region}}/firewallPolicies/{{name}}' +create_url: 'projects/{{project}}/regions/{{region}}/firewallPolicies' +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +legacy_long_form_project: true +examples: + - name: 'region_network_firewall_policy_full' + primary_resource_id: 'policy' + vars: + policy_name: 'tf-test-policy' +parameters: + - name: 'region' + type: String + description: The region of this resource. + url_param_only: true + immutable: true + default_from_api: true +properties: + - name: 'creationTimestamp' + type: String + description: Creation timestamp in RFC3339 text format. + output: true + - name: 'name' + type: String + description: User-provided name of the Network firewall policy. The name should be unique in the project in which the firewall policy is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + required: true + immutable: true + - name: 'regionNetworkFirewallPolicyId' + type: String + description: The unique identifier for the resource. This identifier is defined by the server. 
+ api_name: id + output: true + - name: 'description' + type: String + description: An optional description of this resource. Provide this property when you create the resource. + - name: 'fingerprint' + type: Fingerprint + description: Fingerprint of the resource. This field is used internally during updates of this resource. + output: true + - name: 'selfLink' + type: String + description: Server-defined URL for the resource. + output: true + - name: 'selfLinkWithId' + type: String + description: Server-defined URL for this resource with the resource id. + output: true + - name: 'ruleTupleCount' + type: Integer + description: Total count of all firewall policy rule tuples. A firewall policy can not exceed a set number of tuples. + output: true diff --git a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml new file mode 100644 index 000000000000..da471721b90c --- /dev/null +++ b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml @@ -0,0 +1,243 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionPerInstanceConfig' +description: | + A config defined for a single managed instance that belongs to an instance group manager. It preserves the instance name + across instance group manager operations and can define stateful disks or metadata that are unique to the instance. 
+ This resource works with regional instance group managers. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/instance-groups/stateful-migs#per-instance_configs' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/instanceGroupManagers' +docs: +id_format: '{{project}}/{{region}}/{{region_instance_group_manager}}/{{name}}' +base_url: 'projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}' +self_link: 'projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/listPerInstanceConfigs' +create_url: 'projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/createInstances' +update_url: 'projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/updatePerInstanceConfigs' +update_verb: 'POST' +read_verb: 'POST' +delete_url: 'projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/deletePerInstanceConfigs' +delete_verb: 'POST' +mutex: instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}} +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +identity: + - name +nested_query: + keys: + - items + is_list_of_ids: false + modify_by_patch: false +custom_code: + encoder: 'templates/terraform/encoders/go/compute_per_instance_config.go.tmpl' + update_encoder: 'templates/terraform/update_encoder/go/compute_per_instance_config.go.tmpl' + post_update: 'templates/terraform/post_update/go/compute_region_per_instance_config.go.tmpl' + custom_delete: 
'templates/terraform/custom_delete/go/region_per_instance_config.go.tmpl'
+exclude_tgc: true
+examples:
+  - name: 'stateful_rigm'
+    primary_resource_id: 'stateful-instance'
+    vars:
+      template_name: 'my-template'
+      igm_name: 'my-rigm'
+      disk_name: 'my-disk-name'
+    skip_test: true
+virtual_fields:
+  - name: 'minimal_action'
+    description: |
+      The minimal action to perform on the instance during an update.
+      Default is `NONE`. Possible values are:
+      * REPLACE
+      * RESTART
+      * REFRESH
+      * NONE
+    type: Enum
+    default_value: "NONE"
+  - name: 'most_disruptive_allowed_action'
+    description: |
+      The most disruptive action to perform on the instance during an update.
+      Default is `REPLACE`. Possible values are:
+      * REPLACE
+      * RESTART
+      * REFRESH
+      * NONE
+    type: Enum
+    default_value: "REPLACE"
+  - name: 'remove_instance_on_destroy'
+    description: |
+      When true, deleting this config will immediately remove the underlying instance.
+      When false, deleting this config will use the behavior as determined by remove_instance_state_on_destroy.
+    type: Boolean
+    default_value: false
+  - name: 'remove_instance_state_on_destroy'
+    description: |
+      When true, deleting this config will immediately remove any specified state from the underlying instance.
+      When false, deleting this config will *not* immediately remove any state from the underlying instance.
+      State will be removed on the next instance recreation or update.
+    type: Boolean
+    default_value: false
+parameters:
+  - name: 'region'
+    type: ResourceRef
+    description: |
+      Region where the containing instance group manager is located
+    url_param_only: true
+    required: false
+    immutable: true
+    ignore_read: true
+    default_from_api: true
+    resource: 'Region'
+    imports: 'name'
+  - name: 'regionInstanceGroupManager'
+    type: ResourceRef
+    description: |
+      The region instance group manager this instance config is part of.
+ url_param_only: true + required: true + immutable: true + resource: 'RegionInstanceGroupManager' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + The name for this per-instance config and its corresponding instance. + required: true + immutable: true + - name: 'preservedState' + type: NestedObject + description: 'The preserved state for this instance.' + properties: + - name: 'metadata' + type: KeyValuePairs + description: | + Preserved metadata defined for this instance. This is a list of key->value pairs. + - name: 'disk' + type: Array + description: | + Stateful disks for the instance. + api_name: disks + is_set: true + custom_flatten: 'templates/terraform/custom_flatten/go/preserved_state_disks.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/preserved_state_disks.go.tmpl' + item_type: + type: NestedObject + properties: + - name: 'deviceName' + type: String + description: | + A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. + required: true + - name: 'source' + type: String + description: | + The URI of an existing persistent disk to attach under the specified device-name in the format + `projects/project-id/zones/zone/disks/disk-name`. + required: true + - name: 'mode' + type: Enum + description: | + The mode of the disk. + default_value: READ_WRITE + enum_values: + - 'READ_ONLY' + - 'READ_WRITE' + - name: 'deleteRule' + type: Enum + description: | + A value that prescribes what should happen to the stateful disk when the VM instance is deleted. + The available options are `NEVER` and `ON_PERMANENT_INSTANCE_DELETION`. + `NEVER` - detach the disk when the VM is deleted, but do not delete the disk. + `ON_PERMANENT_INSTANCE_DELETION` will delete the stateful disk when the VM is permanently + deleted from the instance group. 
+ default_value: NEVER + enum_values: + - 'NEVER' + - 'ON_PERMANENT_INSTANCE_DELETION' + - name: 'internalIp' + type: Map + description: | + Preserved internal IPs defined for this instance. This map is keyed with the name of the network interface. + api_name: internalIPs + key_name: 'interface_name' + value_type: + type: NestedObject + properties: + - name: 'autoDelete' + type: Enum + description: | + These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. + default_value: NEVER + enum_values: + - 'NEVER' + - 'ON_PERMANENT_INSTANCE_DELETION' + - name: 'ipAddress' + type: NestedObject + description: | + Ip address representation + properties: + - name: 'address' + type: ResourceRef + description: | + The URL of the reservation for this IP address. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Address' + imports: 'selfLink' + - name: 'externalIp' + type: Map + description: | + Preserved external IPs defined for this instance. This map is keyed with the name of the network interface. + api_name: externalIPs + key_name: 'interface_name' + value_type: + type: NestedObject + properties: + - name: 'autoDelete' + type: Enum + description: | + These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. 
+ default_value: NEVER + enum_values: + - 'NEVER' + - 'ON_PERMANENT_INSTANCE_DELETION' + - name: 'ipAddress' + type: NestedObject + description: | + Ip address representation + properties: + - name: 'address' + type: ResourceRef + description: | + The URL of the reservation for this IP address. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Address' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_RegionSecurityPolicy.yaml b/mmv1/products/compute/go_RegionSecurityPolicy.yaml new file mode 100644 index 000000000000..1792c4d2eaeb --- /dev/null +++ b/mmv1/products/compute/go_RegionSecurityPolicy.yaml @@ -0,0 +1,195 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionSecurityPolicy' +description: | + Represents a Region Cloud Armor Security Policy resource. 
+min_version: 'beta' +references: + guides: + 'Official Documentation': 'https://cloud.google.com/armor/docs/security-policy-concepts' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionSecurityPolicies' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/securityPolicies' +self_link: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/regions/{{region}}/securityPolicies/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +examples: + - name: 'region_security_policy_basic' + primary_resource_id: 'region-sec-policy-basic' + vars: + sec_policy_name: 'my-sec-policy-basic' + - name: 'region_security_policy_with_ddos_protection_config' + primary_resource_id: 'region-sec-policy-ddos-protection' + vars: + sec_policy_name: 'my-sec-policy-ddos-protection' + - name: 'region_security_policy_with_user_defined_fields' + primary_resource_id: 'region-sec-policy-user-defined-fields' + vars: + sec_policy_name: 'my-sec-policy-user-defined-fields' +parameters: + - name: 'region' + type: ResourceRef + description: | + The Region in which the created Region Security Policy should reside. + If it is not provided, the provider region is used. + min_version: 'beta' + required: false + immutable: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'policyId' + type: String + description: | + The unique identifier for the resource. This identifier is defined by the server. 
+ api_name: id + min_version: 'beta' + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. + Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. + min_version: 'beta' + required: true + immutable: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you create the resource. + min_version: 'beta' + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. This field is used internally during + updates of this resource. + min_version: 'beta' + output: true + - name: 'type' + type: Enum + description: | + The type indicates the intended use of the security policy. + - CLOUD_ARMOR: Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. + - CLOUD_ARMOR_EDGE: Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). They filter requests before the request is served from Google's cache. + - CLOUD_ARMOR_NETWORK: Cloud Armor network policies can be configured to filter packets targeting network load balancing resources such as backend services, target pools, target instances, and instances with external IPs. They filter requests before the request is served from the application. + This field can be set only at resource creation time. 
+ min_version: 'beta' + immutable: true + enum_values: + - 'CLOUD_ARMOR' + - 'CLOUD_ARMOR_EDGE' + - 'CLOUD_ARMOR_NETWORK' + - name: 'ddosProtectionConfig' + type: NestedObject + description: | + Configuration for Google Cloud Armor DDOS Protection Config. + min_version: 'beta' + properties: + - name: 'ddosProtection' + type: Enum + description: | + Google Cloud Armor offers the following options to help protect systems against DDoS attacks: + - STANDARD: basic always-on protection for network load balancers, protocol forwarding, or VMs with public IP addresses. + - ADVANCED: additional protections for Managed Protection Plus subscribers who use network load balancers, protocol forwarding, or VMs with public IP addresses. + - ADVANCED_PREVIEW: flag to enable the security policy in preview mode. + min_version: 'beta' + required: true + enum_values: + - 'ADVANCED' + - 'ADVANCED_PREVIEW' + - 'STANDARD' + - name: 'selfLink' + type: String + description: | + Server-defined URL for the resource. + min_version: 'beta' + output: true + - name: 'selfLinkWithPolicyId' + type: String + description: | + Server-defined URL for this resource with the resource id. + api_name: selfLinkWithId + min_version: 'beta' + output: true + - name: 'userDefinedFields' + type: Array + description: | + Definitions of user-defined fields for CLOUD_ARMOR_NETWORK policies. + A user-defined field consists of up to 4 bytes extracted from a fixed offset in the packet, relative to the IPv4, IPv6, TCP, or UDP header, with an optional mask to select certain bits. + Rules may then specify matching values for these fields. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The name of this field. Must be unique within the policy. + min_version: 'beta' + - name: 'base' + type: Enum + description: | + The base relative to which 'offset' is measured. Possible values are: + - IPV4: Points to the beginning of the IPv4 header. 
+ - IPV6: Points to the beginning of the IPv6 header. + - TCP: Points to the beginning of the TCP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. + - UDP: Points to the beginning of the UDP header, skipping over any IPv4 options or IPv6 extension headers. Not present for non-first fragments. + min_version: 'beta' + required: true + enum_values: + - 'IPV4' + - 'IPV6' + - 'TCP' + - 'UDP' + - name: 'offset' + type: Integer + description: | + Offset of the first byte of the field (in network byte order) relative to 'base'. + min_version: 'beta' + - name: 'size' + type: Integer + description: | + Size of the field in bytes. Valid values: 1-4. + min_version: 'beta' + - name: 'mask' + type: String + description: | + If specified, apply this mask (bitwise AND) to the field to ignore bits before matching. + Encoded as a hexadecimal number (starting with "0x"). + The last byte of the field (in network byte order) corresponds to the least significant byte of the mask. + min_version: 'beta' diff --git a/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml b/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml new file mode 100644 index 000000000000..87e43e49bed5 --- /dev/null +++ b/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml @@ -0,0 +1,574 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionSecurityPolicyRule' +description: | + A rule for the RegionSecurityPolicy. +min_version: 'beta' +references: + guides: + 'Creating region security policy rules': 'https://cloud.google.com/armor/docs/configure-security-policies' + api: 'https://cloud.google.com/compute/docs/reference/rest/beta/regionSecurityPolicies/addRule' +docs: +id_format: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{security_policy}}/priority/{{priority}}' +base_url: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{security_policy}}' +self_link: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{security_policy}}/getRule?priority={{priority}}' +create_url: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{security_policy}}/addRule?priority={{priority}}' +update_url: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{security_policy}}/patchRule?priority={{priority}}' +update_verb: 'POST' +update_mask: true +delete_url: 'projects/{{project}}/regions/{{region}}/securityPolicies/{{security_policy}}/removeRule?priority={{priority}}' +delete_verb: 'POST' +import_format: + - 'projects/{{project}}/regions/{{region}}/securityPolicies/{{security_policy}}/priority/{{priority}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +examples: + - name: 'region_security_policy_rule_basic' + primary_resource_id: 'policy_rule' + min_version: 'beta' + vars: + sec_policy_name: 'policyruletest' + - name: 'region_security_policy_rule_multiple_rules' + primary_resource_id: 'policy_rule_one' + min_version: 'beta' + vars: + sec_policy_name: 
'policywithmultiplerules' + - name: 'region_security_policy_rule_with_preconfigured_waf_config' + primary_resource_id: 'policy_rule' + min_version: 'beta' + vars: + sec_policy_name: 'policyruletest' + - name: 'region_security_policy_rule_with_network_match' + primary_resource_id: 'policy_rule_network_match' + min_version: 'beta' + vars: + sec_policy_name: 'policyfornetworkmatch' + skip_test: true +parameters: + - name: 'region' + type: String + description: | + The Region in which the created Region Security Policy rule should reside. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'security_policy' + type: String + description: | + The name of the security policy this rule belongs to. + min_version: 'beta' + url_param_only: true + required: true + immutable: true +properties: + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you create the resource. + min_version: 'beta' + - name: 'priority' + type: Integer + description: | + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + min_version: 'beta' + required: true + immutable: true + - name: 'match' + type: NestedObject + description: | + A match condition that incoming traffic is evaluated against. + If it evaluates to true, the corresponding 'action' is enforced. + min_version: 'beta' + properties: + - name: 'versionedExpr' + type: Enum + description: | + Preconfigured versioned expression. If this field is specified, config must also be specified. + Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. 
+ min_version: 'beta' + enum_values: + - 'SRC_IPS_V1' + - name: 'expr' + type: NestedObject + description: | + User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. + min_version: 'beta' + properties: + - name: 'expression' + type: String + description: | + Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported. + min_version: 'beta' + required: true + - name: 'config' + type: NestedObject + description: | + The configuration options available when specifying versionedExpr. + This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. + min_version: 'beta' + properties: + - name: 'srcIpRanges' + type: Array + description: | + CIDR IP address range. Maximum number of srcIpRanges allowed is 10. + min_version: 'beta' + item_type: + type: String + - name: 'preconfiguredWafConfig' + type: NestedObject + description: | + Preconfigured WAF configuration to be applied for the rule. + If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. + min_version: 'beta' + properties: + - name: 'exclusion' + type: Array + description: | + An exclusion to apply during preconfigured WAF evaluation. + api_name: exclusions + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'targetRuleSet' + type: String + description: | + Target WAF rule set to apply the preconfigured WAF exclusion. + min_version: 'beta' + required: true + - name: 'targetRuleIds' + type: Array + description: | + A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. + If omitted, it refers to all the rule IDs under the WAF rule set. 
+ min_version: 'beta' + item_type: + type: String + - name: 'requestHeader' + type: Array + description: | + Request header whose value will be excluded from inspection during preconfigured WAF evaluation. + api_name: requestHeadersToExclude + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'operator' + type: Enum + description: | + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. + api_name: op + min_version: 'beta' + required: true + enum_values: + - 'CONTAINS' + - 'ENDS_WITH' + - 'EQUALS' + - 'EQUALS_ANY' + - 'STARTS_WITH' + - name: 'value' + type: String + description: | + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + api_name: val + min_version: 'beta' + - name: 'requestCookie' + type: Array + description: | + Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. + api_name: requestCookiesToExclude + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'operator' + type: Enum + description: | + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. 
+ ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. + api_name: op + min_version: 'beta' + required: true + enum_values: + - 'CONTAINS' + - 'ENDS_WITH' + - 'EQUALS' + - 'EQUALS_ANY' + - 'STARTS_WITH' + - name: 'value' + type: String + description: | + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + api_name: val + min_version: 'beta' + - name: 'requestUri' + type: Array + description: | + Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. + When specifying this field, the query or fragment part should be excluded. + api_name: requestUrisToExclude + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'operator' + type: Enum + description: | + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. + api_name: op + min_version: 'beta' + required: true + enum_values: + - 'CONTAINS' + - 'ENDS_WITH' + - 'EQUALS' + - 'EQUALS_ANY' + - 'STARTS_WITH' + - name: 'value' + type: String + description: | + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. 
+ The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + api_name: val + min_version: 'beta' + - name: 'requestQueryParam' + type: Array + description: | + Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. + Note that the parameter can be in the query string or in the POST body. + api_name: requestQueryParamsToExclude + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'operator' + type: Enum + description: | + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. + api_name: op + min_version: 'beta' + required: true + enum_values: + - 'CONTAINS' + - 'ENDS_WITH' + - 'EQUALS' + - 'EQUALS_ANY' + - 'STARTS_WITH' + - name: 'value' + type: String + description: | + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + api_name: val + min_version: 'beta' + - name: 'action' + type: String + description: | + The Action to perform when the rule is matched. The following are the valid actions: + + * allow: allow access to target. + + * deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for STATUS are 403, 404, and 502. 
+ + * rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rateLimitOptions to be set. + + * redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. + + * throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rateLimitOptions to be set for this. + min_version: 'beta' + required: true + - name: 'rateLimitOptions' + type: NestedObject + description: | + Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. + min_version: 'beta' + update_mask_fields: + - 'rateLimitOptions.rateLimitThreshold' + - 'rateLimitOptions.conformAction' + - 'rateLimitOptions.exceedAction' + - 'rateLimitOptions.enforceOnKey' + - 'rateLimitOptions.enforceOnKeyName' + - 'rateLimitOptions.enforceOnKeyConfigs' + - 'rateLimitOptions.banThreshold' + - 'rateLimitOptions.banDurationSec' + properties: + - name: 'rateLimitThreshold' + type: NestedObject + description: | + Threshold at which to begin ratelimiting. + min_version: 'beta' + properties: + - name: 'count' + type: Integer + description: | + Number of HTTP(S) requests for calculating the threshold. + min_version: 'beta' + - name: 'intervalSec' + type: Integer + description: | + Interval over which the threshold is computed. + min_version: 'beta' + - name: 'conformAction' + type: String + description: | + Action to take for requests that are under the configured rate limit threshold. + Valid option is "allow" only. 
+ min_version: 'beta' + - name: 'exceedAction' + type: String + description: | + Action to take for requests that are above the configured rate limit threshold, to deny with a specified HTTP response code. + Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502. + min_version: 'beta' + - name: 'enforceOnKey' + type: Enum + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: + * ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. + * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. + * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. + * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. + * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. + * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. + * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. + * REGION_CODE: The country/region from which the request originates. + * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. 
+ * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. + min_version: 'beta' + enum_values: + - 'ALL' + - 'IP' + - 'HTTP_HEADER' + - 'XFF_IP' + - 'HTTP_COOKIE' + - 'HTTP_PATH' + - 'SNI' + - 'REGION_CODE' + - 'TLS_JA3_FINGERPRINT' + - 'USER_IP' + - name: 'enforceOnKeyName' + type: String + description: | + Rate limit key name applicable only for the following key types: + HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. + HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + min_version: 'beta' + - name: 'enforceOnKeyConfigs' + type: Array + description: | + If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. + You can specify up to 3 enforceOnKeyConfigs. + If enforceOnKeyConfigs is specified, enforceOnKey must not be specified. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'enforceOnKeyType' + type: Enum + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: + * ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKeyConfigs" is not configured. + * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. + * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. + * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. 
If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. + * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. + * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. + * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. + * REGION_CODE: The country/region from which the request originates. + * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. + * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. + min_version: 'beta' + enum_values: + - 'ALL' + - 'IP' + - 'HTTP_HEADER' + - 'XFF_IP' + - 'HTTP_COOKIE' + - 'HTTP_PATH' + - 'SNI' + - 'REGION_CODE' + - 'TLS_JA3_FINGERPRINT' + - 'USER_IP' + - name: 'enforceOnKeyName' + type: String + description: | + Rate limit key name applicable only for the following key types: + HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. + HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + min_version: 'beta' + - name: 'banThreshold' + type: NestedObject + description: | + Can only be specified if the action for the rule is "rate_based_ban". + If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. 
+ min_version: 'beta' + properties: + - name: 'count' + type: Integer + description: | + Number of HTTP(S) requests for calculating the threshold. + min_version: 'beta' + - name: 'intervalSec' + type: Integer + description: | + Interval over which the threshold is computed. + min_version: 'beta' + - name: 'banDurationSec' + type: Integer + description: | + Can only be specified if the action for the rule is "rate_based_ban". + If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. + min_version: 'beta' + - name: 'preview' + type: Boolean + description: | + If set to true, the specified action is not enforced. + min_version: 'beta' + - name: 'networkMatch' + type: NestedObject + description: | + A match condition that incoming packets are evaluated against for CLOUD_ARMOR_NETWORK security policies. If it matches, the corresponding 'action' is enforced. + The match criteria for a rule consists of built-in match fields (like 'srcIpRanges') and potentially multiple user-defined match fields ('userDefinedFields'). + Field values may be extracted directly from the packet or derived from it (e.g. 'srcRegionCodes'). Some fields may not be present in every packet (e.g. 'srcPorts'). A user-defined field is only present if the base header is found in the packet and the entire field is in bounds. + Each match field may specify which values can match it, listing one or more ranges, prefixes, or exact values that are considered a match for the field. A field value must be present in order to match a specified match field. If no match values are specified for a match field, then any field value is considered to match it, and it's not required to be present. For strings specifying '*' is also equivalent to match all. + For a packet to match a rule, all specified match fields must match the corresponding field values derived from the packet. 
+ Example: + networkMatch: srcIpRanges: - "192.0.2.0/24" - "198.51.100.0/24" userDefinedFields: - name: "ipv4_fragment_offset" values: - "1-0x1fff" + The above match condition matches packets with a source IP in 192.0.2.0/24 or 198.51.100.0/24 and a user-defined field named "ipv4_fragment_offset" with a value between 1 and 0x1fff inclusive + min_version: 'beta' + update_mask_fields: + - 'network_match.userDefinedFields' + - 'network_match.srcIpRanges' + - 'network_match.destIpRanges' + - 'network_match.ipProtocols' + - 'network_match.srcPorts' + - 'network_match.destPorts' + - 'network_match.srcRegionCodes' + - 'network_match.srcAsns' + properties: + - name: 'userDefinedFields' + type: Array + description: | + User-defined fields. Each element names a defined field and lists the matching values for that field. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + Name of the user-defined field, as given in the definition. + min_version: 'beta' + - name: 'values' + type: Array + description: | + Matching values of the field. Each element can be a 32-bit unsigned decimal or hexadecimal (starting with "0x") number (e.g. "64") or range (e.g. "0x400-0x7ff"). + min_version: 'beta' + item_type: + type: String + - name: 'srcIpRanges' + type: Array + description: | + Source IPv4/IPv6 addresses or CIDR prefixes, in standard text format. + min_version: 'beta' + item_type: + type: String + - name: 'destIpRanges' + type: Array + description: | + Destination IPv4/IPv6 addresses or CIDR prefixes, in standard text format. + min_version: 'beta' + item_type: + type: String + - name: 'ipProtocols' + type: Array + description: | + IPv4 protocol / IPv6 next header (after extension headers). Each element can be an 8-bit unsigned decimal number (e.g. "6"), range (e.g. "253-254"), or one of the following protocol names: "tcp", "udp", "icmp", "esp", "ah", "ipip", or "sctp". 
+ min_version: 'beta' + item_type: + type: String + - name: 'srcPorts' + type: Array + description: | + Source port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). + min_version: 'beta' + item_type: + type: String + - name: 'destPorts' + type: Array + description: | + Destination port numbers for TCP/UDP/SCTP. Each element can be a 16-bit unsigned decimal number (e.g. "80") or range (e.g. "0-1023"). + min_version: 'beta' + item_type: + type: String + - name: 'srcRegionCodes' + type: Array + description: | + Two-letter ISO 3166-1 alpha-2 country code associated with the source IP address. + min_version: 'beta' + item_type: + type: String + - name: 'srcAsns' + type: Array + description: | + BGP Autonomous System Number associated with the source IP address. + min_version: 'beta' + item_type: + type: Integer diff --git a/mmv1/products/compute/go_RegionSslCertificate.yaml b/mmv1/products/compute/go_RegionSslCertificate.yaml new file mode 100644 index 000000000000..21f77cac5d19 --- /dev/null +++ b/mmv1/products/compute/go_RegionSslCertificate.yaml @@ -0,0 +1,136 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionSslCertificate' +kind: 'compute#sslCertificate' +description: | + A RegionSslCertificate resource, used for HTTPS load balancing. 
This resource + provides a mechanism to upload an SSL key and certificate to + the load balancer to serve secure connections from the user. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/ssl-certificates' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionSslCertificates' +docs: + optional_properties: '* `name_prefix` - (Optional) Creates a unique name beginning with the + specified prefix. Conflicts with `name`. +' +base_url: 'projects/{{project}}/regions/{{region}}/sslCertificates' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl' +examples: + - name: 'region_ssl_certificate_basic' + primary_resource_id: 'default' + ignore_read_extra: + - 'name_prefix' + skip_vcr: true + - name: 'region_ssl_certificate_random_provider' + primary_resource_id: 'default' + external_providers: ["random", "time"] + skip_vcr: true + - name: 'region_ssl_certificate_target_https_proxies' + primary_resource_id: 'default' + vars: + region_target_https_proxy_name: 'test-proxy' + region_url_map_name: 'url-map' + region_backend_service_name: 'backend-service' + region_health_check_name: 'http-health-check' + ignore_read_extra: + - 'name_prefix' + skip_vcr: true +parameters: + - name: 'region' + type: ResourceRef + description: | + The Region in which the created regional ssl certificate should reside. + If it is not provided, the provider region is used. 
+ required: false + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'certificate' + type: String + description: | + The certificate in PEM format. + The certificate chain must be no greater than 5 certs long. + The chain must include at least one intermediate cert. + required: true + sensitive: true + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + - name: 'expireTime' + type: String + description: 'Expire time of the certificate in RFC3339 text format.' + output: true + - name: 'certificate_id' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + + + These are in the same namespace as the managed SSL certificates. + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl' + validation: + function: 'verify.ValidateGCEName' + - name: 'privateKey' + type: String + description: 'The write-only private key in PEM format.' 
+ required: true + immutable: true + ignore_read: true + sensitive: true + diff_suppress_func: 'tpgresource.Sha256DiffSuppress' + custom_flatten: 'templates/terraform/custom_flatten/go/sha256.tmpl' diff --git a/mmv1/products/compute/go_RegionSslPolicy.yaml b/mmv1/products/compute/go_RegionSslPolicy.yaml new file mode 100644 index 000000000000..4a4bab982358 --- /dev/null +++ b/mmv1/products/compute/go_RegionSslPolicy.yaml @@ -0,0 +1,137 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionSslPolicy' +kind: 'compute#sslPolicy' +description: | + Represents a Regional SSL policy. SSL policies give you the ability to control the + features of SSL that your SSL proxy or HTTPS load balancer negotiates. 
+references: + guides: + 'Using SSL Policies': 'https://cloud.google.com/compute/docs/load-balancing/ssl-policies' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionSslPolicies' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/sslPolicies' +has_self_link: true +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + constants: 'templates/terraform/constants/go/region_ssl_policy.tmpl' +custom_diff: + - 'regionSslPolicyCustomizeDiff' +parameters: + - name: 'region' + type: ResourceRef + description: | + The region where the regional SSL policy resides. + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + immutable: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. 
+ required: true + immutable: true + - name: 'profile' + type: Enum + description: | + Profile specifies the set of SSL features that can be used by the + load balancer when negotiating SSL with clients. If using `CUSTOM`, + the set of SSL features to enable must be specified in the + `customFeatures` field. + + See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) + for information on what cipher suites each profile provides. If + `CUSTOM` is used, the `custom_features` attribute **must be set**. + default_value: COMPATIBLE + enum_values: + - 'COMPATIBLE' + - 'MODERN' + - 'RESTRICTED' + - 'CUSTOM' + - name: 'minTlsVersion' + type: Enum + description: | + The minimum version of SSL protocol that can be used by the clients + to establish a connection with the load balancer. + default_value: TLS_1_0 + enum_values: + - 'TLS_1_0' + - 'TLS_1_1' + - 'TLS_1_2' + - name: 'enabledFeatures' + type: Array + description: 'The list of features enabled in the SSL policy.' + is_set: true + output: true + item_type: + type: String + - name: 'customFeatures' + type: Array + description: | + A list of features enabled when the selected profile is CUSTOM. The + method returns the set of features that can be specified in this + list. This field must be empty if the profile is not CUSTOM. + + See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) + for which ciphers are available to use. **Note**: this argument + *must* be present when using the `CUSTOM` profile. This argument + *must not* be present when using any other profile. + is_set: true + send_empty_value: true + item_type: + type: String + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. A hash of the contents stored in this + object. This field is used in optimistic locking. 
+ output: true diff --git a/mmv1/products/compute/go_RegionTargetHttpProxy.yaml b/mmv1/products/compute/go_RegionTargetHttpProxy.yaml new file mode 100644 index 000000000000..4bf7dd643dbe --- /dev/null +++ b/mmv1/products/compute/go_RegionTargetHttpProxy.yaml @@ -0,0 +1,106 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionTargetHttpProxy' +description: | + Represents a RegionTargetHttpProxy resource, which is used by one or more + forwarding rules to route incoming HTTP requests to a URL map. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/load-balancing/http/target-proxies' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionTargetHttpProxies' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/targetHttpProxies' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +examples: + - name: 'region_target_http_proxy_basic' + primary_resource_id: 'default' + vars: + region_target_http_proxy_name: 'test-proxy' + region_url_map_name: 'url-map' + region_backend_service_name: 'backend-service' + region_health_check_name: 'http-health-check' + - name: 'region_target_http_proxy_https_redirect' + primary_resource_id: 'default' + vars: + region_target_http_proxy_name: 'test-https-redirect-proxy' + region_url_map_name: 'url-map' +parameters: + - name: 'region' + type: ResourceRef + description: | + The Region in which the created target https proxy should reside. + If it is not provided, the provider region is used. + required: false + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + - name: 'proxyId' + type: Integer + description: 'The unique identifier for the resource.' 
+ api_name: id + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + - name: 'urlMap' + type: ResourceRef + description: | + A reference to the RegionUrlMap resource that defines the mapping from URL + to the BackendService. + required: true + update_url: 'projects/{{project}}/regions/{{region}}/targetHttpProxies/{{name}}/setUrlMap' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionUrlMap' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml b/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml new file mode 100644 index 000000000000..f54c29bae5f7 --- /dev/null +++ b/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml @@ -0,0 +1,183 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionTargetHttpsProxy' +description: | + Represents a RegionTargetHttpsProxy resource, which is used by one or more + forwarding rules to route incoming HTTPS requests to a URL map. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/load-balancing/http/target-proxies' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionTargetHttpsProxies' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/targetHttpsProxies' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: + encoder: 'templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl' + update_encoder: 'templates/terraform/encoders/go/compute_region_target_https_proxy.go.tmpl' + decoder: 'templates/terraform/decoders/go/compute_region_target_https_proxy.go.tmpl' +examples: + - name: 'region_target_https_proxy_basic' + primary_resource_id: 'default' + vars: + region_target_https_proxy_name: 'test-proxy' + region_ssl_certificate_name: 'my-certificate' + region_url_map_name: 'url-map' + region_backend_service_name: 'backend-service' + region_health_check_name: 'http-health-check' + - name: 'region_target_https_proxy_mtls' + primary_resource_id: 'default' + min_version: 'beta' + vars: + target_https_proxy_name: 'test-mtls-proxy' + ssl_certificate_name: 'my-certificate' + url_map_name: 'url-map' + backend_service_name: 'backend-service' + http_health_check_name: 'http-health-check' + server_tls_policy_name: 'my-tls-policy' + trust_config_name: 'my-trust-config' + - name: 
'region_target_https_proxy_certificate_manager_certificate' + primary_resource_id: 'default' + vars: + region_target_https_proxy_name: 'target-http-proxy' + certificate_manager_certificate_name: 'my-certificate' + region_url_map_name: 'url-map' + region_backend_service_name: 'backend-service' +parameters: + - name: 'region' + type: ResourceRef + description: | + The Region in which the created target https proxy should reside. + If it is not provided, the provider region is used. + required: false + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + immutable: true + - name: 'proxyId' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'certificateManagerCertificates' + type: Array + description: | + URLs to certificate manager certificate resources that are used to authenticate connections between users and the load balancer. + sslCertificates and certificateManagerCertificates can't be defined together. 
+ Accepted format is `//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificates/{resourceName}` or just the self_link `projects/{project}/locations/{location}/certificates/{resourceName}` + update_url: 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setSslCertificates' + update_verb: 'POST' + conflicts: + - ssl_certificates + diff_suppress_func: 'tpgresource.CompareResourceNames' + custom_expand: 'templates/terraform/custom_expand/go/certificate_manager_certificate_construct_full_url.go.tmpl' + item_type: + type: String + - name: 'sslCertificates' + type: Array + description: | + URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. + At least one SSL certificate must be specified. Currently, you may specify up to 15 SSL certificates. + sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. + update_url: 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setSslCertificates' + update_verb: 'POST' + conflicts: + - certificate_manager_certificates + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'sslCertificate' + type: ResourceRef + description: 'The SSL certificates used by this TargetHttpsProxy' + resource: 'RegionSslCertificate' + imports: 'selfLink' + - name: 'sslPolicy' + type: ResourceRef + description: | + A reference to the Region SslPolicy resource that will be associated with + the TargetHttpsProxy resource. If not set, the TargetHttpsProxy + resource will not have any SSL policy configured. 
+ update_url: 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}' + update_verb: 'PATCH' + update_id: 'sslPolicy' + fingerprint_name: 'fingerprint' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionSslPolicy' + imports: 'selfLink' + - name: 'urlMap' + type: ResourceRef + description: | + A reference to the RegionUrlMap resource that defines the mapping from URL + to the RegionBackendService. + required: true + update_url: 'projects/{{project}}/regions/{{region}}/targetHttpsProxies/{{name}}/setUrlMap' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionUrlMap' + imports: 'selfLink' + - name: 'serverTlsPolicy' + type: ResourceRef + description: | + A URL referring to a networksecurity.ServerTlsPolicy + resource that describes how the proxy should authenticate inbound + traffic. serverTlsPolicy only applies to a global TargetHttpsProxy + attached to globalForwardingRules with the loadBalancingScheme + set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. + For details which ServerTlsPolicy resources are accepted with + INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED + loadBalancingScheme consult ServerTlsPolicy documentation. + If left blank, communications are not encrypted. + resource: 'SslPolicy' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_RegionTargetTcpProxy.yaml b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml new file mode 100644 index 000000000000..81901cf43476 --- /dev/null +++ b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml @@ -0,0 +1,116 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionTargetTcpProxy' +description: | + Represents a RegionTargetTcpProxy resource, which is used by one or more + forwarding rules to route incoming TCP requests to a regional TCP proxy load + balancer. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/tcp/internal-proxy' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionTargetTcpProxies' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/targetTcpProxies' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +examples: + - name: 'region_target_tcp_proxy_basic' + primary_resource_id: 'default' + vars: + region_target_tcp_proxy_name: 'test-proxy' + region_backend_service_name: 'backend-service' + health_check_name: 'health-check' +parameters: + - name: 'region' + type: ResourceRef + description: | + The Region in which the created target TCP proxy should reside. + If it is not provided, the provider region is used. 
+ required: false + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + immutable: true + - name: 'proxyId' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to + the backend. + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'backendService' + type: ResourceRef + description: | + A reference to the BackendService resource. + api_name: service + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'proxyBind' + type: Boolean + description: | + This field only applies when the forwarding rule that references + this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. 
+ default_from_api: true diff --git a/mmv1/products/compute/go_RegionUrlMap.yaml b/mmv1/products/compute/go_RegionUrlMap.yaml new file mode 100644 index 000000000000..f1f796d6c24c --- /dev/null +++ b/mmv1/products/compute/go_RegionUrlMap.yaml @@ -0,0 +1,2073 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RegionUrlMap' +kind: 'compute#urlMap' +description: | + UrlMaps are used to route requests to a backend service based on rules + that you define for the host and path of an incoming URL. 
+docs: +base_url: 'projects/{{project}}/regions/{{region}}/urlMaps' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'region_url_map_basic' + primary_resource_id: 'regionurlmap' + vars: + region_url_map_name: 'regionurlmap' + login_region_backend_service_name: 'login' + home_region_backend_service_name: 'home' + region_health_check_name: 'health-check' + - name: 'region_url_map_default_route_action' + primary_resource_id: 'regionurlmap' + vars: + region_url_map_name: 'regionurlmap' + login_region_backend_service_name: 'login' + home_region_backend_service_name: 'home' + region_health_check_name: 'health-check' + - name: 'region_url_map_l7_ilb_path' + primary_resource_id: 'regionurlmap' + vars: + region_url_map_name: 'regionurlmap' + home_region_backend_service_name: 'home' + region_health_check_name: 'health-check' + - name: 'region_url_map_l7_ilb_path_partial' + primary_resource_id: 'regionurlmap' + vars: + region_url_map_name: 'regionurlmap' + home_region_backend_service_name: 'home' + region_health_check_name: 'health-check' + - name: 'region_url_map_l7_ilb_route' + primary_resource_id: 'regionurlmap' + vars: + region_url_map_name: 'regionurlmap' + home_region_backend_service_name: 'home' + region_health_check_name: 'health-check' + - name: 'region_url_map_l7_ilb_route_partial' + primary_resource_id: 'regionurlmap' + vars: + region_url_map_name: 'regionurlmap' + home_region_backend_service_name: 'home' + region_health_check_name: 'health-check' + - name: 'int_https_lb_https_redirect' + primary_resource_id: 'redirect' + min_version: 'beta' + vars: + l7_ilb_network: 
'l7-ilb-network' + l7_ilb_proxy_subnet: 'l7-ilb-proxy-subnet' + l7_ilb_subnet: 'l7-ilb-subnet' + l7_ilb_ip: 'l7-ilb-ip' + l7_ilb_forwarding_rule: 'l7-ilb-forwarding-rule' + l7_ilb_target_https_proxy: 'l7-ilb-target-https-proxy' + l7_ilb_regional_url_map: 'l7-ilb-regional-url-map' + l7_ilb_backend_service: 'l7-ilb-backend-service' + l7_ilb_mig_template: 'l7-ilb-mig-template' + l7_ilb_hc: 'l7-ilb-hc' + l7_ilb_mig1: 'l7-ilb-mig1' + l7_ilb_fw_allow_hc: 'l7-ilb-fw-allow-hc' + l7_ilb_fw_allow_ilb_to_backends: 'l7-ilb-fw-allow-ilb-to-backends' + l7_ilb_test_vm: 'l7-ilb-test-vm' + l7_ilb_redirect: 'l7-ilb-redirect' + l7_ilb_target_http_proxy: 'l7-ilb-target-http-proxy' + l7_ilb_redirect_url_map: 'l7-ilb-redirect-url-map' + ignore_read_extra: + - 'target' + - 'ip_address' + skip_test: true + - name: 'regional_external_http_load_balancer' + primary_resource_id: 'default' + min_version: 'beta' + vars: + lb_network: 'lb-network' + backend_subnet: 'backend-subnet' + proxy_only_subnet: 'proxy-only-subnet' + fw_allow_health_check: 'fw-allow-health-check' + fw_allow_proxies: 'fw-allow-proxies' + l7_xlb_backend_template: 'l7-xlb-backend-template' + l7_xlb_backend_example: 'l7-xlb-backend-example' + address_name: 'address-name' + l7_xlb_basic_check: 'l7-xlb-basic-check' + l7_xlb_backend_service: 'l7-xlb-backend-service' + regional_l7_xlb_map: 'regional-l7-xlb-map' + l7_xlb_proxy: 'l7-xlb-proxy' + l7_xlb_forwarding_rule: 'l7-xlb-forwarding-rule' + skip_test: true + skip_docs: true + - name: 'region_url_map_path_template_match' + primary_resource_id: 'urlmap' + vars: + url_map_name: 'urlmap' + home_backend_service_name: 'home-service' + cart_backend_service_name: 'cart-service' + user_backend_service_name: 'user-service' + health_check_name: 'health-check' +parameters: + - name: 'region' + type: ResourceRef + description: | + The Region in which the url map should reside. + If it is not provided, the provider region is used. 
+ required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'defaultService' + type: ResourceRef + description: | + The full or partial URL of the defaultService resource to which traffic is directed if + none of the hostRules match. If defaultRouteAction is additionally specified, advanced + routing actions like URL Rewrites, etc. take effect prior to sending the request to the + backend. However, if defaultService is specified, defaultRouteAction cannot contain any + weightedBackendServices. Conversely, if routeAction specifies any + weightedBackendServices, service must not be specified. Only one of defaultService, + defaultUrlRedirect or defaultRouteAction.weightedBackendService must be set. + exactly_one_of: + - 'default_service' + - 'default_url_redirect' + - 'default_route_action.0.weighted_backend_services' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. + # 'fingerprint' used internally for object consistency. + - name: 'host_rule' + type: Array + description: 'The list of HostRules to use against the URL.' + api_name: hostRules + is_set: true + item_type: + type: NestedObject + properties: + - name: 'description' + type: String + description: | + An optional description of this HostRule. Provide this property + when you create the resource. + - name: 'hosts' + type: Array + description: | + The list of host patterns to match. 
They must be valid + hostnames, except * will match any string of ([a-z0-9-.]*). In + that case, * must be the first character and must be followed in + the pattern by either - or .. + is_set: true + required: true + item_type: + type: String + - name: 'pathMatcher' + type: String + description: | + The name of the PathMatcher to use to match the path portion of + the URL if the hostRule matches the URL's host portion. + required: true + - name: 'map_id' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. This field is used internally during + updates of this resource. + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'path_matcher' + type: Array + description: 'The list of named PathMatchers to use against the URL.' + api_name: pathMatchers + item_type: + type: NestedObject + properties: + - name: 'defaultService' + type: ResourceRef + description: | + A reference to a RegionBackendService resource. This will be used if + none of the pathRules defined by this PathMatcher is matched by + the URL's path portion. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'description' + type: String + description: 'An optional description of this resource.' 
+ - name: 'name' + type: String + description: | + The name to which this PathMatcher is referred by the HostRule. + required: true + - name: 'routeRules' + type: Array + description: | + The list of ordered HTTP route rules. Use this list instead of pathRules when + advanced route matching and routing actions are desired. The order of specifying + routeRules matters: the first rule that matches will cause its specified routing + action to take effect. Within a given pathMatcher, only one of pathRules or + routeRules must be set. routeRules are not supported in UrlMaps intended for + External load balancers. + item_type: + type: NestedObject + properties: + - name: 'priority' + type: Integer + description: | + For routeRules within a given pathMatcher, priority determines the order + in which load balancer will interpret routeRules. RouteRules are evaluated + in order of priority, from the lowest to highest number. The priority of + a rule decreases as its number increases (1, 2, 3, N+1). The first rule + that matches the request is applied. + + You cannot configure two or more routeRules with the same priority. + Priority for each rule must be set to a number between 0 and + 2147483647 inclusive. + + Priority numbers can have gaps, which enable you to add or remove rules + in the future without affecting the rest of the rules. For example, + 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which + you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the + future without any impact on existing rules. + required: true + - name: 'service' + type: ResourceRef + description: | + The region backend service resource to which traffic is + directed if this rule is matched. If routeAction is additionally specified, + advanced routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction cannot + contain any weightedBackendService s. 
Conversely, if routeAction specifies any + weightedBackendServices, service must not be specified. Only one of urlRedirect, + service or routeAction.weightedBackendService must be set. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. The headerAction specified here are applied before + the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].r + outeAction.weightedBackendService.backendServiceWeightAction[].headerAction + properties: + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the + backendService. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. 
+ required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + item_type: + type: String + - name: 'matchRules' + type: Array + description: | + The rules for determining a match. + item_type: + type: NestedObject + properties: + - name: 'fullPathMatch' + type: String + description: | + For satisfying the matchRule condition, the path of the request must exactly + match the value specified in fullPathMatch after removing any query parameters + and anchor that may be part of the original URL. FullPathMatch must be between 1 + and 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must + be specified. + - name: 'headerMatches' + type: Array + description: | + Specifies a list of header match criteria, all of which must match corresponding + headers in the request. + item_type: + type: NestedObject + properties: + - name: 'exactMatch' + type: String + description: | + The value should exactly match contents of exactMatch. Only one of exactMatch, + prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. + - name: 'headerName' + type: String + description: | + The name of the HTTP header to match. For matching against the HTTP request's + authority, use a headerMatch with the header name ":authority". For matching a + request's method, use the headerName ":method". 
+ required: true + - name: 'invertMatch' + type: Boolean + description: | + If set to false, the headerMatch is considered a match if the match criteria + above are met. If set to true, the headerMatch is considered a match if the + match criteria above are NOT met. Defaults to false. + default_value: false + - name: 'prefixMatch' + type: String + description: | + The value of the header must start with the contents of prefixMatch. Only one of + exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch + must be set. + - name: 'presentMatch' + type: Boolean + description: | + A header with the contents of headerName must exist. The match takes place + whether or not the request's header has a value or not. Only one of exactMatch, + prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. + - name: 'rangeMatch' + type: NestedObject + description: | + The header value must be an integer and its value must be in the range specified + in rangeMatch. If the header does not contain an integer, number or is empty, + the match fails. For example for a range [-5, 0] + + * -3 will match + * 0 will not match + * 0.25 will not match + * -3someString will not match. + + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or + rangeMatch must be set. + properties: + - name: 'rangeEnd' + type: Integer + description: | + The end of the range (exclusive). + required: true + - name: 'rangeStart' + type: Integer + description: | + The start of the range (inclusive). + required: true + - name: 'regexMatch' + type: String + description: | + The value of the header must match the regular expression specified in + regexMatch. For regular expression grammar, please see: + en.cppreference.com/w/cpp/regex/ecmascript For matching against a port + specified in the HTTP request, use a headerMatch with headerName set to PORT and + a regular expression that satisfies the RFC2616 Host header's port specifier. 
+ Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or + rangeMatch must be set. + - name: 'suffixMatch' + type: String + description: | + The value of the header must end with the contents of suffixMatch. Only one of + exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch + must be set. + - name: 'ignoreCase' + type: Boolean + description: | + Specifies that prefixMatch and fullPathMatch matches are case sensitive. + Defaults to false. + default_value: false + - name: 'metadataFilters' + type: Array + description: | + Opaque filter criteria used by Loadbalancer to restrict routing configuration to + a limited set of xDS compliant clients. In their xDS requests to Loadbalancer, xDS + clients present node metadata. If a match takes place, the relevant routing + configuration is made available to those proxies. For each metadataFilter in + this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the + filterLabels must match the corresponding label provided in the metadata. If its + filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match + with corresponding labels in the provided metadata. metadataFilters specified + here can override those specified in the ForwardingRule that refers to this + UrlMap. metadataFilters only applies to Loadbalancers that have their + loadBalancingScheme set to INTERNAL_SELF_MANAGED. + item_type: + type: NestedObject + properties: + - name: 'filterLabels' + type: Array + description: | + The list of label value pairs that must match labels in the provided metadata + based on filterMatchCriteria. This list must not be empty and can have at the + most 64 entries. + required: true + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + Name of metadata label. The name can have a maximum length of 1024 characters + and must be at least 1 character long. 
+ required: true + - name: 'value' + type: String + description: | + The value of the label must match the specified value. value can have a maximum + length of 1024 characters. + required: true + min_size: 1 + max_size: 64 + - name: 'filterMatchCriteria' + type: Enum + description: | + Specifies how individual filterLabel matches within the list of filterLabels + contribute towards the overall metadataFilter match. Supported values are: + + * MATCH_ANY: At least one of the filterLabels must have a matching label in the + provided metadata. + * MATCH_ALL: All filterLabels must have matching labels in + the provided metadata. + required: true + enum_values: + - 'MATCH_ALL' + - 'MATCH_ANY' + - name: 'prefixMatch' + type: String + description: | + For satisfying the matchRule condition, the request's path must begin with the + specified prefixMatch. prefixMatch must begin with a /. The value must be + between 1 and 1024 characters. Only one of prefixMatch, fullPathMatch or + regexMatch must be specified. + - name: 'queryParameterMatches' + type: Array + description: | + Specifies a list of query parameter match criteria, all of which must match + corresponding query parameters in the request. + item_type: + type: NestedObject + properties: + - name: 'exactMatch' + type: String + description: | + The queryParameterMatch matches if the value of the parameter exactly matches + the contents of exactMatch. Only one of presentMatch, exactMatch and regexMatch + must be set. + - name: 'name' + type: String + description: | + The name of the query parameter to match. The query parameter must exist in the + request, in the absence of which the request match fails. + required: true + - name: 'presentMatch' + type: Boolean + description: | + Specifies that the queryParameterMatch matches if the request contains the query + parameter, irrespective of whether the parameter has a value or not. Only one of + presentMatch, exactMatch and regexMatch must be set. 
+ - name: 'regexMatch' + type: String + description: | + The queryParameterMatch matches if the value of the parameter matches the + regular expression specified by regexMatch. For the regular expression grammar, + please see en.cppreference.com/w/cpp/regex/ecmascript Only one of presentMatch, + exactMatch and regexMatch must be set. + - name: 'regexMatch' + type: String + description: | + For satisfying the matchRule condition, the path of the request must satisfy the + regular expression specified in regexMatch after removing any query parameters + and anchor supplied with the original URL. For regular expression grammar please + see en.cppreference.com/w/cpp/regex/ecmascript Only one of prefixMatch, + fullPathMatch or regexMatch must be specified. + - name: 'pathTemplateMatch' + type: String + description: | + For satisfying the matchRule condition, the path of the request + must match the wildcard pattern specified in pathTemplateMatch + after removing any query parameters and anchor that may be part + of the original URL. + + pathTemplateMatch must be between 1 and 255 characters + (inclusive). The pattern specified by pathTemplateMatch may + have at most 5 wildcard operators and at most 5 variable + captures in total. + - name: 'routeAction' + type: NestedObject + description: | + In response to a matching matchRule, the load balancer performs advanced routing + actions like URL rewrites, header transformations, etc. prior to forwarding the + request to the selected backend. If routeAction specifies any + weightedBackendServices, service must not be set. Conversely if service is set, + routeAction cannot contain any weightedBackendServices. Only one of routeAction + or urlRedirect must be set. + properties: + - name: 'corsPolicy' + type: NestedObject + description: | + The specification for allowing client side cross-origin requests. 
Please see W3C + Recommendation for Cross Origin Resource Sharing + properties: + - name: 'allowCredentials' + type: Boolean + description: | + In response to a preflight request, setting this to true indicates that the + actual request can include user credentials. This translates to the Access- + Control-Allow-Credentials header. Defaults to false. + default_value: false + - name: 'allowHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Headers header. + item_type: + type: String + - name: 'allowMethods' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Methods header. + item_type: + type: String + - name: 'allowOriginRegexes' + type: Array + description: | + Specifies the regular expression patterns that match allowed origins. For + regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + item_type: + type: String + - name: 'allowOrigins' + type: Array + description: | + Specifies the list of origins that will be allowed to do CORS requests. An + origin is allowed if it matches either allow_origins or allow_origin_regex. + item_type: + type: String + - name: 'disabled' + type: Boolean + description: | + If true, specifies the CORS policy is disabled. + Defaults to false, which indicates that the CORS policy is in effect. + default_value: false + - name: 'exposeHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Expose-Headers header. + item_type: + type: String + - name: 'maxAge' + type: Integer + description: | + Specifies how long the results of a preflight request can be cached. This + translates to the content for the Access-Control-Max-Age header. + - name: 'faultInjectionPolicy' + type: NestedObject + description: | + The specification for fault injection introduced into traffic to test the + resiliency of clients to backend service failure. 
As part of fault injection, + when clients send requests to a backend service, delays can be introduced by + Loadbalancer on a percentage of requests before sending those request to the + backend service. Similarly requests from clients can be aborted by the + Loadbalancer for a percentage of requests. timeout and retry_policy will be + ignored by clients that are configured with a fault_injection_policy. + properties: + - name: 'abort' + type: NestedObject + description: | + The specification for how client requests are aborted as part of fault + injection. + properties: + - name: 'httpStatus' + type: Integer + description: | + The HTTP status code used to abort the request. The value must be between 200 + and 599 inclusive. + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) which will be + aborted as part of fault injection. The value must be between 0.0 and 100.0 + inclusive. + - name: 'delay' + type: NestedObject + description: | + The specification for how client requests are delayed as part of fault + injection, before being sent to a backend service. + properties: + - name: 'fixedDelay' + type: NestedObject + description: | + Specifies the value of the fixed delay interval. + properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) on which delay will + be introduced as part of fault injection. The value must be between 0.0 and + 100.0 inclusive. 
+ - name: 'requestMirrorPolicy' + type: NestedObject + description: | + Specifies the policy on how requests intended for the route's backends are + shadowed to a separate mirrored backend service. Loadbalancer does not wait for + responses from the shadow service. Prior to sending traffic to the shadow + service, the host / authority header is suffixed with -shadow. + properties: + - name: 'backendService' + type: ResourceRef + description: | + The RegionBackendService resource being mirrored to. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'retryPolicy' + type: NestedObject + description: | + Specifies the retry policy associated with this route. + properties: + - name: 'numRetries' + type: Integer + description: | + Specifies the allowed number of retries. This number must be > 0. + required: true + - name: 'perTryTimeout' + type: NestedObject + description: | + Specifies a non-zero timeout per retry attempt. + properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'retryConditions' + type: Array + description: | + Specifies one or more conditions when this retry rule applies. Valid values are: + + * 5xx: Loadbalancer will attempt a retry if the backend service responds with + any 5xx response code, or if the backend service does not respond at all, + item_type: + type: String + - name: 'timeout' + type: NestedObject + description: | + Specifies the timeout for the selected route. 
Timeout is computed from the time + the request has been fully processed (i.e. end-of-stream) up until the + response has been completely processed. Timeout includes all retries. If not + specified, the default value is 15 seconds. + properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'urlRewrite' + type: NestedObject + description: | + The spec to modify the URL of the request, prior to forwarding the request to + the matched service + properties: + - name: 'hostRewrite' + type: String + description: | + Prior to forwarding the request to the selected service, the request's host + header is replaced with contents of hostRewrite. The value must be between 1 and + 255 characters. + - name: 'pathPrefixRewrite' + type: String + description: | + Prior to forwarding the request to the selected backend service, the matching + portion of the request's path is replaced by pathPrefixRewrite. The value must + be between 1 and 1024 characters. + - name: 'pathTemplateRewrite' + type: String + description: | + Prior to forwarding the request to the selected origin, if the + request matched a pathTemplateMatch, the matching portion of the + request's path is re-written using the pattern specified + by pathTemplateRewrite. + + pathTemplateRewrite must be between 1 and 255 characters + (inclusive), must start with a '/', and must only use variables + captured by the route's pathTemplate matchers. + + pathTemplateRewrite may only be used when all of a route's + MatchRules specify pathTemplate. + + Only one of pathPrefixRewrite and pathTemplateRewrite may be + specified. 
+ - name: 'weightedBackendServices' + type: Array + description: | + A list of weighted backend services to send traffic to when a route match + occurs. The weights determine the fraction of traffic that flows to their + corresponding backend service. If all traffic needs to go to a single backend + service, there must be one weightedBackendService with weight set to a non 0 + number. Once a backendService is identified and before forwarding the request to + the backend service, advanced routing actions like Url rewrites and header + transformations are applied depending on additional settings specified in this + HttpRouteAction. + item_type: + type: NestedObject + properties: + - name: 'backendService' + type: ResourceRef + description: | + The default RegionBackendService resource. Before + forwarding the request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. headerAction specified here take effect before + headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. + properties: + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the + backendService. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. 
If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + item_type: + type: String + - name: 'weight' + type: Integer + description: | + Specifies the fraction of traffic sent to backendService, computed as weight / + (sum of all weightedBackendService weights in routeAction) . The selection of a + backend service is determined only for new traffic. Once a user's request has + been directed to a backendService, subsequent requests will be sent to the same + backendService as determined by the BackendService's session affinity policy. + The value must be between 0 and 1000 + required: true + - name: 'urlRedirect' + type: NestedObject + description: | + When this rule is matched, the request is redirected to a URL specified by + urlRedirect. If urlRedirect is specified, service or routeAction must not be + set. 
+ properties: + - name: 'hostRedirect' + type: String + description: | + The host that will be used in the redirect response instead of the one + that was supplied in the request. The value must be between 1 and 255 + characters. + - name: 'httpsRedirect' + type: Boolean + description: | + If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain the + same as that of the request. This must only be set for UrlMaps used in + TargetHttpProxys. Setting this true for TargetHttpsProxy is not + permitted. The default is set to false. + default_value: false + - name: 'pathRedirect' + type: String + description: | + The path that will be used in the redirect response instead of the one + that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither is + supplied, the path of the original request will be used for the redirect. + The value must be between 1 and 1024 characters. + - name: 'prefixRedirect' + type: String + description: | + The prefix that replaces the prefixMatch specified in the + HttpRouteRuleMatch, retaining the remaining portion of the URL before + redirecting the request. prefixRedirect cannot be supplied together with + pathRedirect. Supply one alone or neither. If neither is supplied, the + path of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. + - name: 'redirectResponseCode' + type: Enum + description: | + The HTTP Status code to use for this RedirectAction. Supported values are: + + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. + + * FOUND, which corresponds to 302. + + * SEE_OTHER which corresponds to 303. + + * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method + will be retained. + + * PERMANENT_REDIRECT, which corresponds to 308. 
In this case, + the request method will be retained. + enum_values: + - 'FOUND' + - 'MOVED_PERMANENTLY_DEFAULT' + - 'PERMANENT_REDIRECT' + - 'SEE_OTHER' + - 'TEMPORARY_REDIRECT' + skip_docs_values: true + - name: 'stripQuery' + type: Boolean + description: | + If set to true, any accompanying query portion of the original URL is + removed prior to redirecting the request. If set to false, the query + portion of the original URL is retained. The default value is false. + default_value: false + - name: 'pathRule' + type: Array + description: | + The list of path rules. Use this list instead of routeRules when routing based + on simple path matching is all that's required. The order by which path rules + are specified does not matter. Matches are always done on the longest-path-first + basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* + irrespective of the order in which those paths appear in this list. Within a + given pathMatcher, only one of pathRules or routeRules must be set. + api_name: pathRules + item_type: + type: NestedObject + properties: + - name: 'service' + type: ResourceRef + description: | + The region backend service resource to which traffic is + directed if this rule is matched. If routeAction is additionally specified, + advanced routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction cannot + contain any weightedBackendService s. Conversely, if routeAction specifies any + weightedBackendServices, service must not be specified. Only one of urlRedirect, + service or routeAction.weightedBackendService must be set. + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'paths' + type: Array + description: | + The list of path patterns to match. Each must start with / and the only place a + \* is allowed is at the end following a /. 
The string fed to the path matcher + does not include any text after the first ? or #, and those chars are not + allowed here. + is_set: true + required: true + item_type: + type: String + - name: 'routeAction' + type: NestedObject + description: | + In response to a matching path, the load balancer performs advanced routing + actions like URL rewrites, header transformations, etc. prior to forwarding the + request to the selected backend. If routeAction specifies any + weightedBackendServices, service must not be set. Conversely if service is set, + routeAction cannot contain any weightedBackendServices. Only one of routeAction + or urlRedirect must be set. + properties: + - name: 'corsPolicy' + type: NestedObject + description: | + The specification for allowing client side cross-origin requests. Please see W3C + Recommendation for Cross Origin Resource Sharing + properties: + - name: 'allowCredentials' + type: Boolean + description: | + In response to a preflight request, setting this to true indicates that the + actual request can include user credentials. This translates to the Access- + Control-Allow-Credentials header. Defaults to false. + default_value: false + - name: 'allowHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Headers header. + item_type: + type: String + - name: 'allowMethods' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Methods header. + item_type: + type: String + - name: 'allowOriginRegexes' + type: Array + description: | + Specifies the regular expression patterns that match allowed origins. For + regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + item_type: + type: String + - name: 'allowOrigins' + type: Array + description: | + Specifies the list of origins that will be allowed to do CORS requests. 
An + origin is allowed if it matches either allow_origins or allow_origin_regex. + item_type: + type: String + - name: 'disabled' + type: Boolean + description: | + If true, specifies the CORS policy is disabled. + required: true + - name: 'exposeHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Expose-Headers header. + item_type: + type: String + - name: 'maxAge' + type: Integer + description: | + Specifies how long the results of a preflight request can be cached. This + translates to the content for the Access-Control-Max-Age header. + - name: 'faultInjectionPolicy' + type: NestedObject + description: | + The specification for fault injection introduced into traffic to test the + resiliency of clients to backend service failure. As part of fault injection, + when clients send requests to a backend service, delays can be introduced by + Loadbalancer on a percentage of requests before sending those request to the + backend service. Similarly requests from clients can be aborted by the + Loadbalancer for a percentage of requests. timeout and retry_policy will be + ignored by clients that are configured with a fault_injection_policy. + properties: + - name: 'abort' + type: NestedObject + description: | + The specification for how client requests are aborted as part of fault + injection. + properties: + - name: 'httpStatus' + type: Integer + description: | + The HTTP status code used to abort the request. The value must be between 200 + and 599 inclusive. + required: true + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) which will be + aborted as part of fault injection. The value must be between 0.0 and 100.0 + inclusive. + required: true + - name: 'delay' + type: NestedObject + description: | + The specification for how client requests are delayed as part of fault + injection, before being sent to a backend service. 
+ properties: + - name: 'fixedDelay' + type: NestedObject + description: | + Specifies the value of the fixed delay interval. + required: true + properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) on which delay will + be introduced as part of fault injection. The value must be between 0.0 and + 100.0 inclusive. + required: true + - name: 'requestMirrorPolicy' + type: NestedObject + description: | + Specifies the policy on how requests intended for the route's backends are + shadowed to a separate mirrored backend service. Loadbalancer does not wait for + responses from the shadow service. Prior to sending traffic to the shadow + service, the host / authority header is suffixed with -shadow. + properties: + - name: 'backendService' + type: ResourceRef + description: | + The RegionBackendService resource being mirrored to. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'retryPolicy' + type: NestedObject + description: | + Specifies the retry policy associated with this route. + properties: + - name: 'numRetries' + type: Integer + description: | + Specifies the allowed number retries. This number must be > 0. + - name: 'perTryTimeout' + type: NestedObject + description: | + Specifies a non-zero timeout per retry attempt. 
+ properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'retryConditions' + type: Array + description: | + Specifies one or more conditions when this retry rule applies. Valid values are: + + - 5xx: Loadbalancer will attempt a retry if the backend service responds with + any 5xx response code, or if the backend service does not respond at all, + item_type: + type: String + - name: 'timeout' + type: NestedObject + description: | + Specifies the timeout for the selected route. Timeout is computed from the time + the request is has been fully processed (i.e. end-of-stream) up until the + response has been completely processed. Timeout includes all retries. If not + specified, the default value is 15 seconds. + properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'urlRewrite' + type: NestedObject + description: | + The spec to modify the URL of the request, prior to forwarding the request to + the matched service + properties: + - name: 'hostRewrite' + type: String + description: | + Prior to forwarding the request to the selected service, the request's host + header is replaced with contents of hostRewrite. The value must be between 1 and + 255 characters. 
+ - name: 'pathPrefixRewrite' + type: String + description: | + Prior to forwarding the request to the selected backend service, the matching + portion of the request's path is replaced by pathPrefixRewrite. The value must + be between 1 and 1024 characters. + - name: 'weightedBackendServices' + type: Array + description: | + A list of weighted backend services to send traffic to when a route match + occurs. The weights determine the fraction of traffic that flows to their + corresponding backend service. If all traffic needs to go to a single backend + service, there must be one weightedBackendService with weight set to a non 0 + number. Once a backendService is identified and before forwarding the request to + the backend service, advanced routing actions like Url rewrites and header + transformations are applied depending on additional settings specified in this + HttpRouteAction. + item_type: + type: NestedObject + properties: + - name: 'backendService' + type: ResourceRef + description: | + The default RegionBackendService resource. Before + forwarding the request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. headerAction specified here take effect before + headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. + properties: + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the + backendService. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. 
+ required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + item_type: + type: String + - name: 'weight' + type: Integer + description: | + Specifies the fraction of traffic sent to backendService, computed as weight / + (sum of all weightedBackendService weights in routeAction) . The selection of a + backend service is determined only for new traffic. Once a user's request has + been directed to a backendService, subsequent requests will be sent to the same + backendService as determined by the BackendService's session affinity policy. 
+ The value must be between 0 and 1000 + required: true + - name: 'urlRedirect' + type: NestedObject + description: | + When a path pattern is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction must not + be set. + properties: + - name: 'hostRedirect' + type: String + description: | + The host that will be used in the redirect response instead of the one + that was supplied in the request. The value must be between 1 and 255 + characters. + - name: 'httpsRedirect' + type: Boolean + description: | + If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain the + same as that of the request. This must only be set for UrlMaps used in + TargetHttpProxys. Setting this true for TargetHttpsProxy is not + permitted. The default is set to false. + default_value: false + - name: 'pathRedirect' + type: String + description: | + The path that will be used in the redirect response instead of the one + that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither is + supplied, the path of the original request will be used for the redirect. + The value must be between 1 and 1024 characters. + - name: 'prefixRedirect' + type: String + description: | + The prefix that replaces the prefixMatch specified in the + HttpRouteRuleMatch, retaining the remaining portion of the URL before + redirecting the request. prefixRedirect cannot be supplied together with + pathRedirect. Supply one alone or neither. If neither is supplied, the + path of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. + - name: 'redirectResponseCode' + type: Enum + description: | + The HTTP Status code to use for this RedirectAction. 
Supported values are: + + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. + + * FOUND, which corresponds to 302. + + * SEE_OTHER which corresponds to 303. + + * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method + will be retained. + + * PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained. + enum_values: + - 'FOUND' + - 'MOVED_PERMANENTLY_DEFAULT' + - 'PERMANENT_REDIRECT' + - 'SEE_OTHER' + - 'TEMPORARY_REDIRECT' + skip_docs_values: true + - name: 'stripQuery' + type: Boolean + description: | + If set to true, any accompanying query portion of the original URL is removed + prior to redirecting the request. If set to false, the query portion of the + original URL is retained. + This field is required to ensure an empty block is not set. The normal default value is false. + required: true + - name: 'defaultUrlRedirect' + type: NestedObject + description: | + When none of the specified hostRules match, the request is redirected to a URL specified + by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or + defaultRouteAction must not be set. + properties: + - name: 'hostRedirect' + type: String + description: | + The host that will be used in the redirect response instead of the one that was + supplied in the request. The value must be between 1 and 255 characters. + - name: 'httpsRedirect' + type: Boolean + description: | + If set to true, the URL scheme in the redirected request is set to https. If set to + false, the URL scheme of the redirected request will remain the same as that of the + request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this + true for TargetHttpsProxy is not permitted. The default is set to false. + default_value: false + - name: 'pathRedirect' + type: String + description: | + The path that will be used in the redirect response instead of the one that was + supplied in the request. 
pathRedirect cannot be supplied together with + prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the + original request will be used for the redirect. The value must be between 1 and 1024 + characters. + - name: 'prefixRedirect' + type: String + description: | + The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or + neither. If neither is supplied, the path of the original request will be used for + the redirect. The value must be between 1 and 1024 characters. + - name: 'redirectResponseCode' + type: Enum + description: | + The HTTP Status code to use for this RedirectAction. Supported values are: + + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. + + * FOUND, which corresponds to 302. + + * SEE_OTHER which corresponds to 303. + + * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method + will be retained. + + * PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained. + enum_values: + - 'FOUND' + - 'MOVED_PERMANENTLY_DEFAULT' + - 'PERMANENT_REDIRECT' + - 'SEE_OTHER' + - 'TEMPORARY_REDIRECT' + skip_docs_values: true + - name: 'stripQuery' + type: Boolean + description: | + If set to true, any accompanying query portion of the original URL is removed prior + to redirecting the request. If set to false, the query portion of the original URL is + retained. + This field is required to ensure an empty block is not set. The normal default value is false. + required: true + - name: 'test' + type: Array + description: | + The list of expected URL mappings. Requests to update this UrlMap will + succeed only if all of the test cases pass. 
+ api_name: tests + item_type: + type: NestedObject + properties: + - name: 'description' + type: String + description: 'Description of this test case.' + - name: 'host' + type: String + description: 'Host portion of the URL.' + required: true + - name: 'path' + type: String + description: 'Path portion of the URL.' + required: true + - name: 'service' + type: ResourceRef + description: + A reference to expected RegionBackendService resource the given URL + should be mapped to. + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'defaultUrlRedirect' + type: NestedObject + description: | + When none of the specified hostRules match, the request is redirected to a URL specified + by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or + defaultRouteAction must not be set. + conflicts: + - default_route_action + exactly_one_of: + - 'default_service' + - 'default_url_redirect' + - 'default_route_action.0.weighted_backend_services' + properties: + - name: 'hostRedirect' + type: String + description: | + The host that will be used in the redirect response instead of the one that was + supplied in the request. The value must be between 1 and 255 characters. + - name: 'httpsRedirect' + type: Boolean + description: | + If set to true, the URL scheme in the redirected request is set to https. If set to + false, the URL scheme of the redirected request will remain the same as that of the + request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this + true for TargetHttpsProxy is not permitted. The default is set to false. + default_value: false + - name: 'pathRedirect' + type: String + description: | + The path that will be used in the redirect response instead of the one that was + supplied in the request. pathRedirect cannot be supplied together with + prefixRedirect. Supply one alone or neither. 
If neither is supplied, the path of the + original request will be used for the redirect. The value must be between 1 and 1024 + characters. + - name: 'prefixRedirect' + type: String + description: | + The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or + neither. If neither is supplied, the path of the original request will be used for + the redirect. The value must be between 1 and 1024 characters. + - name: 'redirectResponseCode' + type: Enum + description: | + The HTTP Status code to use for this RedirectAction. Supported values are: + + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. + + * FOUND, which corresponds to 302. + + * SEE_OTHER which corresponds to 303. + + * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method + will be retained. + + * PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained. + enum_values: + - 'FOUND' + - 'MOVED_PERMANENTLY_DEFAULT' + - 'PERMANENT_REDIRECT' + - 'SEE_OTHER' + - 'TEMPORARY_REDIRECT' + skip_docs_values: true + - name: 'stripQuery' + type: Boolean + description: | + If set to true, any accompanying query portion of the original URL is removed prior + to redirecting the request. If set to false, the query portion of the original URL is + retained. + This field is required to ensure an empty block is not set. The normal default value is false. + required: true + - name: 'defaultRouteAction' + type: NestedObject + description: | + defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions, such as URL rewrites and header transformations, before forwarding the request to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. 
Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. + Only one of defaultRouteAction or defaultUrlRedirect must be set. + URL maps for Classic external HTTP(S) load balancers only support the urlRewrite action within defaultRouteAction. + defaultRouteAction has no effect when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. + conflicts: + - default_url_redirect + properties: + - name: 'weightedBackendServices' + type: Array + description: | + A list of weighted backend services to send traffic to when a route match occurs. The weights determine the fraction of traffic that flows to their corresponding backend service. If all traffic needs to go to a single backend service, there must be one weightedBackendService with weight set to a non-zero number. + After a backend service is identified and before forwarding the request to the backend service, advanced routing actions such as URL rewrites and header transformations are applied depending on additional settings specified in this HttpRouteAction. + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + exactly_one_of: + - 'default_service' + - 'default_url_redirect' + - 'default_route_action.0.weighted_backend_services' + item_type: + type: NestedObject + properties: + - name: 'backendService' + type: ResourceRef + description: | + The full or partial URL to the default BackendService resource. Before forwarding the request to backendService, the load balancer applies any relevant headerActions specified as part of this backendServiceWeight. 
+ custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'weight' + type: Integer + description: | + Specifies the fraction of traffic sent to a backend service, computed as weight / (sum of all weightedBackendService weights in routeAction) . + The selection of a backend service is determined only for new traffic. Once a user's request has been directed to a backend service, subsequent requests are sent to the same backend service as determined by the backend service's session affinity policy. + The value must be from 0 to 1000. + validation: + function: 'validation.IntBetween(0, 1000)' + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for the selected backendService. + headerAction specified here take effect before headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. + headerAction is not supported for load balancers that have their loadBalancingScheme set to EXTERNAL. + Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. + properties: + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request before forwarding the request to the backendService. + item_type: + type: String + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request before forwarding the request to the backendService. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: 'The name of the header.' + - name: 'headerValue' + type: String + description: 'The value of the header to add.' + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the header. 
If true, headerValue is set for the header, discarding any values that were set for that header. + The default value is false. + default_value: false + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response before sending the response back to the client. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response before sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: 'The name of the header.' + - name: 'headerValue' + type: String + description: 'The value of the header to add.' + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the header. If true, headerValue is set for the header, discarding any values that were set for that header. + The default value is false. + default_value: false + - name: 'urlRewrite' + type: NestedObject + description: | + The spec to modify the URL of the request, before forwarding the request to the matched service. + urlRewrite is the only action supported in UrlMaps for external HTTP(S) load balancers. + Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'pathPrefixRewrite' + type: String + description: | + Before forwarding the request to the selected backend service, the matching portion of the request's path is replaced by pathPrefixRewrite. + The value must be from 1 to 1024 characters. 
+ at_least_one_of: + - 'default_route_action.0.url_rewrite.0.path_prefix_rewrite' + - 'default_route_action.0.url_rewrite.0.host_rewrite' + validation: + function: 'validation.StringLenBetween(1, 1024)' + - name: 'hostRewrite' + type: String + description: | + Before forwarding the request to the selected service, the request's host header is replaced with contents of hostRewrite. + The value must be from 1 to 255 characters. + at_least_one_of: + - 'default_route_action.0.url_rewrite.0.path_prefix_rewrite' + - 'default_route_action.0.url_rewrite.0.host_rewrite' + validation: + function: 'validation.StringLenBetween(1, 255)' + - name: 'timeout' + type: NestedObject + description: | + Specifies the timeout for the selected route. Timeout is computed from the time the request has been fully processed (known as end-of-stream) up until the response has been processed. Timeout includes all retries. + If not specified, this field uses the largest timeout among all backend services associated with the route. + Not supported when the URL map is bound to a target gRPC proxy that has validateForProxyless field set to true. + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + at_least_one_of: + - 'default_route_action.0.timeout.0.seconds' + - 'default_route_action.0.timeout.0.nanos' + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. 
Durations less than one second are represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + at_least_one_of: + - 'default_route_action.0.timeout.0.seconds' + - 'default_route_action.0.timeout.0.nanos' + validation: + function: 'validation.IntBetween(0, 999999999)' + - name: 'retryPolicy' + type: NestedObject + description: | + Specifies the retry policy associated with this route. + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'retryConditions' + type: Array + description: | + Specifies one or more conditions when this retry policy applies. + Valid values are listed below. Only the following codes are supported when the URL map is bound to target gRPC proxy that has validateForProxyless field set to true: cancelled, deadline-exceeded, internal, resource-exhausted, unavailable. + - 5xx : retry is attempted if the instance or endpoint responds with any 5xx response code, or if the instance or endpoint does not respond at all. For example, disconnects, reset, read timeout, connection failure, and refused streams. + - gateway-error : Similar to 5xx, but only applies to response codes 502, 503 or 504. + - connect-failure : a retry is attempted on failures connecting to the instance or endpoint. For example, connection timeouts. + - retriable-4xx : a retry is attempted if the instance or endpoint responds with a 4xx response code. The only error that you can retry is error code 409. + - refused-stream : a retry is attempted if the instance or endpoint resets the stream with a REFUSED_STREAM error code. This reset type indicates that it is safe to retry. 
+ - cancelled : a retry is attempted if the gRPC status code in the response header is set to cancelled. + - deadline-exceeded : a retry is attempted if the gRPC status code in the response header is set to deadline-exceeded. + - internal : a retry is attempted if the gRPC status code in the response header is set to internal. + - resource-exhausted : a retry is attempted if the gRPC status code in the response header is set to resource-exhausted. + - unavailable : a retry is attempted if the gRPC status code in the response header is set to unavailable. + at_least_one_of: + - 'default_route_action.0.retry_policy.0.retry_conditions' + - 'default_route_action.0.retry_policy.0.num_retries' + - 'default_route_action.0.retry_policy.0.per_try_timeout' + item_type: + type: String + - name: 'numRetries' + type: Integer + description: | + Specifies the allowed number retries. This number must be > 0. If not specified, defaults to 1. + at_least_one_of: + - 'default_route_action.0.retry_policy.0.retry_conditions' + - 'default_route_action.0.retry_policy.0.num_retries' + - 'default_route_action.0.retry_policy.0.per_try_timeout' + validation: + function: 'validation.IntAtLeast(1)' + default_value: 1 + - name: 'perTryTimeout' + type: NestedObject + description: | + Specifies a non-zero timeout per retry attempt. + + If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, + will use the largest timeout among all backend services associated with the route. + at_least_one_of: + - 'default_route_action.0.retry_policy.0.retry_conditions' + - 'default_route_action.0.retry_policy.0.num_retries' + - 'default_route_action.0.retry_policy.0.per_try_timeout' + properties: + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. 
+ Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + at_least_one_of: + - 'default_route_action.0.retry_policy.0.per_try_timeout.0.seconds' + - 'default_route_action.0.retry_policy.0.per_try_timeout.0.nanos' + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are + represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + at_least_one_of: + - 'default_route_action.0.retry_policy.0.per_try_timeout.0.seconds' + - 'default_route_action.0.retry_policy.0.per_try_timeout.0.nanos' + validation: + function: 'validation.IntBetween(0, 999999999)' + - name: 'requestMirrorPolicy' + type: NestedObject + description: | + Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. + The load balancer does not wait for responses from the shadow service. Before sending traffic to the shadow service, the host / authority header is suffixed with -shadow. + Not supported when the URL map is bound to a target gRPC proxy that has the validateForProxyless field set to true. + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'backendService' + type: ResourceRef + description: | + The full or partial URL to the RegionBackendService resource being mirrored to. + The backend service configured for a mirroring policy must reference backends that are of the same type as the original backend service matched in the URL map. + Serverless NEG backends are not currently supported as a mirrored backend service. 
+ custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'RegionBackendService' + imports: 'selfLink' + - name: 'corsPolicy' + type: NestedObject + description: | + The specification for allowing client side cross-origin requests. Please see + [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'allowOrigins' + type: Array + description: | + Specifies the list of origins that will be allowed to do CORS requests. + An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'allowOriginRegexes' + type: Array + description: | + Specifies the regualar expression patterns that match allowed origins. For regular expression grammar + please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. 
+ at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'allowMethods' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Methods header. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'allowHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Headers header. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'exposeHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Expose-Headers header. 
+ at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'maxAge' + type: Integer + description: | + Specifies how long results of a preflight request can be cached in seconds. + This translates to the Access-Control-Max-Age header. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + - name: 'allowCredentials' + type: Boolean + description: | + In response to a preflight request, setting this to true indicates that the actual request can include user credentials. This field translates to the Access-Control-Allow-Credentials header. + Default is false. 
+ at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + default_value: false + - name: 'disabled' + type: Boolean + description: | + If true, the setting specifies the CORS policy is disabled. The default value of false, which indicates that the CORS policy is in effect. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + default_value: false + - name: 'faultInjectionPolicy' + type: NestedObject + description: | + The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. + As part of fault injection, when clients send requests to a backend service, delays can be introduced by a load balancer on a percentage of requests before sending those requests to the backend service. + Similarly requests from clients can be aborted by the load balancer for a percentage of requests. + timeout and retryPolicy is ignored by clients that are configured with a faultInjectionPolicy if: 1. The traffic is generated by fault injection AND 2. The fault injection is not a delay fault injection. + Fault injection is not supported with the global external HTTP(S) load balancer (classic). 
To see which load balancers support fault injection, see Load balancing: [Routing and traffic management features](https://cloud.google.com/load-balancing/docs/features#routing-traffic-management). + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'delay' + type: NestedObject + description: | + The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay' + - 'default_route_action.0.fault_injection_policy.0.abort' + properties: + - name: 'fixedDelay' + type: NestedObject + description: | + Specifies the value of the fixed delay interval. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay' + - 'default_route_action.0.fault_injection_policy.0.delay.0.percentage' + properties: + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.seconds' + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.nanos' + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are + represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. 
+ at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.seconds' + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.nanos' + validation: + function: 'validation.IntBetween(0, 999999999)' + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay' + - 'default_route_action.0.fault_injection_policy.0.delay.0.percentage' + validation: + function: 'validation.FloatBetween(0, 100)' + - name: 'abort' + type: NestedObject + description: | + The specification for how client requests are aborted as part of fault injection. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay' + - 'default_route_action.0.fault_injection_policy.0.abort' + properties: + - name: 'httpStatus' + type: Integer + description: | + The HTTP status code used to abort the request. + The value must be between 200 and 599 inclusive. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.abort.0.http_status' + - 'default_route_action.0.fault_injection_policy.0.abort.0.percentage' + validation: + function: 'validation.IntBetween(200, 599)' + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. 
+ at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.abort.0.http_status' + - 'default_route_action.0.fault_injection_policy.0.abort.0.percentage' + validation: + function: 'validation.FloatBetween(0, 100)' diff --git a/mmv1/products/compute/go_Reservation.yaml b/mmv1/products/compute/go_Reservation.yaml new file mode 100644 index 000000000000..397f513e5ade --- /dev/null +++ b/mmv1/products/compute/go_Reservation.yaml @@ -0,0 +1,243 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Reservation' +description: | + Represents a reservation resource. A reservation ensures that capacity is + held in a specific zone even if the reserved VMs are not running. + + Reservations apply only to Compute Engine, Cloud Dataproc, and Google + Kubernetes Engine VM usage.Reservations do not apply to `f1-micro` or + `g1-small` machine types, preemptible VMs, sole tenant nodes, or other + services not listed above + like Cloud SQL and Dataflow. 
+references: + guides: + 'Reserving zonal resources': 'https://cloud.google.com/compute/docs/instances/reserving-zonal-resources' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/reservations' +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/reservations' +has_self_link: true +update_url: 'projects/{{project}}/zones/{{zone}}/reservations/{{name}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + update_encoder: 'templates/terraform/update_encoder/go/reservation.go.tmpl' + pre_update: 'templates/terraform/pre_update/go/shared_reservation_update.go.tmpl' +examples: + - name: 'reservation_basic' + primary_resource_id: 'gce_reservation' + vars: + reservation_name: 'gce-reservation' + - name: 'shared_reservation_basic' + primary_resource_id: 'gce_reservation' + vars: + reservation_name: 'gce-shared-reservation' + test_env_vars: + project: 'PROJECT_NAME' + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true +parameters: + - name: 'zone' + type: ResourceRef + description: | + The zone where the reservation is made. + required: true + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: | + Creation timestamp in RFC3339 text format. + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. + immutable: true + - name: 'name' + type: String + description: | + Name of the resource. 
Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'commitment' + type: String + description: | + Full or partial URL to a parent commitment. This field displays for + reservations that are tied to a commitment. + output: true + - name: 'specificReservationRequired' + type: Boolean + description: | + When set to true, only VMs that target this reservation by name can + consume this reservation. Otherwise, it can be consumed by VMs with + affinity for any reservation. Defaults to false. + immutable: true + default_value: false + - name: 'status' + type: String + description: | + The status of the reservation. + output: true + - name: 'shareSettings' + type: NestedObject + description: | + The share setting for reservations. + ignore_read: true + default_from_api: true + properties: + - name: 'shareType' + type: Enum + description: | + Type of sharing for this shared-reservation + default_from_api: true + enum_values: + - 'LOCAL' + - 'SPECIFIC_PROJECTS' + - name: 'projectMap' + type: Map + description: | + A map of project number and project config. This is only valid when shareType's value is SPECIFIC_PROJECTS. + key_name: 'id' + key_description: | + The project id/number which is deleting or adding to the project list. + value_type: + type: NestedObject + properties: + - name: 'projectId' + type: String + description: | + The project id/number, should be same as the key of this project config in the project map. + - name: 'specificReservation' + type: NestedObject + description: | + Reservation for instances with specific machine shapes. 
+ required: true + update_url: 'projects/{{project}}/zones/{{zone}}/reservations/{{name}}/resize' + update_verb: 'POST' + properties: + - name: 'count' + type: Integer + description: | + The number of resources that are allocated. + required: true + validation: + function: 'validation.IntAtLeast(1)' + - name: 'inUseCount' + type: Integer + description: | + How many instances are in use. + output: true + - name: 'instanceProperties' + type: NestedObject + description: | + The instance properties for the reservation. + required: true + immutable: true + properties: + - name: 'machineType' + type: String + description: | + The name of the machine type to reserve. + required: true + immutable: true + - name: 'minCpuPlatform' + type: String + description: | + The minimum CPU platform for the reservation. For example, + `"Intel Skylake"`. See + the CPU platform availability reference](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform#availablezones) + for information on available CPU platforms. + immutable: true + default_from_api: true + - name: 'guestAccelerators' + type: Array + description: | + Guest accelerator type and count. + immutable: true + item_type: + type: NestedObject + properties: + - name: 'acceleratorType' + type: String + description: | + The full or partial URL of the accelerator type to + attach to this instance. For example: + `projects/my-project/zones/us-central1-c/acceleratorTypes/nvidia-tesla-p100` + + If you are creating an instance template, specify only the accelerator name. + required: true + immutable: true + - name: 'acceleratorCount' + type: Integer + description: | + The number of the guest accelerator cards exposed to + this instance. + required: true + immutable: true + - name: 'localSsds' + type: Array + description: | + The amount of local ssd to reserve with each instance. This + reserves disks of type `local-ssd`. 
+ immutable: true + item_type: + type: NestedObject + properties: + - name: 'interface' + type: Enum + description: | + The disk interface to use for attaching this disk. + immutable: true + default_value: SCSI + enum_values: + - 'SCSI' + - 'NVME' + - name: 'diskSizeGb' + type: Integer + description: | + The size of the disk in base-2 GB. + required: true + immutable: true diff --git a/mmv1/products/compute/go_ResourcePolicy.yaml b/mmv1/products/compute/go_ResourcePolicy.yaml new file mode 100644 index 000000000000..1e689c492b05 --- /dev/null +++ b/mmv1/products/compute/go_ResourcePolicy.yaml @@ -0,0 +1,363 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ResourcePolicy' +kind: 'compute#resourcePolicy' +description: | + A policy that can be attached to a resource to specify or schedule actions on that resource. 
+references: + guides: + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/resourcePolicies' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/resourcePolicies' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'resource_policy_basic' + primary_resource_id: 'foo' + vars: + name: 'gce-policy' + - name: 'resource_policy_full' + primary_resource_id: 'bar' + vars: + name: 'gce-policy' + - name: 'resource_policy_placement_policy' + primary_resource_id: 'baz' + vars: + name: 'gce-policy' + - name: 'resource_policy_placement_policy_max_distance' + primary_resource_id: 'baz' + min_version: 'beta' + vars: + name: 'gce-policy' + - name: 'resource_policy_instance_schedule_policy' + primary_resource_id: 'hourly' + vars: + name: 'gce-policy' + - name: 'resource_policy_snapshot_schedule_chain_name' + primary_resource_id: 'hourly' + vars: + name: 'gce-policy' + - name: 'resource_policy_consistency_group' + primary_resource_id: 'cgroup' + vars: + name: 'gce-policy' +parameters: + - name: 'region' + type: ResourceRef + description: Region where resource policy resides. + required: false + immutable: true + ignore_read: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + The name of the resource, provided by the client when initially creating + the resource. The resource name must be 1-63 characters long, and comply + with RFC1035. 
Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])`? which means the + first character must be a lowercase letter, and all following characters + must be a dash, lowercase letter, or digit, except the last character, + which cannot be a dash. + required: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you create the resource. + - name: 'snapshotSchedulePolicy' + type: NestedObject + description: | + Policy for creating snapshots of persistent disks. + conflicts: + - group_placement_policy + - instance_schedule_policy + - disk_consistency_group_policy + properties: + - name: 'schedule' + type: NestedObject + description: | + Contains one of an `hourlySchedule`, `dailySchedule`, or `weeklySchedule`. + required: true + properties: + - name: 'hourlySchedule' + type: NestedObject + description: | + The policy will execute every nth hour starting at the specified time. + exactly_one_of: + - 'snapshot_schedule_policy.0.schedule.0.hourly_schedule' + - 'snapshot_schedule_policy.0.schedule.0.daily_schedule' + - 'snapshot_schedule_policy.0.schedule.0.weekly_schedule' + properties: + - name: 'hoursInCycle' + type: Integer + description: | + The number of hours between snapshots. + required: true + - name: 'startTime' + type: String + description: | + Time within the window to start the operations. + It must be in an hourly format "HH:MM", + where HH : [00-23] and MM : [00] GMT. + required: true + validation: + function: 'verify.ValidateHourlyOnly' + - name: 'dailySchedule' + type: NestedObject + description: | + The policy will execute every nth day at the specified time. 
+ exactly_one_of: + - 'snapshot_schedule_policy.0.schedule.0.hourly_schedule' + - 'snapshot_schedule_policy.0.schedule.0.daily_schedule' + - 'snapshot_schedule_policy.0.schedule.0.weekly_schedule' + properties: + - name: 'daysInCycle' + type: Integer + description: | + Defines a schedule with units measured in days. The value determines how many days pass between the start of each cycle. Days in cycle for snapshot schedule policy must be 1. + required: true + - name: 'startTime' + type: String + description: | + This must be in UTC format that resolves to one of + 00:00, 04:00, 08:00, 12:00, 16:00, or 20:00. For example, + both 13:00-5 and 08:00 are valid. + required: true + validation: + function: 'verify.ValidateHourlyOnly' + - name: 'weeklySchedule' + type: NestedObject + description: | + Allows specifying a snapshot time for each day of the week. + exactly_one_of: + - 'snapshot_schedule_policy.0.schedule.0.hourly_schedule' + - 'snapshot_schedule_policy.0.schedule.0.daily_schedule' + - 'snapshot_schedule_policy.0.schedule.0.weekly_schedule' + properties: + - name: 'dayOfWeeks' + type: Array + description: | + May contain up to seven (one for each day of the week) snapshot times. + is_set: true + required: true + item_type: + type: NestedObject + properties: + - name: 'startTime' + type: String + description: | + Time within the window to start the operations. + It must be in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + required: true + - name: 'day' + type: Enum + description: | + The day of the week to create the snapshot. e.g. MONDAY + required: true + enum_values: + - 'MONDAY' + - 'TUESDAY' + - 'WEDNESDAY' + - 'THURSDAY' + - 'FRIDAY' + - 'SATURDAY' + - 'SUNDAY' + min_size: 1 + max_size: 7 + - name: 'retentionPolicy' + type: NestedObject + description: | + Retention policy applied to snapshots created by this resource policy. 
+ properties: + - name: 'maxRetentionDays' + type: Integer + description: | + Maximum age of the snapshot that is allowed to be kept. + required: true + - name: 'onSourceDiskDelete' + type: Enum + description: | + Specifies the behavior to apply to scheduled snapshots when + the source disk is deleted. + default_value: KEEP_AUTO_SNAPSHOTS + enum_values: + - 'KEEP_AUTO_SNAPSHOTS' + - 'APPLY_RETENTION_POLICY' + - name: 'snapshotProperties' + type: NestedObject + description: | + Properties with which the snapshots are created, such as labels. + properties: + - name: 'labels' + type: KeyValuePairs + description: | + A set of key-value pairs. + at_least_one_of: + - 'snapshot_schedule_policy.0.snapshot_properties.0.labels' + - 'snapshot_schedule_policy.0.snapshot_properties.0.storage_locations' + - 'snapshot_schedule_policy.0.snapshot_properties.0.guest_flush' + - name: 'storageLocations' + type: Array + description: | + Cloud Storage bucket location to store the auto snapshot + (regional or multi-regional) + is_set: true + at_least_one_of: + - 'snapshot_schedule_policy.0.snapshot_properties.0.labels' + - 'snapshot_schedule_policy.0.snapshot_properties.0.storage_locations' + - 'snapshot_schedule_policy.0.snapshot_properties.0.guest_flush' + item_type: + type: String + max_size: 1 + - name: 'guestFlush' + type: Boolean + description: | + Whether to perform a 'guest aware' snapshot. + send_empty_value: true + at_least_one_of: + - 'snapshot_schedule_policy.0.snapshot_properties.0.labels' + - 'snapshot_schedule_policy.0.snapshot_properties.0.storage_locations' + - 'snapshot_schedule_policy.0.snapshot_properties.0.guest_flush' + - name: 'chainName' + type: String + description: | + Creates the new snapshot in the snapshot chain labeled with the + specified name. The chain name must be 1-63 characters long and comply + with RFC1035. + - name: 'groupPlacementPolicy' + type: NestedObject + description: | + Resource policy for instances used for placement configuration. 
+ conflicts: + - instance_schedule_policy + - snapshot_schedule_policy + - disk_consistency_group_policy + properties: + - name: 'vmCount' + type: Integer + description: | + Number of VMs in this placement group. Google does not recommend that you use this field + unless you use a compact policy and you want your policy to work only if it contains this + exact number of VMs. + - name: 'availabilityDomainCount' + type: Integer + description: | + The number of availability domains instances will be spread across. If two instances are in different + availability domain, they will not be put in the same low latency network + - name: 'collocation' + type: Enum + description: | + Collocation specifies whether to place VMs inside the same availability domain on the same low-latency network. + Specify `COLLOCATED` to enable collocation. Can only be specified with `vm_count`. If compute instances are created + with a COLLOCATED policy, then exactly `vm_count` instances must be created at the same time with the resource policy + attached. + enum_values: + - 'COLLOCATED' + - name: 'maxDistance' + type: Integer + description: | + Specifies the number of max logical switches. + min_version: 'beta' + - name: 'instanceSchedulePolicy' + type: NestedObject + description: | + Resource policy for scheduling instance operations. + conflicts: + - snapshot_schedule_policy + - group_placement_policy + - disk_consistency_group_policy + properties: + - name: 'vmStartSchedule' + type: NestedObject + description: | + Specifies the schedule for starting instances. + at_least_one_of: + - 'instance_schedule_policy.0.vm_start_schedule' + - 'instance_schedule_policy.0.vm_stop_schedule' + properties: + - name: 'schedule' + type: String + description: | + Specifies the frequency for the operation, using the unix-cron format. + required: true + - name: 'vmStopSchedule' + type: NestedObject + description: | + Specifies the schedule for stopping instances. 
+ at_least_one_of: + - 'instance_schedule_policy.0.vm_start_schedule' + - 'instance_schedule_policy.0.vm_stop_schedule' + properties: + - name: 'schedule' + type: String + description: | + Specifies the frequency for the operation, using the unix-cron format. + required: true + - name: 'timeZone' + type: String + description: | + Specifies the time zone to be used in interpreting the schedule. The value of this field must be a time zone name + from the tz database: http://en.wikipedia.org/wiki/Tz_database. + required: true + - name: 'startTime' + type: String + description: | + The start time of the schedule. The timestamp is an RFC3339 string. + - name: 'expirationTime' + type: String + description: | + The expiration time of the schedule. The timestamp is an RFC3339 string. + - name: 'diskConsistencyGroupPolicy' + type: NestedObject + description: | + Replication consistency group for asynchronous disk replication. + send_empty_value: true + conflicts: + - snapshot_schedule_policy + - group_placement_policy + - instance_schedule_policy + custom_flatten: 'templates/terraform/custom_flatten/go/disk_consistency_group_policy.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/disk_consistency_group_policy.tmpl' + properties: + - name: 'enabled' + type: Boolean + description: | + Enable disk consistency on the resource policy. + required: true + immutable: true diff --git a/mmv1/products/compute/go_Route.yaml b/mmv1/products/compute/go_Route.yaml new file mode 100644 index 000000000000..f3f3ecfb2435 --- /dev/null +++ b/mmv1/products/compute/go_Route.yaml @@ -0,0 +1,256 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Route' +kind: 'compute#route' +description: | + Represents a Route resource. + + A route is a rule that specifies how certain packets should be handled by + the virtual network. Routes are associated with virtual machines by tag, + and the set of routes for a particular virtual machine is called its + routing table. For each packet leaving a virtual machine, the system + searches that virtual machine's routing table for a single best matching + route. + + Routes match packets by destination IP address, preferring smaller or more + specific ranges over larger ones. If there is a tie, the system selects + the route with the smallest priority value. If there is still a tie, it + uses the layer three and four packet headers to select just one of the + remaining matching routes. The packet is then forwarded as specified by + the next_hop field of the winning route -- either to another virtual + machine destination, a virtual machine gateway or a Compute + Engine-operated gateway. Packets that do not match any route in the + sending virtual machine's routing table will be dropped. + + A Route resource must have exactly one specification of either + nextHopGateway, nextHopInstance, nextHopIp, nextHopVpnTunnel, or + nextHopIlb. 
+ +references: + guides: + 'Using Routes': 'https://cloud.google.com/vpc/docs/using-routes' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/routes' +docs: + optional_properties: '* `next_hop_instance_zone` - (Optional when `next_hop_instance` is + specified) The zone of the instance specified in + `next_hop_instance`. Omit if `next_hop_instance` is specified as + a URL. +' +base_url: 'projects/{{project}}/global/routes' +has_self_link: true +immutable: true +mutex: projects/{{project}}/global/networks/{{network}}/peerings +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/route.tmpl' + decoder: 'templates/terraform/decoders/go/route.tmpl' +error_retry_predicates: + + - 'transport_tpg.IsPeeringOperationInProgress' +examples: + - name: 'route_basic' + primary_resource_id: 'default' + vars: + route_name: 'network-route' + network_name: 'compute-network' + - name: 'route_ilb' + primary_resource_id: 'route-ilb' + vars: + network_name: 'compute-network' + subnet_name: 'compute-subnet' + forwarding_rule_name: 'compute-forwarding-rule' + health_check_name: 'proxy-health-check' + backend_name: 'compute-backend' + route_name: 'route-ilb' + - name: 'route_ilb_vip' + primary_resource_id: 'route-ilb' + min_version: 'beta' + vars: + producer_name: 'producer' + consumer_name: 'consumer' + forwarding_rule_name: 'compute-forwarding-rule' + health_check_name: 'proxy-health-check' + backend_name: 'compute-backend' + route_name: 'route-ilb' +parameters: +properties: + - name: 'destRange' + type: String + description: | + The destination range of outgoing packets that 
this route applies to. + Only IPv4 is supported. + required: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property + when you create the resource. + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the + last character, which cannot be a dash. + required: true + validation: + regex: '^[a-z]([-a-z0-9]*[a-z0-9])?$' + - name: 'network' + type: ResourceRef + description: 'The network that this route applies to.' + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'priority' + type: Integer + description: | + The priority of this route. Priority is used to break ties in cases + where there is more than one matching route of equal prefix length. + + In the case of two routes with equal prefix length, the one with the + lowest-numbered priority value wins. + + Default value is 1000. Valid range is 0 through 65535. + immutable: true + send_empty_value: true + default_value: 1000 + - name: 'tags' + type: Array + description: 'A list of instance tags to which this route applies.' + is_set: true + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/set_to_list.tmpl' + item_type: + type: String + - name: 'nextHopGateway' + type: String + description: | + URL to a gateway that should handle matching packets. 
+ Currently, you can only specify the internet gateway, using a full or + partial valid URL: + * `https://www.googleapis.com/compute/v1/projects/project/global/gateways/default-internet-gateway` + * `projects/project/global/gateways/default-internet-gateway` + * `global/gateways/default-internet-gateway` + * The string `default-internet-gateway`. + immutable: true + exactly_one_of: + - 'next_hop_gateway' + - 'next_hop_instance' + - 'next_hop_ip' + - 'next_hop_vpn_tunnel' + - 'next_hop_ilb' + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/route_gateway.tmpl' + - name: 'nextHopInstance' + type: ResourceRef + description: | + URL to an instance that should handle matching packets. + You can specify this as a full or partial URL. For example: + * `https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/instance` + * `projects/project/zones/zone/instances/instance` + * `zones/zone/instances/instance` + * Just the instance name, with the zone in `next_hop_instance_zone`. + immutable: true + exactly_one_of: + - 'next_hop_gateway' + - 'next_hop_instance' + - 'next_hop_ip' + - 'next_hop_vpn_tunnel' + - 'next_hop_ilb' + custom_expand: 'templates/terraform/custom_expand/go/route_instance.tmpl' + resource: 'Instance' + imports: 'selfLink' + - name: 'nextHopIp' + type: String + description: | + Network IP address of an instance that should handle matching packets. + immutable: true + default_from_api: true + exactly_one_of: + - 'next_hop_gateway' + - 'next_hop_instance' + - 'next_hop_ip' + - 'next_hop_vpn_tunnel' + - 'next_hop_ilb' + - name: 'nextHopVpnTunnel' + type: ResourceRef + description: | + URL to a VpnTunnel that should handle matching packets. 
+    immutable: true
+    exactly_one_of:
+      - 'next_hop_gateway'
+      - 'next_hop_instance'
+      - 'next_hop_ip'
+      - 'next_hop_vpn_tunnel'
+      - 'next_hop_ilb'
+    custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl'
+    resource: 'VpnTunnel'
+    imports: 'selfLink'
+  - name: 'nextHopNetwork'
+    type: String
+    description: |
+      URL to a Network that should handle matching packets.
+    output: true
+  - name: 'nextHopIlb'
+    type: String
+    description: |
+      The IP address or URL to a forwarding rule of type
+      loadBalancingScheme=INTERNAL that should handle matching
+      packets.
+
+      With the GA provider you can only specify the forwarding
+      rule as a partial or full URL. For example, the following
+      are all valid values:
+      * 10.128.0.56
+      * https://www.googleapis.com/compute/v1/projects/project/regions/region/forwardingRules/forwardingRule
+      * regions/region/forwardingRules/forwardingRule
+
+      With the beta provider, you can also specify the IP address
+      of a forwarding rule from the same VPC or any peered VPC.
+
+      Note that this can only be used when the destinationRange is
+      a public (non-RFC 1918) IP CIDR range.
+    immutable: true
+    exactly_one_of:
+      - 'next_hop_gateway'
+      - 'next_hop_instance'
+      - 'next_hop_ip'
+      - 'next_hop_vpn_tunnel'
+      - 'next_hop_ilb'
+    diff_suppress_func: 'tpgresource.CompareIpAddressOrSelfLinkOrResourceName'
diff --git a/mmv1/products/compute/go_Router.yaml b/mmv1/products/compute/go_Router.yaml
new file mode 100644
index 000000000000..b41f54f27de3
--- /dev/null
+++ b/mmv1/products/compute/go_Router.yaml
@@ -0,0 +1,199 @@
+# Copyright 2024 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Router' +kind: 'compute#router' +description: | + Represents a Router resource. +references: + guides: + 'Google Cloud Router': 'https://cloud.google.com/router/docs/' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/routers' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/routers' +has_self_link: true +update_verb: 'PATCH' +mutex: router/{{region}}/{{name}} +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + constants: 'templates/terraform/constants/go/router.go.tmpl' +custom_diff: + - 'resourceComputeRouterCustomDiff' +examples: + - name: 'router_basic' + primary_resource_id: 'foobar' + vars: + router_name: 'my-router' + network_name: 'my-network' + ignore_read_extra: + - 'advertisedIpRanges' + - name: 'compute_router_encrypted_interconnect' + primary_resource_id: 'encrypted-interconnect-router' + vars: + router_name: 'test-router' + network_name: 'test-network' + ignore_read_extra: + - 'advertisedIpRanges' +parameters: + - name: 'region' + type: ResourceRef + description: Region where the router resides. 
+ required: false + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'name' + type: String + description: | + Name of the resource. The name must be 1-63 characters long, and + comply with RFC1035. Specifically, the name must be 1-63 characters + long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + immutable: true + validation: + function: 'verify.ValidateGCEName' + - name: 'description' + type: String + description: | + An optional description of this resource. + send_empty_value: true + - name: 'network' + type: ResourceRef + description: | + A reference to the network to which this router belongs. + required: true + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'bgp' + type: NestedObject + description: | + BGP information specific to this router. + send_empty_value: true + properties: + - name: 'asn' + type: Integer + description: | + Local BGP Autonomous System Number (ASN). Must be an RFC6996 + private ASN, either 16-bit or 32-bit. The value will be fixed for + this router resource. All VPN tunnels that link to this router + will have the same local ASN. + required: true + validation: + function: 'verify.ValidateRFC6996Asn' + - name: 'advertiseMode' + type: Enum + description: | + User-specified flag to indicate which mode to use for advertisement. 
+ default_value: DEFAULT + enum_values: + - 'DEFAULT' + - 'CUSTOM' + - name: 'advertisedGroups' + type: Array + description: | + User-specified list of prefix groups to advertise in custom mode. + This field can only be populated if advertiseMode is CUSTOM and + is advertised to all peers of the router. These groups will be + advertised in addition to any specified prefixes. Leave this field + blank to advertise no custom groups. + + This enum field has the one valid value: ALL_SUBNETS + send_empty_value: true + item_type: + type: String + - name: 'advertisedIpRanges' + type: Array + description: | + User-specified list of individual IP ranges to advertise in + custom mode. This field can only be populated if advertiseMode + is CUSTOM and is advertised to all peers of the router. These IP + ranges will be advertised in addition to any specified groups. + Leave this field blank to advertise no custom IP ranges. + send_empty_value: true + custom_flatten: 'templates/terraform/custom_flatten/go/compute_router_range.go.tmpl' + item_type: + type: NestedObject + properties: + - name: 'range' + type: String + description: | + The IP range to advertise. The value must be a + CIDR-formatted string. + required: true + send_empty_value: true + - name: 'description' + type: String + description: | + User-specified description for the IP range. + send_empty_value: true + - name: 'keepaliveInterval' + type: Integer + description: | + The interval in seconds between BGP keepalive messages that are sent + to the peer. Hold time is three times the interval at which keepalive + messages are sent, and the hold time is the maximum number of seconds + allowed to elapse between successive keepalive messages that BGP + receives from a peer. + + BGP will use the smaller of either the local hold time value or the + peer's hold time value as the hold time for the BGP connection + between the two peers. If set, this value must be between 20 and 60. + The default is 20. 
+ default_value: 20 + - name: 'identifierRange' + type: String + description: | + Explicitly specifies a range of valid BGP Identifiers for this Router. + It is provided as a link-local IPv4 range (from 169.254.0.0/16), of + size at least /30, even if the BGP sessions are over IPv6. It must + not overlap with any IPv4 BGP session ranges. Other vendors commonly + call this router ID. + default_from_api: true + - name: 'encryptedInterconnectRouter' + type: Boolean + description: | + Indicates if a router is dedicated for use with encrypted VLAN + attachments (interconnectAttachments). + immutable: true diff --git a/mmv1/products/compute/go_RouterNat.yaml b/mmv1/products/compute/go_RouterNat.yaml new file mode 100644 index 000000000000..4ba216388bec --- /dev/null +++ b/mmv1/products/compute/go_RouterNat.yaml @@ -0,0 +1,464 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'RouterNat' +description: | + A NAT service created in a router. 
+references:
+  guides:
+    'Google Cloud Router': 'https://cloud.google.com/router/docs/'
+  api: 'https://cloud.google.com/compute/docs/reference/rest/v1/routers'
+docs:
+id_format: '{{project}}/{{region}}/{{router}}/{{name}}'
+base_url: 'projects/{{project}}/regions/{{region}}/routers/{{router}}'
+self_link: 'projects/{{project}}/regions/{{region}}/routers/{{router}}'
+create_url: 'projects/{{project}}/regions/{{region}}/routers/{{router}}'
+create_verb: 'PATCH'
+update_url: 'projects/{{project}}/regions/{{region}}/routers/{{router}}'
+update_verb: 'PATCH'
+delete_url: 'projects/{{project}}/regions/{{region}}/routers/{{router}}'
+delete_verb: 'PATCH'
+mutex: router/{{region}}/{{router}}
+timeouts:
+  insert_minutes: 20
+  update_minutes: 20
+  delete_minutes: 20
+async:
+  type: 'OpAsync'
+  operation:
+    base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}'
+    kind: 'compute#operation'
+    path: 'name'
+    wait_ms: 1000
+  result:
+    path: 'targetLink'
+    resource_inside_response: false
+  error:
+    path: 'error/errors'
+    message: 'message'
+collection_url_key: 'nats'
+identity:
+  - name
+nested_query:
+  keys:
+    - nats
+  is_list_of_ids: false
+  modify_by_patch: true
+custom_code:
+  constants: 'templates/terraform/constants/go/router_nat.go.tmpl'
+  pre_create: 'templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl'
+  pre_update: 'templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl'
+custom_diff:
+  - 'resourceComputeRouterNatDrainNatIpsCustomDiff'
+exclude_tgc: true
+examples:
+  - name: 'router_nat_basic'
+    primary_resource_id: 'nat'
+    vars:
+      router_name: 'my-router'
+      nat_name: 'my-router-nat'
+      network_name: 'my-network'
+      subnet_name: 'my-subnetwork'
+    skip_test: true
+  - name: 'router_nat_manual_ips'
+    primary_resource_id: 'nat_manual'
+    vars:
+      router_name: 'my-router'
+      nat_name: 'my-router-nat'
+      network_name: 'my-network'
+      subnet_name: 'my-subnetwork'
+      address_name: 'nat-manual-ip'
+    skip_test: 
true + - name: 'router_nat_rules' + primary_resource_id: 'nat_rules' + vars: + router_name: 'my-router' + nat_name: 'my-router-nat' + network_name: 'my-network' + subnet_name: 'my-subnetwork' + address_name1: 'nat-address1' + address_name2: 'nat-address2' + address_name3: 'nat-address3' + skip_test: true + - name: 'router_nat_private' + primary_resource_id: 'nat_type' + min_version: 'beta' + vars: + router_name: 'my-router' + nat_name: 'my-router-nat' + network_name: 'my-network' + subnet_name: 'my-subnetwork' + hub_name: 'my-hub' + spoke_name: 'my-spoke' + skip_test: true +parameters: + - name: 'router' + type: ResourceRef + description: | + The name of the Cloud Router in which this NAT will be configured. + url_param_only: true + required: true + immutable: true + resource: 'Router' + imports: 'name' + - name: 'region' + type: ResourceRef + description: Region where the router and NAT reside. + url_param_only: true + required: false + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + Name of the NAT service. The name must be 1-63 characters long and + comply with RFC1035. + required: true + immutable: true + validation: + function: 'verify.ValidateRFC1035Name(2, 63)' + - name: 'natIpAllocateOption' + type: Enum + description: | + How external IPs should be allocated for this NAT. Valid values are + `AUTO_ONLY` for only allowing NAT IPs allocated by Google Cloud + Platform, or `MANUAL_ONLY` for only user-allocated NAT IP addresses. + required: false + enum_values: + - 'MANUAL_ONLY' + - 'AUTO_ONLY' + - name: 'natIps' + type: Array + description: | + Self-links of NAT IPs. Only valid if natIpAllocateOption + is set to MANUAL_ONLY. 
+ is_set: true + send_empty_value: true + set_hash_func: 'computeRouterNatIPsHash' + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'address' + type: ResourceRef + description: 'A reference to an address associated with this NAT' + resource: 'Address' + imports: 'selfLink' + - name: 'drainNatIps' + type: Array + description: | + A list of URLs of the IP resources to be drained. These IPs must be + valid static external IPs that have been assigned to the NAT. + is_set: true + send_empty_value: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'address' + type: ResourceRef + description: 'A reference to an address associated with this NAT' + resource: 'Address' + imports: 'selfLink' + - name: 'sourceSubnetworkIpRangesToNat' + type: Enum + description: | + How NAT should be configured per Subnetwork. + If `ALL_SUBNETWORKS_ALL_IP_RANGES`, all of the + IP ranges in every Subnetwork are allowed to Nat. + If `ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES`, all of the primary IP + ranges in every Subnetwork are allowed to Nat. + `LIST_OF_SUBNETWORKS`: A list of Subnetworks are allowed to Nat + (specified in the field subnetwork below). Note that if this field + contains ALL_SUBNETWORKS_ALL_IP_RANGES or + ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any + other RouterNat section in any Router for this network in this region. + required: true + enum_values: + - 'ALL_SUBNETWORKS_ALL_IP_RANGES' + - 'ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES' + - 'LIST_OF_SUBNETWORKS' + - name: 'subnetwork' + type: Array + description: | + One or more subnetwork NAT configurations. 
Only used if + `source_subnetwork_ip_ranges_to_nat` is set to `LIST_OF_SUBNETWORKS` + api_name: subnetworks + is_set: true + send_empty_value: true + set_hash_func: 'computeRouterNatSubnetworkHash' + item_type: + type: NestedObject + properties: + - name: 'name' + type: ResourceRef + description: 'Self-link of subnetwork to NAT' + required: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'sourceIpRangesToNat' + type: Array + description: | + List of options for which source IPs in the subnetwork + should have NAT enabled. Supported values include: + `ALL_IP_RANGES`, `LIST_OF_SECONDARY_IP_RANGES`, + `PRIMARY_IP_RANGE`. + is_set: true + required: true + item_type: + type: String + min_size: 1 + - name: 'secondaryIpRangeNames' + type: Array + description: | + List of the secondary ranges of the subnetwork that are allowed + to use NAT. This can be populated only if + `LIST_OF_SECONDARY_IP_RANGES` is one of the values in + sourceIpRangesToNat + is_set: true + item_type: + type: String + - name: 'minPortsPerVm' + type: Integer + description: | + Minimum number of ports allocated to a VM from this NAT. Defaults to 64 for static port allocation and 32 dynamic port allocation if not set. + default_from_api: true + - name: 'maxPortsPerVm' + type: Integer + description: | + Maximum number of ports allocated to a VM from this NAT. + This field can only be set when enableDynamicPortAllocation is enabled. + - name: 'enableDynamicPortAllocation' + type: Boolean + description: | + Enable Dynamic Port Allocation. + If minPortsPerVm is set, minPortsPerVm must be set to a power of two greater than or equal to 32. + If minPortsPerVm is not set, a minimum of 32 ports will be allocated to a VM from this NAT config. + If maxPortsPerVm is set, maxPortsPerVm must be set to a power of two greater than minPortsPerVm. 
+ If maxPortsPerVm is not set, a maximum of 65536 ports will be allocated to a VM from this NAT config. + + Mutually exclusive with enableEndpointIndependentMapping. + default_from_api: true + send_empty_value: true + - name: 'udpIdleTimeoutSec' + type: Integer + description: | + Timeout (in seconds) for UDP connections. Defaults to 30s if not set. + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: 30 + - name: 'icmpIdleTimeoutSec' + type: Integer + description: | + Timeout (in seconds) for ICMP connections. Defaults to 30s if not set. + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: 30 + - name: 'tcpEstablishedIdleTimeoutSec' + type: Integer + description: | + Timeout (in seconds) for TCP established connections. + Defaults to 1200s if not set. + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: 1200 + - name: 'tcpTransitoryIdleTimeoutSec' + type: Integer + description: | + Timeout (in seconds) for TCP transitory connections. + Defaults to 30s if not set. + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: 30 + - name: 'tcpTimeWaitTimeoutSec' + type: Integer + description: | + Timeout (in seconds) for TCP connections that are in TIME_WAIT state. + Defaults to 120s if not set. + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: 120 + - name: 'logConfig' + type: NestedObject + description: | + Configuration for logging on NAT + send_empty_value: true + properties: + - name: 'enable' + type: Boolean + description: | + Indicates whether or not to export logs. + required: true + - name: 'filter' + type: Enum + description: | + Specifies the desired filtering of logs on this NAT. 
+ required: true + enum_values: + - 'ERRORS_ONLY' + - 'TRANSLATIONS_ONLY' + - 'ALL' + - name: 'endpointTypes' + type: Array + description: | + Specifies the endpoint Types supported by the NAT Gateway. + Supported values include: + `ENDPOINT_TYPE_VM`, `ENDPOINT_TYPE_SWG`, + `ENDPOINT_TYPE_MANAGED_PROXY_LB`. + immutable: true + default_from_api: true + item_type: + type: String + min_size: 1 + - name: 'rules' + type: Array + description: 'A list of rules associated with this NAT.' + is_set: true + send_empty_value: true + set_hash_func: 'computeRouterNatRulesHash' + item_type: + type: NestedObject + properties: + - name: 'ruleNumber' + type: Integer + description: | + An integer uniquely identifying a rule in the list. + The rule number must be a positive value between 0 and 65000, and must be unique among rules within a NAT. + required: true + send_empty_value: true + validation: + function: 'validation.IntBetween(0, 65000)' + - name: 'description' + type: String + description: 'An optional description of this rule.' + - name: 'match' + type: String + description: | + CEL expression that specifies the match condition that egress traffic from a VM is evaluated against. + If it evaluates to true, the corresponding action is enforced. + + The following examples are valid match expressions for public NAT: + + "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" + + "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" + + The following example is a valid match expression for private NAT: + + "nexthop.hub == 'https://networkconnectivity.googleapis.com/v1alpha1/projects/my-project/global/hub/hub-1'" + required: true + - name: 'action' + type: NestedObject + description: + 'The action to be enforced for traffic that matches this rule.' + default_from_api: true + properties: + - name: 'sourceNatActiveIps' + type: Array + description: | + A list of URLs of the IP resources used for this NAT rule. 
+ These IP addresses must be valid static external IP addresses assigned to the project. + This field is used for public NAT. + is_set: true + set_hash_func: 'computeRouterNatIPsHash' + custom_flatten: 'templates/terraform/custom_flatten/go/nat_rules_ip_set.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'address' + type: ResourceRef + description: + 'A reference to an address associated with this NAT' + resource: 'Address' + imports: 'selfLink' + - name: 'sourceNatDrainIps' + type: Array + description: | + A list of URLs of the IP resources to be drained. + These IPs must be valid static external IPs that have been assigned to the NAT. + These IPs should be used for updating/patching a NAT rule only. + This field is used for public NAT. + is_set: true + set_hash_func: 'computeRouterNatIPsHash' + custom_flatten: 'templates/terraform/custom_flatten/go/nat_rules_ip_set.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'address' + type: ResourceRef + description: + 'A reference to an address associated with this NAT' + resource: 'Address' + imports: 'selfLink' + - name: 'sourceNatActiveRanges' + type: Array + description: | + A list of URLs of the subnetworks used as source ranges for this NAT Rule. + These subnetworks must have purpose set to PRIVATE_NAT. + This field is used for private NAT. 
+ is_set: true + min_version: 'beta' + set_hash_func: 'computeRouterNatRulesSubnetHash' + custom_flatten: 'templates/terraform/custom_flatten/go/nat_rules_subnets_set.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'subnet' + type: ResourceRef + description: + 'A reference to a subnetwork address associated with this NAT' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'sourceNatDrainRanges' + type: Array + description: | + A list of URLs of subnetworks representing source ranges to be drained. + This is only supported on patch/update, and these subnetworks must have previously been used as active ranges in this NAT Rule. + This field is used for private NAT. + is_set: true + min_version: 'beta' + set_hash_func: 'computeRouterNatRulesSubnetHash' + custom_flatten: 'templates/terraform/custom_flatten/go/nat_rules_subnets_set.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'subnet' + type: ResourceRef + description: + 'A reference to a subnetwork address associated with this NAT' + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'enableEndpointIndependentMapping' + type: Boolean + description: | + Enable endpoint independent mapping. + For more information see the [official documentation](https://cloud.google.com/nat/docs/overview#specs-rfcs). + default_from_api: true + send_empty_value: true + - name: 'type' + type: Enum + description: | + Indicates whether this NAT is used for public or private IP translation. + If unspecified, it defaults to PUBLIC. + If `PUBLIC` NAT used for public IP translation. + If `PRIVATE` NAT used for private IP translation. + min_version: 'beta' + immutable: true + default_value: PUBLIC + enum_values: + - 'PUBLIC' + - 'PRIVATE' + - name: 'autoNetworkTier' + type: Enum + description: | + The network tier to use when automatically reserving NAT IP addresses. 
+ Must be one of: PREMIUM, STANDARD. If not specified, then the current + project-level default tier is used. + default_from_api: true + enum_values: + - 'PREMIUM' + - 'STANDARD' diff --git a/mmv1/products/compute/go_SecurityPolicyRule.yaml b/mmv1/products/compute/go_SecurityPolicyRule.yaml new file mode 100644 index 000000000000..dac5567f837d --- /dev/null +++ b/mmv1/products/compute/go_SecurityPolicyRule.yaml @@ -0,0 +1,447 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SecurityPolicyRule' +description: | + A rule for the SecurityPolicy. 
+references: + guides: + 'Creating global security policy rules': 'https://cloud.google.com/armor/docs/configure-security-policies' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/securityPolicies/addRule' +docs: +id_format: 'projects/{{project}}/global/securityPolicies/{{security_policy}}/priority/{{priority}}' +base_url: 'projects/{{project}}/global/securityPolicies/{{security_policy}}' +self_link: 'projects/{{project}}/global/securityPolicies/{{security_policy}}/getRule?priority={{priority}}' +create_url: 'projects/{{project}}/global/securityPolicies/{{security_policy}}/addRule?priority={{priority}}' +update_url: 'projects/{{project}}/global/securityPolicies/{{security_policy}}/patchRule?priority={{priority}}' +update_verb: 'POST' +update_mask: true +delete_url: 'projects/{{project}}/global/securityPolicies/{{security_policy}}/removeRule?priority={{priority}}' +delete_verb: 'POST' +import_format: + - 'projects/{{project}}/global/securityPolicies/{{security_policy}}/priority/{{priority}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: +examples: + - name: 'security_policy_rule_basic' + primary_resource_id: 'policy_rule' + vars: + sec_policy_name: 'policyruletest' + - name: 'security_policy_rule_default_rule' + primary_resource_id: 'policy_rule' + vars: + sec_policy_name: 'policyruletest' + project_id: 'PROJECT_NAME' + skip_test: true + - name: 'security_policy_rule_multiple_rules' + primary_resource_id: 'policy_rule_one' + vars: + sec_policy_name: 'policywithmultiplerules' +parameters: + - name: 'security_policy' + type: String + description: | + The name of the security policy this rule belongs to. 
+ url_param_only: true + required: true + immutable: true +properties: + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you create the resource. + - name: 'priority' + type: Integer + description: | + An integer indicating the priority of a rule in the list. + The priority must be a positive value between 0 and 2147483647. + Rules are evaluated from highest to lowest priority where 0 is the highest priority and 2147483647 is the lowest priority. + required: true + immutable: true + - name: 'match' + type: NestedObject + description: | + A match condition that incoming traffic is evaluated against. + If it evaluates to true, the corresponding 'action' is enforced. + properties: + - name: 'versionedExpr' + type: Enum + description: | + Preconfigured versioned expression. If this field is specified, config must also be specified. + Available preconfigured expressions along with their requirements are: SRC_IPS_V1 - must specify the corresponding srcIpRange field in config. + enum_values: + - 'SRC_IPS_V1' + - name: 'expr' + type: NestedObject + description: | + User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header. + properties: + - name: 'expression' + type: String + description: | + Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported. + # >> These fields are not yet supported, following the global security policy resource. + required: true + - name: 'exprOptions' + type: NestedObject + description: | + The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr'). + properties: + - name: 'recaptchaOptions' + type: NestedObject + description: | + reCAPTCHA configuration options to be applied for the rule. 
If the rule does not evaluate reCAPTCHA tokens, this field has no effect. + required: true + properties: + - name: 'actionTokenSiteKeys' + type: Array + description: | + A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. + item_type: + type: String + - name: 'sessionTokenSiteKeys' + type: Array + description: | + A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. + item_type: + type: String + - name: 'config' + type: NestedObject + description: | + The configuration options available when specifying versionedExpr. + This field must be specified if versionedExpr is specified and cannot be specified if versionedExpr is not specified. + properties: + - name: 'srcIpRanges' + type: Array + description: | + CIDR IP address range. Maximum number of srcIpRanges allowed is 10. + item_type: + type: String + - name: 'preconfiguredWafConfig' + type: NestedObject + description: | + Preconfigured WAF configuration to be applied for the rule. + If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect. + properties: + - name: 'exclusion' + type: Array + description: | + An exclusion to apply during preconfigured WAF evaluation. + api_name: exclusions + item_type: + type: NestedObject + properties: + - name: 'requestHeader' + type: Array + description: | + Request header whose value will be excluded from inspection during preconfigured WAF evaluation. + api_name: requestHeadersToExclude + item_type: + type: NestedObject + properties: + - name: 'operator' + type: String + description: | + You can specify an exact match or a partial match by using a field operator and a field value. 
+ Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. + api_name: op + required: true + validation: + function: 'validation.StringInSlice([]string{"EQUALS", "STARTS_WITH", "ENDS_WITH", "CONTAINS", "EQUALS_ANY"}, false)' + - name: 'value' + type: String + description: | + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + api_name: val + - name: 'requestCookie' + type: Array + description: | + Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation. + api_name: requestCookiesToExclude + item_type: + type: NestedObject + properties: + - name: 'operator' + type: String + description: | + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. 
+ api_name: op + required: true + validation: + function: 'validation.StringInSlice([]string{"EQUALS", "STARTS_WITH", "ENDS_WITH", "CONTAINS", "EQUALS_ANY"}, false)' + - name: 'value' + type: String + description: | + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + api_name: val + - name: 'requestUri' + type: Array + description: | + Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. + When specifying this field, the query or fragment part should be excluded. + api_name: requestUrisToExclude + item_type: + type: NestedObject + properties: + - name: 'operator' + type: String + description: | + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. + api_name: op + required: true + validation: + function: 'validation.StringInSlice([]string{"EQUALS", "STARTS_WITH", "ENDS_WITH", "CONTAINS", "EQUALS_ANY"}, false)' + - name: 'value' + type: String + description: | + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. 
+ api_name: val + - name: 'requestQueryParam' + type: Array + description: | + Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. + Note that the parameter can be in the query string or in the POST body. + api_name: requestQueryParamsToExclude + item_type: + type: NestedObject + properties: + - name: 'operator' + type: String + description: | + You can specify an exact match or a partial match by using a field operator and a field value. + Available options: + EQUALS: The operator matches if the field value equals the specified value. + STARTS_WITH: The operator matches if the field value starts with the specified value. + ENDS_WITH: The operator matches if the field value ends with the specified value. + CONTAINS: The operator matches if the field value contains the specified value. + EQUALS_ANY: The operator matches if the field value is any value. + api_name: op + required: true + validation: + function: 'validation.StringInSlice([]string{"EQUALS", "STARTS_WITH", "ENDS_WITH", "CONTAINS", "EQUALS_ANY"}, false)' + - name: 'value' + type: String + description: | + A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. + The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY. + api_name: val + - name: 'targetRuleSet' + type: String + description: | + Target WAF rule set to apply the preconfigured WAF exclusion. + required: true + - name: 'targetRuleIds' + type: Array + description: | + A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. + If omitted, it refers to all the rule IDs under the WAF rule set. + item_type: + type: String + - name: 'action' + type: String + description: | + The Action to perform when the rule is matched. The following are the valid actions: + + * allow: allow access to target. 
+ + * deny(STATUS): deny access to target, returns the HTTP response code specified. Valid values for STATUS are 403, 404, and 502. + + * rate_based_ban: limit client traffic to the configured threshold and ban the client if the traffic exceeds the threshold. Configure parameters for this action in RateLimitOptions. Requires rateLimitOptions to be set. + + * redirect: redirect to a different target. This can either be an internal reCAPTCHA redirect, or an external URL-based redirect via a 302 response. Parameters for this action can be configured via redirectOptions. This action is only supported in Global Security Policies of type CLOUD_ARMOR. + + * throttle: limit client traffic to the configured threshold. Configure parameters for this action in rateLimitOptions. Requires rateLimitOptions to be set for this. + required: true + - name: 'rateLimitOptions' + type: NestedObject + description: | + Must be specified if the action is "rate_based_ban" or "throttle". Cannot be specified for any other actions. + update_mask_fields: + - 'rateLimitOptions.rateLimitThreshold' + - 'rateLimitOptions.conformAction' + - 'rateLimitOptions.exceedRedirectOptions' + - 'rateLimitOptions.exceedAction' + - 'rateLimitOptions.enforceOnKey' + - 'rateLimitOptions.enforceOnKeyName' + - 'rateLimitOptions.enforceOnKeyConfigs' + - 'rateLimitOptions.banThreshold' + - 'rateLimitOptions.banDurationSec' + properties: + - name: 'rateLimitThreshold' + type: NestedObject + description: | + Threshold at which to begin ratelimiting. + properties: + - name: 'count' + type: Integer + description: | + Number of HTTP(S) requests for calculating the threshold. + - name: 'intervalSec' + type: Integer + description: | + Interval over which the threshold is computed. + - name: 'conformAction' + type: String + description: | + Action to take for requests that are under the configured rate limit threshold. + Valid option is "allow" only. 
+ - name: 'exceedRedirectOptions' + type: NestedObject + description: | + Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. This field is only supported in Global Security Policies of type CLOUD_ARMOR. + properties: + - name: 'type' + type: String + description: | + Type of the redirect action. + - name: 'target' + type: String + description: | + Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA. + - name: 'exceedAction' + type: String + description: | + Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. + Valid options are deny(STATUS), where valid values for STATUS are 403, 404, 429, and 502. + - name: 'enforceOnKey' + type: Enum + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: + * ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKey" is not configured. + * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. + * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. + * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. + * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. 
If no such cookie is present in the request, the key type defaults to ALL. + * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. + * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. + * REGION_CODE: The country/region from which the request originates. + * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. + * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. + enum_values: + - 'ALL' + - 'IP' + - 'HTTP_HEADER' + - 'XFF_IP' + - 'HTTP_COOKIE' + - 'HTTP_PATH' + - 'SNI' + - 'REGION_CODE' + - 'TLS_JA3_FINGERPRINT' + - 'USER_IP' + - name: 'enforceOnKeyName' + type: String + description: | + Rate limit key name applicable only for the following key types: + HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. + HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - name: 'enforceOnKeyConfigs' + type: Array + description: | + If specified, any combination of values of enforceOnKeyType/enforceOnKeyName is treated as the key on which ratelimit threshold/action is enforced. + You can specify up to 3 enforceOnKeyConfigs. + If enforceOnKeyConfigs is specified, enforceOnKey must not be specified. + item_type: + type: NestedObject + properties: + - name: 'enforceOnKeyType' + type: Enum + description: | + Determines the key to enforce the rateLimitThreshold on. Possible values are: + * ALL: A single rate limit threshold is applied to all the requests matching this rule. This is the default value if "enforceOnKeyConfigs" is not configured. 
+ * IP: The source IP address of the request is the key. Each IP has this limit enforced separately. + * HTTP_HEADER: The value of the HTTP header whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the header value. If no such header is present in the request, the key type defaults to ALL. + * XFF_IP: The first IP address (i.e. the originating client IP address) specified in the list of IPs under X-Forwarded-For HTTP header. If no such header is present or the value is not a valid IP, the key defaults to the source IP address of the request i.e. key type IP. + * HTTP_COOKIE: The value of the HTTP cookie whose name is configured under "enforceOnKeyName". The key value is truncated to the first 128 bytes of the cookie value. If no such cookie is present in the request, the key type defaults to ALL. + * HTTP_PATH: The URL path of the HTTP request. The key value is truncated to the first 128 bytes. + * SNI: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to ALL on a HTTP session. + * REGION_CODE: The country/region from which the request originates. + * TLS_JA3_FINGERPRINT: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. + * USER_IP: The IP address of the originating client, which is resolved based on "userIpRequestHeaders" configured with the security policy. If there is no "userIpRequestHeaders" configuration or an IP address cannot be resolved from it, the key type defaults to IP. + enum_values: + - 'ALL' + - 'IP' + - 'HTTP_HEADER' + - 'XFF_IP' + - 'HTTP_COOKIE' + - 'HTTP_PATH' + - 'SNI' + - 'REGION_CODE' + - 'TLS_JA3_FINGERPRINT' + - 'USER_IP' + - name: 'enforceOnKeyName' + type: String + description: | + Rate limit key name applicable only for the following key types: + HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. 
+ HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value. + - name: 'banThreshold' + type: NestedObject + description: | + Can only be specified if the action for the rule is "rate_based_ban". + If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'. + properties: + - name: 'count' + type: Integer + description: | + Number of HTTP(S) requests for calculating the threshold. + - name: 'intervalSec' + type: Integer + description: | + Interval over which the threshold is computed. + - name: 'banDurationSec' + type: Integer + description: | + Can only be specified if the action for the rule is "rate_based_ban". + If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold. + - name: 'preview' + type: Boolean + description: | + If set to true, the specified action is not enforced. diff --git a/mmv1/products/compute/go_ServiceAttachment.yaml b/mmv1/products/compute/go_ServiceAttachment.yaml new file mode 100644 index 000000000000..39e46277bc81 --- /dev/null +++ b/mmv1/products/compute/go_ServiceAttachment.yaml @@ -0,0 +1,243 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServiceAttachment' +kind: 'compute#ServiceAttachment' +description: | + Represents a ServiceAttachment resource. +references: + guides: + 'Configuring Private Service Connect to access services': 'https://cloud.google.com/vpc/docs/configure-private-service-connect-services' + api: 'https://cloud.google.com/compute/docs/reference/beta/serviceAttachments' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/serviceAttachments' +has_self_link: true +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: + constants: 'templates/terraform/constants/go/compute_service_attachment.go.tmpl' + update_encoder: 'templates/terraform/update_encoder/go/compute_service_attachment.go.tmpl' +examples: + - name: 'service_attachment_basic' + primary_resource_id: 'psc_ilb_service_attachment' + vars: + service_attachment_name: 'my-psc-ilb' + network_name: 'psc-ilb-network' + nat_subnetwork_name: 'psc-ilb-nat' + producer_subnetwork_name: 'psc-ilb-producer-subnetwork' + producer_health_check_name: 'producer-service-health-check' + producer_service_name: 'producer-service' + producer_forwarding_rule_name: 'producer-forwarding-rule' + consumer_address_name: 'psc-ilb-consumer-address' + consumer_forwarding_rule_name: 'psc-ilb-consumer-forwarding-rule' + - name: 'service_attachment_explicit_projects' + primary_resource_id: 'psc_ilb_service_attachment' + vars: + service_attachment_name: 'my-psc-ilb' + network_name: 'psc-ilb-network' + nat_subnetwork_name: 'psc-ilb-nat' + producer_subnetwork_name: 'psc-ilb-producer-subnetwork' + producer_health_check_name: 
'producer-service-health-check' + producer_service_name: 'producer-service' + producer_forwarding_rule_name: 'producer-forwarding-rule' + consumer_address_name: 'psc-ilb-consumer-address' + consumer_forwarding_rule_name: 'psc-ilb-consumer-forwarding-rule' + - name: 'service_attachment_explicit_networks' + primary_resource_id: 'psc_ilb_service_attachment' + vars: + service_attachment_name: 'my-psc-ilb' + network_name: 'psc-ilb-network' + nat_subnetwork_name: 'psc-ilb-nat' + producer_subnetwork_name: 'psc-ilb-producer-subnetwork' + producer_health_check_name: 'producer-service-health-check' + producer_service_name: 'producer-service' + producer_forwarding_rule_name: 'producer-forwarding-rule' + consumer_network_name: 'psc-ilb-consumer-network' + consumer_address_name: 'psc-ilb-consumer-address' + consumer_forwarding_rule_name: 'psc-ilb-consumer-forwarding-rule' + - name: 'service_attachment_reconcile_connections' + primary_resource_id: 'psc_ilb_service_attachment' + vars: + service_attachment_name: 'my-psc-ilb' + network_name: 'psc-ilb-network' + nat_subnetwork_name: 'psc-ilb-nat' + producer_subnetwork_name: 'psc-ilb-producer-subnetwork' + producer_health_check_name: 'producer-service-health-check' + producer_service_name: 'producer-service' + producer_forwarding_rule_name: 'producer-forwarding-rule' + consumer_address_name: 'psc-ilb-consumer-address' + consumer_forwarding_rule_name: 'psc-ilb-consumer-forwarding-rule' +parameters: + - name: 'region' + type: ResourceRef + description: | + URL of the region where the resource resides. + required: false + immutable: true + ignore_read: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + Name of the resource. The name must be 1-63 characters long, and + comply with RFC1035. 
Specifically, the name must be 1-63 characters + long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` + which means the first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + immutable: true + - name: 'description' + type: String + description: | + An optional description of this resource. + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. This field is used internally during + updates of this resource. + output: true + - name: 'connectionPreference' + type: String + description: | + The connection preference to use for this service attachment. Valid + values include "ACCEPT_AUTOMATIC", "ACCEPT_MANUAL". + required: true + - name: 'connectedEndpoints' + type: Array + description: | + An array of the consumer forwarding rules connected to this service + attachment. + output: true + item_type: + type: NestedObject + properties: + - name: 'endpoint' + type: String + description: | + The URL of the consumer forwarding rule. + output: true + - name: 'status' + type: String + description: | + The status of the connection from the consumer forwarding rule to + this service attachment. + output: true + - name: 'targetService' + type: ResourceRef + description: | + The URL of a forwarding rule that represents the service identified by + this service attachment. + required: true + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'ForwardingRule' + imports: 'selfLink' + - name: 'natSubnets' + type: Array + description: | + An array of subnets that is provided for NAT in this service attachment. 
+ required: true + send_empty_value: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'subnet' + type: ResourceRef + description: | + A subnet that is provided for NAT in this service attachment. + resource: 'Subnetwork' + imports: 'selfLink' + - name: 'enableProxyProtocol' + type: Boolean + description: | + If true, enable the proxy protocol which is for supplying client TCP/IP + address data in TCP connections that traverse proxies on their way to + destination servers. + required: true + - name: 'domainNames' + type: Array + description: | + If specified, the domain name will be used during the integration between + the PSC connected endpoints and the Cloud DNS. For example, this is a + valid domain name: "p.mycompany.com.". Current max number of domain names + supported is 1. + immutable: true + item_type: + type: String + - name: 'consumerRejectLists' + type: Array + description: | + An array of projects that are not allowed to connect to this service + attachment. + send_empty_value: true + item_type: + type: String + - name: 'consumerAcceptLists' + type: Array + description: | + An array of projects that are allowed to connect to this service + attachment. + is_set: true + send_empty_value: true + set_hash_func: 'computeServiceAttachmentConsumerAcceptListsHash' + item_type: + type: NestedObject + properties: + - name: 'projectIdOrNum' + type: String + description: | + A project that is allowed to connect to this service attachment. + Only one of project_id_or_num and network_url may be set. + - name: 'networkUrl' + type: String + description: | + The network that is allowed to connect to this service attachment. + Only one of project_id_or_num and network_url may be set. + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'connectionLimit' + type: Integer + description: | + The number of consumer forwarding rules the consumer project can + create. 
+ required: true + - name: 'reconcileConnections' + type: Boolean + description: | + This flag determines whether a consumer accept/reject list change can reconcile the statuses of existing ACCEPTED or REJECTED PSC endpoints. + + If false, connection policy update will only affect existing PENDING PSC endpoints. Existing ACCEPTED/REJECTED endpoints will remain untouched regardless how the connection policy is modified . + If true, update will affect both PENDING and ACCEPTED/REJECTED PSC endpoints. For example, an ACCEPTED PSC endpoint will be moved to REJECTED if its project is added to the reject list. + default_from_api: true + send_empty_value: true diff --git a/mmv1/products/compute/go_Snapshot.yaml b/mmv1/products/compute/go_Snapshot.yaml new file mode 100644 index 000000000000..7f94a8ef9064 --- /dev/null +++ b/mmv1/products/compute/go_Snapshot.yaml @@ -0,0 +1,248 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Snapshot' +kind: 'compute#snapshot' +description: | + Represents a Persistent Disk Snapshot resource. + + Use snapshots to back up data from your persistent disks. Snapshots are + different from public images and custom images, which are used primarily + to create instances or configure instance templates. Snapshots are useful + for periodic backup of the data on your persistent disks. 
You can create + snapshots from persistent disks even while they are attached to running + instances. + + Snapshots are incremental, so you can create regular snapshots on a + persistent disk faster and at a much lower cost than if you regularly + created a full image of the disk. +# 'createSnapshot' is a zonal operation while 'snapshot.delete' is a global +# operation. we'll leave the object as global operation and use the disk's +# zonal operation for the create action. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/disks/create-snapshots' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/snapshots' +docs: +base_url: 'projects/{{project}}/global/snapshots' +has_self_link: true +create_url: 'PRE_CREATE_REPLACE_ME/createSnapshot' +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + full_url: 'selfLink' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + parent_resource_attribute: 'name' + import_format: + - 'projects/{{project}}/global/snapshots/{{name}}' + - '{{name}}' +custom_code: + decoder: 'templates/terraform/decoders/go/snapshot.go.tmpl' + pre_create: 'templates/terraform/pre_create/go/compute_snapshot_precreate_url.go.tmpl' +examples: + - name: 'snapshot_basic' + primary_resource_id: 'snapshot' + primary_resource_name: 'fmt.Sprintf("tf-test-my-snapshot%s", context["random_suffix"])' + vars: + snapshot_name: 'my-snapshot' + disk_name: 'debian-disk' + - name: 'snapshot_chainname' + primary_resource_id: 'snapshot' + primary_resource_name: 'fmt.Sprintf("tf-test-snapshot-chainname%s", context["random_suffix"])' + vars: + snapshot_name: 'my-snapshot' + disk_name: 'debian-disk' + chain_name: 'snapshot-chain' +parameters: + - name: 'sourceDisk' + type: ResourceRef + 
description: 'A reference to the disk used to create this snapshot.' + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Disk' + imports: 'name' + - name: 'zone' + type: ResourceRef + description: 'A reference to the zone where the disk is hosted.' + required: false + immutable: true + ignore_read: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' + - name: 'snapshotEncryptionKey' + type: NestedObject + description: | + Encrypts the snapshot using a customer-supplied encryption key. + + After you encrypt a snapshot using a customer-supplied key, you must + provide the same key if you use the snapshot later. For example, you + must provide the encryption key when you create a disk from the + encrypted snapshot in a future request. + + Customer-supplied encryption keys do not protect access to metadata of + the snapshot. + + If you do not provide an encryption key when creating the snapshot, + then the snapshot will be encrypted using an automatically generated + key and you do not need to provide a key to use the snapshot later. + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + ignore_read: true + sensitive: true + custom_flatten: 'templates/terraform/custom_flatten/go/compute_snapshot_snapshot_encryption_raw_key.go.tmpl' + - name: 'sha256' + type: String + description: | + The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied + encryption key that protects this resource. + output: true + - name: 'kmsKeySelfLink' + type: String + description: | + The name of the encryption key that is stored in Google Cloud KMS. 
+ api_name: kmsKeyName + - name: 'kmsKeyServiceAccount' + type: String + description: | + The service account used for the encryption request for the given KMS key. + If absent, the Compute Engine Service Agent service account is used. + # ignore_read in providers - this is only used in Create + - name: 'sourceDiskEncryptionKey' + type: NestedObject + description: | + The customer-supplied encryption key of the source snapshot. Required + if the source snapshot is protected by a customer-supplied encryption + key. + ignore_read: true + properties: + - name: 'rawKey' + type: String + description: | + Specifies a 256-bit customer-supplied encryption key, encoded in + RFC 4648 base64 to either encrypt or decrypt this resource. + # The docs list this field but it is never returned. + sensitive: true + - name: 'kmsKeyServiceAccount' + type: String + description: | + The service account used for the encryption request for the given KMS key. + If absent, the Compute Engine Service Agent service account is used. +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'snapshot_id' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'diskSizeGb' + type: Integer + description: 'Size of the snapshot, specified in GB.' + output: true + - name: 'chainName' + type: String + description: | + Creates the new snapshot in the snapshot chain labeled with the + specified name. The chain name must be 1-63 characters long and + comply with RFC1035. This is an uncommon option only for advanced + service owners who need to create separate snapshot chains, for + example, for chargeback tracking. When you describe your snapshot + resource, this field is visible only if it has a non-empty value. + - name: 'name' + type: String + description: | + Name of the resource; provided by the client when the resource is + created.
The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + # 'sourceDiskId' not useful for object convergence. + immutable: true + - name: 'storageBytes' + type: Integer + description: | + A size of the storage used by the snapshot. As snapshots share + storage, this number is expected to change with snapshot + creation/deletion. + output: true + - name: 'storageLocations' + type: Array + description: | + Cloud Storage bucket storage location of the snapshot (regional or multi-regional). + default_from_api: true + item_type: + type: String + - name: 'licenses' + type: Array + description: | + A list of public visible licenses that apply to this snapshot. This + can be because the original image had licenses attached (such as a + Windows image). snapshotEncryptionKey nested object Encrypts the + snapshot using a customer-supplied encryption key. + output: true + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'license' + type: ResourceRef + description: 'A reference to a license associated with this snapshot' + resource: 'License' + imports: 'selfLink' + - name: 'labels' + type: KeyValueLabels + description: Labels to apply to this Snapshot. + immutable: false + update_url: 'projects/{{project}}/global/snapshots/{{name}}/setLabels' + update_verb: 'POST' + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. 
+ output: true + update_url: 'projects/{{project}}/global/snapshots/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' diff --git a/mmv1/products/compute/go_SslCertificate.yaml b/mmv1/products/compute/go_SslCertificate.yaml new file mode 100644 index 000000000000..40808e930866 --- /dev/null +++ b/mmv1/products/compute/go_SslCertificate.yaml @@ -0,0 +1,124 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SslCertificate' +kind: 'compute#sslCertificate' +description: | + An SslCertificate resource, used for HTTPS load balancing. This resource + provides a mechanism to upload an SSL key and certificate to + the load balancer to serve secure connections from the user. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/ssl-certificates' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates' +docs: + optional_properties: '* `name_prefix` - (Optional) Creates a unique name beginning with the + specified prefix. Conflicts with `name`. 
+' +base_url: 'projects/{{project}}/global/sslCertificates' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl' +examples: + - name: 'ssl_certificate_basic' + primary_resource_id: 'default' + ignore_read_extra: + - 'name_prefix' + skip_vcr: true + - name: 'ssl_certificate_random_provider' + primary_resource_id: 'default' + external_providers: ["random", "time"] + skip_vcr: true + - name: 'ssl_certificate_target_https_proxies' + primary_resource_id: 'default' + vars: + target_https_proxy_name: 'test-proxy' + url_map_name: 'url-map' + backend_service_name: 'backend-service' + http_health_check_name: 'http-health-check' + ignore_read_extra: + - 'name_prefix' + skip_vcr: true +parameters: +properties: + - name: 'certificate' + type: String + description: | + The certificate in PEM format. + The certificate chain must be no greater than 5 certs long. + The chain must include at least one intermediate cert. + required: true + sensitive: true + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + - name: 'expireTime' + type: String + description: 'Expire time of the certificate in RFC3339 text format.' + output: true + - name: 'certificate_id' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'name' + type: String + description: | + Name of the resource. 
Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + + + These are in the same namespace as the managed SSL certificates. + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl' + validation: + function: 'verify.ValidateGCEName' + - name: 'privateKey' + type: String + description: 'The write-only private key in PEM format.' + required: true + immutable: true + ignore_read: true + sensitive: true + diff_suppress_func: 'tpgresource.Sha256DiffSuppress' + custom_flatten: 'templates/terraform/custom_flatten/go/sha256.tmpl' diff --git a/mmv1/products/compute/go_SslPolicy.yaml b/mmv1/products/compute/go_SslPolicy.yaml new file mode 100644 index 000000000000..6d2ecd29f45d --- /dev/null +++ b/mmv1/products/compute/go_SslPolicy.yaml @@ -0,0 +1,136 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SslPolicy' +kind: 'compute#sslPolicy' +description: | + Represents a SSL policy. 
SSL policies give you the ability to control the + features of SSL that your SSL proxy or HTTPS load balancer negotiates. +references: + guides: + 'Using SSL Policies': 'https://cloud.google.com/compute/docs/load-balancing/ssl-policies' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/sslPolicies' +docs: +base_url: 'projects/{{project}}/global/sslPolicies' +has_self_link: true +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + constants: 'templates/terraform/constants/go/ssl_policy.tmpl' + update_encoder: 'templates/terraform/update_encoder/go/ssl_policy.tmpl' +custom_diff: + - 'sslPolicyCustomizeDiff' +examples: + - name: 'ssl_policy_basic' + primary_resource_id: 'prod-ssl-policy' + vars: + production_ssl_policy_name: 'production-ssl-policy' + nonprod_ssl_policy_name: 'nonprod-ssl-policy' + custom_ssl_policy_name: 'custom-ssl-policy' +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + immutable: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. 
+ required: true + immutable: true + - name: 'profile' + type: Enum + description: | + Profile specifies the set of SSL features that can be used by the + load balancer when negotiating SSL with clients. If using `CUSTOM`, + the set of SSL features to enable must be specified in the + `customFeatures` field. + + See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) + for information on what cipher suites each profile provides. If + `CUSTOM` is used, the `custom_features` attribute **must be set**. + default_value: COMPATIBLE + enum_values: + - 'COMPATIBLE' + - 'MODERN' + - 'RESTRICTED' + - 'CUSTOM' + - name: 'minTlsVersion' + type: Enum + description: | + The minimum version of SSL protocol that can be used by the clients + to establish a connection with the load balancer. + default_value: TLS_1_0 + enum_values: + - 'TLS_1_0' + - 'TLS_1_1' + - 'TLS_1_2' + - name: 'enabledFeatures' + type: Array + description: 'The list of features enabled in the SSL policy.' + is_set: true + output: true + item_type: + type: String + - name: 'customFeatures' + type: Array + description: | + Profile specifies the set of SSL features that can be used by the + load balancer when negotiating SSL with clients. This can be one of + `COMPATIBLE`, `MODERN`, `RESTRICTED`, or `CUSTOM`. If using `CUSTOM`, + the set of SSL features to enable must be specified in the + `customFeatures` field. + + See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) + for which ciphers are available to use. **Note**: this argument + *must* be present when using the `CUSTOM` profile. This argument + *must not* be present when using any other profile. + is_set: true + item_type: + type: String + - name: 'fingerprint' + type: String + description: | + Fingerprint of this resource. A hash of the contents stored in this + object. This field is used in optimistic locking. 
+ output: true diff --git a/mmv1/products/compute/go_Subnetwork.yaml b/mmv1/products/compute/go_Subnetwork.yaml new file mode 100644 index 000000000000..4184e5a30d5a --- /dev/null +++ b/mmv1/products/compute/go_Subnetwork.yaml @@ -0,0 +1,402 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Subnetwork' +kind: 'compute#subnetwork' +description: | + A VPC network is a virtual version of the traditional physical networks + that exist within and between physical data centers. A VPC network + provides connectivity for your Compute Engine virtual machine (VM) + instances, Container Engine containers, App Engine Flex services, and + other network-related resources. + + Each GCP project contains one or more VPC networks. Each VPC network is a + global entity spanning all GCP regions. This global VPC network allows VM + instances and other resources to communicate with each other via internal, + private IP addresses. + + Each VPC network is subdivided into subnets, and each subnet is contained + within a single region. You can have more than one subnet in a region for + a given VPC network. Each subnet has a contiguous private RFC1918 IP + space. You create instances, containers, and the like in these subnets. + When you create an instance, you must create it in a subnet, and the + instance draws its internal IP address from that subnet. 
+ + Virtual machine (VM) instances in a VPC network can communicate with + instances in all other subnets of the same VPC network, regardless of + region, using their RFC1918 private IP addresses. You can isolate portions + of the network, even entire subnets, using firewall rules. +references: + guides: + 'Private Google Access': 'https://cloud.google.com/vpc/docs/configure-private-google-access' + 'Cloud Networking': 'https://cloud.google.com/vpc/docs/using-vpc' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/subnetworks' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/subnetworks' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +iam_policy: + allowed_iam_role: 'roles/compute.networkUser' + parent_resource_attribute: 'subnetwork' + iam_conditions_request_type: 'QUERY_PARAM' +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/subnetwork.tmpl' + constants: 'templates/terraform/constants/go/subnetwork.tmpl' +custom_diff: + - 'customdiff.ForceNewIfChange("ip_cidr_range", IsShrinkageIpCidr)' +examples: + - name: 'subnetwork_basic' + primary_resource_id: 'network-with-private-secondary-ip-ranges' + primary_resource_name: 'fmt.Sprintf("tf-test-test-subnetwork%s", context["random_suffix"])' + vars: + subnetwork_name: 'test-subnetwork' + network_name: 'test-network' + - name: 'subnetwork_logging_config' + primary_resource_id: 'subnet-with-logging' + vars: + subnetwork_name: 'log-test-subnetwork' + network_name: 'log-test-network' + - name: 'subnetwork_internal_l7lb' + primary_resource_id: 'network-for-l7lb' + min_version: 'beta' + vars: + 
subnetwork_name: 'l7lb-test-subnetwork' + network_name: 'l7lb-test-network' + - name: 'subnetwork_ipv6' + primary_resource_id: 'subnetwork-ipv6' + vars: + subnetwork_name: 'ipv6-test-subnetwork' + network_name: 'ipv6-test-network' + - name: 'subnetwork_internal_ipv6' + primary_resource_id: 'subnetwork-internal-ipv6' + vars: + subnetwork_name: 'internal-ipv6-test-subnetwork' + network_name: 'internal-ipv6-test-network' + - name: 'subnetwork_purpose_private_nat' + primary_resource_id: 'subnetwork-purpose-private-nat' + min_version: 'beta' + vars: + subnetwork_name: 'subnet-purpose-test-subnetwork' + network_name: 'subnet-purpose-test-network' + - name: 'subnetwork_cidr_overlap' + primary_resource_id: 'subnetwork-cidr-overlap' + min_version: 'beta' + vars: + subnetwork_name: 'subnet-cidr-overlap' + network_name: 'net-cidr-overlap' +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when + you create the resource. This field can be set only at resource + creation time. + - name: 'gatewayAddress' + type: String + description: | + The gateway address for default routes to reach destination addresses + outside this subnetwork. + output: true + - name: 'ipCidrRange' + type: String + description: | + The range of internal addresses that are owned by this subnetwork. + Provide this property when you create the subnetwork. For example, + 10.0.0.0/8 or 192.168.0.0/16. Ranges must be unique and + non-overlapping within a network. Only IPv4 is supported. 
+ required: true + update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/expandIpCidrRange' + update_verb: 'POST' + validation: + function: 'verify.ValidateIpCidrRange' + - name: 'name' + type: String + description: | + The name of the resource, provided by the client when initially + creating the resource. The name must be 1-63 characters long, and + comply with RFC1035. Specifically, the name must be 1-63 characters + long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which + means the first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + validation: + function: 'verify.ValidateGCEName' + - name: 'network' + type: ResourceRef + description: | + The network this subnet belongs to. + Only networks that are in the distributed mode can have subnetworks. + required: true + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' + - name: 'purpose' + type: String + description: | + The purpose of the resource. This field can be either `PRIVATE_RFC_1918`, `REGIONAL_MANAGED_PROXY`, `GLOBAL_MANAGED_PROXY`, `PRIVATE_SERVICE_CONNECT` or `PRIVATE_NAT`([Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html)). + A subnet with purpose set to `REGIONAL_MANAGED_PROXY` is a user-created subnetwork that is reserved for regional Envoy-based load balancers. + A subnetwork in a given region with purpose set to `GLOBAL_MANAGED_PROXY` is a proxy-only subnet and is shared between all the cross-regional Envoy-based load balancers. + A subnetwork with purpose set to `PRIVATE_SERVICE_CONNECT` reserves the subnet for hosting a Private Service Connect published service. + A subnetwork with purpose set to `PRIVATE_NAT` is used as source range for Private NAT gateways. 
+ Note that `REGIONAL_MANAGED_PROXY` is the preferred setting for all regional Envoy load balancers. + If unspecified, the purpose defaults to `PRIVATE_RFC_1918`. + immutable: true + default_from_api: true + - name: 'role' + type: Enum + description: | + The role of subnetwork. + Currently, this field is only used when `purpose` is `REGIONAL_MANAGED_PROXY`. + The value can be set to `ACTIVE` or `BACKUP`. + An `ACTIVE` subnetwork is one that is currently being used for Envoy-based load balancers in a region. + A `BACKUP` subnetwork is one that is ready to be promoted to `ACTIVE` or is currently draining. + update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' + update_verb: 'PATCH' + update_id: 'role' + fingerprint_name: 'fingerprint' + enum_values: + - 'ACTIVE' + - 'BACKUP' + - name: 'secondaryIpRange' + type: Array + description: | + An array of configurations for secondary IP ranges for VM instances + contained in this subnetwork. The primary IP of such VM must belong + to the primary ipCidrRange of the subnetwork. The alias IPs may belong + to either primary or secondary ranges. + + **Note**: This field uses [attr-as-block mode](https://www.terraform.io/docs/configuration/attr-as-blocks.html) to avoid + breaking users during the 0.12 upgrade. To explicitly send a list + of zero objects you must use the following syntax: + `example=[]` + For more details about this behavior, see [this section](https://www.terraform.io/docs/configuration/attr-as-blocks.html#defining-a-fixed-object-collection-value). 
+ api_name: secondaryIpRanges + unordered_list: true + schema_config_mode_attr: true + default_from_api: true + send_empty_value: true + update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' + update_verb: 'PATCH' + update_id: 'secondaryIpRanges' + fingerprint_name: 'fingerprint' + item_type: + type: NestedObject + properties: + - name: 'rangeName' + type: String + description: | + The name associated with this subnetwork secondary range, used + when adding an alias IP range to a VM instance. The name must + be 1-63 characters long, and comply with RFC1035. The name + must be unique within the subnetwork. + required: true + validation: + function: 'verify.ValidateGCEName' + - name: 'ipCidrRange' + type: String + description: | + The range of IP addresses belonging to this subnetwork secondary + range. Provide this property when you create the subnetwork. + Ranges must be unique and non-overlapping with all primary and + secondary IP ranges within a network. Only IPv4 is supported. + required: true + validation: + function: 'verify.ValidateIpCidrRange' + - name: 'privateIpGoogleAccess' + type: Boolean + description: | + When enabled, VMs in this subnetwork without external IP addresses can + access Google APIs and services by using Private Google Access. + default_from_api: true + update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}/setPrivateIpGoogleAccess' + update_verb: 'POST' + - name: 'privateIpv6GoogleAccess' + type: String + description: The private IPv6 google access type for the VMs in this subnet. + default_from_api: true + update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' + update_verb: 'PATCH' + fingerprint_name: 'fingerprint' + - name: 'region' + type: ResourceRef + description: | + The GCP region for this subnetwork. 
+ required: false + immutable: true + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' + - name: 'logConfig' + type: NestedObject + description: | + This field denotes the VPC flow logging options for this subnetwork. If + logging is enabled, logs are exported to Cloud Logging. Flow logging + isn't supported if the subnet `purpose` field is set to subnetwork is + `REGIONAL_MANAGED_PROXY` or `GLOBAL_MANAGED_PROXY`. + send_empty_value: true + update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' + update_verb: 'PATCH' + update_id: 'logConfig' + fingerprint_name: 'fingerprint' + custom_flatten: 'templates/terraform/custom_flatten/go/subnetwork_log_config.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/subnetwork_log_config.go.tmpl' + properties: + - name: 'aggregationInterval' + type: Enum + description: | + Can only be specified if VPC flow logging for this subnetwork is enabled. + Toggles the aggregation interval for collecting flow logs. Increasing the + interval time will reduce the amount of generated flow logs for long + lasting connections. Default is an interval of 5 seconds per connection. + at_least_one_of: + - 'log_config.0.aggregation_interval' + - 'log_config.0.flow_sampling' + - 'log_config.0.metadata' + - 'log_config.0.filterExpr' + default_value: INTERVAL_5_SEC + enum_values: + - 'INTERVAL_5_SEC' + - 'INTERVAL_30_SEC' + - 'INTERVAL_1_MIN' + - 'INTERVAL_5_MIN' + - 'INTERVAL_10_MIN' + - 'INTERVAL_15_MIN' + - name: 'flowSampling' + type: Double + description: | + Can only be specified if VPC flow logging for this subnetwork is enabled. + The value of the field must be in [0, 1]. Set the sampling rate of VPC + flow logs within the subnetwork where 1.0 means all collected logs are + reported and 0.0 means no logs are reported. 
Default is 0.5 which means + half of all collected logs are reported. + at_least_one_of: + - 'log_config.0.aggregation_interval' + - 'log_config.0.flow_sampling' + - 'log_config.0.metadata' + - 'log_config.0.filterExpr' + default_value: 0.5 + - name: 'metadata' + type: Enum + description: | + Can only be specified if VPC flow logging for this subnetwork is enabled. + Configures whether metadata fields should be added to the reported VPC + flow logs. + at_least_one_of: + - 'log_config.0.aggregation_interval' + - 'log_config.0.flow_sampling' + - 'log_config.0.metadata' + - 'log_config.0.filterExpr' + default_value: INCLUDE_ALL_METADATA + enum_values: + - 'EXCLUDE_ALL_METADATA' + - 'INCLUDE_ALL_METADATA' + - 'CUSTOM_METADATA' + - name: 'metadataFields' + type: Array + description: | + List of metadata fields that should be added to reported logs. + Can only be specified if VPC flow logs for this subnetwork is enabled and "metadata" is set to CUSTOM_METADATA. + is_set: true + item_type: + type: String + - name: 'filterExpr' + type: String + description: | + Export filter used to define which VPC flow logs should be logged, as a CEL expression. See + at_least_one_of: + - 'log_config.0.aggregation_interval' + - 'log_config.0.flow_sampling' + - 'log_config.0.metadata' + - 'log_config.0.filterExpr' + default_value: true + - name: 'stackType' + type: Enum + description: | + The stack type for this subnet to identify whether the IPv6 feature is enabled or not. + If not specified IPV4_ONLY will be used. + default_from_api: true + update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' + update_verb: 'PATCH' + fingerprint_name: 'fingerprint' + enum_values: + - 'IPV4_ONLY' + - 'IPV4_IPV6' + - name: 'ipv6AccessType' + type: Enum + description: | + The access type of IPv6 address this subnet holds. It's immutable and can only be specified during creation + or the first time the subnet is updated into IPV4_IPV6 dual stack.
If the ipv6_type is EXTERNAL then this subnet + cannot enable direct path. + update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' + update_verb: 'PATCH' + fingerprint_name: 'fingerprint' + enum_values: + - 'EXTERNAL' + - 'INTERNAL' + - name: 'ipv6CidrRange' + type: String + description: | + The range of internal IPv6 addresses that are owned by this subnetwork. + output: true + - name: 'internalIpv6Prefix' + type: String + description: | + The internal IPv6 address range that is assigned to this subnetwork. + output: true + - name: 'externalIpv6Prefix' + type: String + description: | + The range of external IPv6 addresses that are owned by this subnetwork. + default_from_api: true + - name: 'allowSubnetCidrRoutesOverlap' + type: Boolean + description: | + Typically packets destined to IPs within the subnetwork range that do not match + existing resources are dropped and prevented from leaving the VPC. + Setting this field to true will allow these packets to match dynamic routes injected + via BGP even if their destinations match existing subnet ranges. + min_version: 'beta' + default_from_api: true + send_empty_value: true + update_url: 'projects/{{project}}/regions/{{region}}/subnetworks/{{name}}' + update_verb: 'PATCH' + fingerprint_name: 'fingerprint' diff --git a/mmv1/products/compute/go_TargetGrpcProxy.yaml b/mmv1/products/compute/go_TargetGrpcProxy.yaml new file mode 100644 index 000000000000..06078ff914ec --- /dev/null +++ b/mmv1/products/compute/go_TargetGrpcProxy.yaml @@ -0,0 +1,117 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'TargetGrpcProxy' +kind: 'compute#targetGrpcProxy' +description: | + Represents a Target gRPC Proxy resource. A target gRPC proxy is a component + of load balancers intended for load balancing gRPC traffic. Global forwarding + rules reference a target gRPC proxy. The Target gRPC Proxy references + a URL map which specifies how traffic routes to gRPC backend services. +references: + guides: + 'Using Target gRPC Proxies': 'https://cloud.google.com/traffic-director/docs/proxyless-overview' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/targetGrpcProxies' +docs: +base_url: 'projects/{{project}}/global/targetGrpcProxies' +has_self_link: true +update_verb: 'PATCH' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'target_grpc_proxy_basic' + primary_resource_id: 'default' + vars: + proxy_name: 'proxy' + urlmap_name: 'urlmap' + backend_name: 'backend' + healthcheck_name: 'healthcheck' +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'name' + type: String + description: | + Name of the resource. 
Provided by the client when the resource + is created. The name must be 1-63 characters long, and comply + with RFC1035. Specifically, the name must be 1-63 characters long + and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which + means the first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + immutable: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + - name: 'selfLinkWithId' + type: String + description: 'Server-defined URL with id for the resource.' + output: true + - name: 'urlMap' + type: String + description: | + URL to the UrlMap resource that defines the mapping from URL to + the BackendService. The protocol field in the BackendService + must be set to GRPC. + immutable: true + update_id: 'urlMap' + fingerprint_name: 'fingerprint' + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - name: 'validateForProxyless' + type: Boolean + description: | + If true, indicates that the BackendServices referenced by + the urlMap may be accessed by gRPC applications without using + a sidecar proxy. This will enable configuration checks on urlMap + and its referenced BackendServices to not allow unsupported features. + A gRPC application must use "xds:///" scheme in the target URI + of the service it is connecting to. If false, indicates that the + BackendServices referenced by the urlMap will be accessed by gRPC + applications via a sidecar proxy. In this case, a gRPC application + must not use "xds:///" scheme in the target URI of the service + it is connecting to + immutable: true + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. A hash of the contents stored in + this object. This field is used in optimistic locking. This field + will be ignored when inserting a TargetGrpcProxy. 
An up-to-date + fingerprint must be provided in order to patch/update the + TargetGrpcProxy; otherwise, the request will fail with error + 412 conditionNotMet. To see the latest fingerprint, make a get() + request to retrieve the TargetGrpcProxy. A base64-encoded string. + output: true diff --git a/mmv1/products/compute/go_TargetHttpProxy.yaml b/mmv1/products/compute/go_TargetHttpProxy.yaml new file mode 100644 index 000000000000..401f81e16f6b --- /dev/null +++ b/mmv1/products/compute/go_TargetHttpProxy.yaml @@ -0,0 +1,118 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'TargetHttpProxy' +kind: 'compute#targetHttpProxy' +description: | + Represents a TargetHttpProxy resource, which is used by one or more global + forwarding rule to route incoming HTTP requests to a URL map. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/load-balancing/http/target-proxies' + api: 'https://cloud.google.com/compute/docs/reference/v1/targetHttpProxies' +docs: +base_url: 'projects/{{project}}/global/targetHttpProxies' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'target_http_proxy_basic' + primary_resource_id: 'default' + vars: + target_http_proxy_name: 'test-proxy' + url_map_name: 'url-map' + backend_service_name: 'backend-service' + http_health_check_name: 'http-health-check' + - name: 'target_http_proxy_http_keep_alive_timeout' + primary_resource_id: 'default' + vars: + target_http_proxy_name: 'test-http-keep-alive-timeout-proxy' + url_map_name: 'url-map' + backend_service_name: 'backend-service' + http_health_check_name: 'http-health-check' + - name: 'target_http_proxy_https_redirect' + primary_resource_id: 'default' + vars: + target_http_proxy_name: 'test-https-redirect-proxy' + url_map_name: 'url-map' +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + - name: 'proxyId' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. 
Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + - name: 'urlMap' + type: ResourceRef + description: | + A reference to the UrlMap resource that defines the mapping from URL + to the BackendService. + required: true + update_url: 'projects/{{project}}/targetHttpProxies/{{name}}/setUrlMap' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'UrlMap' + imports: 'selfLink' + - name: 'proxyBind' + type: Boolean + description: | + This field only applies when the forwarding rule that references + this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. + default_from_api: true + - name: 'httpKeepAliveTimeoutSec' + type: Integer + description: | + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). If an HTTP keepalive is + not specified, a default value (610 seconds) will be used. For Global + external HTTP(S) load balancer, the minimum allowed value is 5 seconds and + the maximum allowed value is 1200 seconds. For Global external HTTP(S) + load balancer (classic), this option is not available publicly. diff --git a/mmv1/products/compute/go_TargetHttpsProxy.yaml b/mmv1/products/compute/go_TargetHttpsProxy.yaml new file mode 100644 index 000000000000..d9ec4cdc7d50 --- /dev/null +++ b/mmv1/products/compute/go_TargetHttpsProxy.yaml @@ -0,0 +1,220 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'TargetHttpsProxy' +kind: 'compute#targetHttpsProxy' +description: | + Represents a TargetHttpsProxy resource, which is used by one or more + global forwarding rule to route incoming HTTPS requests to a URL map. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/compute/docs/load-balancing/http/target-proxies' + api: 'https://cloud.google.com/compute/docs/reference/v1/targetHttpsProxies' +docs: +base_url: 'projects/{{project}}/global/targetHttpsProxies' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + encoder: 'templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl' + update_encoder: 'templates/terraform/encoders/go/compute_target_https_proxy.go.tmpl' + decoder: 'templates/terraform/decoders/go/compute_target_https_proxy.go.tmpl' +examples: + - name: 'target_https_proxy_basic' + primary_resource_id: 'default' + vars: + target_https_proxy_name: 'test-proxy' + ssl_certificate_name: 'my-certificate' + url_map_name: 'url-map' + backend_service_name: 'backend-service' + http_health_check_name: 'http-health-check' + - name: 
'target_https_proxy_http_keep_alive_timeout' + primary_resource_id: 'default' + vars: + target_https_proxy_name: 'test-http-keep-alive-timeout-proxy' + ssl_certificate_name: 'my-certificate' + url_map_name: 'url-map' + backend_service_name: 'backend-service' + http_health_check_name: 'http-health-check' + - name: 'target_https_proxy_mtls' + primary_resource_id: 'default' + min_version: 'beta' + vars: + target_https_proxy_name: 'test-mtls-proxy' + ssl_certificate_name: 'my-certificate' + url_map_name: 'url-map' + backend_service_name: 'backend-service' + http_health_check_name: 'http-health-check' + server_tls_policy_name: 'my-tls-policy' + trust_config_name: 'my-trust-config' + - name: 'target_https_proxy_certificate_manager_certificate' + primary_resource_id: 'default' + vars: + target_https_proxy_name: 'target-http-proxy' + certificate_manager_certificate_name: 'my-certificate' + url_map_name: 'url-map' + backend_service_name: 'backend-service' +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + immutable: true + - name: 'proxyId' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'quicOverride' + type: Enum + description: | + Specifies the QUIC override policy for this resource. 
This determines + whether the load balancer will attempt to negotiate QUIC with clients + or not. Can specify one of NONE, ENABLE, or DISABLE. If NONE is + specified, Google manages whether QUIC is used. + update_url: 'projects/{{project}}/global/targetHttpsProxies/{{name}}/setQuicOverride' + update_verb: 'POST' + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: NONE + enum_values: + - 'NONE' + - 'ENABLE' + - 'DISABLE' + - name: 'certificateManagerCertificates' + type: Array + description: | + URLs to certificate manager certificate resources that are used to authenticate connections between users and the load balancer. + Certificate manager certificates only apply when the load balancing scheme is set to INTERNAL_MANAGED. + For EXTERNAL and EXTERNAL_MANAGED, use certificate_map instead. + sslCertificates and certificateManagerCertificates fields can not be defined together. + Accepted format is `//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificates/{resourceName}` or just the self_link `projects/{project}/locations/{location}/certificates/{resourceName}` + update_url: 'projects/{{project}}/targetHttpsProxies/{{name}}/setSslCertificates' + update_verb: 'POST' + conflicts: + - ssl_certificates + diff_suppress_func: 'tpgresource.CompareResourceNames' + custom_expand: 'templates/terraform/custom_expand/go/certificate_manager_certificate_construct_full_url.go.tmpl' + item_type: + type: String + - name: 'sslCertificates' + type: Array + description: | + URLs to SslCertificate resources that are used to authenticate connections between users and the load balancer. + Currently, you may specify up to 15 SSL certificates. sslCertificates do not apply when the load balancing scheme is set to INTERNAL_SELF_MANAGED. + sslCertificates and certificateManagerCertificates can not be defined together. 
+ update_url: 'projects/{{project}}/targetHttpsProxies/{{name}}/setSslCertificates' + update_verb: 'POST' + conflicts: + - certificate_manager_certificates + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'sslCertificate' + type: ResourceRef + description: 'The SSL certificate URL used by this TargetHttpsProxy' + resource: 'SslCertificate' + imports: 'selfLink' + - name: 'certificateMap' + type: String + description: | + A reference to the CertificateMap resource uri that identifies a certificate map + associated with the given target proxy. This field is only supported for EXTERNAL and EXTERNAL_MANAGED load balancing schemes. + For INTERNAL_MANAGED, use certificate_manager_certificates instead. + Accepted format is `//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificateMaps/{resourceName}`. + update_url: 'projects/{{project}}/global/targetHttpsProxies/{{name}}/setCertificateMap' + update_verb: 'POST' + - name: 'sslPolicy' + type: ResourceRef + description: | + A reference to the SslPolicy resource that will be associated with + the TargetHttpsProxy resource. If not set, the TargetHttpsProxy + resource will not have any SSL policy configured. + update_url: 'projects/{{project}}/global/targetHttpsProxies/{{name}}/setSslPolicy' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'SslPolicy' + imports: 'selfLink' + - name: 'urlMap' + type: ResourceRef + description: | + A reference to the UrlMap resource that defines the mapping from URL + to the BackendService. 
+ required: true + update_url: 'projects/{{project}}/targetHttpsProxies/{{name}}/setUrlMap' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'UrlMap' + imports: 'selfLink' + - name: 'proxyBind' + type: Boolean + description: | + This field only applies when the forwarding rule that references + this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. + default_from_api: true + - name: 'httpKeepAliveTimeoutSec' + type: Integer + description: | + Specifies how long to keep a connection open, after completing a response, + while there is no matching traffic (in seconds). If an HTTP keepalive is + not specified, a default value (610 seconds) will be used. For Global + external HTTP(S) load balancer, the minimum allowed value is 5 seconds and + the maximum allowed value is 1200 seconds. For Global external HTTP(S) + load balancer (classic), this option is not available publicly. + - name: 'serverTlsPolicy' + type: ResourceRef + description: | + A URL referring to a networksecurity.ServerTlsPolicy + resource that describes how the proxy should authenticate inbound + traffic. serverTlsPolicy only applies to a global TargetHttpsProxy + attached to globalForwardingRules with the loadBalancingScheme + set to INTERNAL_SELF_MANAGED or EXTERNAL or EXTERNAL_MANAGED. + For details which ServerTlsPolicy resources are accepted with + INTERNAL_SELF_MANAGED and which with EXTERNAL, EXTERNAL_MANAGED + loadBalancingScheme consult ServerTlsPolicy documentation. + If left blank, communications are not encrypted. + resource: 'SslPolicy' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_TargetInstance.yaml b/mmv1/products/compute/go_TargetInstance.yaml new file mode 100644 index 000000000000..99d92de1ee4d --- /dev/null +++ b/mmv1/products/compute/go_TargetInstance.yaml @@ -0,0 +1,142 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'TargetInstance' +kind: 'compute#targetInstance' +description: | + Represents a TargetInstance resource which defines an endpoint instance + that terminates traffic of certain protocols. In particular, they are used + in Protocol Forwarding, where forwarding rules can send packets to a + non-NAT'ed target instance. Each target instance contains a single + virtual machine instance that receives and handles traffic from the + corresponding forwarding rules. 
+references: + guides: + 'Using Protocol Forwarding': 'https://cloud.google.com/compute/docs/protocol-forwarding' + api: 'https://cloud.google.com/compute/docs/reference/v1/targetInstances' +docs: +base_url: 'projects/{{project}}/zones/{{zone}}/targetInstances' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + post_create: 'templates/terraform/post_create/go/compute_target_instance_security_policy.go.tmpl' +examples: + - name: 'target_instance_basic' + primary_resource_id: 'default' + vars: + target_name: 'target' + instance_name: 'target-vm' + - name: 'target_instance_custom_network' + primary_resource_id: 'custom_network' + min_version: 'beta' + vars: + target_name: 'custom-network' + instance_name: 'custom-network-target-vm' + - name: 'target_instance_with_security_policy' + primary_resource_id: 'default' + min_version: 'beta' + vars: + network_name: 'custom-default-network' + subnetname_name: 'custom-default-subnet' + instance_name: 'target-vm' + region_sec_policy: 'region-secpolicy' + target_name: 'target-instance' +parameters: + - name: 'zone' + type: ResourceRef + description: | + URL of the zone where the target instance resides. + required: false + immutable: true + default_from_api: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Zone' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. 
Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'network' + type: String + description: + 'The URL of the network this target instance uses to forward traffic. If + not specified, the traffic will be forwarded to the network that the + default network interface belongs to.' + min_version: 'beta' + immutable: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + - name: 'instance' + type: ResourceRef + description: | + The Compute instance VM handling traffic for this target instance. + Accepts the instance self-link, relative path + (e.g. `projects/project/zones/zone/instances/instance`) or name. If + name is given, the zone will default to the given zone or + the provider-default zone and the project will default to the + provider-level project. + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_expand: 'templates/terraform/custom_expand/go/self_link_from_name.tmpl' + resource: 'Instance' + imports: 'selfLink' + - name: 'natPolicy' + type: Enum + description: | + NAT option controlling how IPs are NAT'ed to the instance. + Currently only NO_NAT (default value) is supported. + immutable: true + default_value: NO_NAT + enum_values: + - 'NO_NAT' + - name: 'securityPolicy' + type: String + description: | + The resource URL for the security policy associated with this target instance. 
+ min_version: 'beta' + update_url: 'projects/{{project}}/zones/{{zone}}/targetInstances/{{name}}/setSecurityPolicy' + update_verb: 'POST' diff --git a/mmv1/products/compute/go_TargetSslProxy.yaml b/mmv1/products/compute/go_TargetSslProxy.yaml new file mode 100644 index 000000000000..1f294f259f28 --- /dev/null +++ b/mmv1/products/compute/go_TargetSslProxy.yaml @@ -0,0 +1,145 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'TargetSslProxy' +kind: 'compute#targetSslProxy' +description: | + Represents a TargetSslProxy resource, which is used by one or more + global forwarding rule to route incoming SSL requests to a backend + service. 
+references: + guides: + 'Setting Up SSL proxy for Google Cloud Load Balancing': 'https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/' + api: 'https://cloud.google.com/compute/docs/reference/v1/targetSslProxies' +docs: +base_url: 'projects/{{project}}/global/targetSslProxies' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'target_ssl_proxy_basic' + primary_resource_id: 'default' + vars: + target_ssl_proxy_name: 'test-proxy' + ssl_certificate_name: 'default-cert' + backend_service_name: 'backend-service' + health_check_name: 'health-check' +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + immutable: true + - name: 'proxyId' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. 
+ required: true + immutable: true + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to + the backend. + update_url: 'projects/{{project}}/global/targetSslProxies/{{name}}/setProxyHeader' + update_verb: 'POST' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'backendService' + type: ResourceRef + description: | + A reference to the BackendService resource. + api_name: service + required: true + update_url: 'projects/{{project}}/global/targetSslProxies/{{name}}/setBackendService' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'sslCertificates' + type: Array + description: | + A list of SslCertificate resources that are used to authenticate + connections between users and the load balancer. At least one + SSL certificate must be specified. + update_url: 'projects/{{project}}/global/targetSslProxies/{{name}}/setSslCertificates' + update_verb: 'POST' + exactly_one_of: + - 'sslCertificates' + - 'certificateMap' + custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' + item_type: + name: 'sslCertificate' + type: ResourceRef + description: 'The SSL certificates used by this TargetSslProxy' + resource: 'SslCertificate' + imports: 'selfLink' + - name: 'certificateMap' + type: String + description: | + A reference to the CertificateMap resource uri that identifies a certificate map + associated with the given target proxy. This field can only be set for global target proxies. + Accepted format is `//certificatemanager.googleapis.com/projects/{project}/locations/{location}/certificateMaps/{resourceName}`. 
+ update_url: 'projects/{{project}}/global/targetSslProxies/{{name}}/setCertificateMap' + update_verb: 'POST' + exactly_one_of: + - 'sslCertificates' + - 'certificateMap' + - name: 'sslPolicy' + type: ResourceRef + description: | + A reference to the SslPolicy resource that will be associated with + the TargetSslProxy resource. If not set, the TargetSslProxy + resource will not have any SSL policy configured. + update_url: 'projects/{{project}}/global/targetSslProxies/{{name}}/setSslPolicy' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'SslPolicy' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_TargetTcpProxy.yaml b/mmv1/products/compute/go_TargetTcpProxy.yaml new file mode 100644 index 000000000000..b75fff706297 --- /dev/null +++ b/mmv1/products/compute/go_TargetTcpProxy.yaml @@ -0,0 +1,110 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'TargetTcpProxy' +kind: 'compute#targetTcpProxy' +description: | + Represents a TargetTcpProxy resource, which is used by one or more + global forwarding rule to route incoming TCP requests to a Backend + service. 
+references: + guides: + 'Setting Up TCP proxy for Google Cloud Load Balancing': 'https://cloud.google.com/compute/docs/load-balancing/tcp-ssl/tcp-proxy' + api: 'https://cloud.google.com/compute/docs/reference/v1/targetTcpProxies' +docs: +base_url: 'projects/{{project}}/global/targetTcpProxies' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'target_tcp_proxy_basic' + primary_resource_id: 'default' + vars: + target_tcp_proxy_name: 'test-proxy' + backend_service_name: 'backend-service' + health_check_name: 'health-check' +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + immutable: true + - name: 'proxyId' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. Specifically, the name must be 1-63 characters long and match + the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the + first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'proxyHeader' + type: Enum + description: | + Specifies the type of proxy header to append before sending data to + the backend. 
+ update_url: 'projects/{{project}}/global/targetTcpProxies/{{name}}/setProxyHeader' + update_verb: 'POST' + default_value: NONE + enum_values: + - 'NONE' + - 'PROXY_V1' + - name: 'backendService' + type: ResourceRef + description: | + A reference to the BackendService resource. + api_name: service + required: true + update_url: 'projects/{{project}}/global/targetTcpProxies/{{name}}/setBackendService' + update_verb: 'POST' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'proxyBind' + type: Boolean + description: | + This field only applies when the forwarding rule that references + this target proxy has a loadBalancingScheme set to INTERNAL_SELF_MANAGED. + default_from_api: true diff --git a/mmv1/products/compute/go_UrlMap.yaml b/mmv1/products/compute/go_UrlMap.yaml new file mode 100644 index 000000000000..38680546b572 --- /dev/null +++ b/mmv1/products/compute/go_UrlMap.yaml @@ -0,0 +1,2530 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'UrlMap' +kind: 'compute#urlMap' +description: | + UrlMaps are used to route requests to a backend service based on rules + that you define for the host and path of an incoming URL. 
+references: + guides: + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/urlMaps' +docs: +base_url: 'projects/{{project}}/global/urlMaps' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'url_map_bucket_and_service' + primary_resource_id: 'urlmap' + vars: + url_map_name: 'urlmap' + login_backend_service_name: 'login' + http_health_check_name: 'health-check' + backend_bucket_name: 'static-asset-backend-bucket' + storage_bucket_name: 'static-asset-bucket' + - name: 'url_map_traffic_director_route' + primary_resource_id: 'urlmap' + vars: + url_map_name: 'urlmap' + home_backend_service_name: 'home' + health_check_name: 'health-check' + - name: 'url_map_traffic_director_route_partial' + primary_resource_id: 'urlmap' + vars: + url_map_name: 'urlmap' + home_backend_service_name: 'home' + health_check_name: 'health-check' + - name: 'url_map_traffic_director_path' + primary_resource_id: 'urlmap' + vars: + url_map_name: 'urlmap' + home_backend_service_name: 'home' + health_check_name: 'health-check' + - name: 'url_map_traffic_director_path_partial' + primary_resource_id: 'urlmap' + vars: + url_map_name: 'urlmap' + home_backend_service_name: 'home' + health_check_name: 'health-check' + - name: 'url_map_header_based_routing' + primary_resource_id: 'urlmap' + vars: + url_map_name: 'urlmap' + default_backend_service_name: 'default' + service_a_backend_service_name: 'service-a' + service_b_backend_service_name: 'service-b' + health_check_name: 'health-check' + - name: 'url_map_parameter_based_routing' + primary_resource_id: 'urlmap' + vars: + url_map_name: 'urlmap' + 
default_backend_service_name: 'default' + service_a_backend_service_name: 'service-a' + service_b_backend_service_name: 'service-b' + health_check_name: 'health-check' + - name: 'external_http_lb_mig_backend' + primary_resource_id: 'default' + vars: + lb_backend_template: 'lb-backend-template' + lb_backend_example: 'lb-backend-example' + fw_allow_health_check: 'fw-allow-health-check' + lb_ipv4_1: 'lb-ipv4-1' + http_basic_check: 'http-basic-check' + web_backend_service: 'web-backend-service' + web_map_http: 'web-map-http' + http_lb_proxy: 'http-lb-proxy' + http_content_rule: 'http-content-rule' + ignore_read_extra: + - 'metadata' + - 'metadata_startup_script' + skip_test: true + skip_docs: true + - name: 'url_map_path_template_match' + primary_resource_id: 'urlmap' + vars: + url_map_name: 'urlmap' + cart_backend_service_name: 'cart-service' + user_backend_service_name: 'user-service' + http_health_check_name: 'health-check' + backend_bucket_name: 'static-asset-backend-bucket' + storage_bucket_name: 'static-asset-bucket' +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'defaultService' + type: ResourceRef + description: |- + The backend service or backend bucket to use when none of the given rules match. + exactly_one_of: + - 'default_service' + - 'default_url_redirect' + - 'default_route_action.0.weighted_backend_services' + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you create + the resource. + - name: 'map_id' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'fingerprint' + type: Fingerprint + description: | + Fingerprint of this resource. A hash of the contents stored in this object. 
This + field is used in optimistic locking. + output: true + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. The headerAction specified here take effect after + headerAction specified under pathMatcher. + properties: + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the + backendService. + at_least_one_of: + - 'header_action.0.request_headers_to_add' + - 'header_action.0.request_headers_to_remove' + - 'header_action.0.response_headers_to_add' + - 'header_action.0.response_headers_to_remove' + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + at_least_one_of: + - 'header_action.0.request_headers_to_add' + - 'header_action.0.request_headers_to_remove' + - 'header_action.0.response_headers_to_add' + - 'header_action.0.response_headers_to_remove' + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. 
+ at_least_one_of: + - 'header_action.0.request_headers_to_add' + - 'header_action.0.request_headers_to_remove' + - 'header_action.0.response_headers_to_add' + - 'header_action.0.response_headers_to_remove' + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + at_least_one_of: + - 'header_action.0.request_headers_to_add' + - 'header_action.0.request_headers_to_remove' + - 'header_action.0.response_headers_to_add' + - 'header_action.0.response_headers_to_remove' + item_type: + type: String + - name: 'host_rule' + type: Array + description: | + The list of HostRules to use against the URL. + api_name: hostRules + is_set: true + item_type: + type: NestedObject + properties: + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you create + the resource. + - name: 'hosts' + type: Array + description: | + The list of host patterns to match. They must be valid hostnames, except * will + match any string of ([a-z0-9-.]*). In that case, * must be the first character + and must be followed in the pattern by either - or .. + is_set: true + required: true + item_type: + type: String + - name: 'pathMatcher' + type: String + description: | + The name of the PathMatcher to use to match the path portion of the URL if the + hostRule matches the URL's host portion. 
+ required: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is created. The + name must be 1-63 characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase + letter, and all following characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + immutable: true + - name: 'path_matcher' + type: Array + description: | + The list of named PathMatchers to use against the URL. + api_name: pathMatchers + item_type: + type: NestedObject + properties: + - name: 'defaultService' + type: ResourceRef + description: The backend service or backend bucket to use when none of the given paths match. + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'description' + type: String + description: | + An optional description of this resource. Provide this property when you create + the resource. + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. HeaderAction specified here are applied after the + matching HttpRouteRule HeaderAction and before the HeaderAction in the UrlMap + properties: + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the + backendService. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. 
+ required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + item_type: + type: String + - name: 'name' + type: String + description: | + The name to which this PathMatcher is referred by the HostRule. + required: true + - name: 'path_rule' + type: Array + description: | + The list of path rules. Use this list instead of routeRules when routing based + on simple path matching is all that's required. The order by which path rules + are specified does not matter. Matches are always done on the longest-path-first + basis. For example: a pathRule with a path /a/b/c/* will match before /a/b/* + irrespective of the order in which those paths appear in this list. 
Within a + given pathMatcher, only one of pathRules or routeRules must be set. + api_name: pathRules + item_type: + type: NestedObject + properties: + - name: 'service' + type: ResourceRef + description: The backend service or backend bucket to use if any of the given paths match. + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'paths' + type: Array + description: | + The list of path patterns to match. Each must start with / and the only place a + \* is allowed is at the end following a /. The string fed to the path matcher + does not include any text after the first ? or #, and those chars are not + allowed here. + is_set: true + required: true + item_type: + type: String + - name: 'routeAction' + type: NestedObject + description: | + In response to a matching path, the load balancer performs advanced routing + actions like URL rewrites, header transformations, etc. prior to forwarding the + request to the selected backend. If routeAction specifies any + weightedBackendServices, service must not be set. Conversely if service is set, + routeAction cannot contain any weightedBackendServices. Only one of routeAction + or urlRedirect must be set. + properties: + - name: 'corsPolicy' + type: NestedObject + description: | + The specification for allowing client side cross-origin requests. Please see W3C + Recommendation for Cross Origin Resource Sharing + properties: + - name: 'allowCredentials' + type: Boolean + description: | + In response to a preflight request, setting this to true indicates that the + actual request can include user credentials. This translates to the Access- + Control-Allow-Credentials header. Defaults to false. + default_value: false + - name: 'allowHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Headers header. 
+ item_type: + type: String + - name: 'allowMethods' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Methods header. + item_type: + type: String + - name: 'allowOriginRegexes' + type: Array + description: | + Specifies the regular expression patterns that match allowed origins. For + regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either allow_origins or allow_origin_regex. + item_type: + type: String + - name: 'allowOrigins' + type: Array + description: | + Specifies the list of origins that will be allowed to do CORS requests. An + origin is allowed if it matches either allow_origins or allow_origin_regex. + item_type: + type: String + - name: 'disabled' + type: Boolean + description: | + If true, specifies the CORS policy is disabled. + required: true + - name: 'exposeHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Expose-Headers header. + item_type: + type: String + - name: 'maxAge' + type: Integer + description: | + Specifies how long the results of a preflight request can be cached. This + translates to the content for the Access-Control-Max-Age header. + - name: 'faultInjectionPolicy' + type: NestedObject + description: | + The specification for fault injection introduced into traffic to test the + resiliency of clients to backend service failure. As part of fault injection, + when clients send requests to a backend service, delays can be introduced by + Loadbalancer on a percentage of requests before sending those request to the + backend service. Similarly requests from clients can be aborted by the + Loadbalancer for a percentage of requests. timeout and retry_policy will be + ignored by clients that are configured with a fault_injection_policy. + properties: + - name: 'abort' + type: NestedObject + description: | + The specification for how client requests are aborted as part of fault + injection. 
+ properties: + - name: 'httpStatus' + type: Integer + description: | + The HTTP status code used to abort the request. The value must be between 200 + and 599 inclusive. + required: true + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) which will be + aborted as part of fault injection. The value must be between 0.0 and 100.0 + inclusive. + required: true + - name: 'delay' + type: NestedObject + description: | + The specification for how client requests are delayed as part of fault + injection, before being sent to a backend service. + properties: + - name: 'fixedDelay' + type: NestedObject + description: | + Specifies the value of the fixed delay interval. + required: true + properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) on which delay will + be introduced as part of fault injection. The value must be between 0.0 and + 100.0 inclusive. + required: true + - name: 'requestMirrorPolicy' + type: NestedObject + description: | + Specifies the policy on how requests intended for the route's backends are + shadowed to a separate mirrored backend service. Loadbalancer does not wait for + responses from the shadow service. Prior to sending traffic to the shadow + service, the host / authority header is suffixed with -shadow. + properties: + - name: 'backendService' + type: ResourceRef + description: | + The BackendService resource being mirrored to. 
+ required: true + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'retryPolicy' + type: NestedObject + description: | + Specifies the retry policy associated with this route. + properties: + - name: 'numRetries' + type: Integer + description: | + Specifies the allowed number retries. This number must be > 0. + - name: 'perTryTimeout' + type: NestedObject + description: | + Specifies a non-zero timeout per retry attempt. + properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'retryConditions' + type: Array + description: | + Specifies one or more conditions when this retry rule applies. Valid values are: + + * 5xx: Loadbalancer will attempt a retry if the backend service responds with + any 5xx response code, or if the backend service does not respond at all, + item_type: + type: String + - name: 'timeout' + type: NestedObject + description: | + Specifies the timeout for the selected route. Timeout is computed from the time + the request is has been fully processed (i.e. end-of-stream) up until the + response has been completely processed. Timeout includes all retries. If not + specified, the default value is 15 seconds. + properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. 
+ - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'urlRewrite' + type: NestedObject + description: | + The spec to modify the URL of the request, prior to forwarding the request to + the matched service + properties: + - name: 'hostRewrite' + type: String + description: | + Prior to forwarding the request to the selected service, the request's host + header is replaced with contents of hostRewrite. The value must be between 1 and + 255 characters. + - name: 'pathPrefixRewrite' + type: String + description: | + Prior to forwarding the request to the selected backend service, the matching + portion of the request's path is replaced by pathPrefixRewrite. The value must + be between 1 and 1024 characters. + - name: 'weightedBackendServices' + type: Array + description: | + A list of weighted backend services to send traffic to when a route match + occurs. The weights determine the fraction of traffic that flows to their + corresponding backend service. If all traffic needs to go to a single backend + service, there must be one weightedBackendService with weight set to a non 0 + number. Once a backendService is identified and before forwarding the request to + the backend service, advanced routing actions like Url rewrites and header + transformations are applied depending on additional settings specified in this + HttpRouteAction. + item_type: + type: NestedObject + properties: + - name: 'backendService' + type: ResourceRef + description: | + The default BackendService resource. Before + forwarding the request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. 
+ required: true + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. headerAction specified here take effect before + headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. + properties: + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the + backendService. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. 
+ required: true + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + item_type: + type: String + - name: 'weight' + type: Integer + description: | + Specifies the fraction of traffic sent to backendService, computed as weight / + (sum of all weightedBackendService weights in routeAction) . The selection of a + backend service is determined only for new traffic. Once a user's request has + been directed to a backendService, subsequent requests will be sent to the same + backendService as determined by the BackendService's session affinity policy. + The value must be between 0 and 1000 + required: true + - name: 'urlRedirect' + type: NestedObject + description: | + When a path pattern is matched, the request is redirected to a URL specified + by urlRedirect. If urlRedirect is specified, service or routeAction must not + be set. + properties: + - name: 'hostRedirect' + type: String + description: | + The host that will be used in the redirect response instead of the one + that was supplied in the request. The value must be between 1 and 255 + characters. + - name: 'httpsRedirect' + type: Boolean + description: | + If set to true, the URL scheme in the redirected request is set to https. + If set to false, the URL scheme of the redirected request will remain the + same as that of the request. This must only be set for UrlMaps used in + TargetHttpProxys. Setting this true for TargetHttpsProxy is not + permitted. The default is set to false. + default_value: false + - name: 'pathRedirect' + type: String + description: | + The path that will be used in the redirect response instead of the one + that was supplied in the request. pathRedirect cannot be supplied + together with prefixRedirect. Supply one alone or neither. If neither is + supplied, the path of the original request will be used for the redirect. 
+ The value must be between 1 and 1024 characters. + - name: 'prefixRedirect' + type: String + description: | + The prefix that replaces the prefixMatch specified in the + HttpRouteRuleMatch, retaining the remaining portion of the URL before + redirecting the request. prefixRedirect cannot be supplied together with + pathRedirect. Supply one alone or neither. If neither is supplied, the + path of the original request will be used for the redirect. The value + must be between 1 and 1024 characters. + - name: 'redirectResponseCode' + type: Enum + description: | + The HTTP Status code to use for this RedirectAction. Supported values are: + + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. + + * FOUND, which corresponds to 302. + + * SEE_OTHER which corresponds to 303. + + * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method + will be retained. + + * PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained. + enum_values: + - 'FOUND' + - 'MOVED_PERMANENTLY_DEFAULT' + - 'PERMANENT_REDIRECT' + - 'SEE_OTHER' + - 'TEMPORARY_REDIRECT' + skip_docs_values: true + - name: 'stripQuery' + type: Boolean + description: | + If set to true, any accompanying query portion of the original URL is + removed prior to redirecting the request. If set to false, the query + portion of the original URL is retained. + This field is required to ensure an empty block is not set. The normal default value is false. + required: true + - name: 'routeRules' + type: Array + description: | + The list of ordered HTTP route rules. Use this list instead of pathRules when + advanced route matching and routing actions are desired. The order of specifying + routeRules matters: the first rule that matches will cause its specified routing + action to take effect. Within a given pathMatcher, only one of pathRules or + routeRules must be set. 
routeRules are not supported in UrlMaps intended for + External load balancers. + item_type: + type: NestedObject + properties: + - name: 'priority' + type: Integer + description: | + For routeRules within a given pathMatcher, priority determines the order + in which load balancer will interpret routeRules. RouteRules are evaluated + in order of priority, from the lowest to highest number. The priority of + a rule decreases as its number increases (1, 2, 3, N+1). The first rule + that matches the request is applied. + + You cannot configure two or more routeRules with the same priority. + Priority for each rule must be set to a number between 0 and + 2147483647 inclusive. + + Priority numbers can have gaps, which enable you to add or remove rules + in the future without affecting the rest of the rules. For example, + 1, 2, 3, 4, 5, 9, 12, 16 is a valid series of priority numbers to which + you could add rules numbered from 6 to 8, 10 to 11, and 13 to 15 in the + future without any impact on existing rules. + required: true + - name: 'service' + type: ResourceRef + description: | + The backend service resource to which traffic is + directed if this rule is matched. If routeAction is additionally specified, + advanced routing actions like URL Rewrites, etc. take effect prior to sending + the request to the backend. However, if service is specified, routeAction cannot + contain any weightedBackendService s. Conversely, if routeAction specifies any + weightedBackendServices, service must not be specified. Only one of urlRedirect, + service or routeAction.weightedBackendService must be set. + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. 
The headerAction specified here are applied before + the matching pathMatchers[].headerAction and after pathMatchers[].routeRules[].r + outeAction.weightedBackendService.backendServiceWeightAction[].headerAction + properties: + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the + backendService. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. 
+ item_type: + type: String + - name: 'matchRules' + type: Array + description: | + The rules for determining a match. + item_type: + type: NestedObject + properties: + - name: 'fullPathMatch' + type: String + description: | + For satisfying the matchRule condition, the path of the request must exactly + match the value specified in fullPathMatch after removing any query parameters + and anchor that may be part of the original URL. FullPathMatch must be between 1 + and 1024 characters. Only one of prefixMatch, fullPathMatch or regexMatch must + be specified. + - name: 'headerMatches' + type: Array + description: | + Specifies a list of header match criteria, all of which must match corresponding + headers in the request. + item_type: + type: NestedObject + properties: + - name: 'exactMatch' + type: String + description: | + The value should exactly match contents of exactMatch. Only one of exactMatch, + prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. + - name: 'headerName' + type: String + description: | + The name of the HTTP header to match. For matching against the HTTP request's + authority, use a headerMatch with the header name ":authority". For matching a + request's method, use the headerName ":method". + required: true + - name: 'invertMatch' + type: Boolean + description: | + If set to false, the headerMatch is considered a match if the match criteria + above are met. If set to true, the headerMatch is considered a match if the + match criteria above are NOT met. Defaults to false. + default_value: false + - name: 'prefixMatch' + type: String + description: | + The value of the header must start with the contents of prefixMatch. Only one of + exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch + must be set. + - name: 'presentMatch' + type: Boolean + description: | + A header with the contents of headerName must exist. The match takes place + whether or not the request's header has a value or not. 
Only one of exactMatch, + prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch must be set. + - name: 'rangeMatch' + type: NestedObject + description: | + The header value must be an integer and its value must be in the range specified + in rangeMatch. If the header does not contain an integer, number or is empty, + the match fails. For example for a range [-5, 0] - -3 will match. - 0 will + not match. - 0.25 will not match. - -3someString will not match. Only one of + exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch + must be set. + properties: + - name: 'rangeEnd' + type: Integer + description: | + The end of the range (exclusive). + required: true + - name: 'rangeStart' + type: Integer + description: | + The start of the range (inclusive). + required: true + - name: 'regexMatch' + type: String + description: | + The value of the header must match the regular expression specified in + regexMatch. For regular expression grammar, please see: + en.cppreference.com/w/cpp/regex/ecmascript For matching against a port + specified in the HTTP request, use a headerMatch with headerName set to PORT and + a regular expression that satisfies the RFC2616 Host header's port specifier. + Only one of exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or + rangeMatch must be set. + - name: 'suffixMatch' + type: String + description: | + The value of the header must end with the contents of suffixMatch. Only one of + exactMatch, prefixMatch, suffixMatch, regexMatch, presentMatch or rangeMatch + must be set. + - name: 'ignoreCase' + type: Boolean + description: | + Specifies that prefixMatch and fullPathMatch matches are case sensitive. + Defaults to false. + default_value: false + - name: 'metadataFilters' + type: Array + description: | + Opaque filter criteria used by Loadbalancer to restrict routing configuration to + a limited set xDS compliant clients. 
In their xDS requests to Loadbalancer, xDS
+ clients present node metadata. If a match takes place, the relevant routing
+ configuration is made available to those proxies. For each metadataFilter in
+ this list, if its filterMatchCriteria is set to MATCH_ANY, at least one of the
+ filterLabels must match the corresponding label provided in the metadata. If its
+ filterMatchCriteria is set to MATCH_ALL, then all of its filterLabels must match
+ with corresponding labels in the provided metadata. metadataFilters specified
+ here can override those specified in ForwardingRule that refers to this
+ UrlMap. metadataFilters only applies to Loadbalancers that have their
+ loadBalancingScheme set to INTERNAL_SELF_MANAGED.
+ item_type:
+ type: NestedObject
+ properties:
+ - name: 'filterLabels'
+ type: Array
+ description: |
+ The list of label value pairs that must match labels in the provided metadata
+ based on filterMatchCriteria. This list must not be empty and can have at the
+ most 64 entries.
+ required: true
+ item_type:
+ type: NestedObject
+ properties:
+ - name: 'name'
+ type: String
+ description: |
+ Name of metadata label. The name can have a maximum length of 1024 characters
+ and must be at least 1 character long.
+ required: true
+ - name: 'value'
+ type: String
+ description: |
+ The value of the label must match the specified value. value can have a maximum
+ length of 1024 characters.
+ required: true
+ min_size: 1
+ max_size: 64
+ - name: 'filterMatchCriteria'
+ type: Enum
+ description: |
+ Specifies how individual filterLabel matches within the list of filterLabels
+ contribute towards the overall metadataFilter match. Supported values are:
+ - MATCH_ANY: At least one of the filterLabels must have a matching label in the
+ provided metadata.
+ - MATCH_ALL: All filterLabels must have matching labels in
+ the provided metadata. 
+ required: true + enum_values: + - 'MATCH_ALL' + - 'MATCH_ANY' + - name: 'prefixMatch' + type: String + description: | + For satisfying the matchRule condition, the request's path must begin with the + specified prefixMatch. prefixMatch must begin with a /. The value must be + between 1 and 1024 characters. Only one of prefixMatch, fullPathMatch or + regexMatch must be specified. + - name: 'queryParameterMatches' + type: Array + description: | + Specifies a list of query parameter match criteria, all of which must match + corresponding query parameters in the request. + item_type: + type: NestedObject + properties: + - name: 'exactMatch' + type: String + description: | + The queryParameterMatch matches if the value of the parameter exactly matches + the contents of exactMatch. Only one of presentMatch, exactMatch and regexMatch + must be set. + - name: 'name' + type: String + description: | + The name of the query parameter to match. The query parameter must exist in the + request, in the absence of which the request match fails. + required: true + - name: 'presentMatch' + type: Boolean + description: | + Specifies that the queryParameterMatch matches if the request contains the query + parameter, irrespective of whether the parameter has a value or not. Only one of + presentMatch, exactMatch and regexMatch must be set. + - name: 'regexMatch' + type: String + description: | + The queryParameterMatch matches if the value of the parameter matches the + regular expression specified by regexMatch. For the regular expression grammar, + please see en.cppreference.com/w/cpp/regex/ecmascript Only one of presentMatch, + exactMatch and regexMatch must be set. + - name: 'regexMatch' + type: String + description: | + For satisfying the matchRule condition, the path of the request must satisfy the + regular expression specified in regexMatch after removing any query parameters + and anchor supplied with the original URL. 
For regular expression grammar please + see en.cppreference.com/w/cpp/regex/ecmascript Only one of prefixMatch, + fullPathMatch or regexMatch must be specified. + - name: 'pathTemplateMatch' + type: String + description: | + For satisfying the matchRule condition, the path of the request + must match the wildcard pattern specified in pathTemplateMatch + after removing any query parameters and anchor that may be part + of the original URL. + + pathTemplateMatch must be between 1 and 255 characters + (inclusive). The pattern specified by pathTemplateMatch may + have at most 5 wildcard operators and at most 5 variable + captures in total. + - name: 'routeAction' + type: NestedObject + description: | + In response to a matching matchRule, the load balancer performs advanced routing + actions like URL rewrites, header transformations, etc. prior to forwarding the + request to the selected backend. If routeAction specifies any + weightedBackendServices, service must not be set. Conversely if service is set, + routeAction cannot contain any weightedBackendServices. Only one of routeAction + or urlRedirect must be set. + properties: + - name: 'corsPolicy' + type: NestedObject + description: | + The specification for allowing client side cross-origin requests. Please see W3C + Recommendation for Cross Origin Resource Sharing + properties: + - name: 'allowCredentials' + type: Boolean + description: | + In response to a preflight request, setting this to true indicates that the + actual request can include user credentials. This translates to the Access- + Control-Allow-Credentials header. Defaults to false. + default_value: false + - name: 'allowHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Headers header. + item_type: + type: String + - name: 'allowMethods' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Methods header. 
+ item_type:
+ type: String
+ - name: 'allowOriginRegexes'
+ type: Array
+ description: |
+ Specifies the regular expression patterns that match allowed origins. For
+ regular expression grammar please see en.cppreference.com/w/cpp/regex/ecmascript
+ An origin is allowed if it matches either allow_origins or allow_origin_regex.
+ item_type:
+ type: String
+ - name: 'allowOrigins'
+ type: Array
+ description: |
+ Specifies the list of origins that will be allowed to do CORS requests. An
+ origin is allowed if it matches either allow_origins or allow_origin_regex.
+ item_type:
+ type: String
+ - name: 'disabled'
+ type: Boolean
+ description: |
+ If true, specifies the CORS policy is disabled. The default value of false
+ indicates that the CORS policy is in effect. Defaults to false.
+ default_value: false
+ - name: 'exposeHeaders'
+ type: Array
+ description: |
+ Specifies the content for the Access-Control-Expose-Headers header.
+ item_type:
+ type: String
+ - name: 'maxAge'
+ type: Integer
+ description: |
+ Specifies how long the results of a preflight request can be cached. This
+ translates to the content for the Access-Control-Max-Age header.
+ - name: 'faultInjectionPolicy'
+ type: NestedObject
+ description: |
+ The specification for fault injection introduced into traffic to test the
+ resiliency of clients to backend service failure. As part of fault injection,
+ when clients send requests to a backend service, delays can be introduced by
+ Loadbalancer on a percentage of requests before sending those request to the
+ backend service. Similarly requests from clients can be aborted by the
+ Loadbalancer for a percentage of requests. timeout and retry_policy will be
+ ignored by clients that are configured with a fault_injection_policy.
+ properties:
+ - name: 'abort'
+ type: NestedObject
+ description: |
+ The specification for how client requests are aborted as part of fault
+ injection. 
+ properties: + - name: 'httpStatus' + type: Integer + description: | + The HTTP status code used to abort the request. The value must be between 200 + and 599 inclusive. + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) which will be + aborted as part of fault injection. The value must be between 0.0 and 100.0 + inclusive. + - name: 'delay' + type: NestedObject + description: | + The specification for how client requests are delayed as part of fault + injection, before being sent to a backend service. + properties: + - name: 'fixedDelay' + type: NestedObject + description: | + Specifies the value of the fixed delay interval. + properties: + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations + less than one second are represented with a 0 `seconds` field and a positive + `nanos` field. Must be from 0 to 999,999,999 inclusive. + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 + inclusive. + required: true + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) on which delay will + be introduced as part of fault injection. The value must be between 0.0 and + 100.0 inclusive. + - name: 'requestMirrorPolicy' + type: NestedObject + description: | + Specifies the policy on how requests intended for the route's backends are + shadowed to a separate mirrored backend service. Loadbalancer does not wait for + responses from the shadow service. Prior to sending traffic to the shadow + service, the host / authority header is suffixed with -shadow. + properties: + - name: 'backendService' + type: ResourceRef + description: | + The BackendService resource being mirrored to. 
+ required: true
+ custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl'
+ resource: 'BackendService'
+ imports: 'selfLink'
+ - name: 'retryPolicy'
+ type: NestedObject
+ description: |
+ Specifies the retry policy associated with this route.
+ properties:
+ - name: 'numRetries'
+ type: Integer
+ description: |
+ Specifies the allowed number of retries. This number must be > 0.
+ required: true
+ - name: 'perTryTimeout'
+ type: NestedObject
+ description: |
+ Specifies a non-zero timeout per retry attempt.
+ If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction
+ is not set, will use the largest timeout among all backend services associated with the route.
+ properties:
+ - name: 'nanos'
+ type: Integer
+ description: |
+ Span of time that's a fraction of a second at nanosecond resolution. Durations
+ less than one second are represented with a 0 `seconds` field and a positive
+ `nanos` field. Must be from 0 to 999,999,999 inclusive.
+ - name: 'seconds'
+ type: String
+ description: |
+ Span of time at a resolution of a second. Must be from 0 to 315,576,000,000
+ inclusive.
+ required: true
+ - name: 'retryConditions'
+ type: Array
+ description: |
+ Specifies one or more conditions when this retry rule applies. Valid values are:
+
+ * 5xx: Loadbalancer will attempt a retry if the backend service responds with
+ any 5xx response code, or if the backend service does not respond at all,
+ item_type:
+ type: String
+ - name: 'timeout'
+ type: NestedObject
+ description: |
+ Specifies the timeout for the selected route. Timeout is computed from the time
+ the request has been fully processed (i.e. end-of-stream) up until the
+ response has been completely processed. Timeout includes all retries. If not
+ specified, the default value is 15 seconds.
+ properties:
+ - name: 'nanos'
+ type: Integer
+ description: |
+ Span of time that's a fraction of a second at nanosecond resolution. 
Durations
+ less than one second are represented with a 0 `seconds` field and a positive
+ `nanos` field. Must be from 0 to 999,999,999 inclusive.
+ - name: 'seconds'
+ type: String
+ description: |
+ Span of time at a resolution of a second. Must be from 0 to 315,576,000,000
+ inclusive.
+ required: true
+ - name: 'urlRewrite'
+ type: NestedObject
+ description: |
+ The spec to modify the URL of the request, prior to forwarding the request to
+ the matched service.
+ properties:
+ - name: 'hostRewrite'
+ type: String
+ description: |
+ Prior to forwarding the request to the selected service, the request's host
+ header is replaced with contents of hostRewrite. The value must be between 1 and
+ 255 characters.
+ - name: 'pathPrefixRewrite'
+ type: String
+ description: |
+ Prior to forwarding the request to the selected backend service, the matching
+ portion of the request's path is replaced by pathPrefixRewrite. The value must
+ be between 1 and 1024 characters.
+ - name: 'pathTemplateRewrite'
+ type: String
+ description: |
+ Prior to forwarding the request to the selected origin, if the
+ request matched a pathTemplateMatch, the matching portion of the
+ request's path is re-written using the pattern specified
+ by pathTemplateRewrite.
+
+ pathTemplateRewrite must be between 1 and 255 characters
+ (inclusive), must start with a '/', and must only use variables
+ captured by the route's pathTemplate matchers.
+
+ pathTemplateRewrite may only be used when all of a route's
+ MatchRules specify pathTemplate.
+
+ Only one of pathPrefixRewrite and pathTemplateRewrite may be
+ specified.
+ - name: 'weightedBackendServices'
+ type: Array
+ description: |
+ A list of weighted backend services to send traffic to when a route match
+ occurs. The weights determine the fraction of traffic that flows to their
+ corresponding backend service. 
If all traffic needs to go to a single backend + service, there must be one weightedBackendService with weight set to a non 0 + number. Once a backendService is identified and before forwarding the request to + the backend service, advanced routing actions like Url rewrites and header + transformations are applied depending on additional settings specified in this + HttpRouteAction. + item_type: + type: NestedObject + properties: + - name: 'backendService' + type: ResourceRef + description: | + The default BackendService resource. Before + forwarding the request to backendService, the loadbalancer applies any relevant + headerActions specified as part of this backendServiceWeight. + required: true + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. headerAction specified here take effect before + headerAction in the enclosing HttpRouteRule, PathMatcher and UrlMap. + properties: + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the + backendService. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. 
+ required: true + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request + prior to forwarding the request to the backendService. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header. + required: true + - name: 'headerValue' + type: String + description: | + The value of the header to add. + required: true + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the + header. If true, headerValue is set for the header, discarding any values that + were set for that header. + required: true + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response + prior to sending the response back to the client. + item_type: + type: String + - name: 'weight' + type: Integer + description: | + Specifies the fraction of traffic sent to backendService, computed as weight / + (sum of all weightedBackendService weights in routeAction) . The selection of a + backend service is determined only for new traffic. Once a user's request has + been directed to a backendService, subsequent requests will be sent to the same + backendService as determined by the BackendService's session affinity policy. + The value must be between 0 and 1000 + required: true + - name: 'urlRedirect' + type: NestedObject + description: | + When this rule is matched, the request is redirected to a URL specified by + urlRedirect. If urlRedirect is specified, service or routeAction must not be + set. 
+ properties: + - name: 'hostRedirect' + type: String + description: | + The host that will be used in the redirect response instead of the one that was + supplied in the request. The value must be between 1 and 255 characters. + - name: 'httpsRedirect' + type: Boolean + description: | + If set to true, the URL scheme in the redirected request is set to https. If set + to false, the URL scheme of the redirected request will remain the same as that + of the request. This must only be set for UrlMaps used in TargetHttpProxys. + Setting this true for TargetHttpsProxy is not permitted. Defaults to false. + default_value: false + - name: 'pathRedirect' + type: String + description: | + The path that will be used in the redirect response instead of the one that was + supplied in the request. Only one of pathRedirect or prefixRedirect must be + specified. The value must be between 1 and 1024 characters. + - name: 'prefixRedirect' + type: String + description: | + The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + - name: 'redirectResponseCode' + type: Enum + description: | + The HTTP Status code to use for this RedirectAction. Supported values are: + + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. + + * FOUND, which corresponds to 302. + + * SEE_OTHER which corresponds to 303. + + * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method will be retained. + + * PERMANENT_REDIRECT, which corresponds to 308. In this case, the request method will be retained. + enum_values: + - 'FOUND' + - 'MOVED_PERMANENTLY_DEFAULT' + - 'PERMANENT_REDIRECT' + - 'SEE_OTHER' + - 'TEMPORARY_REDIRECT' + skip_docs_values: true + - name: 'stripQuery' + type: Boolean + description: | + If set to true, any accompanying query portion of the original URL is removed + prior to redirecting the request. 
If set to false, the query portion of the + original URL is retained. Defaults to false. + default_value: false + - name: 'defaultUrlRedirect' + type: NestedObject + description: | + When none of the specified hostRules match, the request is redirected to a URL specified + by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or + defaultRouteAction must not be set. + properties: + - name: 'hostRedirect' + type: String + description: | + The host that will be used in the redirect response instead of the one that was + supplied in the request. The value must be between 1 and 255 characters. + - name: 'httpsRedirect' + type: Boolean + description: | + If set to true, the URL scheme in the redirected request is set to https. If set to + false, the URL scheme of the redirected request will remain the same as that of the + request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this + true for TargetHttpsProxy is not permitted. The default is set to false. + default_value: false + - name: 'pathRedirect' + type: String + description: | + The path that will be used in the redirect response instead of the one that was + supplied in the request. pathRedirect cannot be supplied together with + prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the + original request will be used for the redirect. The value must be between 1 and 1024 + characters. + - name: 'prefixRedirect' + type: String + description: | + The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or + neither. If neither is supplied, the path of the original request will be used for + the redirect. The value must be between 1 and 1024 characters. + - name: 'redirectResponseCode' + type: Enum + description: | + The HTTP Status code to use for this RedirectAction. 
Supported values are: + + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. + + * FOUND, which corresponds to 302. + + * SEE_OTHER which corresponds to 303. + + * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method + will be retained. + + * PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained. + enum_values: + - 'FOUND' + - 'MOVED_PERMANENTLY_DEFAULT' + - 'PERMANENT_REDIRECT' + - 'SEE_OTHER' + - 'TEMPORARY_REDIRECT' + skip_docs_values: true + - name: 'stripQuery' + type: Boolean + description: | + If set to true, any accompanying query portion of the original URL is removed prior + to redirecting the request. If set to false, the query portion of the original URL is + retained. + This field is required to ensure an empty block is not set. The normal default value is false. + required: true + - name: 'defaultRouteAction' + type: NestedObject + description: | + defaultRouteAction takes effect when none of the pathRules or routeRules match. The load balancer performs + advanced routing actions like URL rewrites, header transformations, etc. prior to forwarding the request + to the selected backend. If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. + Conversely if defaultService is set, defaultRouteAction cannot contain any weightedBackendServices. + + Only one of defaultRouteAction or defaultUrlRedirect must be set. + properties: + - name: 'weightedBackendServices' + type: Array + description: | + A list of weighted backend services to send traffic to when a route match occurs. + The weights determine the fraction of traffic that flows to their corresponding backend service. + If all traffic needs to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. 
+ + Once a backendService is identified and before forwarding the request to the backend service, + advanced routing actions like Url rewrites and header transformations are applied depending on + additional settings specified in this HttpRouteAction. + item_type: + type: NestedObject + properties: + - name: 'backendService' + type: ResourceRef + description: | + The full or partial URL to the default BackendService resource. Before forwarding the + request to backendService, the loadbalancer applies any relevant headerActions + specified as part of this backendServiceWeight. + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'weight' + type: Integer + description: | + Specifies the fraction of traffic sent to backendService, computed as + weight / (sum of all weightedBackendService weights in routeAction) . + + The selection of a backend service is determined only for new traffic. Once a user's request + has been directed to a backendService, subsequent requests will be sent to the same backendService + as determined by the BackendService's session affinity policy. + + The value must be between 0 and 1000 + validation: + function: 'validation.IntBetween(0, 1000)' + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. + + headerAction specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + properties: + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request prior to + forwarding the request to the backendService. + item_type: + type: String + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the backendService. 
+ item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header to add. + - name: 'headerValue' + type: String + description: | + The value of the header to add. + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the header. + If true, headerValue is set for the header, discarding any values that were set for that header. + default_value: false + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response prior to sending the + response back to the client. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header to add. + - name: 'headerValue' + type: String + description: | + The value of the header to add. + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the header. + If true, headerValue is set for the header, discarding any values that were set for that header. + default_value: false + - name: 'urlRewrite' + type: NestedObject + description: | + The spec to modify the URL of the request, prior to forwarding the request to the matched service. + properties: + - name: 'pathPrefixRewrite' + type: String + description: | + Prior to forwarding the request to the selected backend service, the matching portion of the + request's path is replaced by pathPrefixRewrite. + + The value must be between 1 and 1024 characters. + - name: 'hostRewrite' + type: String + description: | + Prior to forwarding the request to the selected service, the request's host header is replaced + with contents of hostRewrite. 
+
+ The value must be between 1 and 255 characters.
+ - name: 'timeout'
+ type: NestedObject
+ description: |
+ Specifies the timeout for the selected route. Timeout is computed from the time the request has been
+ fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries.
+
+ If not specified, will use the largest timeout among all backend services associated with the route.
+ default_from_api: true
+ properties:
+ - name: 'seconds'
+ type: String
+ description: |
+ Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive.
+ Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+ - name: 'nanos'
+ type: Integer
+ description: |
+ Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented
+ with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive.
+ - name: 'retryPolicy'
+ type: NestedObject
+ description: |
+ Specifies the retry policy associated with this route.
+ properties:
+ - name: 'retryConditions'
+ type: Array
+ description: |
+ Specifies one or more conditions when this retry rule applies. Valid values are:
+
+ * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code,
+ or if the backend service does not respond at all, example: disconnects, reset, read timeout,
+ * connection failure, and refused streams.
+ * gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504.
+ * connect-failure: Loadbalancer will retry on failures connecting to backend services,
+ for example due to connection timeouts.
+ * retriable-4xx: Loadbalancer will retry for retriable 4xx response codes.
+ Currently the only retriable error supported is 409.
+ * refused-stream: Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code. 
+ This reset type indicates that it is safe to retry. + * cancelled: Loadbalancer will retry if the gRPC status code in the response header is set to cancelled + * deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded + * resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted + * unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable + item_type: + type: String + - name: 'numRetries' + type: Integer + description: | + Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. + validation: + function: 'validation.IntAtLeast(1)' + default_value: 1 + - name: 'perTryTimeout' + type: NestedObject + description: | + Specifies a non-zero timeout per retry attempt. + + If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, + will use the largest timeout among all backend services associated with the route. + properties: + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are + represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + - name: 'requestMirrorPolicy' + type: NestedObject + description: | + Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. + Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, + the host / authority header is suffixed with -shadow.
+ properties: + - name: 'backendService' + type: ResourceRef + description: | + The full or partial URL to the BackendService resource being mirrored to. + required: true + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'corsPolicy' + type: NestedObject + description: | + The specification for allowing client side cross-origin requests. Please see + [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) + properties: + - name: 'allowOrigins' + type: Array + description: | + Specifies the list of origins that will be allowed to do CORS requests. + An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. + item_type: + type: String + - name: 'allowOriginRegexes' + type: Array + description: | + Specifies the regular expression patterns that match allowed origins. For regular expression grammar + please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. + item_type: + type: String + - name: 'allowMethods' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Methods header. + item_type: + type: String + - name: 'allowHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Headers header. + item_type: + type: String + - name: 'exposeHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Expose-Headers header. + item_type: + type: String + - name: 'maxAge' + type: Integer + description: | + Specifies how long results of a preflight request can be cached in seconds. + This translates to the Access-Control-Max-Age header. + - name: 'allowCredentials' + type: Boolean + description: | + In response to a preflight request, setting this to true indicates that the actual request can include user credentials. 
+ This translates to the Access-Control-Allow-Credentials header. + default_value: false + - name: 'disabled' + type: Boolean + description: | + If true, specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. + default_value: false + - name: 'faultInjectionPolicy' + type: NestedObject + description: | + The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. + As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a + percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted + by the Loadbalancer for a percentage of requests. + + timeout and retryPolicy will be ignored by clients that are configured with a faultInjectionPolicy. + properties: + - name: 'delay' + type: NestedObject + description: | + The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. + properties: + - name: 'fixedDelay' + type: NestedObject + description: | + Specifies the value of the fixed delay interval. + properties: + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are + represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. 
+ validation: + function: 'validation.FloatBetween(0, 100)' + - name: 'abort' + type: NestedObject + description: | + The specification for how client requests are aborted as part of fault injection. + properties: + - name: 'httpStatus' + type: Integer + description: | + The HTTP status code used to abort the request. + The value must be between 200 and 599 inclusive. + validation: + function: 'validation.IntBetween(200, 599)' + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + validation: + function: 'validation.FloatBetween(0, 100)' + - name: 'test' + type: Array + description: | + The list of expected URL mapping tests. Request to update this UrlMap will + succeed only if all of the test cases pass. You can specify a maximum of 100 + tests per UrlMap. + api_name: tests + item_type: + type: NestedObject + properties: + - name: 'description' + type: String + description: | + Description of this test case. + - name: 'host' + type: String + description: | + Host portion of the URL. + required: true + - name: 'path' + type: String + description: | + Path portion of the URL. + required: true + - name: 'service' + type: ResourceRef + description: The backend service or backend bucket link that should be matched by this test. + required: true + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'defaultUrlRedirect' + type: NestedObject + description: | + When none of the specified hostRules match, the request is redirected to a URL specified + by defaultUrlRedirect. If defaultUrlRedirect is specified, defaultService or + defaultRouteAction must not be set. 
+ conflicts: + - default_route_action + exactly_one_of: + - 'default_service' + - 'default_url_redirect' + - 'default_route_action.0.weighted_backend_services' + properties: + - name: 'hostRedirect' + type: String + description: | + The host that will be used in the redirect response instead of the one that was + supplied in the request. The value must be between 1 and 255 characters. + - name: 'httpsRedirect' + type: Boolean + description: | + If set to true, the URL scheme in the redirected request is set to https. If set to + false, the URL scheme of the redirected request will remain the same as that of the + request. This must only be set for UrlMaps used in TargetHttpProxys. Setting this + true for TargetHttpsProxy is not permitted. The default is set to false. + default_value: false + - name: 'pathRedirect' + type: String + description: | + The path that will be used in the redirect response instead of the one that was + supplied in the request. pathRedirect cannot be supplied together with + prefixRedirect. Supply one alone or neither. If neither is supplied, the path of the + original request will be used for the redirect. The value must be between 1 and 1024 + characters. + - name: 'prefixRedirect' + type: String + description: | + The prefix that replaces the prefixMatch specified in the HttpRouteRuleMatch, + retaining the remaining portion of the URL before redirecting the request. + prefixRedirect cannot be supplied together with pathRedirect. Supply one alone or + neither. If neither is supplied, the path of the original request will be used for + the redirect. The value must be between 1 and 1024 characters. + - name: 'redirectResponseCode' + type: Enum + description: | + The HTTP Status code to use for this RedirectAction. Supported values are: + + * MOVED_PERMANENTLY_DEFAULT, which is the default value and corresponds to 301. + + * FOUND, which corresponds to 302. + + * SEE_OTHER which corresponds to 303. 
+ + * TEMPORARY_REDIRECT, which corresponds to 307. In this case, the request method + will be retained. + + * PERMANENT_REDIRECT, which corresponds to 308. In this case, + the request method will be retained. + enum_values: + - 'FOUND' + - 'MOVED_PERMANENTLY_DEFAULT' + - 'PERMANENT_REDIRECT' + - 'SEE_OTHER' + - 'TEMPORARY_REDIRECT' + skip_docs_values: true + - name: 'stripQuery' + type: Boolean + description: | + If set to true, any accompanying query portion of the original URL is removed prior + to redirecting the request. If set to false, the query portion of the original URL is + retained. The default is set to false. + This field is required to ensure an empty block is not set. The normal default value is false. + required: true + - name: 'defaultRouteAction' + type: NestedObject + description: | + defaultRouteAction takes effect when none of the hostRules match. The load balancer performs advanced routing actions + like URL rewrites, header transformations, etc. prior to forwarding the request to the selected backend. + If defaultRouteAction specifies any weightedBackendServices, defaultService must not be set. Conversely if defaultService + is set, defaultRouteAction cannot contain any weightedBackendServices. + + Only one of defaultRouteAction or defaultUrlRedirect must be set. + conflicts: + - default_url_redirect + properties: + - name: 'weightedBackendServices' + type: Array + description: | + A list of weighted backend services to send traffic to when a route match occurs. + The weights determine the fraction of traffic that flows to their corresponding backend service. + If all traffic needs to go to a single backend service, there must be one weightedBackendService + with weight set to a non 0 number. + + Once a backendService is identified and before forwarding the request to the backend service, + advanced routing actions like Url rewrites and header transformations are applied depending on + additional settings specified in this HttpRouteAction. 
+ at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + exactly_one_of: + - 'default_service' + - 'default_url_redirect' + - 'default_route_action.0.weighted_backend_services' + item_type: + type: NestedObject + properties: + - name: 'backendService' + type: ResourceRef + description: | + The full or partial URL to the default BackendService resource. Before forwarding the + request to backendService, the loadbalancer applies any relevant headerActions + specified as part of this backendServiceWeight. + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'weight' + type: Integer + description: | + Specifies the fraction of traffic sent to backendService, computed as + weight / (sum of all weightedBackendService weights in routeAction) . + + The selection of a backend service is determined only for new traffic. Once a user's request + has been directed to a backendService, subsequent requests will be sent to the same backendService + as determined by the BackendService's session affinity policy. + + The value must be between 0 and 1000 + validation: + function: 'validation.IntBetween(0, 1000)' + - name: 'headerAction' + type: NestedObject + description: | + Specifies changes to request and response headers that need to take effect for + the selected backendService. + + headerAction specified here take effect before headerAction in the enclosing + HttpRouteRule, PathMatcher and UrlMap. + properties: + - name: 'requestHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the request prior to + forwarding the request to the backendService. 
+ item_type: + type: String + - name: 'requestHeadersToAdd' + type: Array + description: | + Headers to add to a matching request prior to forwarding the request to the backendService. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header to add. + - name: 'headerValue' + type: String + description: | + The value of the header to add. + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the header. + If true, headerValue is set for the header, discarding any values that were set for that header. + default_value: false + - name: 'responseHeadersToRemove' + type: Array + description: | + A list of header names for headers that need to be removed from the response prior to sending the + response back to the client. + item_type: + type: String + - name: 'responseHeadersToAdd' + type: Array + description: | + Headers to add the response prior to sending the response back to the client. + item_type: + type: NestedObject + properties: + - name: 'headerName' + type: String + description: | + The name of the header to add. + - name: 'headerValue' + type: String + description: | + The value of the header to add. + - name: 'replace' + type: Boolean + description: | + If false, headerValue is appended to any values that already exist for the header. + If true, headerValue is set for the header, discarding any values that were set for that header. + default_value: false + - name: 'urlRewrite' + type: NestedObject + description: | + The spec to modify the URL of the request, prior to forwarding the request to the matched service. 
+ at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'pathPrefixRewrite' + type: String + description: | + Prior to forwarding the request to the selected backend service, the matching portion of the + request's path is replaced by pathPrefixRewrite. + + The value must be between 1 and 1024 characters. + at_least_one_of: + - 'default_route_action.0.url_rewrite.0.path_prefix_rewrite' + - 'default_route_action.0.url_rewrite.0.host_rewrite' + - name: 'hostRewrite' + type: String + description: | + Prior to forwarding the request to the selected service, the request's host header is replaced + with contents of hostRewrite. + + The value must be between 1 and 255 characters. + at_least_one_of: + - 'default_route_action.0.url_rewrite.0.path_prefix_rewrite' + - 'default_route_action.0.url_rewrite.0.host_rewrite' + - name: 'timeout' + type: NestedObject + description: | + Specifies the timeout for the selected route. Timeout is computed from the time the request has been + fully processed (i.e. end-of-stream) up until the response has been completely processed. Timeout includes all retries. + + If not specified, will use the largest timeout among all backend services associated with the route. + default_from_api: true + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. 
Must be from 0 to 315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + at_least_one_of: + - 'default_route_action.0.timeout.0.seconds' + - 'default_route_action.0.timeout.0.nanos' + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are represented + with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + at_least_one_of: + - 'default_route_action.0.timeout.0.seconds' + - 'default_route_action.0.timeout.0.nanos' + - name: 'retryPolicy' + type: NestedObject + description: | + Specifies the retry policy associated with this route. + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'retryConditions' + type: Array + description: | + Specifies one or more conditions when this retry rule applies. Valid values are: + + * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, + or if the backend service does not respond at all, example: disconnects, reset, read timeout, + * connection failure, and refused streams. + * gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. + * connect-failure: Loadbalancer will retry on failures connecting to backend services, + for example due to connection timeouts. + * retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. + Currently the only retriable error supported is 409. + * refused-stream: Loadbalancer will retry if the backend service resets the stream with a REFUSED_STREAM error code.
+ This reset type indicates that it is safe to retry. + * cancelled: Loadbalancer will retry if the gRPC status code in the response header is set to cancelled + * deadline-exceeded: Loadbalancer will retry if the gRPC status code in the response header is set to deadline-exceeded + * resource-exhausted: Loadbalancer will retry if the gRPC status code in the response header is set to resource-exhausted + * unavailable: Loadbalancer will retry if the gRPC status code in the response header is set to unavailable + at_least_one_of: + - 'default_route_action.0.retry_policy.0.retry_conditions' + - 'default_route_action.0.retry_policy.0.num_retries' + - 'default_route_action.0.retry_policy.0.per_try_timeout' + item_type: + type: String + - name: 'numRetries' + type: Integer + description: | + Specifies the allowed number of retries. This number must be > 0. If not specified, defaults to 1. + at_least_one_of: + - 'default_route_action.0.retry_policy.0.retry_conditions' + - 'default_route_action.0.retry_policy.0.num_retries' + - 'default_route_action.0.retry_policy.0.per_try_timeout' + validation: + function: 'validation.IntAtLeast(1)' + default_value: 1 + - name: 'perTryTimeout' + type: NestedObject + description: | + Specifies a non-zero timeout per retry attempt. + + If not specified, will use the timeout set in HttpRouteAction. If timeout in HttpRouteAction is not set, + will use the largest timeout among all backend services associated with the route. + at_least_one_of: + - 'default_route_action.0.retry_policy.0.retry_conditions' + - 'default_route_action.0.retry_policy.0.num_retries' + - 'default_route_action.0.retry_policy.0.per_try_timeout' + properties: + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive.
+ Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + at_least_one_of: + - 'default_route_action.0.retry_policy.0.per_try_timeout.0.seconds' + - 'default_route_action.0.retry_policy.0.per_try_timeout.0.nanos' + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are + represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. + at_least_one_of: + - 'default_route_action.0.retry_policy.0.per_try_timeout.0.seconds' + - 'default_route_action.0.retry_policy.0.per_try_timeout.0.nanos' + - name: 'requestMirrorPolicy' + type: NestedObject + description: | + Specifies the policy on how requests intended for the route's backends are shadowed to a separate mirrored backend service. + Loadbalancer does not wait for responses from the shadow service. Prior to sending traffic to the shadow service, + the host / authority header is suffixed with -shadow. + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'backendService' + type: ResourceRef + description: | + The full or partial URL to the BackendService resource being mirrored to. + required: true + custom_expand: 'templates/terraform/custom_expand/go/reference_to_backend.tmpl' + resource: 'BackendService' + imports: 'selfLink' + - name: 'corsPolicy' + type: NestedObject + description: | + The specification for allowing client side cross-origin requests. 
Please see + [W3C Recommendation for Cross Origin Resource Sharing](https://www.w3.org/TR/cors/) + at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'allowOrigins' + type: Array + description: | + Specifies the list of origins that will be allowed to do CORS requests. + An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'allowOriginRegexes' + type: Array + description: | + Specifies the regular expression patterns that match allowed origins. For regular expression grammar + please see en.cppreference.com/w/cpp/regex/ecmascript + An origin is allowed if it matches either an item in allowOrigins or an item in allowOriginRegexes. 
+ at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'allowMethods' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Methods header. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'allowHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Allow-Headers header. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'exposeHeaders' + type: Array + description: | + Specifies the content for the Access-Control-Expose-Headers header. 
+ at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + item_type: + type: String + - name: 'maxAge' + type: Integer + description: | + Specifies how long results of a preflight request can be cached in seconds. + This translates to the Access-Control-Max-Age header. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + - name: 'allowCredentials' + type: Boolean + description: | + In response to a preflight request, setting this to true indicates that the actual request can include user credentials. + This translates to the Access-Control-Allow-Credentials header. 
+ at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + default_value: false + - name: 'disabled' + type: Boolean + description: | + If true, specifies the CORS policy is disabled. The default value is false, which indicates that the CORS policy is in effect. + at_least_one_of: + - 'default_route_action.0.cors_policy.0.allow_origins' + - 'default_route_action.0.cors_policy.0.allow_origin_regexes' + - 'default_route_action.0.cors_policy.0.allow_methods' + - 'default_route_action.0.cors_policy.0.allow_headers' + - 'default_route_action.0.cors_policy.0.expose_headers' + - 'default_route_action.0.cors_policy.0.max_age' + - 'default_route_action.0.cors_policy.0.allow_credentials' + - 'default_route_action.0.cors_policy.0.disabled' + default_value: false + - name: 'faultInjectionPolicy' + type: NestedObject + description: | + The specification for fault injection introduced into traffic to test the resiliency of clients to backend service failure. + As part of fault injection, when clients send requests to a backend service, delays can be introduced by Loadbalancer on a + percentage of requests before sending those request to the backend service. Similarly requests from clients can be aborted + by the Loadbalancer for a percentage of requests. + + timeout and retryPolicy will be ignored by clients that are configured with a faultInjectionPolicy. 
+ at_least_one_of: + - 'default_route_action.0.weighted_backend_services' + - 'default_route_action.0.url_rewrite' + - 'default_route_action.0.timeout' + - 'default_route_action.0.retry_policy' + - 'default_route_action.0.request_mirror_policy' + - 'default_route_action.0.cors_policy' + - 'default_route_action.0.fault_injection_policy' + properties: + - name: 'delay' + type: NestedObject + description: | + The specification for how client requests are delayed as part of fault injection, before being sent to a backend service. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay' + - 'default_route_action.0.fault_injection_policy.0.abort' + properties: + - name: 'fixedDelay' + type: NestedObject + description: | + Specifies the value of the fixed delay interval. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay' + - 'default_route_action.0.fault_injection_policy.0.delay.0.percentage' + properties: + - name: 'seconds' + type: String + description: | + Span of time at a resolution of a second. Must be from 0 to 315,576,000,000 inclusive. + Note: these bounds are computed from: 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.seconds' + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.nanos' + - name: 'nanos' + type: Integer + description: | + Span of time that's a fraction of a second at nanosecond resolution. Durations less than one second are + represented with a 0 seconds field and a positive nanos field. Must be from 0 to 999,999,999 inclusive. 
+ at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.seconds' + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay.0.nanos' + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) on which delay will be introduced as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay.0.fixed_delay' + - 'default_route_action.0.fault_injection_policy.0.delay.0.percentage' + validation: + function: 'validation.FloatBetween(0, 100)' + - name: 'abort' + type: NestedObject + description: | + The specification for how client requests are aborted as part of fault injection. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.delay' + - 'default_route_action.0.fault_injection_policy.0.abort' + properties: + - name: 'httpStatus' + type: Integer + description: | + The HTTP status code used to abort the request. + The value must be between 200 and 599 inclusive. + at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.abort.0.http_status' + - 'default_route_action.0.fault_injection_policy.0.abort.0.percentage' + validation: + function: 'validation.IntBetween(200, 599)' + - name: 'percentage' + type: Double + description: | + The percentage of traffic (connections/operations/requests) which will be aborted as part of fault injection. + The value must be between 0.0 and 100.0 inclusive. 
+ at_least_one_of: + - 'default_route_action.0.fault_injection_policy.0.abort.0.http_status' + - 'default_route_action.0.fault_injection_policy.0.abort.0.percentage' + validation: + function: 'validation.FloatBetween(0, 100)' diff --git a/mmv1/products/compute/go_VpnGateway.yaml b/mmv1/products/compute/go_VpnGateway.yaml new file mode 100644 index 000000000000..87e0c5d6dda6 --- /dev/null +++ b/mmv1/products/compute/go_VpnGateway.yaml @@ -0,0 +1,107 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'VpnGateway' +kind: 'compute#targetVpnGateway' +description: | + Represents a VPN gateway running in GCP. This virtual device is managed + by Google, but used only by you. +references: + guides: + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways' +docs: + warning: 'Classic VPN is deprecating certain functionality on October 31, 2021. For more information, +see the [Classic VPN partial deprecation page](https://cloud.google.com/network-connectivity/docs/vpn/deprecations/classic-vpn-deprecation). 
+' +base_url: 'projects/{{project}}/regions/{{region}}/targetVpnGateways' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: +examples: + - name: 'target_vpn_gateway_basic' + primary_resource_id: 'target_gateway' + vars: + target_vpn_gateway_name: 'vpn-1' + network_name: 'network-1' + address_name: 'vpn-static-ip' + esp_forwarding_rule_name: 'fr-esp' + udp500_forwarding_rule_name: 'fr-udp500' + udp4500_forwarding_rule_name: 'fr-udp4500' + vpn_tunnel_name: 'tunnel1' + route_name: 'route1' +parameters: + - name: 'region' + type: ResourceRef + description: | + The region this gateway should sit in. + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'description' + type: String + description: 'An optional description of this resource.' + immutable: true + - name: 'name' + type: String + description: | + Name of the resource. Provided by the client when the resource is + created. The name must be 1-63 characters long, and comply with + RFC1035. 
Specifically, the name must be 1-63 characters long and + match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means + the first character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, except the last + character, which cannot be a dash. + required: true + immutable: true + - name: 'gateway_id' + type: Integer + description: 'The unique identifier for the resource.' + api_name: id + output: true + - name: 'network' + type: ResourceRef + description: | + The network this VPN gateway is accepting traffic for. + required: true + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Network' + imports: 'selfLink' diff --git a/mmv1/products/compute/go_VpnTunnel.yaml b/mmv1/products/compute/go_VpnTunnel.yaml new file mode 100644 index 000000000000..52b206de98c9 --- /dev/null +++ b/mmv1/products/compute/go_VpnTunnel.yaml @@ -0,0 +1,229 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'VpnTunnel' +kind: 'compute#vpnTunnel' +description: 'VPN tunnel resource.' 
+references: + guides: + 'Cloud VPN Overview': 'https://cloud.google.com/vpn/docs/concepts/overview' + 'Networks and Tunnel Routing': 'https://cloud.google.com/vpn/docs/concepts/choosing-networks-routing' + api: 'https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels' +docs: +base_url: 'projects/{{project}}/regions/{{region}}/vpnTunnels' +has_self_link: true +immutable: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' + kind: 'compute#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'items' +custom_code: + constants: 'templates/terraform/constants/go/vpn_tunnel.tmpl' + encoder: 'templates/terraform/encoders/go/vpn_tunnel.go.tmpl' + post_create: 'templates/terraform/post_create/go/labels.tmpl' +examples: + - name: 'vpn_tunnel_basic' + primary_resource_id: 'tunnel1' + vars: + vpn_tunnel_name: 'tunnel-1' + target_vpn_gateway_name: 'vpn-1' + network_name: 'network-1' + address_name: 'vpn-static-ip' + esp_forwarding_rule_name: 'fr-esp' + udp500_forwarding_rule_name: 'fr-udp500' + udp4500_forwarding_rule_name: 'fr-udp4500' + route_name: 'route1' +parameters: + - name: 'region' + type: ResourceRef + description: + 'The region where the tunnel is located. If unset, is set to the region of + `target_vpn_gateway`.' + required: false + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'name' +properties: + - name: 'tunnel_id' + type: String + description: + 'The unique identifier for the resource. This identifier is defined by the + server.' 
+ api_name: id + output: true + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'name' + type: String + description: | + Name of the resource. The name must be 1-63 characters long, and + comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character + must be a lowercase letter, and all following characters must + be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + required: true + - name: 'description' + type: String + description: | + An optional description of this resource. + immutable: true + - name: 'targetVpnGateway' + type: ResourceRef + description: | + URL of the Target VPN gateway with which this VPN tunnel is + associated. + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'VpnGateway' + imports: 'selfLink' + - name: 'vpnGateway' + type: ResourceRef + description: | + URL of the VPN gateway with which this VPN tunnel is associated. + This must be used if a High Availability VPN gateway resource is created. + This field must reference a `google_compute_ha_vpn_gateway` resource. + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'HaVpnGateway' + imports: 'selfLink' + - name: 'vpnGatewayInterface' + type: Integer + description: | + The interface ID of the VPN gateway with which this VPN tunnel is associated. + immutable: true + send_empty_value: true + - name: 'peerExternalGateway' + type: ResourceRef + description: | + URL of the peer side external VPN gateway to which this VPN tunnel is connected. 
+ immutable: true + conflicts: + - peer_gcp_gateway + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'ExternalVpnGateway' + imports: 'selfLink' + - name: 'peerExternalGatewayInterface' + type: Integer + description: | + The interface ID of the external VPN gateway to which this VPN tunnel is connected. + send_empty_value: true + - name: 'peerGcpGateway' + type: ResourceRef + description: | + URL of the peer side HA GCP VPN gateway to which this VPN tunnel is connected. + If provided, the VPN tunnel will automatically use the same vpn_gateway_interface + ID in the peer GCP VPN gateway. + This field must reference a `google_compute_ha_vpn_gateway` resource. + conflicts: + - peer_external_gateway + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'HaVpnGateway' + imports: 'selfLink' + - name: 'router' + type: ResourceRef + description: | + URL of router resource to be used for dynamic routing. + immutable: true + custom_expand: 'templates/terraform/custom_expand/go/compute_full_url.tmpl' + resource: 'Router' + imports: 'selfLink' + - name: 'peerIp' + type: String + description: | + IP address of the peer VPN gateway. Only IPv4 is supported. + default_from_api: true + validation: + function: 'validatePeerAddr' + - name: 'sharedSecret' + type: String + description: | + Shared secret used to set the secure session between the Cloud VPN + gateway and the peer VPN gateway. + required: true + ignore_read: true + sensitive: true + - name: 'sharedSecretHash' + type: String + description: | + Hash of the shared secret. + output: true + - name: 'ikeVersion' + type: Integer + description: | + IKE protocol version to use when establishing the VPN tunnel with + peer VPN gateway. + Acceptable IKE versions are 1 or 2. Default version is 2. 
+ default_value: 2 + - name: 'localTrafficSelector' + type: Array + description: | + Local traffic selector to use when establishing the VPN tunnel with + peer VPN gateway. The value should be a CIDR formatted string, + for example `192.168.0.0/16`. The ranges should be disjoint. + Only IPv4 is supported. + is_set: true + default_from_api: true + item_type: + type: String + - name: 'remoteTrafficSelector' + type: Array + description: | + Remote traffic selector to use when establishing the VPN tunnel with + peer VPN gateway. The value should be a CIDR formatted string, + for example `192.168.0.0/16`. The ranges should be disjoint. + Only IPv4 is supported. + is_set: true + default_from_api: true + item_type: + type: String + - name: 'labels' + type: KeyValueLabels + description: Labels to apply to this VpnTunnel. + immutable: false + update_url: 'projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}/setLabels' + update_verb: 'POST' + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + output: true + update_url: 'projects/{{project}}/regions/{{region}}/vpnTunnels/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' + - name: 'detailedStatus' + type: String + description: 'Detailed status message for the VPN tunnel.' + output: true diff --git a/mmv1/products/compute/go_Zone.yaml b/mmv1/products/compute/go_Zone.yaml new file mode 100644 index 000000000000..a88b4719203a --- /dev/null +++ b/mmv1/products/compute/go_Zone.yaml @@ -0,0 +1,117 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Zone' +kind: 'compute#zone' +description: 'Represents a Zone resource.' +# Used as a resource reference +exclude: true +readonly: true +docs: +base_url: 'projects/{{project}}/zones' +has_self_link: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +collection_url_key: 'items' +custom_code: +parameters: +properties: + - name: 'creationTimestamp' + type: Time + description: 'Creation timestamp in RFC3339 text format.' + output: true + - name: 'deprecated' + type: NestedObject + description: 'The deprecation status associated with this machine type.' + output: true + properties: + - name: 'deleted' + type: Time + description: | + An optional RFC3339 timestamp on or after which the state of this + resource is intended to change to DELETED. This is only + informational and the status will not change unless the client + explicitly changes it. + output: true + - name: 'deprecated' + type: Time + description: | + An optional RFC3339 timestamp on or after which the state of this + resource is intended to change to DEPRECATED. This is only + informational and the status will not change unless the client + explicitly changes it. + output: true + - name: 'obsolete' + type: Time + description: | + An optional RFC3339 timestamp on or after which the state of this + resource is intended to change to OBSOLETE. This is only + informational and the status will not change unless the client + explicitly changes it. 
+ output: true + - name: 'replacement' + type: String + description: | + The URL of the suggested replacement for a deprecated resource. + The suggested replacement resource must be the same kind of + resource as the deprecated resource. + output: true + - name: 'state' + type: Enum + description: | + The deprecation state of this resource. This can be DEPRECATED, + OBSOLETE, or DELETED. Operations which create a new resource + using a DEPRECATED resource will return successfully, but with a + warning indicating the deprecated resource and recommending its + replacement. Operations which use OBSOLETE or DELETED resources + will be rejected and result in an error. + output: true + enum_values: + - 'DEPRECATED' + - 'OBSOLETE' + - 'DELETED' + - name: 'description' + type: String + description: 'An optional textual description of the resource.' + output: true + - name: 'id' + type: Integer + description: 'The unique identifier for the resource.' + output: true + - name: 'name' + type: String + description: 'Name of the resource.' + - name: 'region' + type: ResourceRef + description: 'The region where the zone is located.' + output: true + custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' + resource: 'Region' + imports: 'selfLink' + - name: 'status' + type: Enum + description: 'The status of the zone.' + output: true + enum_values: + - 'UP' + - 'DOWN' + - name: 'availableCpuPlatforms' + type: Array + description: 'The available CPU platforms in this zone' + output: true + item_type: + type: String diff --git a/mmv1/products/compute/go_product.yaml b/mmv1/products/compute/go_product.yaml new file mode 100644 index 000000000000..b239add11991 --- /dev/null +++ b/mmv1/products/compute/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Compute' +display_name: 'Compute Engine' +versions: + - name: 'ga' + base_url: 'https://compute.googleapis.com/compute/v1/' + - name: 'beta' + base_url: 'https://compute.googleapis.com/compute/beta/' +scopes: + - 'https://www.googleapis.com/auth/compute' diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index e423c017745e..b74f8d4558cf 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -305,7 +305,7 @@ func convertTemplate(folder string) int { } data = r.ReplaceAll(data, []byte(`{{- end }}`)) - copyRight := `{{- /* + copyRight := `{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -316,7 +316,7 @@ func convertTemplate(folder string) int { WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}}` +*/ -}}` // Replace copyright r, err = regexp.Compile(`(?s)<%[-\s#]*[tT]he license inside this.*?limitations under the License..*?%>`) if err != nil { @@ -356,6 +356,7 @@ func checkExceptionList(filePath string) bool { "custom_flatten/bigquery_table_ref_extract_sourcetable.go", "custom_flatten/bigquery_table_ref_query_destinationtable.go", "unordered_list_customize_diff", + "default_if_empty", } for _, t := range exceptionPaths { diff --git a/mmv1/templates/terraform/constants/go/access_approval.go.tmpl b/mmv1/templates/terraform/constants/go/access_approval.go.tmpl index 9700051087d2..1c4a7a19475b 100644 --- a/mmv1/templates/terraform/constants/go/access_approval.go.tmpl +++ b/mmv1/templates/terraform/constants/go/access_approval.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} var accessApprovalCloudProductMapping = map[string]string{ "appengine.googleapis.com": "App Engine", "bigquery.googleapis.com": "BigQuery", diff --git a/mmv1/templates/terraform/constants/go/artifact_registry_repository.go.tmpl b/mmv1/templates/terraform/constants/go/artifact_registry_repository.go.tmpl index ee46402ef74f..46d3754d6602 100644 --- a/mmv1/templates/terraform/constants/go/artifact_registry_repository.go.tmpl +++ b/mmv1/templates/terraform/constants/go/artifact_registry_repository.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func upstreamPoliciesDiffSuppress(k, old, new string, d *schema.ResourceData) bool { o, n := d.GetChange("virtual_repository_config.0.upstream_policies") oldPolicies, ok := o.([]any) diff --git a/mmv1/templates/terraform/constants/go/backend_service.go.tmpl b/mmv1/templates/terraform/constants/go/backend_service.go.tmpl index 557bf476fc4d..31123d1a9e54 100644 --- a/mmv1/templates/terraform/constants/go/backend_service.go.tmpl +++ b/mmv1/templates/terraform/constants/go/backend_service.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // suppress changes on sample_rate if log_config is set to disabled. func suppressWhenDisabled(k, old, new string, d *schema.ResourceData) bool { _, n := d.GetChange("log_config.0.enable") diff --git a/mmv1/templates/terraform/constants/go/billing_budget.tmpl b/mmv1/templates/terraform/constants/go/billing_budget.tmpl index 2e1fc3ce20f2..bf8fb67c6bc7 100644 --- a/mmv1/templates/terraform/constants/go/billing_budget.tmpl +++ b/mmv1/templates/terraform/constants/go/billing_budget.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Check to see if a specified value in the config exists and suppress diffs if so. Otherwise run EmptyOrDefaultStringSuppress. 
diff --git a/mmv1/templates/terraform/constants/go/binaryauthorization_policy.tmpl b/mmv1/templates/terraform/constants/go/binaryauthorization_policy.tmpl index 49893588c535..2c4e5d6c27bb 100644 --- a/mmv1/templates/terraform/constants/go/binaryauthorization_policy.tmpl +++ b/mmv1/templates/terraform/constants/go/binaryauthorization_policy.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func DefaultBinaryAuthorizationPolicy(project string) map[string]interface{} { return map[string]interface{}{ "name": fmt.Sprintf("projects/%s/policy", project), diff --git a/mmv1/templates/terraform/constants/go/cert_manager.tmpl b/mmv1/templates/terraform/constants/go/cert_manager.tmpl index efc21d4c797b..5cbc173001a4 100644 --- a/mmv1/templates/terraform/constants/go/cert_manager.tmpl +++ b/mmv1/templates/terraform/constants/go/cert_manager.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func certManagerDefaultScopeDiffSuppress(_, old, new string, diff *schema.ResourceData) bool { if old == "" && new == "DEFAULT" || old == "DEFAULT" && new == "" { diff --git a/mmv1/templates/terraform/constants/go/compute_service_attachment.go.tmpl b/mmv1/templates/terraform/constants/go/compute_service_attachment.go.tmpl index e6f32967cf44..560f03a720d2 100644 --- a/mmv1/templates/terraform/constants/go/compute_service_attachment.go.tmpl +++ b/mmv1/templates/terraform/constants/go/compute_service_attachment.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Hash based on key, which is either project_id_or_num or network_url. func computeServiceAttachmentConsumerAcceptListsHash(v interface{}) int { diff --git a/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl b/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl index 3f409f9794a5..0bc5c0e76850 100644 --- a/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl +++ b/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func resourceDatastreamStreamCustomDiffFunc(diff tpgresource.TerraformResourceDiff) error { if diff.HasChange("desired_state") { old, new := diff.GetChange("desired_state") diff --git a/mmv1/templates/terraform/constants/go/dlp_stored_info_type.go.tmpl b/mmv1/templates/terraform/constants/go/dlp_stored_info_type.go.tmpl index 17f66396a9a7..ee31a43a150f 100644 --- a/mmv1/templates/terraform/constants/go/dlp_stored_info_type.go.tmpl +++ b/mmv1/templates/terraform/constants/go/dlp_stored_info_type.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // This customizeDiff allows updating the dictionary, regex, and large_custom_dictionary fields, but // it recreates the resource if changing between these fields. e.g., updating the regex field should // be allowed, while changing from regex to dictionary should trigger the recreation of the resource. diff --git a/mmv1/templates/terraform/constants/go/firewall.tmpl b/mmv1/templates/terraform/constants/go/firewall.tmpl index bf97dbb24fe3..af53f902e987 100644 --- a/mmv1/templates/terraform/constants/go/firewall.tmpl +++ b/mmv1/templates/terraform/constants/go/firewall.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func resourceComputeFirewallRuleHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) diff --git a/mmv1/templates/terraform/constants/go/monitoring_alert_policy.go.tmpl b/mmv1/templates/terraform/constants/go/monitoring_alert_policy.go.tmpl index f173a2f5f2eb..e6bef3f23850 100644 --- a/mmv1/templates/terraform/constants/go/monitoring_alert_policy.go.tmpl +++ b/mmv1/templates/terraform/constants/go/monitoring_alert_policy.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // API does not return a value for REDUCE_NONE func crossSeriesReducerDiffSuppress(k, old, new string, d *schema.ResourceData) bool { diff --git a/mmv1/templates/terraform/constants/go/network_endpoints.go.tmpl b/mmv1/templates/terraform/constants/go/network_endpoints.go.tmpl index 2283fd68d4d1..2f0fc216b2cf 100644 --- a/mmv1/templates/terraform/constants/go/network_endpoints.go.tmpl +++ b/mmv1/templates/terraform/constants/go/network_endpoints.go.tmpl @@ -1,5 +1,5 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -10,7 +10,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} type NetworkEndpointsNetworkEndpoint struct { IPAddress string diff --git a/mmv1/templates/terraform/constants/go/network_services_gateway.go.tmpl b/mmv1/templates/terraform/constants/go/network_services_gateway.go.tmpl index 0810dbe9c9c0..c5e1a0a8b4fa 100644 --- a/mmv1/templates/terraform/constants/go/network_services_gateway.go.tmpl +++ b/mmv1/templates/terraform/constants/go/network_services_gateway.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Checks if there is another gateway under the same location. func gatewaysSameLocation(d *schema.ResourceData, config *transport_tpg.Config, billingProject, userAgent string) ([]interface{}, error) { log.Print("[DEBUG] Looking for gateways under the same location.") diff --git a/mmv1/templates/terraform/constants/go/region_backend_service.go.tmpl b/mmv1/templates/terraform/constants/go/region_backend_service.go.tmpl index 38b1050a7c7c..85b43ba5f628 100644 --- a/mmv1/templates/terraform/constants/go/region_backend_service.go.tmpl +++ b/mmv1/templates/terraform/constants/go/region_backend_service.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Fields in "backends" that are not allowed for non-managed backend services // (loadBalancingScheme) - the API returns an error if they are set at all // in the request. 
diff --git a/mmv1/templates/terraform/constants/go/region_ssl_policy.tmpl b/mmv1/templates/terraform/constants/go/region_ssl_policy.tmpl index 61dc45a58b7f..d333f71872ef 100644 --- a/mmv1/templates/terraform/constants/go/region_ssl_policy.tmpl +++ b/mmv1/templates/terraform/constants/go/region_ssl_policy.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func regionSslPolicyCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { profile := diff.Get("profile") customFeaturesCount := diff.Get("custom_features.#") diff --git a/mmv1/templates/terraform/constants/go/router.go.tmpl b/mmv1/templates/terraform/constants/go/router.go.tmpl index db0cd4255a91..f4f93dfb5b2f 100644 --- a/mmv1/templates/terraform/constants/go/router.go.tmpl +++ b/mmv1/templates/terraform/constants/go/router.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} // customizeDiff func for additional checks on google_compute_router properties: func resourceComputeRouterCustomDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { diff --git a/mmv1/templates/terraform/constants/go/router_nat.go.tmpl b/mmv1/templates/terraform/constants/go/router_nat.go.tmpl index 6be9107ea142..c40c3073acb8 100644 --- a/mmv1/templates/terraform/constants/go/router_nat.go.tmpl +++ b/mmv1/templates/terraform/constants/go/router_nat.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func resourceNameSetFromSelfLinkSet(v interface{}) *schema.Set { if v == nil { return schema.NewSet(schema.HashString, nil) diff --git a/mmv1/templates/terraform/constants/go/service_networking_vpc_service_controls.go.tmpl b/mmv1/templates/terraform/constants/go/service_networking_vpc_service_controls.go.tmpl new file mode 100644 index 000000000000..78776e58a471 --- /dev/null +++ b/mmv1/templates/terraform/constants/go/service_networking_vpc_service_controls.go.tmpl @@ -0,0 +1,76 @@ +func resourceServiceNetworkingVPCServiceControlsSet(d *schema.ResourceData, meta interface{}, config *transport_tpg.Config) error { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + projectNumber, err := getProjectNumber(d, config, project, userAgent) + if err != nil { + return err + } + + network := d.Get("network").(string) + enabled := d.Get("enabled").(bool) + + obj := make(map[string]interface{}) + obj["consumerNetwork"] = fmt.Sprintf("projects/%s/global/networks/%s", 
projectNumber, network) + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ServiceNetworkingBasePath{{"}}"}}services/{{"{{"}}service{{"}}"}}") + if err != nil { + return err + } + + if enabled { + url = url + ":enableVpcServiceControls" + } else { + url = url + ":disableVpcServiceControls" + } + + log.Printf("[DEBUG] Setting service networking VPC service controls: %#v", obj) + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error creating VPCServiceControls: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "services/{{"{{"}}service{{"}}"}}/projects/{{"{{"}}project{{"}}"}}/networks/{{"{{"}}network{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ServiceNetworkingOperationWaitTime( + config, res, "Setting service networking VPC service controls", userAgent, project, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually create + d.SetId("") + return fmt.Errorf("Error waiting to set service networking VPC service controls: %s", err) + } + + log.Printf("[DEBUG] Finished setting service networking VPC service controls %q: %#v", d.Id(), res) + + return resourceServiceNetworkingVPCServiceControlsRead(d, meta) +} diff --git a/mmv1/templates/terraform/constants/go/source_repo_repository.go.tmpl b/mmv1/templates/terraform/constants/go/source_repo_repository.go.tmpl index 34c8f7d1ff43..f4dfd14cf77a 100644 --- a/mmv1/templates/terraform/constants/go/source_repo_repository.go.tmpl +++ 
b/mmv1/templates/terraform/constants/go/source_repo_repository.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func resourceSourceRepoRepositoryPubSubConfigsHash(v interface{}) int { if v == nil { return 0 diff --git a/mmv1/templates/terraform/constants/go/spanner_database.go.tmpl b/mmv1/templates/terraform/constants/go/spanner_database.go.tmpl index 3d82ddec0208..9370e272df37 100644 --- a/mmv1/templates/terraform/constants/go/spanner_database.go.tmpl +++ b/mmv1/templates/terraform/constants/go/spanner_database.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // customizeDiff func for additional checks on google_spanner_database properties: func resourceSpannerDBDdlCustomDiffFunc(diff tpgresource.TerraformResourceDiff) error { old, new := diff.GetChange("ddl") diff --git a/mmv1/templates/terraform/constants/go/spanner_instance.go.tmpl b/mmv1/templates/terraform/constants/go/spanner_instance.go.tmpl index 8ae1f0a27c34..522a02da844a 100644 --- a/mmv1/templates/terraform/constants/go/spanner_instance.go.tmpl +++ b/mmv1/templates/terraform/constants/go/spanner_instance.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func deleteSpannerBackups(d *schema.ResourceData, config *transport_tpg.Config, res map[string]interface{}, userAgent string, billingProject string) error { var v interface{} var ok bool diff --git a/mmv1/templates/terraform/constants/go/spanner_instance_config.go.tmpl b/mmv1/templates/terraform/constants/go/spanner_instance_config.go.tmpl index 42bbd47ae12f..988ffdbae97c 100644 --- a/mmv1/templates/terraform/constants/go/spanner_instance_config.go.tmpl +++ b/mmv1/templates/terraform/constants/go/spanner_instance_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func replicasHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) diff --git a/mmv1/templates/terraform/constants/go/ssl_policy.tmpl b/mmv1/templates/terraform/constants/go/ssl_policy.tmpl index 88a8ee7cf17c..40544d2cf32d 100644 --- a/mmv1/templates/terraform/constants/go/ssl_policy.tmpl +++ b/mmv1/templates/terraform/constants/go/ssl_policy.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func sslPolicyCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { profile := diff.Get("profile") customFeaturesCount := diff.Get("custom_features.#") diff --git a/mmv1/templates/terraform/constants/go/subscription.go.tmpl b/mmv1/templates/terraform/constants/go/subscription.go.tmpl index aebdeafb008e..1fb3cf747795 100644 --- a/mmv1/templates/terraform/constants/go/subscription.go.tmpl +++ b/mmv1/templates/terraform/constants/go/subscription.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func comparePubsubSubscriptionExpirationPolicy(_, old, new string, _ *schema.ResourceData) bool { trimmedNew := strings.TrimLeft(new, "0") diff --git a/mmv1/templates/terraform/constants/go/tagtemplate_fields.go.tmpl b/mmv1/templates/terraform/constants/go/tagtemplate_fields.go.tmpl index 6bfdfb5da33e..3fb2624ef7eb 100644 --- a/mmv1/templates/terraform/constants/go/tagtemplate_fields.go.tmpl +++ b/mmv1/templates/terraform/constants/go/tagtemplate_fields.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} //Use it to delete TagTemplate Field func deleteTagTemplateField(d *schema.ResourceData, config *transport_tpg.Config, name, billingProject, userAgent string) (error) { diff --git a/mmv1/templates/terraform/custom_check_destroy/go/monitoring_monitored_project.go.tmpl b/mmv1/templates/terraform/custom_check_destroy/go/monitoring_monitored_project.go.tmpl index 494638cf3e49..0c2f700ab11e 100644 --- a/mmv1/templates/terraform/custom_check_destroy/go/monitoring_monitored_project.go.tmpl +++ b/mmv1/templates/terraform/custom_check_destroy/go/monitoring_monitored_project.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} config := acctest.GoogleProviderConfig(t) url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}MonitoringBasePath{{"}}"}}v1/locations/global/metricsScopes/{{"{{"}}metrics_scope{{"}}"}}") diff --git a/mmv1/templates/terraform/custom_create/go/service_networking_vpc_service_controls.go.tmpl b/mmv1/templates/terraform/custom_create/go/service_networking_vpc_service_controls.go.tmpl new file mode 100644 index 000000000000..6779a2b7246c --- /dev/null +++ b/mmv1/templates/terraform/custom_create/go/service_networking_vpc_service_controls.go.tmpl @@ -0,0 +1 @@ +return resourceServiceNetworkingVPCServiceControlsSet(d, meta, config) diff --git a/mmv1/templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl b/mmv1/templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl index 19e3c311ca38..3493454ce2e5 100644 --- a/mmv1/templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl +++ 
b/mmv1/templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { {{- if $.IsSet }} v = v.(*schema.Set).List() diff --git a/mmv1/templates/terraform/custom_expand/go/base64.go.tmpl b/mmv1/templates/terraform/custom_expand/go/base64.go.tmpl index 4b075b8554e8..d8a4241f722a 100644 --- a/mmv1/templates/terraform/custom_expand/go/base64.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/base64.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return nil, nil diff --git a/mmv1/templates/terraform/custom_expand/go/bigquery_access_role.go.tmpl b/mmv1/templates/terraform/custom_expand/go/bigquery_access_role.go.tmpl index d9b23e6d7915..d789edc7091e 100644 --- a/mmv1/templates/terraform/custom_expand/go/bigquery_access_role.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/bigquery_access_role.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return nil, nil diff --git a/mmv1/templates/terraform/custom_expand/go/bigquery_dataset_ref.go.tmpl b/mmv1/templates/terraform/custom_expand/go/bigquery_dataset_ref.go.tmpl index 7f0aad14d919..bb63a9cf7784 100644 --- a/mmv1/templates/terraform/custom_expand/go/bigquery_dataset_ref.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/bigquery_dataset_ref.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/mmv1/templates/terraform/custom_expand/go/bigquery_table_ref.go.tmpl b/mmv1/templates/terraform/custom_expand/go/bigquery_table_ref.go.tmpl index 192d65b16f1c..2cee71c3a8ec 100644 --- a/mmv1/templates/terraform/custom_expand/go/bigquery_table_ref.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/bigquery_table_ref.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/mmv1/templates/terraform/custom_expand/go/bigquery_table_ref_array.go.tmpl b/mmv1/templates/terraform/custom_expand/go/bigquery_table_ref_array.go.tmpl index 065a88c89c46..19fb689a348c 100644 --- a/mmv1/templates/terraform/custom_expand/go/bigquery_table_ref_array.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/bigquery_table_ref_array.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) req := make([]interface{}, 0, len(l)) diff --git a/mmv1/templates/terraform/custom_expand/go/bigtable_app_profile_routing.tmpl b/mmv1/templates/terraform/custom_expand/go/bigtable_app_profile_routing.tmpl index 892980a08656..ec33a04d251c 100644 --- a/mmv1/templates/terraform/custom_expand/go/bigtable_app_profile_routing.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/bigtable_app_profile_routing.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil || !v.(bool) { return nil, nil diff --git a/mmv1/templates/terraform/custom_expand/go/binaryauthorization_attestors.tmpl b/mmv1/templates/terraform/custom_expand/go/binaryauthorization_attestors.tmpl index a121db9fd01b..2c871906f019 100644 --- a/mmv1/templates/terraform/custom_expand/go/binaryauthorization_attestors.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/binaryauthorization_attestors.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { r := regexp.MustCompile("projects/(.+)/attestors/(.+)") diff --git a/mmv1/templates/terraform/custom_expand/go/bool_to_object.go.tmpl b/mmv1/templates/terraform/custom_expand/go/bool_to_object.go.tmpl index 9b67a35158eb..ce0c8bfd5895 100644 --- a/mmv1/templates/terraform/custom_expand/go/bool_to_object.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/bool_to_object.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil || !v.(bool) { return nil, nil diff --git a/mmv1/templates/terraform/custom_expand/go/bool_to_upper_string.tmpl b/mmv1/templates/terraform/custom_expand/go/bool_to_upper_string.tmpl index d1dde20ffcf1..100af9c9d271 100644 --- a/mmv1/templates/terraform/custom_expand/go/bool_to_upper_string.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/bool_to_upper_string.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return nil, nil diff --git a/mmv1/templates/terraform/custom_expand/go/certificate_manager_certificate_construct_full_url.go.tmpl b/mmv1/templates/terraform/custom_expand/go/certificate_manager_certificate_construct_full_url.go.tmpl index 30f5df957f1a..fabce69a7f41 100644 --- a/mmv1/templates/terraform/custom_expand/go/certificate_manager_certificate_construct_full_url.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/certificate_manager_certificate_construct_full_url.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return nil, nil diff --git a/mmv1/templates/terraform/custom_expand/go/compute_full_url.tmpl b/mmv1/templates/terraform/custom_expand/go/compute_full_url.tmpl index aec724154409..1e1d3dd1fd32 100644 --- a/mmv1/templates/terraform/custom_expand/go/compute_full_url.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/compute_full_url.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil || v.(string) == "" { return "", nil diff --git a/mmv1/templates/terraform/custom_expand/go/computed_lite_subscription_topic.tmpl b/mmv1/templates/terraform/custom_expand/go/computed_lite_subscription_topic.tmpl index 1f6d07ca635e..548fc4d52118 100644 --- a/mmv1/templates/terraform/custom_expand/go/computed_lite_subscription_topic.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/computed_lite_subscription_topic.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { project, err := tpgresource.GetProject(d, config) if err != nil { diff --git a/mmv1/templates/terraform/custom_expand/go/computed_subscription_topic.tmpl b/mmv1/templates/terraform/custom_expand/go/computed_subscription_topic.tmpl index 720a17462bb3..940524b196cf 100644 --- a/mmv1/templates/terraform/custom_expand/go/computed_subscription_topic.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/computed_subscription_topic.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { project, err := tpgresource.GetProject(d, config) if err != nil { diff --git a/mmv1/templates/terraform/custom_expand/go/container_analysis_note.tmpl b/mmv1/templates/terraform/custom_expand/go/container_analysis_note.tmpl index 27448d9793a7..e613350a9020 100644 --- a/mmv1/templates/terraform/custom_expand/go/container_analysis_note.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/container_analysis_note.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { r := regexp.MustCompile("projects/(.+)/notes/(.+)") if r.MatchString(v.(string)) { diff --git a/mmv1/templates/terraform/custom_expand/go/containerattached_cluster_authorization_user.go.tmpl b/mmv1/templates/terraform/custom_expand/go/containerattached_cluster_authorization_user.go.tmpl index 20371f6d8a93..4d3eba6ea01b 100644 --- a/mmv1/templates/terraform/custom_expand/go/containerattached_cluster_authorization_user.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/containerattached_cluster_authorization_user.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} type attachedClusterUser struct { Username string `json:"username"` diff --git a/mmv1/templates/terraform/custom_expand/go/containerattached_cluster_empty_logging.go.tmpl b/mmv1/templates/terraform/custom_expand/go/containerattached_cluster_empty_logging.go.tmpl index 6dcb6241a75f..6714aa99383f 100644 --- a/mmv1/templates/terraform/custom_expand/go/containerattached_cluster_empty_logging.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/containerattached_cluster_empty_logging.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) diff --git a/mmv1/templates/terraform/custom_expand/go/data_catalog_tag.go.tmpl b/mmv1/templates/terraform/custom_expand/go/data_catalog_tag.go.tmpl index ec2322b5901d..cd2e2bde8c67 100644 --- a/mmv1/templates/terraform/custom_expand/go/data_catalog_tag.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/data_catalog_tag.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // we flattened the original["enum_value"]["display_name"] object to be just original["enum_value"] so here, // v is the value we want from the config diff --git a/mmv1/templates/terraform/custom_expand/go/datastream_stream_dataset_id.go.tmpl b/mmv1/templates/terraform/custom_expand/go/datastream_stream_dataset_id.go.tmpl index 4a4513481de1..58b5fc887c66 100644 --- a/mmv1/templates/terraform/custom_expand/go/datastream_stream_dataset_id.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/datastream_stream_dataset_id.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { s := v.(string) re := regexp.MustCompile(`projects/(.+)/datasets/([^\.\?\#]+)`) diff --git a/mmv1/templates/terraform/custom_expand/go/days_to_duration_string.go.tmpl b/mmv1/templates/terraform/custom_expand/go/days_to_duration_string.go.tmpl index 726206deb89f..e07883db9d16 100644 --- a/mmv1/templates/terraform/custom_expand/go/days_to_duration_string.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/days_to_duration_string.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return nil, nil diff --git a/mmv1/templates/terraform/custom_expand/go/default_to_project.go.tmpl b/mmv1/templates/terraform/custom_expand/go/default_to_project.go.tmpl index 3018f73aecd4..f87a2056ccd1 100644 --- a/mmv1/templates/terraform/custom_expand/go/default_to_project.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/default_to_project.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // If the property hasn't been explicitly set in config use the project defined by the provider or env. 
func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { diff --git a/mmv1/templates/terraform/custom_expand/go/disk_consistency_group_policy.tmpl b/mmv1/templates/terraform/custom_expand/go/disk_consistency_group_policy.tmpl index 97fda731b888..4aa4b5ead7e2 100644 --- a/mmv1/templates/terraform/custom_expand/go/disk_consistency_group_policy.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/disk_consistency_group_policy.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) diff --git a/mmv1/templates/terraform/custom_expand/go/dns_managed_zone_private_visibility_config.go.tmpl b/mmv1/templates/terraform/custom_expand/go/dns_managed_zone_private_visibility_config.go.tmpl index becfd5eb5d2a..18cb76ef57ca 100644 --- a/mmv1/templates/terraform/custom_expand/go/dns_managed_zone_private_visibility_config.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/dns_managed_zone_private_visibility_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) if len(l) == 0 || l[0] == nil { diff --git a/mmv1/templates/terraform/custom_expand/go/firewall_log_config.go.tmpl b/mmv1/templates/terraform/custom_expand/go/firewall_log_config.go.tmpl index 6d24ba65a888..32f3feb64713 100644 --- a/mmv1/templates/terraform/custom_expand/go/firewall_log_config.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/firewall_log_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) transformed := make(map[string]interface{}) diff --git a/mmv1/templates/terraform/custom_expand/go/gke_hub_membership.tmpl b/mmv1/templates/terraform/custom_expand/go/gke_hub_membership.tmpl index 6c9d11a70d18..21f522d77dbc 100644 --- a/mmv1/templates/terraform/custom_expand/go/gke_hub_membership.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/gke_hub_membership.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if strings.HasPrefix(v.(string), "//") { return v, nil diff --git a/mmv1/templates/terraform/custom_expand/go/json_schema.tmpl b/mmv1/templates/terraform/custom_expand/go/json_schema.tmpl index dac9da3f7844..47d4ad2420f5 100644 --- a/mmv1/templates/terraform/custom_expand/go/json_schema.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/json_schema.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { b := []byte(v.(string)) if len(b) == 0 { diff --git a/mmv1/templates/terraform/custom_expand/go/json_value.tmpl b/mmv1/templates/terraform/custom_expand/go/json_value.tmpl index 368e4eba22e3..5402fe154bd7 100644 --- a/mmv1/templates/terraform/custom_expand/go/json_value.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/json_value.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { b := []byte(v.(string)) if len(b) == 0 { diff --git a/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl b/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl index 9b9f5f103c10..c5754ad27380 100644 --- a/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { var certName string if v, ok := d.GetOk("name"); ok { diff --git a/mmv1/templates/terraform/custom_expand/go/network_management_connectivity_test_name.go.tmpl b/mmv1/templates/terraform/custom_expand/go/network_management_connectivity_test_name.go.tmpl index ce8dcd49c1e5..895be69be76c 100644 --- a/mmv1/templates/terraform/custom_expand/go/network_management_connectivity_test_name.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/network_management_connectivity_test_name.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // projects/X/tests/Y - note not "connectivityTests" f, err := tpgresource.ParseGlobalFieldValue("tests", v.(string), "project", d, config, true) diff --git a/mmv1/templates/terraform/custom_expand/go/preserved_state_disks.go.tmpl b/mmv1/templates/terraform/custom_expand/go/preserved_state_disks.go.tmpl index e311ae9792bb..99354e968cd4 100644 --- a/mmv1/templates/terraform/custom_expand/go/preserved_state_disks.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/preserved_state_disks.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return map[string]interface{}{}, nil diff --git a/mmv1/templates/terraform/custom_expand/go/pubsublite_topic_reservation_config_throughput_reservation.go.tmpl b/mmv1/templates/terraform/custom_expand/go/pubsublite_topic_reservation_config_throughput_reservation.go.tmpl index 61131a40a2b1..94203257c486 100644 --- a/mmv1/templates/terraform/custom_expand/go/pubsublite_topic_reservation_config_throughput_reservation.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/pubsublite_topic_reservation_config_throughput_reservation.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { f, err := tpgresource.ParseRegionalFieldValue("reservations", v.(string), "project", "region", "zone", d, config, true) if err != nil { diff --git a/mmv1/templates/terraform/custom_expand/go/redis_instance_authorized_network.tmpl b/mmv1/templates/terraform/custom_expand/go/redis_instance_authorized_network.tmpl index ffb52b04300f..34c8043fb995 100644 --- a/mmv1/templates/terraform/custom_expand/go/redis_instance_authorized_network.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/redis_instance_authorized_network.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { fv, err := tpgresource.ParseNetworkFieldValue(v.(string), d, config) if err != nil { diff --git a/mmv1/templates/terraform/custom_expand/go/reference_to_backend.tmpl b/mmv1/templates/terraform/custom_expand/go/reference_to_backend.tmpl index 55dfb8bf6d9b..bcdd41d6f9df 100644 --- a/mmv1/templates/terraform/custom_expand/go/reference_to_backend.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/reference_to_backend.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} {{- /* This provides the best long-form self link possible given the input. If the input is a full URL including scheme, we return it unmodified https://compute.googleapis.com/v1/projects/foo/regions/bar/backendBuckets/baz -> (the same) diff --git a/mmv1/templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl b/mmv1/templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl index 3a2799e26c7a..973e737d8467 100644 --- a/mmv1/templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { f, err := {{ template "expandResourceRef" dict "VarName" "v.(string)" "ResourceRef" $.ResourceRef "ResourceType" $.ResourceType}} if err != nil { diff --git a/mmv1/templates/terraform/custom_expand/go/secret_version_enable.go.tmpl b/mmv1/templates/terraform/custom_expand/go/secret_version_enable.go.tmpl index c21cf7eedf8f..f1bd48c68a16 100644 --- a/mmv1/templates/terraform/custom_expand/go/secret_version_enable.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/secret_version_enable.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { name := d.Get("name").(string) if name == "" { diff --git a/mmv1/templates/terraform/custom_expand/go/secret_version_secret_data.go.tmpl b/mmv1/templates/terraform/custom_expand/go/secret_version_secret_data.go.tmpl index bcf6cf8e1866..d9bb2b8f6045 100644 --- a/mmv1/templates/terraform/custom_expand/go/secret_version_secret_data.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/secret_version_secret_data.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return nil, nil diff --git a/mmv1/templates/terraform/custom_expand/go/self_link_from_name.tmpl b/mmv1/templates/terraform/custom_expand/go/self_link_from_name.tmpl index 1e962710a25a..5b26c6437d99 100644 --- a/mmv1/templates/terraform/custom_expand/go/self_link_from_name.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/self_link_from_name.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from a partial self link. if v == nil || v.(string) == "" { diff --git a/mmv1/templates/terraform/custom_expand/go/shortname_to_url.go.tmpl b/mmv1/templates/terraform/custom_expand/go/shortname_to_url.go.tmpl index f89ca0ed61c2..fd1532722ff6 100644 --- a/mmv1/templates/terraform/custom_expand/go/shortname_to_url.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/shortname_to_url.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { return tpgresource.ReplaceVars(d, config, "{{$.GetIdFormat}}") } diff --git a/mmv1/templates/terraform/custom_expand/go/spanner_instance_config.go.tmpl b/mmv1/templates/terraform/custom_expand/go/spanner_instance_config.go.tmpl index 704965ec0e12..693581a253f4 100644 --- a/mmv1/templates/terraform/custom_expand/go/spanner_instance_config.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/spanner_instance_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { r := regexp.MustCompile("projects/(.+)/instanceConfigs/(.+)") if r.MatchString(v.(string)) { diff --git a/mmv1/templates/terraform/custom_expand/go/subnetwork_log_config.go.tmpl b/mmv1/templates/terraform/custom_expand/go/subnetwork_log_config.go.tmpl index fb887baa873c..f545370b60c1 100644 --- a/mmv1/templates/terraform/custom_expand/go/subnetwork_log_config.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/subnetwork_log_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { l := v.([]interface{}) transformed := make(map[string]interface{}) diff --git a/mmv1/templates/terraform/custom_flatten/go/alloydb_cluster_input_automated_backup_policy_start_times_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/alloydb_cluster_input_automated_backup_policy_start_times_flatten.go.tmpl index ec5aa9d26ade..5f6683e17f4c 100644 --- a/mmv1/templates/terraform/custom_flatten/go/alloydb_cluster_input_automated_backup_policy_start_times_flatten.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/alloydb_cluster_input_automated_backup_policy_start_times_flatten.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/alloydb_cluster_input_user_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/alloydb_cluster_input_user_flatten.go.tmpl index 1b0d068adac0..ff16cc4b1143 100644 --- a/mmv1/templates/terraform/custom_flatten/go/alloydb_cluster_input_user_flatten.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/alloydb_cluster_input_user_flatten.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return []interface{}{ map[string]interface{}{ diff --git a/mmv1/templates/terraform/custom_flatten/go/apigee_organization_property.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/apigee_organization_property.go.tmpl index 2797ef9fbce5..50d88829e20f 100644 --- a/mmv1/templates/terraform/custom_flatten/go/apigee_organization_property.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/apigee_organization_property.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/appengine_standardappversion_automatic_scaling_handlenil.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/appengine_standardappversion_automatic_scaling_handlenil.go.tmpl index b66ecbed78c8..1037f7555d69 100644 --- a/mmv1/templates/terraform/custom_flatten/go/appengine_standardappversion_automatic_scaling_handlenil.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/appengine_standardappversion_automatic_scaling_handlenil.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flattenAppEngineStandardAppVersionAutomaticScaling(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/bigquery_connection_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/bigquery_connection_flatten.go.tmpl index af964da3a282..a9920df36c10 100644 --- a/mmv1/templates/terraform/custom_flatten/go/bigquery_connection_flatten.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/bigquery_connection_flatten.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return []interface{}{ map[string]interface{}{ diff --git a/mmv1/templates/terraform/custom_flatten/go/bigquery_dataset_location.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/bigquery_dataset_location.go.tmpl index feed168bc2bd..b38e659db266 100644 --- a/mmv1/templates/terraform/custom_flatten/go/bigquery_dataset_location.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/bigquery_dataset_location.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} // Older Datasets in BigQuery have no Location set in the API response. This may be an issue when importing // datasets created before BigQuery was available in multiple zones. We can safely assume that these datasets // are in the US, as this was the default at the time. diff --git a/mmv1/templates/terraform/custom_flatten/go/bigquery_dataset_ref.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/bigquery_dataset_ref.go.tmpl index bffca68048ec..2a8191828c01 100644 --- a/mmv1/templates/terraform/custom_flatten/go/bigquery_dataset_ref.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/bigquery_dataset_ref.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/bigquery_kms_version.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/bigquery_kms_version.go.tmpl index 57c14e3ccf31..5ac2e603d8bf 100644 --- a/mmv1/templates/terraform/custom_flatten/go/bigquery_kms_version.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/bigquery_kms_version.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} // KmsKeyName switched from using a key name to a key version, this will separate the key name from the key version and save them // separately in state. https://github.com/hashicorp/terraform-provider-google/issues/9208 func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { diff --git a/mmv1/templates/terraform/custom_flatten/go/bigquery_table_ref_copy_sourcetables.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/bigquery_table_ref_copy_sourcetables.go.tmpl index 3d8a658ef6ce..1e0934965e9d 100644 --- a/mmv1/templates/terraform/custom_flatten/go/bigquery_table_ref_copy_sourcetables.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/bigquery_table_ref_copy_sourcetables.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/bigtable_app_profile_routing.tmpl b/mmv1/templates/terraform/custom_flatten/go/bigtable_app_profile_routing.tmpl index dc356bc13185..510165a22154 100644 --- a/mmv1/templates/terraform/custom_flatten/go/bigtable_app_profile_routing.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/bigtable_app_profile_routing.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return false diff --git a/mmv1/templates/terraform/custom_flatten/go/cloudbuild_approval_required.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/cloudbuild_approval_required.go.tmpl index 5aa8c0e7e5b5..a50dfe88b47f 100644 --- a/mmv1/templates/terraform/custom_flatten/go/cloudbuild_approval_required.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/cloudbuild_approval_required.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { transformed := make(map[string]interface{}) if v == nil { diff --git a/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_admin.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_admin.go.tmpl index fb318d1d0e4e..ef5c426a2141 100644 --- a/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_admin.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_admin.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flattenClouddomainsRegistrationContactSettingsAdminContact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_registrant.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_registrant.go.tmpl index 87449410eb81..cfa9cd3d9f55 100644 --- a/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_registrant.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_registrant.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flattenClouddomainsRegistrationContactSettingsRegistrantContact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_technical.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_technical.go.tmpl index 7f712275bb16..6b1bff930e59 100644 --- a/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_technical.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/clouddomains_ignore_numbers_technical.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flattenClouddomainsRegistrationContactSettingsTechnicalContact(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_bucket.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_bucket.go.tmpl index 7df3d00588df..81a6e12e495a 100644 --- a/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_bucket.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_bucket.go.tmpl @@ -1,5 +1,5 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -10,7 +10,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // This flatten function is shared between the resource and the datasource. // TF Input format: {bucket-name} diff --git a/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_object.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_object.go.tmpl index 463f795920fb..e0fe7cbe43ac 100644 --- a/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_object.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/cloudfunctions2_function_source_object.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // This flatten function is shared between the resource and the datasource. // TF Input format: {object-name} diff --git a/mmv1/templates/terraform/custom_flatten/go/cloudrun_ignore_force_override.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/cloudrun_ignore_force_override.go.tmpl index ecadfb260ca6..3ec5347a2c72 100644 --- a/mmv1/templates/terraform/custom_flatten/go/cloudrun_ignore_force_override.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/cloudrun_ignore_force_override.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // We want to ignore read on this field, but cannot because it is nested return d.Get("spec.0.force_override") diff --git a/mmv1/templates/terraform/custom_flatten/go/cloudscheduler_job_appenginerouting.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/cloudscheduler_job_appenginerouting.go.tmpl index 6da457d6c089..23ee53ece4eb 100644 --- a/mmv1/templates/terraform/custom_flatten/go/cloudscheduler_job_appenginerouting.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/cloudscheduler_job_appenginerouting.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // An `appEngineRouting` in API response is useless, so we set config values rather than api response to state. func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { diff --git a/mmv1/templates/terraform/custom_flatten/go/cloudtasks_queue_appenginerouting.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/cloudtasks_queue_appenginerouting.go.tmpl index e44891ae1022..8ff917aed00d 100644 --- a/mmv1/templates/terraform/custom_flatten/go/cloudtasks_queue_appenginerouting.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/cloudtasks_queue_appenginerouting.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // service, version, and instance are input-only. host is output-only. func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { diff --git a/mmv1/templates/terraform/custom_flatten/go/compute_router_range.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/compute_router_range.go.tmpl index e4ce7115a853..ad0b6e400916 100644 --- a/mmv1/templates/terraform/custom_flatten/go/compute_router_range.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/compute_router_range.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/compute_snapshot_snapshot_encryption_raw_key.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/compute_snapshot_snapshot_encryption_raw_key.go.tmpl index 3b945377811e..cfd37d6dec6c 100644 --- a/mmv1/templates/terraform/custom_flatten/go/compute_snapshot_snapshot_encryption_raw_key.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/compute_snapshot_snapshot_encryption_raw_key.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("snapshot_encryption_key.0.raw_key") } diff --git a/mmv1/templates/terraform/custom_flatten/go/consumer_quote_override_override_value.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/consumer_quote_override_override_value.go.tmpl index dc915763522b..e019df6cddab 100644 --- a/mmv1/templates/terraform/custom_flatten/go/consumer_quote_override_override_value.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/consumer_quote_override_override_value.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return "0" diff --git a/mmv1/templates/terraform/custom_flatten/go/containerattached_cluster_authorization_user.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/containerattached_cluster_authorization_user.go.tmpl index 4f01f009119b..7a145d0ade70 100644 --- a/mmv1/templates/terraform/custom_flatten/go/containerattached_cluster_authorization_user.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/containerattached_cluster_authorization_user.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // The custom expander transforms input into something like this: // authorization { diff --git a/mmv1/templates/terraform/custom_flatten/go/data_catalog_tag.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/data_catalog_tag.go.tmpl index 755d4a7e152c..7a3a3c7efb03 100644 --- a/mmv1/templates/terraform/custom_flatten/go/data_catalog_tag.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/data_catalog_tag.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_alloydb_settings_initial_user_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_alloydb_settings_initial_user_password.go.tmpl index c546a22603ea..4781b335b4dc 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_alloydb_settings_initial_user_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_alloydb_settings_initial_user_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("alloydb.0.settings.0.initial_user.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_cloudsql_settings_root_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_cloudsql_settings_root_password.go.tmpl index 5c19fd80a7b1..e5ad4fb53244 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_cloudsql_settings_root_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_cloudsql_settings_root_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("cloudsql.0.settings.0.root_password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_password.go.tmpl index f8bd29bd04ff..4da22b3453b5 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_ca_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_ca_certificate.go.tmpl index 8dbb97f6e6aa..0f98aebf4469 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_ca_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_ca_certificate.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql.0.ssl.0.ca_certificate") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_client_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_client_certificate.go.tmpl index dd612f56486d..613153fcf0ac 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_client_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_client_certificate.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql.0.ssl.0.client_certificate") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_client_key.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_client_key.go.tmpl index 583aad6dfc14..40bfb2fed102 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_client_key.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_mysql_ssl_client_key.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql.0.ssl.0.client_key") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_forward_ssh_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_forward_ssh_password.go.tmpl index de580662c608..6e7a408a3b84 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_forward_ssh_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_forward_ssh_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("oracle.0.forward_ssh_connectivity.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_forward_ssh_private_key.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_forward_ssh_private_key.go.tmpl index 081dad1e37e8..55b25a952269 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_forward_ssh_private_key.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_forward_ssh_private_key.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("oracle.0.forward_ssh_connectivity.0.private_key") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_password.go.tmpl index 3436d0dc7e99..05f0399cf243 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("oracle.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_ca_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_ca_certificate.go.tmpl index 226d2eeaa58f..2dc49161499f 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_ca_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_ca_certificate.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("oracle.0.ssl.0.ca_certificate") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_client_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_client_certificate.go.tmpl index d455e7407300..e990ab44bada 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_client_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_client_certificate.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("oracle.0.ssl.0.client_certificate") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_client_key.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_client_key.go.tmpl index 861ac8747d20..35f46ad59a02 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_client_key.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_oracle_ssl_client_key.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("oracle.0.ssl.0.client_key") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_password.go.tmpl index 4a12a2fac987..a3a3597a8005 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("postgresql.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_ca_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_ca_certificate.go.tmpl index 2631a67015f9..a6ee3a208f81 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_ca_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_ca_certificate.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("postgresql.0.ssl.0.ca_certificate") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_client_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_client_certificate.go.tmpl index 68c859b87c3b..51a56266b40c 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_client_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_client_certificate.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("postgresql.0.ssl.0.client_certificate") } diff --git a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_client_key.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_client_key.go.tmpl index e6173bcda743..0890ed843a19 100644 --- a/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_client_key.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/database_migration_service_connection_profile_postgresql_ssl_client_key.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("postgresql.0.ssl.0.client_key") } diff --git a/mmv1/templates/terraform/custom_flatten/go/dataplex_datascan_ignore_profile_result.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/dataplex_datascan_ignore_profile_result.go.tmpl index 8c6aecf6fcfa..377720040b0c 100644 --- a/mmv1/templates/terraform/custom_flatten/go/dataplex_datascan_ignore_profile_result.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/dataplex_datascan_ignore_profile_result.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // We want to ignore read on this field, but cannot because it is nested return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_password.go.tmpl index caa5e8619c5a..6686da377457 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("forward_ssh_connectivity.0.password") diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_private_key.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_private_key.go.tmpl index 88b5cdff84c6..b9ba7b80e025 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_private_key.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_private_key.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("forward_ssh_connectivity.0.private_key") diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_password.go.tmpl index 6dff3bc5d1d5..a13a80c652ad 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.password") diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_ca_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_ca_certificate.go.tmpl index 1688a21adb33..085c8a4017dc 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_ca_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_ca_certificate.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.ssl_config.0.ca_certificate") diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_certificate.go.tmpl index 3b05a1883c8c..748ee3af5070 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_certificate.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.ssl_config.0.client_certificate") diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_key.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_key.go.tmpl index fd040ff9153e..c672cf97634d 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_key.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_key.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.ssl_config.0.client_key") diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_oracle_profile_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_oracle_profile_password.go.tmpl index e7248cd2fe57..6fc344c88f3b 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_oracle_profile_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_oracle_profile_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("oracle_profile.0.password") diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_postgresql_profile_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_postgresql_profile_password.go.tmpl index fe44f01baba4..c68160c71e7a 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_postgresql_profile_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_postgresql_profile_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("postgresql_profile.0.password") diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_sql_server_profile_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_sql_server_profile_password.go.tmpl index e158423c3fff..2c39ddfa4edd 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_sql_server_profile_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_sql_server_profile_password.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("sql_server_profile.0.password") diff --git a/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl b/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl index 55dfa218c445..d381d195cfe9 100644 --- a/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl @@ -14,13 +14,13 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return {{$.GoLiteral $.DefaultValue}} } -{{- if $.IsA "Integer" }} +{{ if $.IsA "Integer" }} // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { return intVal } // let terraform core handle it if we can't convert the string to an int. } -{{- end }} +{{ end }} return v } diff --git a/mmv1/templates/terraform/custom_flatten/go/dialogflowcx_agent_git_integration_settings_github_settings.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/dialogflowcx_agent_git_integration_settings_github_settings.go.tmpl index a548646c706c..80840c50a804 100644 --- a/mmv1/templates/terraform/custom_flatten/go/dialogflowcx_agent_git_integration_settings_github_settings.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/dialogflowcx_agent_git_integration_settings_github_settings.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flattenDialogflowCXAgentGitIntegrationSettingsGithubSettings(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/disk_consistency_group_policy.tmpl b/mmv1/templates/terraform/custom_flatten/go/disk_consistency_group_policy.tmpl index 1cb3e544b9c1..5cf7420ee985 100644 --- a/mmv1/templates/terraform/custom_flatten/go/disk_consistency_group_policy.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/disk_consistency_group_policy.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/duration_string_to_days.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/duration_string_to_days.go.tmpl index 64962e5dedde..cf0d58b309fe 100644 --- a/mmv1/templates/terraform/custom_flatten/go/duration_string_to_days.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/duration_string_to_days.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/filestore_instance_networks_reserved_ip_range.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/filestore_instance_networks_reserved_ip_range.go.tmpl index f87f7d0120d8..5197a4ac8970 100644 --- a/mmv1/templates/terraform/custom_flatten/go/filestore_instance_networks_reserved_ip_range.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/filestore_instance_networks_reserved_ip_range.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("networks.0.reserved_ip_range") } diff --git a/mmv1/templates/terraform/custom_flatten/go/firewall_log_config.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/firewall_log_config.go.tmpl index 2588d9b9323e..028b752dc260 100644 --- a/mmv1/templates/terraform/custom_flatten/go/firewall_log_config.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/firewall_log_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/guard_self_link.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/guard_self_link.go.tmpl index 7d1e216b2a25..b409691f8a1a 100644 --- a/mmv1/templates/terraform/custom_flatten/go/guard_self_link.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/guard_self_link.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} {{- /* Not all self links behave like ResourceRef expects, eg they may expect a fully qualified url. In those cases, we need to manually define this flattener. */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { diff --git a/mmv1/templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl index 5faa186a1cac..e6772ff622f4 100644 --- a/mmv1/templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- /* This should be used for multi-resource ref fields that can't be made to real resource refs yet */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { diff --git a/mmv1/templates/terraform/custom_flatten/go/health_check_log_config.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/health_check_log_config.go.tmpl index 5efe7f7a0d62..055964af6c86 100644 --- a/mmv1/templates/terraform/custom_flatten/go/health_check_log_config.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/health_check_log_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { transformed := make(map[string]interface{}) if v == nil { diff --git a/mmv1/templates/terraform/custom_flatten/go/http_headers.tmpl b/mmv1/templates/terraform/custom_flatten/go/http_headers.tmpl index c6828c1f1078..8bc1d6dc4453 100644 --- a/mmv1/templates/terraform/custom_flatten/go/http_headers.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/http_headers.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (interface{}) { var headers = v.(map[string]interface{}) if v, ok := headers["User-Agent"]; ok { diff --git a/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_secret_value.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_secret_value.go.tmpl index 8b22f15bc6f7..6ddc09d748dd 100644 --- a/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_secret_value.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_extra_attributes_oauth2_config_client_secret_value.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_oidc_client_secret_value.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_oidc_client_secret_value.go.tmpl index c7263b11407a..3cff63c1ac6e 100644 --- a/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_oidc_client_secret_value.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/iam_workforce_pool_provider_oidc_client_secret_value.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/id_from_name.tmpl b/mmv1/templates/terraform/custom_flatten/go/id_from_name.tmpl index 4ba3575e2985..9e5840e1f524 100644 --- a/mmv1/templates/terraform/custom_flatten/go/id_from_name.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/id_from_name.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { parts := strings.Split(d.Get("name").(string), "/") return parts[len(parts)-1] diff --git a/mmv1/templates/terraform/custom_flatten/go/image_kms_key_name.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/image_kms_key_name.go.tmpl index 4c8c75c4e6ca..c5987badbb27 100644 --- a/mmv1/templates/terraform/custom_flatten/go/image_kms_key_name.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/image_kms_key_name.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/json_schema.tmpl b/mmv1/templates/terraform/custom_flatten/go/json_schema.tmpl index cd8b868c3075..ab1c9cfb42ee 100644 --- a/mmv1/templates/terraform/custom_flatten/go/json_schema.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/json_schema.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/json_to_string_map.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/json_to_string_map.go.tmpl index d7de49d0bf3d..8d4b32b0b7bb 100644 --- a/mmv1/templates/terraform/custom_flatten/go/json_to_string_map.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/json_to_string_map.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/monitoring_slo_availability_sli.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/monitoring_slo_availability_sli.go.tmpl index 792406f6e2a7..41393fc0d5bd 100644 --- a/mmv1/templates/terraform/custom_flatten/go/monitoring_slo_availability_sli.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/monitoring_slo_availability_sli.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/name_from_self_link.tmpl b/mmv1/templates/terraform/custom_flatten/go/name_from_self_link.tmpl index ad0f45e30300..24b2086d705e 100644 --- a/mmv1/templates/terraform/custom_flatten/go/name_from_self_link.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/name_from_self_link.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/network_services_timeout_mirror.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/network_services_timeout_mirror.go.tmpl index 9915d6d56b31..dde4bac022fa 100644 --- a/mmv1/templates/terraform/custom_flatten/go/network_services_timeout_mirror.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/network_services_timeout_mirror.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { out := make(map[string]string) diff --git a/mmv1/templates/terraform/custom_flatten/go/object_to_bool.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/object_to_bool.go.tmpl index 0670953df1ed..387434d8af56 100644 --- a/mmv1/templates/terraform/custom_flatten/go/object_to_bool.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/object_to_bool.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return v != nil } diff --git a/mmv1/templates/terraform/custom_flatten/go/os_config_patch_deployment_recurring_schedule_time_of_day.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/os_config_patch_deployment_recurring_schedule_time_of_day.go.tmpl index a577a04a661e..8f58473b844b 100644 --- a/mmv1/templates/terraform/custom_flatten/go/os_config_patch_deployment_recurring_schedule_time_of_day.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/os_config_patch_deployment_recurring_schedule_time_of_day.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/preserved_state_disks.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/preserved_state_disks.go.tmpl index bb215f5044b7..fe9c3ddaec37 100644 --- a/mmv1/templates/terraform/custom_flatten/go/preserved_state_disks.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/preserved_state_disks.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/pubsub_no_wrapper_write_metadata_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/pubsub_no_wrapper_write_metadata_flatten.go.tmpl index 99b910422fa3..e41bf88fdde2 100644 --- a/mmv1/templates/terraform/custom_flatten/go/pubsub_no_wrapper_write_metadata_flatten.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/pubsub_no_wrapper_write_metadata_flatten.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/repository_short_name_from_name.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/repository_short_name_from_name.go.tmpl index 6a9f2d0f8991..320ff3d2744a 100644 --- a/mmv1/templates/terraform/custom_flatten/go/repository_short_name_from_name.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/repository_short_name_from_name.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/secret_version_access.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/secret_version_access.go.tmpl index 92c77b820f6e..96648c12a922 100644 --- a/mmv1/templates/terraform/custom_flatten/go/secret_version_access.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/secret_version_access.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { transformed := make(map[string]interface{}) diff --git a/mmv1/templates/terraform/custom_flatten/go/secret_version_enable.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/secret_version_enable.go.tmpl index 58ff987c7d01..7181f6359ee1 100644 --- a/mmv1/templates/terraform/custom_flatten/go/secret_version_enable.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/secret_version_enable.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v.(string) == "ENABLED" { return true diff --git a/mmv1/templates/terraform/custom_flatten/go/secret_version_version.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/secret_version_version.go.tmpl index 04738d20b958..a9a0c57977c6 100644 --- a/mmv1/templates/terraform/custom_flatten/go/secret_version_version.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/secret_version_version.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { name := d.Get("name").(string) secretRegex := regexp.MustCompile("projects/(.+)/secrets/(.+)/versions/(.+)$") diff --git a/mmv1/templates/terraform/custom_flatten/go/securityposture_custom_constraint_name.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/securityposture_custom_constraint_name.go.tmpl index 30e4745c3d4e..f57b9e3dc050 100644 --- a/mmv1/templates/terraform/custom_flatten/go/securityposture_custom_constraint_name.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/securityposture_custom_constraint_name.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/set_to_project.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/set_to_project.go.tmpl index 32fa0d60d6a1..d40f11d84181 100644 --- a/mmv1/templates/terraform/custom_flatten/go/set_to_project.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/set_to_project.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("project") } diff --git a/mmv1/templates/terraform/custom_flatten/go/subnetwork_log_config.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/subnetwork_log_config.go.tmpl index c4f6add89c75..31a086e3c263 100644 --- a/mmv1/templates/terraform/custom_flatten/go/subnetwork_log_config.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/subnetwork_log_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return nil diff --git a/mmv1/templates/terraform/custom_flatten/go/tags_tag_binding_name.tmpl b/mmv1/templates/terraform/custom_flatten/go/tags_tag_binding_name.tmpl index f165ec2bf06a..5555c41dbf53 100644 --- a/mmv1/templates/terraform/custom_flatten/go/tags_tag_binding_name.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/tags_tag_binding_name.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/vertex_ai_feature_group_ignore_description.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_feature_group_ignore_description.go.tmpl index c89a4fdf02be..52ee7a82a028 100644 --- a/mmv1/templates/terraform/custom_flatten/go/vertex_ai_feature_group_ignore_description.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_feature_group_ignore_description.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("description") } diff --git a/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_endpoint_private_service_connect_config.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_endpoint_private_service_connect_config.go.tmpl index 614d964a428d..be921aead132 100644 --- a/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_endpoint_private_service_connect_config.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_endpoint_private_service_connect_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { transformed := make(map[string]interface{}) diff --git a/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_ignore_contents_delta_uri.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_ignore_contents_delta_uri.go.tmpl index 68eaa26f6e66..571033c66459 100644 --- a/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_ignore_contents_delta_uri.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_ignore_contents_delta_uri.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // We want to ignore read on this field, but cannot because it is nested return d.Get("metadata.0.contents_delta_uri") diff --git a/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_ignore_is_complete_overwrite.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_ignore_is_complete_overwrite.go.tmpl index 8a479586a747..0aab2cee1386 100644 --- a/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_ignore_is_complete_overwrite.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_index_ignore_is_complete_overwrite.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { // We want to ignore read on this field, but cannot because it is nested return d.Get("metadata.0.is_complete_overwrite") diff --git a/mmv1/templates/terraform/custom_flatten/go/workbench_instance_boot_disk_type_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/workbench_instance_boot_disk_type_flatten.go.tmpl index ed490ce5854d..a66fbf78d002 100644 --- a/mmv1/templates/terraform/custom_flatten/go/workbench_instance_boot_disk_type_flatten.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/workbench_instance_boot_disk_type_flatten.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("gce_setup.0.boot_disk.0.disk_type") } diff --git a/mmv1/templates/terraform/custom_flatten/go/workbench_instance_data_disk_type_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/workbench_instance_data_disk_type_flatten.go.tmpl index 8179994eeadf..b144903bffc6 100644 --- a/mmv1/templates/terraform/custom_flatten/go/workbench_instance_data_disk_type_flatten.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/workbench_instance_data_disk_type_flatten.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("gce_setup.0.data_disks.0.disk_type") } diff --git a/mmv1/templates/terraform/custom_flatten/go/workbench_instance_vm_image_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/workbench_instance_vm_image_flatten.go.tmpl index cdd619fa9dd5..fe4419e18d00 100644 --- a/mmv1/templates/terraform/custom_flatten/go/workbench_instance_vm_image_flatten.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/workbench_instance_vm_image_flatten.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("gce_setup.0.vm_image") } diff --git a/mmv1/templates/terraform/custom_flatten/go/workstations_config_confidential_instance.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/workstations_config_confidential_instance.go.tmpl index 7fd536829647..2deeb14ca8fc 100644 --- a/mmv1/templates/terraform/custom_flatten/go/workstations_config_confidential_instance.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/workstations_config_confidential_instance.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { transformed := make(map[string]interface{}) diff --git a/mmv1/templates/terraform/custom_flatten/go/workstations_config_shielded_instance.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/workstations_config_shielded_instance.go.tmpl index 468c479ec72d..9a1f51712fc5 100644 --- a/mmv1/templates/terraform/custom_flatten/go/workstations_config_shielded_instance.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/workstations_config_shielded_instance.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { transformed := make(map[string]interface{}) diff --git a/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_egress_policy.go.tmpl b/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_egress_policy.go.tmpl index 807ddbbf737b..121f2ee42714 100644 --- a/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_egress_policy.go.tmpl +++ b/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_egress_policy.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value diff --git a/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl b/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl index 807ddbbf737b..121f2ee42714 100644 --- a/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl +++ b/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value diff --git a/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_resource.go.tmpl b/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_resource.go.tmpl index 70b5b59ab8f2..77b57820573e 100644 --- a/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_resource.go.tmpl +++ b/mmv1/templates/terraform/custom_import/go/access_context_manager_service_perimeter_resource.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value diff --git a/mmv1/templates/terraform/custom_import/go/monitoring_monitored_project.go.tmpl b/mmv1/templates/terraform/custom_import/go/monitoring_monitored_project.go.tmpl index 049cc9d783c7..abe689a1e68b 100644 --- a/mmv1/templates/terraform/custom_import/go/monitoring_monitored_project.go.tmpl +++ b/mmv1/templates/terraform/custom_import/go/monitoring_monitored_project.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} name := d.Get("name").(string) name = tpgresource.GetResourceNameFromSelfLink(name) d.Set("name", name) diff --git a/mmv1/templates/terraform/custom_import/go/set_access_policy_parent_from_access_policy.go.tmpl b/mmv1/templates/terraform/custom_import/go/set_access_policy_parent_from_access_policy.go.tmpl index 8ac1610a533b..de48dc2505b1 100644 --- a/mmv1/templates/terraform/custom_import/go/set_access_policy_parent_from_access_policy.go.tmpl +++ b/mmv1/templates/terraform/custom_import/go/set_access_policy_parent_from_access_policy.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value diff --git a/mmv1/templates/terraform/custom_import/go/set_access_policy_parent_from_self_link.go.tmpl b/mmv1/templates/terraform/custom_import/go/set_access_policy_parent_from_self_link.go.tmpl index de1c3f7bcc37..490f76b3484f 100644 --- a/mmv1/templates/terraform/custom_import/go/set_access_policy_parent_from_self_link.go.tmpl +++ b/mmv1/templates/terraform/custom_import/go/set_access_policy_parent_from_self_link.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} config := meta.(*transport_tpg.Config) // current import_formats can't import fields with forward slashes in their value diff --git a/mmv1/templates/terraform/custom_update/go/cloud_identity_group_membership.go.tmpl b/mmv1/templates/terraform/custom_update/go/cloud_identity_group_membership.go.tmpl index c1105aaf3c66..f606f1b0fec3 100644 --- a/mmv1/templates/terraform/custom_update/go/cloud_identity_group_membership.go.tmpl +++ b/mmv1/templates/terraform/custom_update/go/cloud_identity_group_membership.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { return err diff --git a/mmv1/templates/terraform/custom_update/go/secret_version.go.tmpl b/mmv1/templates/terraform/custom_update/go/secret_version.go.tmpl index fcd1f4a1b159..4f5dce983088 100644 --- a/mmv1/templates/terraform/custom_update/go/secret_version.go.tmpl +++ b/mmv1/templates/terraform/custom_update/go/secret_version.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} _, err := expandSecretManagerSecretVersionEnabled(d.Get("enabled"), d, config) if err != nil { return err diff --git a/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl b/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl index f61950b4226c..06c94a1d0553 100644 --- a/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/backend_service.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // We need to pretend IAP isn't there if it's disabled for Terraform to maintain // BC behaviour with the handwritten resource. 
v, ok := res["iap"] diff --git a/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl b/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl index 41564182db0c..a9d8d364bcc3 100644 --- a/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/bigquery_data_transfer.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} if paramMap, ok := res["params"]; ok { params := paramMap.(map[string]interface{}) for _, sp := range sensitiveParams { diff --git a/mmv1/templates/terraform/decoders/go/cloud_run.go.tmpl b/mmv1/templates/terraform/decoders/go/cloud_run.go.tmpl index e1c05d03d3a1..38aa801f7644 100644 --- a/mmv1/templates/terraform/decoders/go/cloud_run.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/cloud_run.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} // metadata is not present if the API returns an error if obj, ok := res["metadata"]; ok { if meta, ok := obj.(map[string]interface{}); ok { diff --git a/mmv1/templates/terraform/decoders/go/containeranalysis_attestation_field_name.go.tmpl b/mmv1/templates/terraform/decoders/go/containeranalysis_attestation_field_name.go.tmpl index be338c044b6a..e30e69b7e66f 100644 --- a/mmv1/templates/terraform/decoders/go/containeranalysis_attestation_field_name.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/containeranalysis_attestation_field_name.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} {{- if eq $.TargetVersionName "ga" }} // Field was renamed in GA API res["attestationAuthority"] = res["attestation"] diff --git a/mmv1/templates/terraform/decoders/go/containeranalysis_occurrence.go.tmpl b/mmv1/templates/terraform/decoders/go/containeranalysis_occurrence.go.tmpl index f246d5331eef..44a02b6cb54b 100644 --- a/mmv1/templates/terraform/decoders/go/containeranalysis_occurrence.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/containeranalysis_occurrence.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} {{- if ne $.TargetVersionName "ga" }} // Resource object was flattened in GA API if nestedResource, ok := res["resource"]; ok { diff --git a/mmv1/templates/terraform/decoders/go/dlp_job_trigger.go.tmpl b/mmv1/templates/terraform/decoders/go/dlp_job_trigger.go.tmpl index 3cda81929e95..80f3609b00ae 100644 --- a/mmv1/templates/terraform/decoders/go/dlp_job_trigger.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/dlp_job_trigger.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} config := meta.(*transport_tpg.Config) if err := d.Set("trigger_id", flattenDataLossPrevention{{$.Name}}Name(res["name"], d, config)); err != nil { return nil, fmt.Errorf("Error reading {{$.Name}}: %s", err) diff --git a/mmv1/templates/terraform/decoders/go/dlp_template_id.go.tmpl b/mmv1/templates/terraform/decoders/go/dlp_template_id.go.tmpl index beb0a871f0ac..45f394202ead 100644 --- a/mmv1/templates/terraform/decoders/go/dlp_template_id.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/dlp_template_id.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} config := meta.(*transport_tpg.Config) if err := d.Set("template_id", flattenDataLossPrevention{{$.Name}}Name(res["name"], d, config)); err != nil { return nil, fmt.Errorf("Error reading {{$.Name}}: %s", err) diff --git a/mmv1/templates/terraform/decoders/go/firestore_document.go.tmpl b/mmv1/templates/terraform/decoders/go/firestore_document.go.tmpl index 6ad0249ee9c9..6099067a5a58 100644 --- a/mmv1/templates/terraform/decoders/go/firestore_document.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/firestore_document.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // We use this decoder to add the path field if name, ok := res["name"]; ok { re := regexp.MustCompile("^projects/[^/]+/databases/[^/]+/documents/(.+)$") diff --git a/mmv1/templates/terraform/decoders/go/kms.go.tmpl b/mmv1/templates/terraform/decoders/go/kms.go.tmpl index a1f9936efacb..9d543f3845a9 100644 --- a/mmv1/templates/terraform/decoders/go/kms.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/kms.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Modify the name to be the user specified form. // We can't just ignore_read on `name` as the linter will // complain that the returned `res` is never used afterwards. 
diff --git a/mmv1/templates/terraform/decoders/go/long_name_to_self_link.go.tmpl b/mmv1/templates/terraform/decoders/go/long_name_to_self_link.go.tmpl index c03ada0c8390..d689f6e3e4b0 100644 --- a/mmv1/templates/terraform/decoders/go/long_name_to_self_link.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/long_name_to_self_link.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Take the returned long form of the name and use it as `self_link`. // Then modify the name to be the user specified form. // We can't just ignore_read on `name` as the linter will diff --git a/mmv1/templates/terraform/decoders/go/monitoring_monitored_project.go.tmpl b/mmv1/templates/terraform/decoders/go/monitoring_monitored_project.go.tmpl index 82c8ab576e83..793b3a55200e 100644 --- a/mmv1/templates/terraform/decoders/go/monitoring_monitored_project.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/monitoring_monitored_project.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} // terraform resource config config := meta.(*transport_tpg.Config) diff --git a/mmv1/templates/terraform/decoders/go/monitoring_notification_channel.go.tmpl b/mmv1/templates/terraform/decoders/go/monitoring_notification_channel.go.tmpl index e5fad1a3ecf8..9298b2d12435 100644 --- a/mmv1/templates/terraform/decoders/go/monitoring_notification_channel.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/monitoring_notification_channel.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} if labelmap, ok := res["labels"]; ok { labels := labelmap.(map[string]interface{}) for _, sl := range sensitiveLabels { diff --git a/mmv1/templates/terraform/decoders/go/network_endpoint.go.tmpl b/mmv1/templates/terraform/decoders/go/network_endpoint.go.tmpl index 8f14dc675325..ba825b656de2 100644 --- a/mmv1/templates/terraform/decoders/go/network_endpoint.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/network_endpoint.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} v, ok := res["networkEndpoint"] if !ok || v == nil { return res, nil diff --git a/mmv1/templates/terraform/decoders/go/network_endpoints.go.tmpl b/mmv1/templates/terraform/decoders/go/network_endpoints.go.tmpl index 663a3175d5da..9a7b210030e7 100644 --- a/mmv1/templates/terraform/decoders/go/network_endpoints.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/network_endpoints.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} config := meta.(*transport_tpg.Config) userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { diff --git a/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl b/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl index a5115fb455ae..024fd0117bbe 100644 --- a/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // We need to pretend IAP isn't there if it's disabled for Terraform to maintain // BC behaviour with the handwritten resource. 
v, ok := res["iap"] diff --git a/mmv1/templates/terraform/decoders/go/snapshot.go.tmpl b/mmv1/templates/terraform/decoders/go/snapshot.go.tmpl index f12238dabb2f..2334a81ef893 100644 --- a/mmv1/templates/terraform/decoders/go/snapshot.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/snapshot.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} if v, ok := res["snapshotEncryptionKey"]; ok { original := v.(map[string]interface{}) transformed := make(map[string]interface{}) diff --git a/mmv1/templates/terraform/decoders/go/sql_source_representation_instance.go.tmpl b/mmv1/templates/terraform/decoders/go/sql_source_representation_instance.go.tmpl index 381e1903c103..6744476686d9 100644 --- a/mmv1/templates/terraform/decoders/go/sql_source_representation_instance.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/sql_source_representation_instance.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} if v, ok := res["onPremisesConfiguration"]; ok { opc := v.(map[string]interface{}) hostPort := opc["hostPort"] diff --git a/mmv1/templates/terraform/decoders/go/treat_destroyed_state_as_gone.tmpl b/mmv1/templates/terraform/decoders/go/treat_destroyed_state_as_gone.tmpl index 2a75e3922480..0fab45e22849 100644 --- a/mmv1/templates/terraform/decoders/go/treat_destroyed_state_as_gone.tmpl +++ b/mmv1/templates/terraform/decoders/go/treat_destroyed_state_as_gone.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} if v := res["state"]; v == "DESTROYED" { return nil, nil } diff --git a/mmv1/templates/terraform/decoders/go/unwrap_global_neg.go.tmpl b/mmv1/templates/terraform/decoders/go/unwrap_global_neg.go.tmpl index 8f14dc675325..ba825b656de2 100644 --- a/mmv1/templates/terraform/decoders/go/unwrap_global_neg.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/unwrap_global_neg.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} v, ok := res["networkEndpoint"] if !ok || v == nil { return res, nil diff --git a/mmv1/templates/terraform/decoders/go/unwrap_resource.go.tmpl b/mmv1/templates/terraform/decoders/go/unwrap_resource.go.tmpl index 7a798ecda836..de8f898e5c8a 100644 --- a/mmv1/templates/terraform/decoders/go/unwrap_resource.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/unwrap_resource.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} v, ok := res["{{camelize $.Name "lower"}}"] if !ok || v == nil { return res, nil diff --git a/mmv1/templates/terraform/encoders/active_directory_domain_trust.go.erb b/mmv1/templates/terraform/encoders/active_directory_domain_trust.go.erb index 873c7aa74b04..2c0b939820eb 100644 --- a/mmv1/templates/terraform/encoders/active_directory_domain_trust.go.erb +++ b/mmv1/templates/terraform/encoders/active_directory_domain_trust.go.erb @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -%> - wrappedReq := map[string]interface{}{ "trust": obj, } diff --git a/mmv1/templates/terraform/encoders/go/access_level_never_send_parent.go.tmpl b/mmv1/templates/terraform/encoders/go/access_level_never_send_parent.go.tmpl index 7fc837505699..5d980ea2427f 100644 --- a/mmv1/templates/terraform/encoders/go/access_level_never_send_parent.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/access_level_never_send_parent.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. 
Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,6 +9,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} delete(obj, "parent") return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/active_directory_domain_trust.go.tmpl b/mmv1/templates/terraform/encoders/go/active_directory_domain_trust.go.tmpl index ab6867c3163f..38872710d8e3 100644 --- a/mmv1/templates/terraform/encoders/go/active_directory_domain_trust.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/active_directory_domain_trust.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,8 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} - +*/ -}} wrappedReq := map[string]interface{}{ "trust": obj, } diff --git a/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl b/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl index 2010d4d0d522..333664a0ba96 100644 --- a/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/backend_service.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // The BackendService API's Update / PUT API is badly formed and behaves like // a PATCH field for at least IAP. When sent a `null` `iap` field, the API // doesn't disable an existing field. 
To work around this, we need to emulate diff --git a/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl b/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl index 9b790f9795e6..4ec9950635f4 100644 --- a/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/bigquery_data_transfer.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} paramMap, ok := obj["params"] if !ok { paramMap = make(map[string]string) diff --git a/mmv1/templates/terraform/encoders/go/bigquery_job.go.tmpl b/mmv1/templates/terraform/encoders/go/bigquery_job.go.tmpl index f9f9dc1b3f09..df0560b27ab5 100644 --- a/mmv1/templates/terraform/encoders/go/bigquery_job.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/bigquery_job.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} project, err := tpgresource.GetProject(d, meta.(*transport_tpg.Config)) if err != nil { return nil, err diff --git a/mmv1/templates/terraform/encoders/go/bigtable_app_profile.go.tmpl b/mmv1/templates/terraform/encoders/go/bigtable_app_profile.go.tmpl index 8497ada387ac..c08c105443be 100644 --- a/mmv1/templates/terraform/encoders/go/bigtable_app_profile.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/bigtable_app_profile.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} {{- /* Because instance is a URL param only, it does not get expanded and the URL is constructed from ResourceData. Set it in state and use a encoder instead of a field expander */}} diff --git a/mmv1/templates/terraform/encoders/go/cloud_run_domain_mapping.go.tmpl b/mmv1/templates/terraform/encoders/go/cloud_run_domain_mapping.go.tmpl index 6a8c49c77d12..151918f12fdb 100644 --- a/mmv1/templates/terraform/encoders/go/cloud_run_domain_mapping.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/cloud_run_domain_mapping.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} name := d.Get("name").(string) metadata := obj["metadata"].(map[string]interface{}) metadata["name"] = name diff --git a/mmv1/templates/terraform/encoders/go/cloud_run_service.go.tmpl b/mmv1/templates/terraform/encoders/go/cloud_run_service.go.tmpl index 931fcc3d8b7a..8b7acfe865b4 100644 --- a/mmv1/templates/terraform/encoders/go/cloud_run_service.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/cloud_run_service.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} name := d.Get("name").(string) if obj["metadata"] == nil { obj["metadata"] = make(map[string]interface{}) diff --git a/mmv1/templates/terraform/encoders/go/cloudbuildv2_repository.go.tmpl b/mmv1/templates/terraform/encoders/go/cloudbuildv2_repository.go.tmpl index 6b7753d1833c..baf759264b04 100644 --- a/mmv1/templates/terraform/encoders/go/cloudbuildv2_repository.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/cloudbuildv2_repository.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} config := meta.(*transport_tpg.Config) // Extract any empty fields from the parent_connection field. 
project, err := tpgresource.GetProject(d, config) diff --git a/mmv1/templates/terraform/encoders/go/clouddomains_registration.go.tmpl b/mmv1/templates/terraform/encoders/go/clouddomains_registration.go.tmpl index b816af3ec4c9..782feaddd6ba 100644 --- a/mmv1/templates/terraform/encoders/go/clouddomains_registration.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/clouddomains_registration.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Request body is registration object with additional fields // See https://cloud.google.com/domains/docs/reference/rest/v1beta1/projects.locations.registrations/register diff --git a/mmv1/templates/terraform/encoders/go/compute_global_network_endpoint.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_global_network_endpoint.go.tmpl index 891aae34803d..7e295d87c61a 100644 --- a/mmv1/templates/terraform/encoders/go/compute_global_network_endpoint.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_global_network_endpoint.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Network Endpoint Group is a URL parameter only, so replace self-link/path with resource name only. 
if err := d.Set("global_network_endpoint_group", tpgresource.GetResourceNameFromSelfLink(d.Get("global_network_endpoint_group").(string))); err != nil { return nil, fmt.Errorf("Error setting global_network_endpoint_group: %s", err) diff --git a/mmv1/templates/terraform/encoders/go/compute_instance_group_membership.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_instance_group_membership.go.tmpl index 53133ee4dd63..b6a74238b067 100644 --- a/mmv1/templates/terraform/encoders/go/compute_instance_group_membership.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_instance_group_membership.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Instance Group is a URL parameter only, so replace self-link/path with resource name only. if err := d.Set("instance_group", tpgresource.GetResourceNameFromSelfLink(d.Get("instance_group").(string))); err != nil { return nil, fmt.Errorf("Error setting instance_group: %s", err) diff --git a/mmv1/templates/terraform/encoders/go/compute_network_endpoint.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_network_endpoint.go.tmpl index 561d8442f926..011148ae7a10 100644 --- a/mmv1/templates/terraform/encoders/go/compute_network_endpoint.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_network_endpoint.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} // Network Endpoint Group is a URL parameter only, so replace self-link/path with resource name only. if err := d.Set("network_endpoint_group", tpgresource.GetResourceNameFromSelfLink(d.Get("network_endpoint_group").(string))); err != nil { return nil, fmt.Errorf("Error setting network_endpoint_group: %s", err) diff --git a/mmv1/templates/terraform/encoders/go/compute_network_endpoints.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_network_endpoints.go.tmpl index be414662887d..6f7c098c903e 100644 --- a/mmv1/templates/terraform/encoders/go/compute_network_endpoints.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_network_endpoints.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Network Endpoint Group is a URL parameter only, so replace self-link/path with resource name only. if err := d.Set("network_endpoint_group", tpgresource.GetResourceNameFromSelfLink(d.Get("network_endpoint_group").(string))); err != nil { return nil, fmt.Errorf("Error setting network_endpoint_group: %s", err) diff --git a/mmv1/templates/terraform/encoders/go/compute_per_instance_config.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_per_instance_config.go.tmpl index 1797a0f90509..03816a6bf6af 100644 --- a/mmv1/templates/terraform/encoders/go/compute_per_instance_config.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_per_instance_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} wrappedReq := map[string]interface{}{ "instances": []interface{}{obj}, } diff --git a/mmv1/templates/terraform/encoders/go/compute_region_network_endpoint.go.tmpl b/mmv1/templates/terraform/encoders/go/compute_region_network_endpoint.go.tmpl index 6bbad31d623c..e6e5ada15728 100644 --- a/mmv1/templates/terraform/encoders/go/compute_region_network_endpoint.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/compute_region_network_endpoint.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Network Endpoint Group is a URL parameter only, so replace self-link/path with resource name only. if err := d.Set("region_network_endpoint_group", tpgresource.GetResourceNameFromSelfLink(d.Get("region_network_endpoint_group").(string))); err != nil { return nil, fmt.Errorf("Error setting region_network_endpoint_group: %s", err) diff --git a/mmv1/templates/terraform/encoders/go/containeranalysis_attestation_field_name.go.tmpl b/mmv1/templates/terraform/encoders/go/containeranalysis_attestation_field_name.go.tmpl index 0ad87829a3b2..23177474781b 100644 --- a/mmv1/templates/terraform/encoders/go/containeranalysis_attestation_field_name.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/containeranalysis_attestation_field_name.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} {{- if eq $.TargetVersionName "ga" }} // Field was renamed in GA API obj["attestation"] = obj["attestationAuthority"] diff --git a/mmv1/templates/terraform/encoders/go/containeranalysis_occurrence.go.tmpl b/mmv1/templates/terraform/encoders/go/containeranalysis_occurrence.go.tmpl index b8f8a1148758..baa978dce8f3 100644 --- a/mmv1/templates/terraform/encoders/go/containeranalysis_occurrence.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/containeranalysis_occurrence.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} {{- if ne $.TargetVersionName "ga" }} // Resource object was flattened in GA API if resourceuri, ok := obj["resourceUri"]; ok { diff --git a/mmv1/templates/terraform/encoders/go/datastream_stream.go.tmpl b/mmv1/templates/terraform/encoders/go/datastream_stream.go.tmpl index 1b83ca8f749f..ac0b53608cb7 100644 --- a/mmv1/templates/terraform/encoders/go/datastream_stream.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/datastream_stream.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} if d.HasChange("desired_state") { obj["state"] = d.Get("desired_state") } diff --git a/mmv1/templates/terraform/encoders/go/dlp_job_trigger.go.tmpl b/mmv1/templates/terraform/encoders/go/dlp_job_trigger.go.tmpl index 416f7e16229a..2cb266c209cf 100644 --- a/mmv1/templates/terraform/encoders/go/dlp_job_trigger.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/dlp_job_trigger.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} newObj := make(map[string]interface{}) newObj["{{camelize $.Name "lower"}}"] = obj diff --git a/mmv1/templates/terraform/encoders/go/dlp_stored_info_type.go.tmpl b/mmv1/templates/terraform/encoders/go/dlp_stored_info_type.go.tmpl index b8d74e40ce77..69e3e192cd60 100644 --- a/mmv1/templates/terraform/encoders/go/dlp_stored_info_type.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/dlp_stored_info_type.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} newObj := make(map[string]interface{}) newObj["config"] = obj storedInfoTypeIdProp, ok := d.GetOk("stored_info_type_id") diff --git a/mmv1/templates/terraform/encoders/go/health_check_type.tmpl b/mmv1/templates/terraform/encoders/go/health_check_type.tmpl index f5849366f680..e58d074f46a3 100644 --- a/mmv1/templates/terraform/encoders/go/health_check_type.tmpl +++ b/mmv1/templates/terraform/encoders/go/health_check_type.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} if _, ok := d.GetOk("http_health_check"); ok { hc := d.Get("http_health_check").([]interface{})[0] diff --git a/mmv1/templates/terraform/encoders/go/index.go.tmpl b/mmv1/templates/terraform/encoders/go/index.go.tmpl index 231e5124eca3..fd2d80feda0e 100644 --- a/mmv1/templates/terraform/encoders/go/index.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/index.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // We've added project / database / collection as split fields of the name, but // the API doesn't expect them. Make sure we remove them from any requests. 
diff --git a/mmv1/templates/terraform/encoders/go/kms_crypto_key.go.tmpl b/mmv1/templates/terraform/encoders/go/kms_crypto_key.go.tmpl index 66416a0c3ba4..dfb4a6af809e 100644 --- a/mmv1/templates/terraform/encoders/go/kms_crypto_key.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/kms_crypto_key.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // if rotationPeriod is set, nextRotationTime must also be set. if d.Get("rotation_period") != "" { rotationPeriod := d.Get("rotation_period").(string) diff --git a/mmv1/templates/terraform/encoders/go/location_from_region.go.tmpl b/mmv1/templates/terraform/encoders/go/location_from_region.go.tmpl index e5a0f2cc1b52..316a14641ca7 100644 --- a/mmv1/templates/terraform/encoders/go/location_from_region.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/location_from_region.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} config := meta.(*transport_tpg.Config) if _, ok := d.GetOk("location"); !ok { location, err := tpgresource.GetRegionFromSchema("region", "zone", d, config) diff --git a/mmv1/templates/terraform/encoders/go/logging_linked_dataset.go.tmpl b/mmv1/templates/terraform/encoders/go/logging_linked_dataset.go.tmpl index 871e6384d2fb..1bf305d148e2 100644 --- a/mmv1/templates/terraform/encoders/go/logging_linked_dataset.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/logging_linked_dataset.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Extract any empty fields from the bucket field. parent := d.Get("parent").(string) bucket := d.Get("bucket").(string) diff --git a/mmv1/templates/terraform/encoders/go/logging_log_view.go.tmpl b/mmv1/templates/terraform/encoders/go/logging_log_view.go.tmpl index 871e6384d2fb..1bf305d148e2 100644 --- a/mmv1/templates/terraform/encoders/go/logging_log_view.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/logging_log_view.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Extract any empty fields from the bucket field. 
parent := d.Get("parent").(string) bucket := d.Get("bucket").(string) diff --git a/mmv1/templates/terraform/encoders/go/monitoring_monitored_project.go.tmpl b/mmv1/templates/terraform/encoders/go/monitoring_monitored_project.go.tmpl index 76442aa5e446..2dbbbcafe017 100644 --- a/mmv1/templates/terraform/encoders/go/monitoring_monitored_project.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/monitoring_monitored_project.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} name := d.Get("name").(string) log.Printf("[DEBUG] Encoded monitored project name: %s", name) name = tpgresource.GetResourceNameFromSelfLink(name) diff --git a/mmv1/templates/terraform/encoders/go/monitoring_notification_channel.go.tmpl b/mmv1/templates/terraform/encoders/go/monitoring_notification_channel.go.tmpl index bde26ac93c37..4a4fbc8502d3 100644 --- a/mmv1/templates/terraform/encoders/go/monitoring_notification_channel.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/monitoring_notification_channel.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} labelmap, ok := obj["labels"] if !ok { labelmap = make(map[string]string) diff --git a/mmv1/templates/terraform/encoders/go/monitoring_service.go.tmpl b/mmv1/templates/terraform/encoders/go/monitoring_service.go.tmpl index e9b60bab04a1..b27fe821f566 100644 --- a/mmv1/templates/terraform/encoders/go/monitoring_service.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/monitoring_service.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Currently only CUSTOM service types can be created, but the // custom identifier block does not actually have fields right now. // Set to empty to indicate manually-created service type is CUSTOM. diff --git a/mmv1/templates/terraform/encoders/go/monitoring_slo.go.tmpl b/mmv1/templates/terraform/encoders/go/monitoring_slo.go.tmpl index 038323202a85..d135a3426c0b 100644 --- a/mmv1/templates/terraform/encoders/go/monitoring_slo.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/monitoring_slo.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} // Name/Service Level Objective ID is a query parameter and cannot // be given in data delete(obj, "sloId") diff --git a/mmv1/templates/terraform/encoders/go/network_peering_routes_config.go.tmpl b/mmv1/templates/terraform/encoders/go/network_peering_routes_config.go.tmpl index 8bdd678fdf3e..f52b45c6a4da 100644 --- a/mmv1/templates/terraform/encoders/go/network_peering_routes_config.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/network_peering_routes_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // Stick request in a networkPeering block as in // https://cloud.google.com/compute/docs/reference/rest/v1/networks/updatePeering newObj := make(map[string]interface{}) diff --git a/mmv1/templates/terraform/encoders/go/no_send_name.go.tmpl b/mmv1/templates/terraform/encoders/go/no_send_name.go.tmpl index 0db0a98e867f..8fa3b1ba6c68 100644 --- a/mmv1/templates/terraform/encoders/go/no_send_name.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/no_send_name.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,6 +9,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} delete(obj, "name") return obj, nil diff --git a/mmv1/templates/terraform/encoders/go/redis_location_id_for_fallback_zone.go.tmpl b/mmv1/templates/terraform/encoders/go/redis_location_id_for_fallback_zone.go.tmpl index 7748ca309318..0c045875af48 100644 --- a/mmv1/templates/terraform/encoders/go/redis_location_id_for_fallback_zone.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/redis_location_id_for_fallback_zone.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} config := meta.(*transport_tpg.Config) region, err := tpgresource.GetRegionFromSchema("region", "location_id", d, config) if err != nil { diff --git a/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl b/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl index 684766841fb7..dd54de781463 100644 --- a/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} // The RegionBackendService API's Update / PUT API is badly formed and behaves like // a PATCH field for at least IAP. When sent a `null` `iap` field, the API // doesn't disable an existing field. 
To work around this, we need to emulate diff --git a/mmv1/templates/terraform/encoders/go/sql_source_representation_instance.go.tmpl b/mmv1/templates/terraform/encoders/go/sql_source_representation_instance.go.tmpl index 68cd0ce41bf6..3897db08efdb 100644 --- a/mmv1/templates/terraform/encoders/go/sql_source_representation_instance.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/sql_source_representation_instance.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} opc := obj["onPremisesConfiguration"].(map[string]interface{}) opc["hostPort"] = fmt.Sprintf("%v:%v", opc["host"], opc["port"]) delete(opc, "host") diff --git a/mmv1/templates/terraform/encoders/go/wrap_object.go.tmpl b/mmv1/templates/terraform/encoders/go/wrap_object.go.tmpl index b739d6c24c22..6ac0bbe4e8ac 100644 --- a/mmv1/templates/terraform/encoders/go/wrap_object.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/wrap_object.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} newObj := make(map[string]interface{}) newObj["{{camelize $.Name "lower"}}"] = obj return newObj, nil diff --git a/mmv1/templates/terraform/encoders/go/wrap_object_with_deployment_resource_pool_id.go.tmpl b/mmv1/templates/terraform/encoders/go/wrap_object_with_deployment_resource_pool_id.go.tmpl index eccc59bc586e..1a05fac6eb84 100644 --- a/mmv1/templates/terraform/encoders/go/wrap_object_with_deployment_resource_pool_id.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/wrap_object_with_deployment_resource_pool_id.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} +*/ -}} newObj := make(map[string]interface{}) newObj["deploymentResourcePool"] = obj nameProp, ok := d.GetOk("name") diff --git a/mmv1/templates/terraform/encoders/go/wrap_object_with_template_id.go.tmpl b/mmv1/templates/terraform/encoders/go/wrap_object_with_template_id.go.tmpl index efc2e3edc08d..45728664d213 100644 --- a/mmv1/templates/terraform/encoders/go/wrap_object_with_template_id.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/wrap_object_with_template_id.go.tmpl @@ -1,4 +1,4 @@ -{{- /* +{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} newObj := make(map[string]interface{}) newObj["{{camelize $.Name "lower"}}"] = obj templateIdProp, ok := d.GetOk("template_id") diff --git a/mmv1/templates/terraform/examples/go/pubsub_subscription_push_bq_service_account.tf.tmpl b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_bq_service_account.tf.tmpl new file mode 100644 index 000000000000..01c15668a72b --- /dev/null +++ b/mmv1/templates/terraform/examples/go/pubsub_subscription_push_bq_service_account.tf.tmpl @@ -0,0 +1,56 @@ +resource "google_pubsub_topic" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "topic_name"}}" +} + +resource "google_pubsub_subscription" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "subscription_name"}}" + topic = google_pubsub_topic.{{$.PrimaryResourceId}}.id + + bigquery_config { + table = "${google_bigquery_table.test.project}.${google_bigquery_table.test.dataset_id}.${google_bigquery_table.test.table_id}" + service_account_email = google_service_account.bq_write_service_account.email + } + + depends_on = [google_service_account.bq_write_service_account, google_project_iam_member.viewer, google_project_iam_member.editor] +} + +data "google_project" "project" { +} + +resource "google_service_account" "bq_write_service_account" { + account_id = "{{index $.Vars "service_account_id"}}" + display_name = "BQ Write Service Account" +} + +resource "google_project_iam_member" "viewer" { + project = data.google_project.project.project_id + role = "roles/bigquery.metadataViewer" + member = "serviceAccount:${google_service_account.bq_write_service_account.email}" +} + +resource "google_project_iam_member" "editor" { + project = data.google_project.project.project_id + role = "roles/bigquery.dataEditor" + member = "serviceAccount:${google_service_account.bq_write_service_account.email}" +} + +resource "google_bigquery_dataset" "test" { + dataset_id = "{{index $.Vars "dataset_id"}}" +} + +resource "google_bigquery_table" "test" { + deletion_protection = 
false + table_id = "{{index $.Vars "table_id"}}" + dataset_id = google_bigquery_dataset.test.dataset_id + + schema = < <% end -%> -<% unless property.api_name == property.name -%> +<% unless property.api_name == property.name || property.api_name.nil? -%> api_name: <%= property.api_name %> <% end -%> <% unless !property.unordered_list -%> From bb3772e87e97e5895f0271116c238faf0895c17c Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 17 Jun 2024 13:24:35 -0500 Subject: [PATCH 157/356] go rewrite - sort context map in test file (#10975) --- mmv1/templates/terraform/env_var_context.go.erb | 4 +++- .../terraform/examples/base_configs/test_file.go.erb | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mmv1/templates/terraform/env_var_context.go.erb b/mmv1/templates/terraform/env_var_context.go.erb index 3b7f28bbdb28..df6cef3c6854 100644 --- a/mmv1/templates/terraform/env_var_context.go.erb +++ b/mmv1/templates/terraform/env_var_context.go.erb @@ -1,4 +1,5 @@ -<% example.test_env_vars&.each do |var_name, var_type| -%> +<% unless example.test_env_vars.nil? -%> +<% example.test_env_vars.sort.each do |var_name, var_type| -%> <% if var_type == :ORG_ID -%> "<%= var_name -%>": envvar.GetTestOrgFromEnv(t), <% elsif var_type == :ORG_DOMAIN -%> @@ -29,3 +30,4 @@ "<%= var_name -%>": envvar.GetTestZoneFromEnv(), <% end -%> <% end -%> +<% end -%> diff --git a/mmv1/templates/terraform/examples/base_configs/test_file.go.erb b/mmv1/templates/terraform/examples/base_configs/test_file.go.erb index aeb0c5a31ca7..298046969e4f 100644 --- a/mmv1/templates/terraform/examples/base_configs/test_file.go.erb +++ b/mmv1/templates/terraform/examples/base_configs/test_file.go.erb @@ -67,7 +67,7 @@ func TestAcc<%= test_slug -%>(t *testing.T) { context := map[string]interface{} { <%= lines(indent(compile(pwd + '/templates/terraform/env_var_context.go.erb'), 4)) -%> <% unless example.test_vars_overrides.nil? 
-%> - <% example.test_vars_overrides.each do |var_name, override| -%> + <% example.test_vars_overrides.sort.each do |var_name, override| -%> "<%= var_name %>": <%= override %>, <% end -%> <% end -%> From e69e5f7253af4532e5ed75617a8cab176eef4a72 Mon Sep 17 00:00:00 2001 From: bcreddy-gcp <123543489+bcreddy-gcp@users.noreply.github.com> Date: Mon, 17 Jun 2024 14:26:02 -0700 Subject: [PATCH 158/356] fix empty accelerator config permadiff and instance not starting after an update in resource `google_workbench_instance` (#10961) --- mmv1/products/workbench/Instance.yaml | 1 + .../terraform/constants/workbench_instance.go | 10 ++ .../post_update/workbench_instance.go.erb | 9 +- .../pre_update/workbench_instance.go.erb | 60 +++++---- ...bench_instance_shielded_config_test.go.erb | 40 ++++++ .../resource_workbench_instance_test.go.erb | 124 +++++++++++++++++- 6 files changed, 213 insertions(+), 31 deletions(-) diff --git a/mmv1/products/workbench/Instance.yaml b/mmv1/products/workbench/Instance.yaml index fbac9a9a0e51..2f431e1ce050 100644 --- a/mmv1/products/workbench/Instance.yaml +++ b/mmv1/products/workbench/Instance.yaml @@ -135,6 +135,7 @@ properties: custom_flatten: templates/terraform/custom_flatten/name_from_self_link.erb - !ruby/object:Api::Type::Array name: acceleratorConfigs + diff_suppress_func: WorkbenchInstanceAcceleratorDiffSuppress description: | The hardware accelerators used on this instance. If you use accelerators, make sure that your configuration has [enough vCPUs and memory to support the `machine_type` you have selected](https://cloud.google.com/compute/docs/gpus/#gpus-list). 
diff --git a/mmv1/templates/terraform/constants/workbench_instance.go b/mmv1/templates/terraform/constants/workbench_instance.go index b758fb35c2db..9616479a6365 100644 --- a/mmv1/templates/terraform/constants/workbench_instance.go +++ b/mmv1/templates/terraform/constants/workbench_instance.go @@ -126,6 +126,16 @@ func WorkbenchInstanceTagsDiffSuppress(_, _, _ string, d *schema.ResourceData) b return false } +func WorkbenchInstanceAcceleratorDiffSuppress(_, _, _ string, d *schema.ResourceData) bool { + old, new := d.GetChange("gce_setup.0.accelerator_configs") + oldInterface := old.([]interface{}) + newInterface := new.([]interface{}) + if len(oldInterface) == 0 && len(newInterface) == 1 && newInterface[0] == nil{ + return true + } + return false + } + <% unless compiler == "terraformgoogleconversion-codegen" -%> // waitForWorkbenchInstanceActive waits for an workbench instance to become "ACTIVE" func waitForWorkbenchInstanceActive(d *schema.ResourceData, config *transport_tpg.Config, timeout time.Duration) error { diff --git a/mmv1/templates/terraform/post_update/workbench_instance.go.erb b/mmv1/templates/terraform/post_update/workbench_instance.go.erb index cd216f018387..604ee537a488 100644 --- a/mmv1/templates/terraform/post_update/workbench_instance.go.erb +++ b/mmv1/templates/terraform/post_update/workbench_instance.go.erb @@ -1,7 +1,7 @@ state := d.Get("state").(string) desired_state := d.Get("desired_state").(string) -if state != desired_state { +if state != desired_state || stopInstance{ verb := "start" if desired_state == "STOPPED" { verb = "stop" @@ -15,6 +15,13 @@ if state != desired_state { return fmt.Errorf("Error waiting to modify Workbench Instance state: %s", err) } + if verb == "start"{ + if err := waitForWorkbenchInstanceActive(d, config, d.Timeout(schema.TimeoutUpdate) - time.Minute); err != nil { + return fmt.Errorf("Workbench instance %q did not reach ACTIVE state: %q", d.Get("name").(string), err) + } + + } + } else { log.Printf("[DEBUG] 
Workbench Instance %q has state %q.", name, state) } diff --git a/mmv1/templates/terraform/pre_update/workbench_instance.go.erb b/mmv1/templates/terraform/pre_update/workbench_instance.go.erb index 847a0bcd1311..0b873071374d 100644 --- a/mmv1/templates/terraform/pre_update/workbench_instance.go.erb +++ b/mmv1/templates/terraform/pre_update/workbench_instance.go.erb @@ -1,35 +1,25 @@ -name := d.Get("name").(string) -if d.HasChange("gce_setup.0.machine_type") || d.HasChange("gce_setup.0.accelerator_configs") || d.HasChange("gce_setup.0.shielded_instance_config"){ - state := d.Get("state").(string) - - if state != "STOPPED" { - dRes, err := modifyWorkbenchInstanceState(config, d, project, billingProject, userAgent, "stop") - if err != nil { - return err - } - - if err := waitForWorkbenchOperation(config, d, project, billingProject, userAgent, dRes); err != nil { - return fmt.Errorf("Error stopping Workbench Instance: %s", err) - } - - } else { - log.Printf("[DEBUG] Workbench Instance %q has state %q.", name, state) - } - -} else { - log.Printf("[DEBUG] Workbench Instance %q need not be stopped for the update.", name) -} - // Build custom mask since the notebooks API does not support gce_setup as a valid mask +stopInstance := false newUpdateMask := []string{} if d.HasChange("gce_setup.0.machine_type") { newUpdateMask = append(newUpdateMask, "gce_setup.machine_type") + stopInstance = true } if d.HasChange("gce_setup.0.accelerator_configs") { newUpdateMask = append(newUpdateMask, "gce_setup.accelerator_configs") + stopInstance = true +} +if d.HasChange("gce_setup.0.shielded_instance_config.0.enable_secure_boot") { + newUpdateMask = append(newUpdateMask, "gce_setup.shielded_instance_config.enable_secure_boot") + stopInstance = true } -if d.HasChange("gce_setup.0.shielded_instance_config") { - newUpdateMask = append(newUpdateMask, "gce_setup.shielded_instance_config") +if d.HasChange("gce_setup.0.shielded_instance_config.0.enable_vtpm") { + newUpdateMask = 
append(newUpdateMask, "gce_setup.shielded_instance_config.enable_vtpm") + stopInstance = true +} +if d.HasChange("gce_setup.0.shielded_instance_config.0.enable_integrity_monitoring") { + newUpdateMask = append(newUpdateMask, "gce_setup.shielded_instance_config.enable_integrity_monitoring") + stopInstance = true } if d.HasChange("gce_setup.0.metadata") { newUpdateMask = append(newUpdateMask, "gceSetup.metadata") @@ -43,3 +33,25 @@ url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": str if err != nil { return err } + +name := d.Get("name").(string) +if stopInstance{ + state := d.Get("state").(string) + + if state != "STOPPED" { + dRes, err := modifyWorkbenchInstanceState(config, d, project, billingProject, userAgent, "stop") + if err != nil { + return err + } + + if err := waitForWorkbenchOperation(config, d, project, billingProject, userAgent, dRes); err != nil { + return fmt.Errorf("Error stopping Workbench Instance: %s", err) + } + + } else { + log.Printf("[DEBUG] Workbench Instance %q has state %q.", name, state) + } + +} else { + log.Printf("[DEBUG] Workbench Instance %q need not be stopped for the update.", name) +} diff --git a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_shielded_config_test.go.erb b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_shielded_config_test.go.erb index 22fd838d2e46..cdb78463e00f 100644 --- a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_shielded_config_test.go.erb +++ b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_shielded_config_test.go.erb @@ -22,6 +22,10 @@ func TestAccWorkbenchInstance_shielded_config_update(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWorkbenchInstance_shielded_config_false(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: 
"google_workbench_instance.instance", @@ -31,6 +35,10 @@ func TestAccWorkbenchInstance_shielded_config_update(t *testing.T) { }, { Config: testAccWorkbenchInstance_shielded_config_true(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -55,6 +63,10 @@ func TestAccWorkbenchInstance_shielded_config_remove(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWorkbenchInstance_shielded_config_true(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -64,6 +76,10 @@ func TestAccWorkbenchInstance_shielded_config_remove(t *testing.T) { }, { Config: testAccWorkbenchInstance_shielded_config_none(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -88,6 +104,10 @@ func TestAccWorkbenchInstance_shielded_config_double_apply(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWorkbenchInstance_shielded_config_none(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -97,6 +117,10 @@ func TestAccWorkbenchInstance_shielded_config_double_apply(t *testing.T) { }, { Config: testAccWorkbenchInstance_shielded_config_none(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -106,6 +130,10 @@ func TestAccWorkbenchInstance_shielded_config_double_apply(t *testing.T) { }, { Config: 
testAccWorkbenchInstance_shielded_config_false(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -115,6 +143,10 @@ func TestAccWorkbenchInstance_shielded_config_double_apply(t *testing.T) { }, { Config: testAccWorkbenchInstance_shielded_config_false(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -124,6 +156,10 @@ func TestAccWorkbenchInstance_shielded_config_double_apply(t *testing.T) { }, { Config: testAccWorkbenchInstance_shielded_config_true(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -133,6 +169,10 @@ func TestAccWorkbenchInstance_shielded_config_double_apply(t *testing.T) { }, { Config: testAccWorkbenchInstance_shielded_config_true(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", diff --git a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb index 8fbff0b8f080..374b188fde54 100644 --- a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb @@ -22,6 +22,10 @@ func TestAccWorkbenchInstance_update(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + 
"google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -29,8 +33,12 @@ func TestAccWorkbenchInstance_update(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, }, - { + { Config: testAccWorkbenchInstance_update(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -98,6 +106,10 @@ func TestAccWorkbenchInstance_updateGpu(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWorkbenchInstance_basicGpu(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -105,8 +117,12 @@ func TestAccWorkbenchInstance_updateGpu(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, }, - { + { Config: testAccWorkbenchInstance_updateGpu(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -178,6 +194,10 @@ func TestAccWorkbenchInstance_removeGpu(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWorkbenchInstance_Gpu(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -185,8 +205,12 @@ func TestAccWorkbenchInstance_removeGpu(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", 
"request_id", "labels", "terraform_labels"}, }, - { + { Config: testAccWorkbenchInstance_updateGpu(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -243,6 +267,10 @@ func TestAccWorkbenchInstance_updateMetadata(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -250,8 +278,12 @@ func TestAccWorkbenchInstance_updateMetadata(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, }, - { + { Config: testAccWorkbenchInstance_updateMetadata(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -296,6 +328,10 @@ func TestAccWorkbenchInstance_updateState(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -303,8 +339,12 @@ func TestAccWorkbenchInstance_updateState(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, }, - { + { Config: testAccWorkbenchInstance_updateState(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "STOPPED"), + ), }, { ResourceName: 
"google_workbench_instance.instance", @@ -312,8 +352,12 @@ func TestAccWorkbenchInstance_updateState(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, }, - { + { Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), }, { ResourceName: "google_workbench_instance.instance", @@ -336,3 +380,71 @@ resource "google_workbench_instance" "instance" { } `, context) } + +func TestAccWorkbenchInstance_empty_accelerator(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_empty_accelerator(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_empty_accelerator(context), + Check: 
resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_empty_accelerator(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + gce_setup { + accelerator_configs{ + } + } +} +`, context) +} From 410d1238c3ed91917f80a1f15a94362999e0cf0a Mon Sep 17 00:00:00 2001 From: Samir Ribeiro <42391123+Samir-Cit@users.noreply.github.com> Date: Tue, 18 Jun 2024 13:14:11 -0300 Subject: [PATCH 159/356] Add API documentations for "Service LB Policy" resource (#10984) --- mmv1/products/networkservices/ServiceLBPolicies.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mmv1/products/networkservices/ServiceLBPolicies.yaml b/mmv1/products/networkservices/ServiceLBPolicies.yaml index a26c0bb1cbf4..c9ff29ec4e09 100644 --- a/mmv1/products/networkservices/ServiceLBPolicies.yaml +++ b/mmv1/products/networkservices/ServiceLBPolicies.yaml @@ -21,6 +21,8 @@ update_verb: :PATCH update_mask: true description: | ServiceLbPolicy holds global load balancing and traffic distribution configuration that can be applied to a BackendService. 
+references: !ruby/object:Api::Resource::ReferenceLinks + api: 'https://cloud.google.com/service-mesh/docs/reference/network-services/rest/v1/projects.locations.serviceLbPolicies' async: !ruby/object:Api::OpAsync operation: !ruby/object:Api::OpAsync::Operation path: 'name' From 207c6125a1bf60f774074a895b3cbecb6259be69 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Tue, 18 Jun 2024 18:41:10 +0100 Subject: [PATCH 160/356] Make `google_vpc_access_connector` acc tests use a bootstrapped VPC network (#10995) --- mmv1/products/vpcaccess/Connector.yaml | 6 ++++++ .../terraform/examples/vpc_access_connector.tf.erb | 2 +- .../examples/vpc_access_connector_shared_vpc.tf.erb | 7 +------ 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/mmv1/products/vpcaccess/Connector.yaml b/mmv1/products/vpcaccess/Connector.yaml index 6237f8d680b5..b7a68917399c 100644 --- a/mmv1/products/vpcaccess/Connector.yaml +++ b/mmv1/products/vpcaccess/Connector.yaml @@ -46,11 +46,17 @@ examples: primary_resource_id: 'connector' vars: name: 'vpc-con' + network_name: 'default' + test_vars_overrides: + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-access-connector")' - !ruby/object:Provider::Terraform::Examples name: 'vpc_access_connector_shared_vpc' primary_resource_id: 'connector' vars: name: 'vpc-con' + network_name: 'default' + test_vars_overrides: + network_name: 'acctest.BootstrapSharedServiceNetworkingConnection(t, "vpc-access-connector")' custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: templates/terraform/encoders/no_send_name.go.erb post_create: templates/terraform/post_create/sleep.go.erb diff --git a/mmv1/templates/terraform/examples/vpc_access_connector.tf.erb b/mmv1/templates/terraform/examples/vpc_access_connector.tf.erb index 1a9e997c5400..46ab6b13120d 100644 --- a/mmv1/templates/terraform/examples/vpc_access_connector.tf.erb +++ 
b/mmv1/templates/terraform/examples/vpc_access_connector.tf.erb @@ -1,5 +1,5 @@ resource "google_vpc_access_connector" "connector" { name = "<%= ctx[:vars]['name'] %>" ip_cidr_range = "10.8.0.0/28" - network = "default" + network = "<%= ctx[:vars]['network_name'] %>" } diff --git a/mmv1/templates/terraform/examples/vpc_access_connector_shared_vpc.tf.erb b/mmv1/templates/terraform/examples/vpc_access_connector_shared_vpc.tf.erb index aae7c260019c..e2ce01691191 100644 --- a/mmv1/templates/terraform/examples/vpc_access_connector_shared_vpc.tf.erb +++ b/mmv1/templates/terraform/examples/vpc_access_connector_shared_vpc.tf.erb @@ -10,10 +10,5 @@ resource "google_compute_subnetwork" "custom_test" { name = "<%= ctx[:vars]['name'] %>" ip_cidr_range = "10.2.0.0/28" region = "us-central1" - network = google_compute_network.custom_test.id -} - -resource "google_compute_network" "custom_test" { - name = "<%= ctx[:vars]['name'] %>" - auto_create_subnetworks = false + network = "<%= ctx[:vars]['network_name'] %>" } \ No newline at end of file From 93dc95968b3e7a9da9e8214f5aa9c737f073ac8d Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Tue, 18 Jun 2024 12:49:10 -0500 Subject: [PATCH 161/356] go rewrite - compute diffs continued (#10988) --- mmv1/api/resource.go | 83 ++++++++++++++++--- mmv1/api/resource/examples.go | 75 ++++++++++------- mmv1/api/type.go | 22 ++--- mmv1/google/template_utils.go | 29 +++---- .../datasource_iam.html.markdown.tmpl | 3 +- .../examples/base_configs/test_file.go.tmpl | 12 +-- .../terraform/expand_property_method.go.tmpl | 2 +- .../terraform/flatten_property_method.go.tmpl | 12 +-- mmv1/templates/terraform/nested_query.go.tmpl | 3 +- .../property_documentation.html.markdown.tmpl | 4 +- mmv1/templates/terraform/resource.go.tmpl | 81 +++++++++--------- .../terraform/resource.html.markdown.tmpl | 9 +- .../terraform/schema_property.go.tmpl | 14 ++-- .../terraform/schema_subresource.go.tmpl | 2 +- mmv1/templates/terraform/sweeper_file.go.tmpl | 3 +- 15 
files changed, 218 insertions(+), 136 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index f1726a744761..71d768a65630 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -558,6 +558,15 @@ func (r *Resource) addLabelsFields(props []*Type, parent *Type, labels *Type) [] return props } +func (r *Resource) HasLabelsField() bool { + for _, p := range r.Properties { + if p.Name == "labels" { + return true + } + } + return false +} + // def add_annotations_fields(props, parent, annotations) func (r *Resource) addAnnotationsFields(props []*Type, parent *Type, annotations *Type) []*Type { @@ -878,6 +887,9 @@ func (r Resource) HasZone() bool { // resource functions needed for template that previously existed in terraform.go but due to how files are being inherited here it was easier to put in here // taken wholesale from tpgtools func (r Resource) Updatable() bool { + if r.Immutable && !r.RootLabels() { + return false + } for _, p := range r.AllProperties() { if !p.Immutable && !(p.Required && p.DefaultFromApi) { return true @@ -907,6 +919,11 @@ func (r Resource) TerraformName() string { } func (r Resource) ImportIdFormatsFromResource() []string { + + var ids []string + for _, id := range r.GetIdentity() { + ids = append(ids, google.Underscore(id.Name)) + } return ImportIdFormats(r.ImportFormat, r.Identity, r.BaseUrl) } @@ -942,7 +959,7 @@ func ImportIdFormats(importFormat, identity []string, baseUrl string) []string { transformedIdentity = append(transformedIdentity, fmt.Sprintf("{{%s}}", id)) } identityPath := strings.Join(transformedIdentity, "/") - idFormats = []string{fmt.Sprintf("%s/{{name}}", identityPath)} + idFormats = []string{fmt.Sprintf("%s/%s", underscoredBaseUrl, identityPath)} } } else { idFormats = importFormat @@ -991,7 +1008,7 @@ func ImportIdFormats(importFormat, identity []string, baseUrl string) []string { func (r Resource) IgnoreReadPropertiesToString(e resource.Examples) string { var props []string for _, tp := range 
r.AllUserProperties() { - if tp.UrlParamOnly || tp.IgnoreRead || tp.IsA("ResourceRef") { + if tp.UrlParamOnly || tp.IsA("ResourceRef") { props = append(props, fmt.Sprintf("\"%s\"", google.Underscore(tp.Name))) } } @@ -1001,10 +1018,28 @@ func (r Resource) IgnoreReadPropertiesToString(e resource.Examples) string { for _, tp := range r.IgnoreReadLabelsFields(r.PropertiesWithExcluded()) { props = append(props, fmt.Sprintf("\"%s\"", google.Underscore(tp))) } + for _, tp := range ignoreReadFields(r.AllUserProperties()) { + props = append(props, fmt.Sprintf("\"%s\"", tp)) + } slices.Sort(props) - return fmt.Sprintf("[]string{%s}", strings.Join(props, ", ")) + if len(props) > 0 { + return fmt.Sprintf("[]string{%s}", strings.Join(props, ", ")) + } + return "" +} + +func ignoreReadFields(props []*Type) []string { + var fields []string + for _, tp := range props { + if tp.IgnoreRead && !tp.UrlParamOnly && !tp.IsA("ResourceRef") { + fields = append(fields, tp.TerraformLineage()) + } else if tp.IsA("NestedObject") && tp.AllProperties() != nil { + fields = append(fields, ignoreReadFields(tp.AllProperties())...) 
+ } + } + return fields } func (r *Resource) SetCompiler(t string) { @@ -1342,19 +1377,26 @@ func (r Resource) GetPropertyUpdateMasksGroups(properties []*Type, maskPrefix st } // Formats whitespace in the style of the old Ruby generator's descriptions in documentation -func (r Resource) FormatDocDescription(desc string) string { - returnString := strings.ReplaceAll(desc, "\n\n", "\n") +func (r Resource) FormatDocDescription(desc string, indent bool) string { + returnString := desc + if indent { + returnString = strings.ReplaceAll(returnString, "\n\n", "\n") + returnString = strings.ReplaceAll(returnString, "\n", "\n ") - returnString = strings.ReplaceAll(returnString, "\n", "\n ") + // fix removing for ruby -> go transition diffs + returnString = strings.ReplaceAll(returnString, "\n \n **Note**: This field is non-authoritative,", "\n\n **Note**: This field is non-authoritative,") - // fix removing for ruby -> go transition diffs - returnString = strings.ReplaceAll(returnString, "\n \n **Note**: This field is non-authoritative,", "\n\n **Note**: This field is non-authoritative,") - - return strings.TrimSuffix(returnString, "\n ") + return strings.TrimSuffix(returnString, "\n ") + } + return strings.TrimSuffix(returnString, "\n") } func (r Resource) CustomTemplate(templatePath string, appendNewline bool) string { - return resource.ExecuteTemplate(&r, templatePath, appendNewline) + output := resource.ExecuteTemplate(&r, templatePath, appendNewline) + if !appendNewline { + output = strings.TrimSuffix(output, "\n") + } + return output } // Returns the key of the list of resources in the List API response @@ -1407,7 +1449,8 @@ type UpdateGroup struct { // def properties_without_custom_update(properties) func (r Resource) propertiesWithCustomUpdate(properties []*Type) []*Type { return google.Reject(properties, func(p *Type) bool { - return p.UpdateUrl == "" || p.UpdateVerb == "" || p.UpdateVerb == "NOOP" + return p.UpdateUrl == "" || p.UpdateVerb == "" || p.UpdateVerb == 
"NOOP" || + p.IsA("KeyValueTerraformLabels") || p.IsA("KeyValueLabels") }) } @@ -1448,3 +1491,19 @@ func (r Resource) PropertyNamesToStrings(properties []*Type) []string { func (r Resource) IsExcluded() bool { return r.Exclude || r.ExcludeResource } + +func (r Resource) TestExamples() []resource.Examples { + return google.Reject(google.Reject(r.Examples, func(e resource.Examples) bool { + return e.SkipTest + }), func(e resource.Examples) bool { + return e.MinVersion != "" && slices.Index(product.ORDER, r.TargetVersionName) < slices.Index(product.ORDER, e.MinVersion) + }) +} + +func (r Resource) VersionedProvider(exampleVersion string) bool { + vp := r.MinVersion + if exampleVersion != "" { + vp = exampleVersion + } + return vp != "" && vp != "ga" +} diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index 31d59450e64b..a1386f8f7081 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -18,6 +18,7 @@ import ( "fmt" "net/url" "path/filepath" + "regexp" "strings" "text/template" @@ -179,8 +180,9 @@ func (e *Examples) UnmarshalYAML(n *yaml.Node) error { // Executes example templates for documentation and tests func (e *Examples) SetHCLText() { - docVars := make(map[string]string) - testVars := e.TestEnvVars + originalVars := e.Vars + originalTestEnvVars := e.TestEnvVars + docTestEnvVars := make(map[string]string) docs_defaults := map[string]string{ "PROJECT_NAME": "my-project-name", "CREDENTIALS": "my/credentials/filename.json", @@ -198,16 +200,23 @@ func (e *Examples) SetHCLText() { // Apply doc defaults to test_env_vars from YAML for key := range e.TestEnvVars { - docVars[key] = docs_defaults[e.TestEnvVars[key]] + docTestEnvVars[key] = docs_defaults[e.TestEnvVars[key]] } - e.TestEnvVars = docVars + e.TestEnvVars = docTestEnvVars e.DocumentationHCLText = ExecuteTemplate(e, e.ConfigPath, true) - e.TestEnvVars = testVars + // Remove region tags + re1 := regexp.MustCompile(`# \[[a-zA-Z_ ]+\]\n`) + re2 := 
regexp.MustCompile(`\n# \[[a-zA-Z_ ]+\]`) + e.DocumentationHCLText = re1.ReplaceAllString(e.DocumentationHCLText, "") + e.DocumentationHCLText = re2.ReplaceAllString(e.DocumentationHCLText, "") + + testVars := make(map[string]string) + testTestEnvVars := make(map[string]string) // Override vars to inject test values into configs - will have // - "a-example-var-value%{random_suffix}"" // - "%{my_var}" for overrides that have custom Golang values - for key, value := range e.Vars { + for key, value := range originalVars { var newVal string if strings.Contains(value, "-") { newVal = fmt.Sprintf("tf-test-%s", value) @@ -221,15 +230,29 @@ func (e *Examples) SetHCLText() { if len(newVal) > 54 { newVal = newVal[:54] } - e.Vars[key] = fmt.Sprintf("%s%%{random_suffix}", newVal) + testVars[key] = fmt.Sprintf("%s%%{random_suffix}", newVal) } // Apply overrides from YAML for key := range e.TestVarsOverrides { - e.Vars[key] = fmt.Sprintf("%%{%s}", key) + testVars[key] = fmt.Sprintf("%%{%s}", key) + } + for key := range originalTestEnvVars { + testTestEnvVars[key] = fmt.Sprintf("%%{%s}", key) } + e.Vars = testVars + e.TestEnvVars = testTestEnvVars e.TestHCLText = ExecuteTemplate(e, e.ConfigPath, true) + e.TestHCLText = regexp.MustCompile(`\n\n$`).ReplaceAllString(e.TestHCLText, "\n") + // Remove region tags + e.TestHCLText = re1.ReplaceAllString(e.TestHCLText, "") + e.TestHCLText = re2.ReplaceAllString(e.TestHCLText, "") + e.TestHCLText = SubstituteTestPaths(e.TestHCLText) + + // Reset the example + e.Vars = originalVars + e.TestEnvVars = originalTestEnvVars } func ExecuteTemplate(e any, templatePath string, appendNewline bool) string { @@ -290,27 +313,23 @@ func (e *Examples) ResourceType(terraformName string) string { return terraformName } -// rubocop:disable Layout/LineLength -// func (e *Examples) substitute_test_paths(config) { -// config.gsub!('../static/img/header-logo.png', 'test-fixtures/header-logo.png') -// config.gsub!('path/to/private.key', 'test-fixtures/test.key') 
-// config.gsub!('path/to/certificate.crt', 'test-fixtures/test.crt') -// config.gsub!('path/to/index.zip', '%{zip_path}') -// config.gsub!('verified-domain.com', 'tf-test-domain%{random_suffix}.gcp.tfacc.hashicorptest.com') -// config.gsub!('path/to/id_rsa.pub', 'test-fixtures/ssh_rsa.pub') -// config -// } +func SubstituteExamplePaths(config string) string { + config = strings.ReplaceAll(config, "../static/img/header-logo.png", "../static/header-logo.png") + config = strings.ReplaceAll(config, "path/to/private.key", "../static/ssl_cert/test.key") + config = strings.ReplaceAll(config, "path/to/id_rsa.pub", "../static/ssh_rsa.pub") + config = strings.ReplaceAll(config, "path/to/certificate.crt", "../static/ssl_cert/test.crt") + return config +} -// func (e *Examples) substitute_example_paths(config) { -// config.gsub!('../static/img/header-logo.png', '../static/header-logo.png') -// config.gsub!('path/to/private.key', '../static/ssl_cert/test.key') -// config.gsub!('path/to/id_rsa.pub', '../static/ssh_rsa.pub') -// config.gsub!('path/to/certificate.crt', '../static/ssl_cert/test.crt') -// config -// end -// // rubocop:enable Layout/LineLength -// // rubocop:enable Style/FormatStringToken -// } +func SubstituteTestPaths(config string) string { + config = strings.ReplaceAll(config, "../static/img/header-logo.png", "test-fixtures/header-logo.png") + config = strings.ReplaceAll(config, "path/to/private.key", "test-fixtures/test.key") + config = strings.ReplaceAll(config, "path/to/certificate.crt", "test-fixtures/test.crt") + config = strings.ReplaceAll(config, "path/to/index.zip", "%{zip_path}") + config = strings.ReplaceAll(config, "verified-domain.com", "tf-test-domain%{random_suffix}.gcp.tfacc.hashicorptest.com") + config = strings.ReplaceAll(config, "path/to/id_rsa.pub", "test-fixtures/ssh_rsa.pub") + return config +} // func (e *Examples) validate() { // super diff --git a/mmv1/api/type.go b/mmv1/api/type.go index 7e125f55f44b..572e58c4014e 100644 --- 
a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -211,16 +211,16 @@ type Type struct { // because in Terraform the key has to be a property of the object. // // The name of the key. Used in the Terraform schema as a field name. - KeyName string `yaml:"key_name` + KeyName string `yaml:"key_name"` // A description of the key's format. Used in Terraform to describe // the field in documentation. - KeyDescription string `yaml:"key_description` + KeyDescription string `yaml:"key_description"` // ==================== // KeyValuePairs Fields // ==================== - IgnoreWrite bool `yaml:"ignore_write` + IgnoreWrite bool `yaml:"ignore_write"` // ==================== // Schema Modifications @@ -562,7 +562,7 @@ func (t Type) AtLeastOneOfList() []string { // Returns list of properties that needs exactly one of their fields set. // func (t *Type) exactly_one_of_list() { func (t Type) ExactlyOneOfList() []string { - if t.ResourceMetadata == nil { + if t.ResourceMetadata == nil || t.Parent() != nil { return []string{} } @@ -581,7 +581,7 @@ func (t Type) ExactlyOneOfList() []string { // Returns list of properties that needs required with their fields set. 
// func (t *Type) required_with_list() { func (t Type) RequiredWithList() []string { - if t.ResourceMetadata == nil { + if t.ResourceMetadata == nil || t.Parent() != nil { return []string{} } @@ -1270,14 +1270,14 @@ func (t Type) PropertyNsPrefix() []string { // information from the "object" variable func (t Type) NamespaceProperty() string { - name := google.Camelize(t.Name, "lower") + name := google.Camelize(t.Name, "upper") p := t for p.Parent() != nil { p = *p.Parent() - name = fmt.Sprintf("%s%s", google.Camelize(p.Name, "lower"), name) + name = fmt.Sprintf("%s%s", google.Camelize(p.Name, "upper"), name) } - return fmt.Sprintf("%s%s%s", google.Camelize(t.ApiName, "lower"), t.ResourceMetadata.Name, name) + return fmt.Sprintf("%s%s%s", google.Camelize(t.ResourceMetadata.ProductMetadata.ApiName, "lower"), t.ResourceMetadata.Name, name) } // def namespace_property_from_object(property, object) @@ -1303,11 +1303,11 @@ func (t *Type) GetIdFormat() string { func (t *Type) GoLiteral(value interface{}) string { switch v := value.(type) { case int: - return fmt.Sprintf("\"%d\"", v) + return fmt.Sprintf("%d", v) case float64: - return fmt.Sprintf("\"%f\"", v) + return fmt.Sprintf("%.1f", v) case bool: - return fmt.Sprintf("\"%v\"", v) + return fmt.Sprintf("%v", v) case string: if !strings.HasPrefix(v, "\"") { return fmt.Sprintf("\"%s\"", v) diff --git a/mmv1/google/template_utils.go b/mmv1/google/template_utils.go index 04fb5738e2e6..ac47f6bf3374 100644 --- a/mmv1/google/template_utils.go +++ b/mmv1/google/template_utils.go @@ -44,18 +44,19 @@ func subtract(a, b int) int { } var TemplateFunctions = template.FuncMap{ - "title": SpaceSeparatedTitle, - "replace": strings.Replace, - "replaceAll": strings.ReplaceAll, - "camelize": Camelize, - "underscore": Underscore, - "plural": Plural, - "contains": strings.Contains, - "join": strings.Join, - "lower": strings.ToLower, - "upper": strings.ToUpper, - "dict": wrapMultipleParams, - "format2regex": Format2Regex, - "hasPrefix": 
strings.HasPrefix, - "sub": subtract, + "title": SpaceSeparatedTitle, + "replace": strings.Replace, + "replaceAll": strings.ReplaceAll, + "camelize": Camelize, + "underscore": Underscore, + "plural": Plural, + "contains": strings.Contains, + "join": strings.Join, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "dict": wrapMultipleParams, + "format2regex": Format2Regex, + "hasPrefix": strings.HasPrefix, + "sub": subtract, + "firstSentence": FirstSentence, } diff --git a/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl b/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl index 78e75ee81683..72aef2ce1b0c 100644 --- a/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl +++ b/mmv1/templates/terraform/datasource_iam.html.markdown.tmpl @@ -56,11 +56,12 @@ description: |- # `{{ $.IamTerraformName }}_policy` Retrieves the current IAM policy data for {{ lower $.Name }} -{{ if or (eq $.MinVersionObj.Name "beta") (eq $.IamPolicy.MinVersion "beta") }} +{{- if or (eq $.MinVersionObj.Name "beta") (eq $.IamPolicy.MinVersion "beta") }} ~> **Warning:** This datasource is in beta, and should be used with the terraform-provider-google-beta provider. See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
{{- end }} + ## example ```hcl diff --git a/mmv1/templates/terraform/examples/base_configs/test_file.go.tmpl b/mmv1/templates/terraform/examples/base_configs/test_file.go.tmpl index f0472fbe4430..5f7a441a23d7 100644 --- a/mmv1/templates/terraform/examples/base_configs/test_file.go.tmpl +++ b/mmv1/templates/terraform/examples/base_configs/test_file.go.tmpl @@ -36,7 +36,7 @@ import ( "{{ $.ImportPath }}/tpgresource" transport_tpg "{{ $.ImportPath }}/transport" ) -{{ range $e := $.Res.Examples }} +{{ range $e := $.Res.TestExamples }} func TestAcc{{ $e.TestSlug $.Res.ProductMetadata.Name $.Res.Name }}(t *testing.T) { {{- if $e.SkipVcr }} acctest.SkipIfVcr(t) @@ -53,10 +53,10 @@ func TestAcc{{ $e.TestSlug $.Res.ProductMetadata.Name $.Res.Name }}(t *testing.T acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, - {{- if not (and $e.MinVersion (not (eq $e.MinVersion "ga"))) }} - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - {{- else }} + {{- if $.Res.VersionedProvider $e.MinVersion }} ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + {{- else }} + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), {{- end }} {{- if $e.ExternalProviders }} ExternalProviders: map[string]resource.ExternalProvider{ @@ -104,8 +104,8 @@ func testAccCheck{{ $.Res.ResourceName }}DestroyProducer(t *testing.T) func(s *t if strings.HasPrefix(name, "data.") { continue } - {{- if $.Res.CustomCode.TestCheckDestroy }} -{{/*TODO Q2: Custom template for TestCheckDestroy */}} + {{ if $.Res.CustomCode.TestCheckDestroy }} + {{ $.Res.CustomTemplate $.Res.CustomCode.TestCheckDestroy false -}} {{- else }} config := acctest.GoogleProviderConfig(t) diff --git a/mmv1/templates/terraform/expand_property_method.go.tmpl b/mmv1/templates/terraform/expand_property_method.go.tmpl index 46ed73dc3b99..0ece77d4299d 100644 --- a/mmv1/templates/terraform/expand_property_method.go.tmpl +++ 
b/mmv1/templates/terraform/expand_property_method.go.tmpl @@ -25,9 +25,9 @@ func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.T for _, raw := range v.(*schema.Set).List() { original := raw.(map[string]interface{}) transformed := make(map[string]interface{}) - {{- range $prop := $.NestedProperties }} {{- if not (eq $prop.Name $prop.KeyName) }} + transformed{{$prop.TitlelizeProperty}}, err := expand{{$.GetPrefix}}{{$.TitlelizeProperty}}{{$prop.TitlelizeProperty}}(original["{{ underscore $prop.Name }}"], d, config) if err != nil { return nil, err diff --git a/mmv1/templates/terraform/flatten_property_method.go.tmpl b/mmv1/templates/terraform/flatten_property_method.go.tmpl index 4976f5747529..1f82fc54f02f 100644 --- a/mmv1/templates/terraform/flatten_property_method.go.tmpl +++ b/mmv1/templates/terraform/flatten_property_method.go.tmpl @@ -14,10 +14,12 @@ limitations under the License. */ -}} {{- define "flattenPropertyMethod" }} {{- if $.CustomFlatten }} - {{- $.CustomTemplate $.CustomFlatten true -}} -{{ else }} + {{- $.CustomTemplate $.CustomFlatten false -}} +{{- else -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { - {{- if $.IsA "NestedObject" }} + {{- if $.IgnoreRead }} + return d.Get("{{ $.TerraformLineage }}") + {{- else if $.IsA "NestedObject" }} if v == nil { return nil } @@ -143,9 +145,9 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso {{- end }} {{- else }} return v - {{- end }} +{{- end }} } - {{- if $.NestedProperties }} +{{- if $.NestedProperties }} {{- range $prop := $.NestedProperties }} {{ template "flattenPropertyMethod" $prop -}} {{- end }} diff --git a/mmv1/templates/terraform/nested_query.go.tmpl b/mmv1/templates/terraform/nested_query.go.tmpl index d44729050971..73af9ee3db31 100644 --- a/mmv1/templates/terraform/nested_query.go.tmpl +++ b/mmv1/templates/terraform/nested_query.go.tmpl @@ -10,9 
+10,8 @@ func flattenNested{{ $.ResourceName }}(d *schema.ResourceData, meta interface{}, return nil, nil } res = v.(map[string]interface{}) - {{- end }} -{{- end }} +{{ end }} v, ok = res["{{ $.LastNestedQueryKey }}"] if !ok || v == nil { return nil,nil diff --git a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl index 70275d948917..6b08df4ada86 100644 --- a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl @@ -21,7 +21,7 @@ (Deprecated) {{- end}} {{- end }} - {{ $.ResourceMetadata.FormatDocDescription $.Description -}} + {{ $.ResourceMetadata.FormatDocDescription $.Description true -}} {{- if and (and ($.IsA "Array") ($.ItemType.IsA "Enum")) (and (not $.Output) (not $.ItemType.SkipDocsValues))}} {{- if $.ItemType.DefaultValue }} Default value is `{{ $.ItemType.DefaultValue }}`. @@ -34,7 +34,7 @@ Possible values are: {{ $.EnumValuesToString "`" false }}. {{- end }} {{- if $.Sensitive }} - **Note**: This property is sensitive and will not be displayed in the plan. + **Note**: This property is sensitive and will not be displayed in the plan. {{- end }} {{- if and (not $.FlattenObject) $.NestedProperties }} Structure is [documented below](#nested_{{ underscore $.Name }}). 
diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index cd6475b97068..97f3863d2962 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -89,8 +89,7 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { {{- end}} Delete: schema.DefaultTimeout({{ $.Timeouts.DeleteMinutes -}} * time.Minute), }, - -{{- if $.SchemaVersion }} +{{ if $.SchemaVersion }} SchemaVersion: {{ $.SchemaVersion -}}, {{- end}} {{- if $.MigrateState }} @@ -108,7 +107,7 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { {{- end }} }, {{- end }} -{{if or (and (or $.HasProject $.HasRegion $.HasZone) (not $.SkipDefaultCdiff)) $.CustomDiff }} +{{- if or (and (or $.HasProject $.HasRegion $.HasZone) (not $.SkipDefaultCdiff)) $.CustomDiff }} CustomizeDiff: customdiff.All( {{- if $.CustomDiff -}} {{- range $cdiff := $.CustomDiff }} @@ -188,7 +187,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{- end}} config := meta.(*transport_tpg.Config) {{ if $.CustomCode.CustomCreate -}} - //TODO Q2 function to compile custom code lines + {{ $.CustomTemplate $.CustomCode.CustomCreate false -}} {{ else -}} userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -394,7 +393,9 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{ end -}} {{ end -}} {{ end -}} -{{/* TODO POST CREATE */}} +{{if $.CustomCode.PostCreate -}} + {{- $.CustomTemplate $.CustomCode.PostCreate false -}} +{{- end}} {{if $.GetAsync.Allow "Create" -}} {{if $.GetAsync.IsA "PollAsync" -}} @@ -548,7 +549,7 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) headers := make(http.Header) {{- if $.CustomCode.PreRead -}} - //todo preread + {{ $.CustomTemplate $.CustomCode.PreRead false -}} {{- end }} res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, @@ -612,7 +613,9 @@ 
func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) {{- end}} {{- end}} {{- end}} -{{if $.HasProject -}} +{{ if $.HasProject }} + + if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading {{ $.Name -}}: %s", err) } @@ -637,10 +640,9 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error reading {{ $.Name -}}: %s", err) } {{- end}} - -{{- range $prop := $.ReadProperties }} +{{ range $prop := $.ReadProperties }} {{if $prop.FlattenObject -}} -// Terraform must set the top level schema field, but since this $ contains collapsed properties +// Terraform must set the top level schema field, but since this object contains collapsed properties // it's difficult to know what the top level should be. Instead we just loop over the map returned from flatten. if flattenedProp := flatten{{ if $.NestedQuery -}}Nested{{end}}{{ $.ResourceName -}}{{ camelize $prop.Name "upper" -}}(res["{{ $prop.ApiName -}}"], d, config); flattenedProp != nil { if gerr, ok := flattenedProp.(*googleapi.Error); ok { @@ -655,7 +657,7 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) } } } -{{ else -}} +{{- else -}} if err := d.Set("{{ underscore $prop.Name -}}", flatten{{ if $.NestedQuery -}}Nested{{end}}{{ $.ResourceName -}}{{ camelize $prop.Name "upper" -}}(res["{{ $prop.ApiName -}}"], d, config)); err != nil { return fmt.Errorf("Error reading {{ $.Name -}}: %s", err) } @@ -678,7 +680,7 @@ func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{ {{- end}} config := meta.(*transport_tpg.Config) {{ if $.CustomCode.CustomUpdate -}} -//TODO custom update + {{ $.CustomTemplate $.CustomCode.CustomUpdate false -}} {{ else -}} userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) if err != nil { @@ -820,13 +822,13 @@ if len(updateMask) > 0 { } {{- end}} {{- end}} -{{ if $.UpdateMask -}} +{{- if $.UpdateMask -}} } {{- end}} 
{{- end}}{{/*if not immutable*/}} {{ if $.FieldSpecificUpdateMethods }} d.Partial(true) -{{- range $index, $props := $.PropertiesByCustomUpdate }} +{{ range $index, $props := $.PropertiesByCustomUpdate }} if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\""}}") { obj := make(map[string]interface{}) {{ if $index.FingerprintName }} @@ -886,7 +888,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" `NullFields` is a special case of `send_empty_value` where the empty value in question is go's literal nil. -*/}} -{{ if $propsByKey.SendEmptyValue -}} +{{- if $propsByKey.SendEmptyValue -}} } else if v, ok := d.GetOkExists("{{ underscore $propsByKey.Name -}}"); ok || !reflect.DeepEqual(v, {{ $propsByKey.ApiName -}}Prop) { {{ else if $propsByKey.FlattenObject -}} } else if !tpgresource.IsEmptyValue(reflect.ValueOf({{ $propsByKey.ApiName -}}Prop)) { @@ -895,7 +897,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" {{- end}} obj["{{ $propsByKey.ApiName -}}"] = {{ $propsByKey.ApiName -}}Prop } -{{ end -}}{{/*range propsByKey*/}} +{{- end -}}{{/*range propsByKey*/}} {{/* We need to decide what encoder to use here - if there's an update encoder, use that! 
-*/}} {{ if $.CustomCode.UpdateEncoder -}} obj, err = resource{{ $.ResourceName -}}UpdateEncoder(d, meta, obj) @@ -920,7 +922,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" headers := make(http.Header) {{ if $.CustomCode.PreUpdate -}} -//TODO Preupdate + {{ $.CustomTemplate $.CustomCode.PreUpdate false -}} {{ end}} {{ if $.SupportsIndirectUserProjectOverride -}} if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { @@ -941,7 +943,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" UserAgent: userAgent, Body: obj, Timeout: d.Timeout(schema.TimeoutUpdate), -{{ if $.ErrorRetryPredicates -}} +{{- if $.ErrorRetryPredicates -}} ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{{"{"}}{{ join $.ErrorRetryPredicates "," -}}{{"}"}}, {{- end}} {{- if $.ErrorAbortPredicates -}} @@ -950,9 +952,9 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" Headers: headers, }) if err != nil { - return fmt.Errorf("Error updating {{ $.Name -}} %q: %s", d.Id(), err) + return fmt.Errorf("Error updating {{ $.Name }} %q: %s", d.Id(), err) } else { - log.Printf("[DEBUG] Finished updating {{ $.Name -}} %q: %#v", d.Id(), res) + log.Printf("[DEBUG] Finished updating {{ $.Name }} %q: %#v", d.Id(), res) } {{ if $.GetAsync.Allow "update" -}} @@ -960,27 +962,28 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" err = {{ $.ClientNamePascal -}}OperationWaitTime( config, res, {{if or $.HasProject $.GetAsync.IncludeProject -}} {{if $.LegacyLongFormProject -}}tpgresource.GetResourceNameFromSelfLink(project){{ else }}project{{ end }}, {{ end -}} "Updating {{ $.Name -}}", userAgent, d.Timeout(schema.TimeoutUpdate)) - if err != nil { return err } -{{ else if $.GetAsync.IsA "PollAsync" -}} +{{- else if $.GetAsync.IsA "PollAsync" -}} err = transport_tpg.PollingWaitTime(resource{{ $.ResourceName -}}PollRead(d, meta), {{ 
$.GetAsync.CheckResponseFuncExistence -}}, "Updating {{ $.Name -}}", d.Timeout(schema.TimeoutUpdate), {{ $.GetAsync.TargetOccurrences -}}) if err != nil { -{{ if $.GetAsync.SuppressError -}} +{{- if $.GetAsync.SuppressError -}} log.Printf("[ERROR] Unable to confirm eventually consistent {{ $.Name -}} %q finished updating: %q", d.Id(), err) -{{ else -}} +{{- else -}} return err {{- end}} } {{- end}} {{- end}} } -{{- end }}{{/*range PropertiesByCustomUpdate*/}} +{{ end }}{{/*range PropertiesByCustomUpdate*/}} d.Partial(false) {{- end }}{{/*if FieldSpecificUpdateMethods*/}} -{{ if $.CustomCode.PostUpdate -}} //TODO POST UPDATE {{end}} +{{ if $.CustomCode.PostUpdate -}} + {{ $.CustomTemplate $.CustomCode.PostUpdate false -}} + {{end}} return resource{{ $.ResourceName -}}Read(d, meta) {{- end }}{{/*if CustomUpdate*/}} } @@ -1007,9 +1010,8 @@ func resource{{ $.ResourceName }}Delete(d *schema.ResourceData, meta interface{} if err != nil { return err } - -{{- if $.CustomCode.CustomDelete }} -{{/* TODO CustomDelete */}} +{{ if $.CustomCode.CustomDelete }} +{{ $.CustomTemplate $.CustomCode.CustomDelete false -}} {{- else }} billingProject := "" @@ -1025,6 +1027,7 @@ func resource{{ $.ResourceName }}Delete(d *schema.ResourceData, meta interface{} {{- end }} {{- end }} {{- if $.Mutex }} + lockName, err := tpgresource.ReplaceVars(d, config, "{{ $.Mutex }}") if err != nil { return err @@ -1065,7 +1068,7 @@ func resource{{ $.ResourceName }}Delete(d *schema.ResourceData, meta interface{} headers := make(http.Header) {{- if $.CustomCode.PreDelete }} - {{/* TODO PreDelete */}} + {{ $.CustomTemplate $.CustomCode.PreDelete false -}} {{- end }} log.Printf("[DEBUG] Deleting {{ $.Name }} %q", d.Id()) @@ -1109,7 +1112,7 @@ func resource{{ $.ResourceName }}Delete(d *schema.ResourceData, meta interface{} {{- end }} {{- end }} {{- if $.CustomCode.PostDelete }} -{{/* TODO PostDelete */}} + {{ $.CustomTemplate $.CustomCode.PostDelete false -}} {{- end }} log.Printf("[DEBUG] Finished deleting 
{{ $.Name }} %q: %#v", d.Id(), res) @@ -1121,7 +1124,7 @@ func resource{{ $.ResourceName }}Delete(d *schema.ResourceData, meta interface{} {{ if not $.ExcludeImport -}} func resource{{ $.ResourceName }}Import(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { {{- if $.CustomCode.CustomImport }} - {{/* TODO CustomImport */}} + {{ $.CustomTemplate $.CustomCode.CustomImport false -}} {{- else }} config := meta.(*transport_tpg.Config) if err := tpgresource.ParseImportId([]string{ @@ -1149,7 +1152,7 @@ func resource{{ $.ResourceName }}Import(d *schema.ResourceData, meta interface{} {{- end }} {{- end }} {{- if $.CustomCode.PostImport }} - {{- $.CustomTemplate $.CustomCode.PostImport true -}} + {{- $.CustomTemplate $.CustomCode.PostImport false -}} {{- end }} return []*schema.ResourceData{d}, nil @@ -1158,7 +1161,7 @@ func resource{{ $.ResourceName }}Import(d *schema.ResourceData, meta interface{} {{ end }} {{- range $prop := $.GettableProperties }} {{- if not $prop.IgnoreRead }} - {{- template "flattenPropertyMethod" $prop -}} + {{ template "flattenPropertyMethod" $prop -}} {{- end }} {{- end }} {{- range $prop := $.SettableProperties }} @@ -1166,13 +1169,13 @@ func resource{{ $.ResourceName }}Import(d *schema.ResourceData, meta interface{} {{- end }} {{- if $.CustomCode.Encoder }} func resource{{ $.ResourceName -}}Encoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - {{- $.CustomTemplate $.CustomCode.Encoder true -}} +{{ $.CustomTemplate $.CustomCode.Encoder false -}} } -{{- end }} +{{ end -}} {{- if $.CustomCode.UpdateEncoder }} func resource{{ $.ResourceName -}}UpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - {{- $.CustomTemplate $.CustomCode.UpdateEncoder true -}} + {{ $.CustomTemplate $.CustomCode.UpdateEncoder false -}} } {{- end }} {{- if $.NestedQuery }} @@ -1180,12 +1183,12 @@ func resource{{ $.ResourceName 
-}}UpdateEncoder(d *schema.ResourceData, meta int {{- end }} {{- if $.CustomCode.Decoder }} func resource{{ $.ResourceName -}}Decoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { - {{- $.CustomTemplate $.CustomCode.Decoder true -}} + {{ $.CustomTemplate $.CustomCode.Decoder false -}} } {{- end }} {{- if $.CustomCode.PostCreateFailure }} func resource{{ $.ResourceName -}}PostCreateFailure(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { - {{- $.CustomTemplate $.CustomCode.PostCreateFailure true -}} + {{- $.CustomTemplate $.CustomCode.PostCreateFailure false -}} } {{- end }} {{/* TODO state upgraders */}} diff --git a/mmv1/templates/terraform/resource.html.markdown.tmpl b/mmv1/templates/terraform/resource.html.markdown.tmpl index d37f88b74341..cf13c07f9e3e 100644 --- a/mmv1/templates/terraform/resource.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource.html.markdown.tmpl @@ -27,7 +27,7 @@ # ---------------------------------------------------------------------------- subcategory: "{{$.ProductMetadata.DisplayName}}" description: |- - {{ $.FormatDocDescription $.Description }} + {{ $.FormatDocDescription (firstSentence $.Description) true }} --- # {{$.TerraformName}} @@ -35,9 +35,8 @@ description: |- ~> **Warning:** {{$.DeprecationMessage}} {{- end }} -{{$.Description}} - -{{- if eq $.MinVersion "beta"}} +{{ $.FormatDocDescription $.Description false }} +{{ if eq $.MinVersion "beta"}} ~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
{{- end }} @@ -66,7 +65,7 @@ To get more information about {{$.Name}}, see: ~> **Warning:** All arguments including the following potentially sensitive values will be stored in the raw state as plain text: {{ $.SensitivePropsToString }}. [Read more about sensitive data in state](https://www.terraform.io/language/state/sensitive-data). -{{- end }} +{{ end }} {{- if $.Examples }} {{- range $e := $.Examples }} {{- if not $e.SkipDocs }} diff --git a/mmv1/templates/terraform/schema_property.go.tmpl b/mmv1/templates/terraform/schema_property.go.tmpl index 14fae40f8165..17dad86dc26c 100644 --- a/mmv1/templates/terraform/schema_property.go.tmpl +++ b/mmv1/templates/terraform/schema_property.go.tmpl @@ -15,8 +15,8 @@ {{- define "SchemaFields"}} {{- if .FlattenObject -}} {{- range $prop := .ResourceMetadata.OrderProperties .Properties -}} - {{- template "SchemaFields" $prop -}} - {{- end -}} + {{ template "SchemaFields" $prop }} + {{ end -}} {{- else -}} "{{underscore .Name -}}": { {{ if .IsSet -}} @@ -156,19 +156,19 @@ Possible values: [{{- .EnumValuesToString "\"" false -}}] {{ if .Sensitive -}} Sensitive: true, {{ end -}} -{{ if .DefaultValue -}} +{{ if not (eq .DefaultValue nil ) -}} Default: {{ .GoLiteral .DefaultValue -}}, {{ end -}} -{{ if .Conflicting -}} +{{ if or .Conflicting .Conflicts -}} ConflictsWith: {{ .GoLiteral .Conflicting -}}, {{ end -}} -{{ if .AtLeastOneOfList -}} +{{ if or .AtLeastOneOfList .AtLeastOneOf -}} AtLeastOneOf: {{ .GoLiteral .AtLeastOneOfList -}}, {{ end -}} -{{ if .ExactlyOneOfList -}} +{{ if or .ExactlyOneOfList .ExactlyOneOf -}} ExactlyOneOf: {{ .GoLiteral .ExactlyOneOfList -}}, {{ end -}} -{{ if .RequiredWithList -}} +{{ if or .RequiredWithList .RequiredWith -}} RequiredWith: {{ .GoLiteral .RequiredWithList -}}, {{ end -}} }, diff --git a/mmv1/templates/terraform/schema_subresource.go.tmpl b/mmv1/templates/terraform/schema_subresource.go.tmpl index 4a59519fa060..86e1f7a1c869 100644 --- 
a/mmv1/templates/terraform/schema_subresource.go.tmpl +++ b/mmv1/templates/terraform/schema_subresource.go.tmpl @@ -17,7 +17,7 @@ func {{ .NamespaceProperty }}Schema() *schema.Resource { return &schema.Resource{ Schema: map[string]*schema.Schema{ - {{- range $prop := $.ItemType.Properties }} + {{- range $prop := $.ResourceMetadata.OrderProperties $.ItemType.Properties }} {{template "SchemaFields" $prop}} {{- end }} }, diff --git a/mmv1/templates/terraform/sweeper_file.go.tmpl b/mmv1/templates/terraform/sweeper_file.go.tmpl index e70186347b66..f5085436c048 100644 --- a/mmv1/templates/terraform/sweeper_file.go.tmpl +++ b/mmv1/templates/terraform/sweeper_file.go.tmpl @@ -150,8 +150,7 @@ func testSweep{{ $.ResourceName }}(region string) error { } zone := tpgresource.GetResourceNameFromSelfLink(obj["zone"].(string)) deleteTemplate = strings.Replace(deleteTemplate, "{{"{{zone}}"}}", zone, -1) - - {{- end }} +{{ end }} deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) if err != nil { log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) From c07e40b24de7aeed6faa445a659ea5ff28ff50f6 Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Tue, 18 Jun 2024 11:10:15 -0700 Subject: [PATCH 162/356] [Prep for 6.0.0 release branch] Make downstream waiter compatible with non-linear history for main (#10939) --- .ci/magician/cmd/wait_for_commit.go | 48 ++++++++---------------- .ci/magician/cmd/wait_for_commit_test.go | 20 +++++++--- 2 files changed, 31 insertions(+), 37 deletions(-) diff --git a/.ci/magician/cmd/wait_for_commit.go b/.ci/magician/cmd/wait_for_commit.go index cea9a023032f..2059885e35c8 100644 --- a/.ci/magician/cmd/wait_for_commit.go +++ b/.ci/magician/cmd/wait_for_commit.go @@ -52,39 +52,23 @@ func execWaitForCommit(syncBranchPrefix, baseBranch, sha string, runner source.R } for { - if baseBranch != "main" { - output, err := gitRevParse("origin/"+syncBranch, runner) - if err != nil { - return err 
- } - syncHead := strings.TrimSpace(output) - - output, err = gitRevParse(sha+"~", runner) - if err != nil { - return err - } - baseParent := strings.TrimSpace(output) - if syncHead == baseParent { - return nil - } - fmt.Println("sync branch is at: ", syncHead) - fmt.Println("current commit is: ", sha) - } else { - output, err := runner.Run("git", []string{"log", "--pretty=%H", "--reverse", fmt.Sprintf("origin/%s..origin/main", syncBranch)}, nil) - if err != nil { - return err - } - commits := strings.Split(output, "\n") - commit := "" - if len(commits) > 0 { - commit = strings.TrimSpace(commits[0]) - } - if commit == sha { - return nil - } - fmt.Println("git log says waiting on: ", commit) - fmt.Println("command says waiting on: ", sha) + output, err := gitRevParse("origin/"+syncBranch, runner) + if err != nil { + return err + } + syncHead := strings.TrimSpace(output) + + output, err = gitRevParse(sha+"~", runner) + if err != nil { + return err + } + baseParent := strings.TrimSpace(output) + if syncHead == baseParent { + return nil } + fmt.Println("sync branch is at: ", syncHead) + fmt.Println("current commit is: ", sha) + if _, err := runner.Run("git", []string{"fetch", "origin", syncBranch}, nil); err != nil { return err } diff --git a/.ci/magician/cmd/wait_for_commit_test.go b/.ci/magician/cmd/wait_for_commit_test.go index e735a23bd870..afb0ffadb96e 100644 --- a/.ci/magician/cmd/wait_for_commit_test.go +++ b/.ci/magician/cmd/wait_for_commit_test.go @@ -44,9 +44,11 @@ func TestExecWaitForCommit(t *testing.T) { baseBranch: "main", calledMethods: []string{ "git merge-base --is-ancestor sha origin/sync-branch", - "git log --pretty=%H --reverse origin/sync-branch..origin/main", + "git rev-parse --short origin/sync-branch", + "git rev-parse --short sha~", "git fetch origin sync-branch", - "git log --pretty=%H --reverse origin/sync-branch..origin/main", + "git rev-parse --short origin/sync-branch", + "git rev-parse --short sha~", }, runResults: map[string][]runResult{ 
"cwd git [merge-base --is-ancestor sha origin/sync-branch] map[]": { @@ -55,12 +57,20 @@ func TestExecWaitForCommit(t *testing.T) { err: fmt.Errorf("exit error 1"), }, }, - "cwd git [log --pretty=%H --reverse origin/sync-branch..origin/main] map[]": { + "cwd git [rev-parse --short origin/sync-branch] map[]": { + { + out: "sha-x", + }, + { + out: "sha-z", + }, + }, + "cwd git [rev-parse --short sha~] map[]": { { - out: "sha2\nsha\n\n", + out: "sha-y", }, { - out: "sha\n\n", + out: "sha-z", }, }, "cwd git [fetch origin sync-branch] map[]": { From 68ea8612247a46f4aac90e5b17eb7ecd6cb891b3 Mon Sep 17 00:00:00 2001 From: Robert Teller <31879487+r-teller@users.noreply.github.com> Date: Tue, 18 Jun 2024 11:39:54 -0700 Subject: [PATCH 163/356] Fix firewall rule to support empty descritpion on update (#10950) --- mmv1/products/compute/Firewall.yaml | 1 + .../resource_compute_firewall_test.go.erb | 34 +++++++++++++++++++ .../tests/data/example_compute_firewall.json | 1 + mmv1/third_party/tgc/tests/data/firewall.json | 1 + .../tgc/tests/data/full_compute_firewall.json | 2 ++ 5 files changed, 39 insertions(+) diff --git a/mmv1/products/compute/Firewall.yaml b/mmv1/products/compute/Firewall.yaml index ffab94521dab..b549b9f09edb 100644 --- a/mmv1/products/compute/Firewall.yaml +++ b/mmv1/products/compute/Firewall.yaml @@ -167,6 +167,7 @@ properties: description: | An optional description of this resource. Provide this property when you create the resource. 
+ send_empty_value: true - !ruby/object:Api::Type::Array name: 'destinationRanges' description: | diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_firewall_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_firewall_test.go.erb index 73c6bfd7507e..d00dce225af6 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_firewall_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_firewall_test.go.erb @@ -37,6 +37,17 @@ func TestAccComputeFirewall_update(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccComputeFirewall_nullDescription(networkName, firewallName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_firewall.foobar", "description", ""), + ), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccComputeFirewall_basic(networkName, firewallName), }, @@ -392,6 +403,29 @@ resource "google_compute_firewall" "foobar" { `, network, firewall) } +func testAccComputeFirewall_nullDescription(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = null + network = google_compute_network.foobar.self_link + source_tags = ["foo"] + target_tags = ["bar"] + + allow { + protocol = "tcp" + ports = ["80-255"] + } +} +`, network, firewall) +} + + func testAccComputeFirewall_priority(network, firewall string, priority int) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { diff --git a/mmv1/third_party/tgc/tests/data/example_compute_firewall.json b/mmv1/third_party/tgc/tests/data/example_compute_firewall.json index f50ef6504683..03f91e12109f 100644 --- a/mmv1/third_party/tgc/tests/data/example_compute_firewall.json +++ 
b/mmv1/third_party/tgc/tests/data/example_compute_firewall.json @@ -22,6 +22,7 @@ ] } ], + "description": "", "disabled": false, "logConfig": { "enable": false diff --git a/mmv1/third_party/tgc/tests/data/firewall.json b/mmv1/third_party/tgc/tests/data/firewall.json index 4673c8c11d19..aed343933718 100644 --- a/mmv1/third_party/tgc/tests/data/firewall.json +++ b/mmv1/third_party/tgc/tests/data/firewall.json @@ -22,6 +22,7 @@ ] } ], + "description": "", "disabled": false, "logConfig": { "enable": false diff --git a/mmv1/third_party/tgc/tests/data/full_compute_firewall.json b/mmv1/third_party/tgc/tests/data/full_compute_firewall.json index 188a33b22275..68c6f6acf17e 100644 --- a/mmv1/third_party/tgc/tests/data/full_compute_firewall.json +++ b/mmv1/third_party/tgc/tests/data/full_compute_firewall.json @@ -65,6 +65,7 @@ ] } ], + "description": "", "disabled": false, "logConfig": { "enable": false @@ -102,6 +103,7 @@ "IPProtocol": "icmp" } ], + "description": "", "disabled": false, "logConfig": { "enable": false From 672734df9f9288e14d854871815242b7af410e9f Mon Sep 17 00:00:00 2001 From: Maarc <62051944+Maarc-D@users.noreply.github.com> Date: Tue, 18 Jun 2024 21:24:16 +0200 Subject: [PATCH 164/356] =?UTF-8?q?fix(storage-bucket-object):=20do=20not?= =?UTF-8?q?=20delete=20object=20on=20update=20content,=20j=E2=80=A6=20(#10?= =?UTF-8?q?038)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../storage/resource_storage_bucket_object.go | 73 ++++++++++--------- .../resource_storage_bucket_object_test.go | 65 +++++++++++++++++ 2 files changed, 105 insertions(+), 33 deletions(-) diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go index 926e16c160bd..7addf93e3c18 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go +++ 
b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object.go @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: MPL-2.0 package storage import ( @@ -88,7 +89,6 @@ func ResourceStorageBucketObject() *schema.Resource { "content": { Type: schema.TypeString, Optional: true, - ForceNew: true, ExactlyOneOf: []string{"source"}, Sensitive: true, Computed: true, @@ -120,7 +120,6 @@ func ResourceStorageBucketObject() *schema.Resource { Type: schema.TypeString, // This field is not Computed because it needs to trigger a diff. Optional: true, - ForceNew: true, // Makes the diff message nicer: // detect_md5hash: "1XcnP/iFw/hNrbhXi7QTmQ==" => "different hash" (forces new resource) // Instead of the more confusing: @@ -376,45 +375,53 @@ func resourceStorageBucketObjectUpdate(d *schema.ResourceData, meta interface{}) bucket := d.Get("bucket").(string) name := d.Get("name").(string) - objectsService := storage.NewObjectsService(config.NewStorageClientWithTimeoutOverride(userAgent, d.Timeout(schema.TimeoutUpdate))) - getCall := objectsService.Get(bucket, name) + if d.HasChange("content") || d.HasChange("detect_md5hash") { + // The KMS key name are not able to be set on create : + // or you get error: Error uploading object test-maarc: googleapi: Error 400: Malformed Cloud KMS crypto key: projects/myproject/locations/myregion/keyRings/mykeyring/cryptoKeys/mykeyname/cryptoKeyVersions/1, invalid + d.Set("kms_key_name", nil) + return resourceStorageBucketObjectCreate(d, meta) + } else { - res, err := getCall.Do() - if err != nil { - return fmt.Errorf("Error retrieving object during update %s: %s", name, err) - } + objectsService := storage.NewObjectsService(config.NewStorageClientWithTimeoutOverride(userAgent, d.Timeout(schema.TimeoutUpdate))) + getCall := objectsService.Get(bucket, name) - hasRetentionChanges := d.HasChange("retention") - if hasRetentionChanges { - if v, ok := d.GetOk("retention"); ok { - res.Retention = expandObjectRetention(v) - } else { - res.Retention 
= nil - res.NullFields = append(res.NullFields, "Retention") + res, err := getCall.Do() + if err != nil { + return fmt.Errorf("Error retrieving object during update %s: %s", name, err) } - } - if d.HasChange("event_based_hold") { - v := d.Get("event_based_hold") - res.EventBasedHold = v.(bool) - } + hasRetentionChanges := d.HasChange("retention") + if hasRetentionChanges { + if v, ok := d.GetOk("retention"); ok { + res.Retention = expandObjectRetention(v) + } else { + res.Retention = nil + res.NullFields = append(res.NullFields, "Retention") + } + } - if d.HasChange("temporary_hold") { - v := d.Get("temporary_hold") - res.TemporaryHold = v.(bool) - } + if d.HasChange("event_based_hold") { + v := d.Get("event_based_hold") + res.EventBasedHold = v.(bool) + } - updateCall := objectsService.Update(bucket, name, res) - if hasRetentionChanges { - updateCall.OverrideUnlockedRetention(true) - } - _, err = updateCall.Do() + if d.HasChange("temporary_hold") { + v := d.Get("temporary_hold") + res.TemporaryHold = v.(bool) + } - if err != nil { - return fmt.Errorf("Error updating object %s: %s", name, err) - } + updateCall := objectsService.Update(bucket, name, res) + if hasRetentionChanges { + updateCall.OverrideUnlockedRetention(true) + } + _, err = updateCall.Do() - return nil + if err != nil { + return fmt.Errorf("Error updating object %s: %s", name, err) + } + + return nil + } } func resourceStorageBucketObjectRead(d *schema.ResourceData, meta interface{}) error { diff --git a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_test.go b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_test.go index 54d0dbd78dfc..65eb43de233e 100644 --- a/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_test.go +++ b/mmv1/third_party/terraform/services/storage/resource_storage_bucket_object_test.go @@ -457,6 +457,55 @@ func TestAccStorageObject_retention(t *testing.T) { }) } +func 
TestResourceStorageBucketObjectUpdate_ContentChange(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + initialContent := []byte("initial content") + updatedContent := []byte("updated content") + h := md5.New() + if _, err := h.Write(initialContent); err != nil { + t.Errorf("error calculating md5: %v", err) + } + dataMd5 := base64.StdEncoding.EncodeToString(h.Sum(nil)) + + h2 := md5.New() + if _, err := h2.Write(updatedContent); err != nil { + t.Errorf("error calculating md5: %v", err) + } + newDataMd5 := base64.StdEncoding.EncodeToString(h2.Sum(nil)) + // Update the object content and verify + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageObjectDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleStorageBucketsObjectCustomContent(bucketName, string(initialContent)), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(t, bucketName, objectName, dataMd5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", + "content", + string(initialContent), + ), + ), + }, + { + Config: testGoogleStorageBucketsObjectCustomContent(bucketName, string(updatedContent)), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleStorageObject(t, bucketName, objectName, newDataMd5), + resource.TestCheckResourceAttr( + "google_storage_bucket_object.object", + "content", + string(updatedContent), + ), + ), + }, + }, + }) +} + func testAccCheckGoogleStorageObject(t *testing.T, bucket, object, md5 string) resource.TestCheckFunc { return testAccCheckGoogleStorageObjectWithEncryption(t, bucket, object, md5, "") } @@ -537,6 +586,22 @@ func testAccStorageObjectDestroyProducer(t *testing.T) func(s *terraform.State) } } +func testGoogleStorageBucketsObjectCustomContent(bucketName string, customContent string) string { + return fmt.Sprintf(` +resource 
"google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_storage_bucket_object" "object" { + name = "%s" + bucket = google_storage_bucket.bucket.name + content = "%s" +} +`, bucketName, objectName, customContent) +} + func testGoogleStorageBucketsObjectContent(bucketName string) string { return fmt.Sprintf(` resource "google_storage_bucket" "bucket" { From 4ca8ae7dcc88e01572fa7b33a9e09f9cda302bec Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Tue, 18 Jun 2024 12:33:22 -0700 Subject: [PATCH 165/356] Go rewrite handwritten templates script (#10996) --- mmv1/main.go | 7 + mmv1/provider/template_data.go | 3 +- mmv1/provider/terraform.go | 38 +- mmv1/template-converter.go | 609 +- ..._context_manager_access_level_test.go.tmpl | 240 + ...context_manager_access_policy_test.go.tmpl | 145 + ...ext_manager_service_perimeter_test.go.tmpl | 412 + .../resource_alloydb_cluster_sweeper.go.tmpl | 126 + ...source_api_gateway_api_config_test.go.tmpl | 106 + .../go/resource_api_gateway_api_test.go.tmpl | 46 + .../resource_api_gateway_gateway_test.go.tmpl | 67 + ...ource_apigee_environment_type_test.go.tmpl | 177 + ..._artifact_registry_repository_test.go.tmpl | 296 + ...source_backup_dr_management_server.go.tmpl | 104 + ...e_backup_dr_management_server_test.go.tmpl | 101 + .../go/resource_bigquery_dataset_test.go.tmpl | 867 ++ .../go/resource_bigquery_table.go.tmpl | 2961 +++++ .../go/resource_bigquery_table_test.go.tmpl | 4261 ++++++ ...binary_authorization_attestor_test.go.tmpl | 255 + ...source_binary_authorization_policy_test.go | 363 + ..._binary_authorization_attestor_test.go.erb | 2 +- ...e_cloud_asset_resources_search_all.go.tmpl | 212 + ...ud_asset_resources_search_all_test.go.tmpl | 51 + ...google_cloud_asset_search_all_resources.go | 269 + ...e_cloud_asset_search_all_resources_test.go | 49 + ...source_cloudbuild_worker_pool_test.go.tmpl | 249 + ...ource_cloudfunctions_function_test.go.tmpl | 1293 ++ 
...data_source_cloud_identity_group_lookup.go | 101 + ...e_cloud_identity_group_memberships.go.tmpl | 106 + .../data_source_cloud_identity_groups.go.tmpl | 103 + ...oud_identity_group_membership_test.go.tmpl | 563 + ...ource_cloud_identity_group_sweeper.go.tmpl | 131 + ...resource_cloud_identity_group_test.go.tmpl | 178 + .../resource_cloud_run_service_test.go.tmpl | 1438 ++ .../go/resource_cloud_run_v2_job_test.go.tmpl | 566 + ...resource_cloud_run_v2_service_test.go.tmpl | 1051 ++ .../go/resource_cloud_tasks_queue_test.go | 226 + .../composer/go/composer_operation.go.tmpl | 37 + ...composer_user_workloads_config_map.go.tmpl | 51 + ...ser_user_workloads_config_map_test.go.tmpl | 59 + ...gle_composer_user_workloads_secret.go.tmpl | 63 + ...omposer_user_workloads_secret_test.go.tmpl | 102 + .../go/resource_composer_environment.go.tmpl | 3121 +++++ ...resource_composer_environment_test.go.tmpl | 3536 +++++ ...ser_user_workloads_config_map_test.go.tmpl | 170 + ...rce_composer_user_workloads_secret.go.tmpl | 271 + ...omposer_user_workloads_secret_test.go.tmpl | 183 + .../compute_backend_service_helpers.go.tmpl | 18 + .../go/compute_instance_helpers.go.tmpl | 871 ++ ...instance_network_interface_helpers.go.tmpl | 165 + .../compute/go/compute_operation.go.tmpl | 228 + .../compute/go/compute_operation_test.go.tmpl | 308 + ...ta_source_google_compute_addresses.go.tmpl | 207 + ...urce_google_compute_addresses_test.go.tmpl | 189 + .../data_source_google_compute_image.go.tmpl | 253 + .../go/data_source_google_compute_instance.go | 204 + ...urce_google_compute_instance_group_test.go | 335 + ...e_google_compute_instance_template.go.tmpl | 119 + ...ource_google_compute_instance_test.go.tmpl | 233 + ...ource_google_compute_machine_types.go.tmpl | 251 + ...a_source_google_compute_node_types.go.tmpl | 87 + ...ogle_compute_region_instance_group.go.tmpl | 169 + ...ogle_compute_region_instance_group_test.go | 76 + ...e_compute_region_instance_template.go.tmpl | 130 + 
...pute_region_instance_template_test.go.tmpl | 232 + ...data_source_google_compute_regions.go.tmpl | 87 + ...a_source_google_compute_resource_policy.go | 51 + ...ource_google_compute_router_status.go.tmpl | 135 + ...ata_source_google_compute_snapshot.go.tmpl | 123 + ...a_source_google_compute_subnetwork.go.tmpl | 152 + ..._source_google_compute_vpn_gateway.go.tmpl | 99 + .../data_source_google_compute_zones.go.tmpl | 101 + .../services/compute/go/metadata.go.tmpl | 231 + .../go/resource_compute_attached_disk.go.tmpl | 285 + .../resource_compute_autoscaler_test.go.tmpl | 408 + ...ource_compute_backend_service_test.go.tmpl | 2011 +++ ...rce_compute_disk_async_replication.go.tmpl | 303 + ...ompute_disk_async_replication_test.go.tmpl | 150 + .../go/resource_compute_disk_test.go.tmpl | 1656 +++ ...ource_compute_firewall_policy_rule_test.go | 695 + .../resource_compute_firewall_policy_test.go | 80 + .../go/resource_compute_firewall_test.go | 593 + ...ource_compute_forwarding_rule_test.go.tmpl | 913 ++ .../resource_compute_global_address_test.go | 76 + ...ompute_global_forwarding_rule_test.go.tmpl | 614 + ...ce_compute_global_network_endpoint_test.go | 88 + ...resource_compute_health_check_test.go.tmpl | 384 + ...rce_compute_http_health_check_test.go.tmpl | 128 + .../go/resource_compute_image_test.go.tmpl | 524 + .../go/resource_compute_instance.go.tmpl | 3104 +++++ ...ompute_instance_from_machine_image.go.tmpl | 284 + ...e_instance_from_machine_image_test.go.tmpl | 684 + ...rce_compute_instance_from_template.go.tmpl | 326 + ...ompute_instance_from_template_test.go.tmpl | 1551 +++ .../resource_compute_instance_group.go.tmpl | 471 + ...rce_compute_instance_group_manager.go.tmpl | 1638 +++ ...stance_group_manager_internal_test.go.tmpl | 319 + ...ompute_instance_group_manager_test.go.tmpl | 2034 +++ ...ute_instance_group_membership_test.go.tmpl | 232 + ...source_compute_instance_group_test.go.tmpl | 632 + .../resource_compute_instance_migrate.go.tmpl | 529 + 
...urce_compute_instance_migrate_test.go.tmpl | 962 ++ ...resource_compute_instance_settings_test.go | 98 + ...resource_compute_instance_template.go.tmpl | 1978 +++ ...rce_compute_instance_template_test.go.tmpl | 4197 ++++++ .../go/resource_compute_instance_test.go.tmpl | 9534 +++++++++++++ ...work_edge_security_service_sweeper.go.tmpl | 64 + ...network_edge_security_service_test.go.tmpl | 79 + ...rce_compute_network_endpoint_group_test.go | 124 + ...urce_compute_network_endpoint_test.go.tmpl | 239 + ...rce_compute_network_endpoints_test.go.tmpl | 326 + ...mpute_network_firewall_policy_rule_test.go | 665 + .../resource_compute_network_peering.go.tmpl | 382 + .../go/resource_compute_network_test.go.tmpl | 465 + .../go/resource_compute_node_group_test.go | 184 + ...nization_security_policy_rule_test.go.tmpl | 111 + ..._organization_security_policy_test.go.tmpl | 63 + ...source_compute_per_instance_config_test.go | 738 ++ ...mpute_project_default_network_tier.go.tmpl | 125 + .../resource_compute_project_metadata.go.tmpl | 164 + ...urce_compute_project_metadata_item.go.tmpl | 240 + ...rce_compute_region_autoscaler_test.go.tmpl | 298 + ...ompute_region_backend_service_test.go.tmpl | 1149 ++ .../resource_compute_region_disk_test.go.tmpl | 566 + ...source_compute_region_health_check_test.go | 385 + ...pute_region_instance_group_manager.go.tmpl | 1207 ++ ...region_instance_group_manager_test.go.tmpl | 1919 +++ ...e_compute_region_instance_template.go.tmpl | 1466 ++ ...on_instance_template_internal_test.go.tmpl | 223 + ...pute_region_instance_template_test.go.tmpl | 3753 ++++++ ...region_network_endpoint_group_test.go.tmpl | 78 + ...mpute_region_network_endpoint_test.go.tmpl | 319 + ...compute_region_per_instance_config_test.go | 695 + ...e_region_security_policy_rule_test.go.tmpl | 1035 ++ ...ompute_region_security_policy_test.go.tmpl | 169 + ...rce_compute_region_ssl_policy_test.go.tmpl | 282 + ...e_compute_region_target_http_proxy_test.go | 179 + 
..._compute_region_target_https_proxy_test.go | 630 + ...ce_compute_region_target_tcp_proxy_test.go | 155 + .../resource_compute_region_url_map_test.go | 1207 ++ .../resource_compute_router_bgp_peer_test.go | 1701 +++ .../resource_compute_router_interface.go.tmpl | 429 + ...urce_compute_router_interface_test.go.tmpl | 619 + .../resource_compute_router_nat_test.go.tmpl | 1933 +++ .../go/resource_compute_router_peer.go.tmpl | 1600 +++ .../resource_compute_router_peer_test.go.tmpl | 187 + .../go/resource_compute_router_test.go | 340 + .../resource_compute_security_policy.go.tmpl | 1705 +++ ...ource_compute_security_policy_rule_test.go | 1052 ++ ...ource_compute_security_policy_test.go.tmpl | 1907 +++ ...compute_shared_vpc_service_project.go.tmpl | 194 + .../resource_compute_ssl_policy_test.go.tmpl | 227 + .../resource_compute_subnetwork_test.go.tmpl | 843 ++ ...ce_compute_target_https_proxy_test.go.tmpl | 343 + ...ource_compute_target_instance_test.go.tmpl | 157 + .../go/resource_compute_target_pool.go.tmpl | 590 + .../resource_compute_target_pool_test.go.tmpl | 344 + ...urce_compute_target_ssl_proxy_test.go.tmpl | 345 + .../go/resource_compute_url_map_test.go | 1687 +++ .../go/resource_usage_export_bucket.go.tmpl | 165 + .../security_policy_association_utils.go.tmpl | 28 + .../compute/resource_compute_instance.go.erb | 2 +- .../container/go/container_operation.go.tmpl | 130 + .../services/container/go/node_config.go.tmpl | 1837 +++ .../go/resource_container_cluster.go.tmpl | 6805 ++++++++++ ...ce_container_cluster_internal_test.go.tmpl | 297 + ...source_container_cluster_migratev1.go.tmpl | 1845 +++ .../resource_container_cluster_test.go.tmpl | 11037 ++++++++++++++++ .../go/resource_container_node_pool.go.tmpl | 2176 +++ .../resource_container_node_pool_test.go.tmpl | 4874 +++++++ ...ource_container_analysis_note_test.go.tmpl | 79 + ...esource_dataflow_flex_template_job.go.tmpl | 843 ++ ...dataflow_flex_template_job_migrate.go.tmpl | 204 + 
...ce_dataflow_flex_template_job_test.go.tmpl | 1894 +++ .../dataflow/go/resource_dataflow_job.go | 715 + .../go/resource_dataflow_job_test.go.tmpl | 1316 ++ .../resource_dataform_repository_test.go.tmpl | 128 + .../dataproc/go/iam_dataproc_cluster.go.tmpl | 142 + .../dataproc/go/iam_dataproc_job.go.tmpl | 161 + .../go/resource_dataproc_cluster_test.go | 2561 ++++ .../dataproc/go/resource_dataproc_job.go | 1324 ++ .../go/resource_dataproc_job_test.go.tmpl | 900 ++ ...dataproc_workflow_template_migrate.go.tmpl | 2125 +++ ...ce_dataproc_workflow_template_test.go.tmpl | 237 + ..._source_dataproc_metastore_service.go.tmpl | 43 + ..._source_dataproc_metastore_service_test.go | 51 + ...dataproc_metastore_service_diff_supress.go | 24 + ...esource_dataproc_metastore_service_test.go | 169 + .../go/deployment_manager_operation.go.tmpl | 104 + .../go/resource_dialogflow_agent_test.go | 132 + .../resource_dialogflow_entity_type_test.go | 141 + .../resource_dialogflow_fulfillment_test.go | 135 + .../go/resource_dialogflow_intent_test.go | 209 + ...resource_healthcare_fhir_store_test.go.erb | 2 - .../go/data_source_google_iam_policy.go | 262 + .../go/iam_service_account.go.tmpl | 117 + ...esource_google_project_iam_binding_test.go | 384 + ...resource_google_project_iam_member_test.go | 252 + ...resource_google_project_iam_policy_test.go | 499 + ...ce_google_service_account_iam_test.go.tmpl | 582 + .../resource_project_service_identity.go.tmpl | 129 + ...urce_project_service_identity_test.go.tmpl | 57 + .../go/resource_sql_database_instance.go.tmpl | 2549 ++++ 203 files changed, 151198 insertions(+), 255 deletions(-) create mode 100644 mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_level_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_service_perimeter_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/alloydb/go/resource_alloydb_cluster_sweeper.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_api_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_api_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_gateway_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/apigee/go/resource_apigee_environment_type_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/artifactregistry/go/resource_artifact_registry_repository_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/backupdr/go/data_source_backup_dr_management_server.go.tmpl create mode 100644 mmv1/third_party/terraform/services/backupdr/go/data_source_backup_dr_management_server_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl create mode 100644 mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/binaryauthorization/go/resource_binary_authorization_attestor_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/binaryauthorization/go/resource_binary_authorization_policy_test.go create mode 100644 mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_resources_search_all.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_resources_search_all_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_search_all_resources.go create mode 
100644 mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_search_all_resources_test.go create mode 100644 mmv1/third_party/terraform/services/cloudbuild/go/resource_cloudbuild_worker_pool_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudfunctions/go/resource_cloudfunctions_function_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_lookup.go create mode 100644 mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_memberships.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_groups.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_membership_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_sweeper.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudrun/go/resource_cloud_run_service_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_job_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_service_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/cloudtasks/go/resource_cloud_tasks_queue_test.go create mode 100644 mmv1/third_party/terraform/services/composer/go/composer_operation.go.tmpl create mode 100644 mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_config_map.go.tmpl create mode 100644 mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_config_map_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_secret.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_secret_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/composer/go/resource_composer_environment.go.tmpl create mode 100644 mmv1/third_party/terraform/services/composer/go/resource_composer_environment_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_config_map_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_secret.go.tmpl create mode 100644 mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_secret_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/compute_backend_service_helpers.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/compute_instance_helpers.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/compute_instance_network_interface_helpers.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/compute_operation.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/compute_operation_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_addresses.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_addresses_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_image.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance.go create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_group_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_template.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_test.go.tmpl create mode 
100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_machine_types.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_node_types.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_group.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_group_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_template.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_template_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_regions.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_resource_policy.go create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_router_status.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_snapshot.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_subnetwork.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_vpn_gateway.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/data_source_google_compute_zones.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/metadata.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_attached_disk.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_autoscaler_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_backend_service_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/compute/go/resource_compute_disk_async_replication.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_disk_async_replication_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_disk_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_policy_rule_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_policy_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_forwarding_rule_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_global_address_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_global_forwarding_rule_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_global_network_endpoint_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_health_check_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_http_health_check_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_image_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl create mode 
100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager_internal_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_membership_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_migrate.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_migrate_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_network_edge_security_service_sweeper.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_network_edge_security_service_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoint_group_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoint_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoints_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_network_peering.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_network_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_node_group_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_organization_security_policy_rule_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_organization_security_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_per_instance_config_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_project_default_network_tier.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_project_metadata.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_project_metadata_item.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_autoscaler_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_disk_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_health_check_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_group_manager.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_group_manager_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_internal_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_network_endpoint_group_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_network_endpoint_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_per_instance_config_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_security_policy_rule_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_security_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_ssl_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_http_proxy_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_tcp_proxy_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_region_url_map_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_router_bgp_peer_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_router_interface.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_router_interface_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_router_nat_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_router_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy_rule_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_shared_vpc_service_project.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_ssl_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_target_instance_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_target_pool.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_target_pool_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_target_ssl_proxy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_url_map_test.go create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_usage_export_bucket.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/security_policy_association_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/services/container/go/container_operation.go.tmpl create mode 100644 mmv1/third_party/terraform/services/container/go/node_config.go.tmpl create mode 100644 mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl 
create mode 100644 mmv1/third_party/terraform/services/container/go/resource_container_cluster_internal_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl create mode 100644 mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/container/go/resource_container_node_pool.go.tmpl create mode 100644 mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/containeranalysis/go/resource_container_analysis_note_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_migrate.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_job.go create mode 100644 mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_job_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/go/iam_dataproc_cluster.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/go/iam_dataproc_job.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go create mode 100644 mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_job.go create mode 100644 mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_job_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_workflow_template_migrate.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_workflow_template_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataprocmetastore/go/data_source_dataproc_metastore_service.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dataprocmetastore/go/data_source_dataproc_metastore_service_test.go create mode 100644 mmv1/third_party/terraform/services/dataprocmetastore/go/dataproc_metastore_service_diff_supress.go create mode 100644 mmv1/third_party/terraform/services/dataprocmetastore/go/resource_dataproc_metastore_service_test.go create mode 100644 mmv1/third_party/terraform/services/deploymentmanager/go/deployment_manager_operation.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go create mode 100644 mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_entity_type_test.go create mode 100644 mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_fulfillment_test.go create mode 100644 mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_intent_test.go create mode 100644 mmv1/third_party/terraform/services/resourcemanager/go/data_source_google_iam_policy.go create mode 100644 mmv1/third_party/terraform/services/resourcemanager/go/iam_service_account.go.tmpl create mode 100644 mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go create mode 100644 mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go create mode 100644 mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go create mode 100644 mmv1/third_party/terraform/services/resourcemanager/go/resource_google_service_account_iam_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl diff --git a/mmv1/main.go b/mmv1/main.go index 0ef3e24c2b30..284ab3383b0c 100644 --- a/mmv1/main.go +++ b/mmv1/main.go @@ -33,6 +33,9 @@ var yamlMode = flag.Bool("yaml", false, "copy text over from ruby yaml to go yam // Example usage: --template var templateMode = flag.Bool("template", false, "copy templates over from .erb to go .tmpl") +// Example usage: --handwritten +var handwrittenMode = flag.Bool("handwritten", false, "copy handwritten files over from .erb to go .tmpl") + func main() { flag.Parse() @@ -46,6 +49,10 @@ func main() { convertTemplates() } + if *handwrittenMode { + convertAllHandwrittenFiles() + } + if outputPath == nil || *outputPath == "" { log.Printf("No output path specified, exiting") return diff --git a/mmv1/provider/template_data.go b/mmv1/provider/template_data.go index cf8aa1898400..7f7105eb38da 100644 --- a/mmv1/provider/template_data.go +++ b/mmv1/provider/template_data.go @@ -21,6 +21,7 @@ import ( "os" "os/exec" "path/filepath" + "strings" "text/template" @@ -201,7 +202,7 @@ func (td *TemplateData) GenerateFile(filePath, templatePath string, input any, g glog.Exit(err) } - if goFormat { + if goFormat && !strings.Contains(templatePath, "third_party/terraform") { cmd := exec.Command("goimports", "-w", filePath) if err := cmd.Run(); err != nil { log.Fatal(err) diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 4e22602653d7..893c98c36587 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -990,22 +990,22 @@ func languageFromFilename(filename string) string { } } -// -// # Returns the id format of an object, or self_link_uri if none is explicitly defined -// # We prefer the long name of a resource as the id so that users can reference -// # resources in a standard way, and most APIs accept 
short name, long name or self_link -// def id_format(object) -// object.id_format || object.self_link_uri -// end -// -// -// # Returns the extension for DCL packages for the given version. This is needed -// # as the DCL uses "alpha" for preview resources, while we use "private" -// def dcl_version(version) -// return '' if version == 'ga' -// return '/beta' if version == 'beta' -// return '/alpha' if version == 'private' -// end -// end -//end -// +// # Returns the id format of an object, or self_link_uri if none is explicitly defined +// # We prefer the long name of a resource as the id so that users can reference +// # resources in a standard way, and most APIs accept short name, long name or self_link +// def id_format(object) +// object.id_format || object.self_link_uri +// end + +// Returns the extension for DCL packages for the given version. This is needed +// as the DCL uses "alpha" for preview resources, while we use "private" +func (t Terraform) DCLVersion() string { + switch t.TargetVersionName { + case "beta": + return "/beta" + case "private": + return "/alpha" + default: + return "" + } +} diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index b74f8d4558cf..cab87cd86b63 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -41,7 +41,7 @@ func convertTemplates() { func convertTemplate(folder string) int { rubyDir := fmt.Sprintf("templates/terraform/%s", folder) - goDir := fmt.Sprintf("templates/terraform/%s/go", folder) + goDir := fmt.Sprintf("%s/go", rubyDir) if err := os.MkdirAll(goDir, os.ModePerm); err != nil { glog.Error(fmt.Errorf("error creating directory %v: %v", goDir, err)) @@ -60,252 +60,385 @@ func convertTemplate(folder string) int { log.Fatalf("Cannot open the file: %v", file) } - // Replace {{}} - r, err := regexp.Compile(`{{(.*?)}}`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{"{{"}}$1{{"}}"}}`)) + data = 
replace(data) - // Replace primary_resource_id - r, err = regexp.Compile(`<%=\s*ctx\[:primary_resource_id\]\s*-?%>`) + goTemplate := strings.Replace(file, "erb", "tmpl", 1) + err = ioutil.WriteFile(path.Join(goDir, goTemplate), data, 0644) if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) + glog.Exit(err) } - data = r.ReplaceAll(data, []byte("{{$.PrimaryResourceId}}")) + } - // Replace vars - r, err = regexp.Compile(`<%=\s*ctx\[:vars\]\[('|")([a-zA-Z0-9_-]+)('|")\]\s*-?%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{index $.Vars "$2"}}`)) + return len(templates) +} - // Replace test_env_vars - r, err = regexp.Compile(`<%=\s*ctx\[:test_env_vars\]\[('|")([a-zA-Z0-9_-]+)('|")\]\s*-?%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{index $.TestEnvVars "$2"}}`)) +func convertAllHandwrittenFiles() int { + folders := []string{} - // Replace <% unless compiler == "terraformgoogleconversion-codegen" -%> - r, err = regexp.Compile(`<% unless compiler == "terraformgoogleconversion-codegen" -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- if ne $.Compiler "terraformgoogleconversion-codegen" }}`)) + // Get all of the service folders + servicesRoot := "third_party/terraform/services" + servicesFolders, err := ioutil.ReadDir(servicesRoot) + if err != nil { + log.Fatal(err) + } + for _, serviceFolder := range servicesFolders { + rubyDir := fmt.Sprintf("%s/%s", "third_party/terraform/services", serviceFolder.Name()) + folders = append(folders, rubyDir) + } - // Replace <% unless version == 'ga' -%> - r, err = regexp.Compile(`<% unless version == ['|"]ga['|"] -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- if ne $.TargetVersionName "ga" 
}}`)) + counts := 0 + for _, folder := range folders { + counts += convertHandwrittenFiles(folder) + } + log.Printf("%d service handwritten files in total", counts) - // Replace <% if version == 'ga' -%> - r, err = regexp.Compile(`<% if version == 'ga' -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- if eq $.TargetVersionName "ga" }}`)) + return counts +} - // Replace <% else -%> - r, err = regexp.Compile(`<% else -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- else }}`)) +func convertHandwrittenFiles(folder string) int { + goDir := fmt.Sprintf("%s/go", folder) - // Replace <%= object.name -%> - r, err = regexp.Compile(`<%= object.name -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{$.Name}}`)) + if err := os.MkdirAll(goDir, os.ModePerm); err != nil { + glog.Error(fmt.Errorf("error creating directory %v: %v", goDir, err)) + } - // Replace <%= object.resource_name -%> - r, err = regexp.Compile(`<%= object.resource_name -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{$.ResourceName}}`)) + files := find(folder, ".erb") + log.Printf("%d handwritten files in folder %s", len(files), folder) - // Replace <%=object.self_link_uri-%> - r, err = regexp.Compile(`<%=object.self_link_uri-%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{$.SelfLinkUri}}`)) + for _, file := range files { + filePath := path.Join(folder, file) - // Replace <%=object.create_uri-%> - r, err = regexp.Compile(`<%=object.create_uri-%>`) + data, err := os.ReadFile(filePath) if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) + log.Fatalf("Cannot open the file: %v", file) } - data = 
r.ReplaceAll(data, []byte(`{{$.CreateUri}}`)) - - // Replace <%=object.base_url-%> - r, err = regexp.Compile(`<%=object.base_url-%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) + data = replace(data) + goTemplate := "" + if strings.Contains(string(data), "{{") { + goTemplate = strings.Replace(file, ".erb", ".tmpl", 1) + } else { + goTemplate = strings.Replace(file, ".erb", "", 1) } - data = r.ReplaceAll(data, []byte(`{{$.BaseUrl}}`)) - - // Replace <%=object.__product.name-%> - r, err = regexp.Compile(`<%=object.__product.name-%>`) + err = ioutil.WriteFile(path.Join(goDir, goTemplate), data, 0644) if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) + glog.Exit(err) } - data = r.ReplaceAll(data, []byte(`{{$.ProductMetadata.Name}}`)) + log.Printf("Converting %s to %s", file, goTemplate) + } - // Replace <% if object.name == 'Disk' -%> - r, err = regexp.Compile(`<% if object.name == 'Disk' -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- if eq $.Name "Disk" }}`)) + return len(files) +} - // Replace <% elsif object.name == 'RegionDisk' -%> - r, err = regexp.Compile(`<% elsif object.name == 'RegionDisk' -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- else if eq $.Name "RegionDisk" }}`)) +func replace(data []byte) []byte { + // Replace {{}} + r, err := regexp.Compile(`(?s){{(.*?)}}`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{"{{"}}$1{{"}}"}}`)) - // Replace <% if object.properties.any?{ |p| p.name == "labels" } -%> - r, err = regexp.Compile(`<% if object\.properties.any\?\{ \|p\| p\.name == "labels" \} -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- if $.HasLabelsField }}`)) + // 
Replace primary_resource_id + r, err = regexp.Compile(`<%=\s*ctx\[:primary_resource_id\]\s*-?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte("{{$.PrimaryResourceId}}")) - // Replace <% if object.error_retry_predicates -%> - r, err = regexp.Compile(`<% if object.error_retry_predicates -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- if $.ErrorRetryPredicates }}`)) + // Replace vars + r, err = regexp.Compile(`<%=\s*ctx\[:vars\]\[('|")([a-zA-Z0-9_-]+)('|")\]\s*-?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{index $.Vars "$2"}}`)) - // Replace <% if object.error_abort_predicates -%> - r, err = regexp.Compile(`<% if object.error_abort_predicates -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- if $.ErrorAbortPredicates }}`)) + // Replace test_env_vars + r, err = regexp.Compile(`<%=\s*ctx\[:test_env_vars\]\[('|")([a-zA-Z0-9_-]+)('|")\]\s*-?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{index $.TestEnvVars "$2"}}`)) - // Replace <%= object.error_retry_predicates.join(',') -%> - r, err = regexp.Compile(`<%= object.error_retry_predicates.join\(','\) -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(` {{- join $.ErrorRetryPredicates "," -}} `)) + // Replace <% unless compiler == "terraformgoogleconversion-codegen" -%> + r, err = regexp.Compile(`<% unless compiler == "terraformgoogleconversion-codegen" -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if ne $.Compiler "terraformgoogleconversion-codegen" }}`)) - // Replace <%= 
object.error_abort_predicates.join(',') -%> - r, err = regexp.Compile(`<%= object.error_abort_predicates.join\(','\) -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(` {{- join $.ErrorAbortPredicates "," -}} `)) + // Replace \n\n<% unless version == 'ga' -%> + r, err = regexp.Compile(`\n\n(\s*)<% unless version == ['|"]ga['|"] (-)%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte("\n\n$1{{ if ne $.TargetVersionName `ga` $2}}")) - // Replace <%= object.name.camelize(:lower) -%> - r, err = regexp.Compile(`<%= object.name.camelize\(:lower\) -?%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{camelize $.Name "lower"}}`)) + // Replace <% unless version == 'ga' -%> + r, err = regexp.Compile(`<% unless version == ['|"]ga['|"] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if ne $.TargetVersionName "ga" }}`)) - // Replace <%= object.name.plural.camelize(:lower) -%> - r, err = regexp.Compile(`<%= object.name.plural.camelize\(:lower\) -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{camelize (plural $.Name) "lower"}}`)) + // Replace \n\n<% if version == 'ga' -%> + r, err = regexp.Compile(`\n\n(\s*)<% if version == ['|"]ga['|"] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte("\n\n$1{{ if eq $.TargetVersionName `ga` }}")) - // Replace <%= id_format(object) -%> - r, err = regexp.Compile(`<%= id_format\(object\) -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{$.GetIdFormat}}`)) + // Replace <% if version == 'ga' -%> + r, err = 
regexp.Compile(`<% if version == ['|"]ga['|"] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if eq $.TargetVersionName "ga" }}`)) - // Replace <%= prefix -%> - r, err = regexp.Compile(`<%= prefix -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{$.GetPrefix}}`)) + // Replace \n\n<% unless version.nil? || version == ['|"]ga['|"] -%> + r, err = regexp.Compile(`\n\n(\s*)<% unless version\.nil\? \|\| version == ['|"]ga['|"] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte("\n\n$1{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }}")) - // Replace <%= titlelize_property(property) -%> - r, err = regexp.Compile(`<%= titlelize_property\(property\) -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{$.TitlelizeProperty}}`)) + // Replace <% unless version.nil? || version == ['|"]ga['|"] -%> + r, err = regexp.Compile(`<% unless version\.nil\? 
\|\| version == ['|"]ga['|"] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }}`)) - // Replace <%= prop_path -%> - r, err = regexp.Compile(`<%= prop_path -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{$.PropPath}}`)) + // Replace <%= dcl_version(version) -%> + r, err = regexp.Compile(`<%= dcl_version\(version\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $.DCLVersion }}`)) - // Replace <%= go_literal(property.default_value) -%> - r, err = regexp.Compile(`<%= go_literal\(property.default_value\) -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{$.GoLiteral $.DefaultValue}}`)) + // Replace <%= version -%> + r, err = regexp.Compile(`<%= version -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $.TargetVersionName }}`)) - // Replace <%= build_expand_resource_ref('v.(string)', property, pwd) %> - r, err = regexp.Compile(`<%= build_expand_resource_ref\('v\.\(string\)', property, pwd\) %>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{ template "expandResourceRef" dict "VarName" "v.(string)" "ResourceRef" $.ResourceRef "ResourceType" $.ResourceType}}`)) + // Replace <%= "%s" %> + r, err = regexp.Compile(`<%= "%s" %>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ "%s" }}`)) - // Replace <%= build_expand_resource_ref('raw.(string)', property.item_type, pwd) %> - r, err = regexp.Compile(`<%= build_expand_resource_ref\('raw\.\(string\)', 
property\.item_type, pwd\) %>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{ template "expandResourceRef" dict "VarName" "raw.(string)" "ResourceRef" $.ItemType.ResourceRef "ResourceType" $.ItemType.ResourceType}}`)) + // Replace <% else -%> + r, err = regexp.Compile(`<% else[\s-]*%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- else }}`)) - // Replace <%- if property.is_a?(Api::Type::Integer) -%> - r, err = regexp.Compile(`<%- if property.is_a\?\(Api::Type::Integer\) -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- if $.IsA "Integer" }}`)) + // Replace <%= object.name -%> + r, err = regexp.Compile(`<%= object.name -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.Name}}`)) - // Replace <%= property.name.underscore -%> - r, err = regexp.Compile(`<%= property.name.underscore -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{underscore $.Name}}`)) + // Replace <%= object.resource_name -%> + r, err = regexp.Compile(`<%= object.resource_name -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.ResourceName}}`)) - // Replace <%= resource_type -%> - r, err = regexp.Compile(`<%= resource_type -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{$.ResourceType}}`)) + // Replace <%=object.self_link_uri-%> + r, err = regexp.Compile(`<%=object.self_link_uri-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.SelfLinkUri}}`)) - // Replace <% if 
property.is_set -%> - r, err = regexp.Compile(`<% if property.is_set -%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- if $.IsSet }}`)) + // Replace <%=object.create_uri-%> + r, err = regexp.Compile(`<%=object.create_uri-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.CreateUri}}`)) - // Replace <% end -%> - r, err = regexp.Compile(`<%[\s-]*end[\s-]*%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- end }}`)) + // Replace <%=object.base_url-%> + r, err = regexp.Compile(`<%=object.base_url-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.BaseUrl}}`)) + + // Replace <%=object.__product.name-%> + r, err = regexp.Compile(`<%=object.__product.name-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.ProductMetadata.Name}}`)) + + // Replace <% if object.name == 'Disk' -%> + r, err = regexp.Compile(`<% if object.name == 'Disk' -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if eq $.Name "Disk" }}`)) - copyRight := `{{/* + // Replace <% elsif object.name == 'RegionDisk' -%> + r, err = regexp.Compile(`<% elsif object.name == 'RegionDisk' -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- else if eq $.Name "RegionDisk" }}`)) + + // Replace <% if object.properties.any?{ |p| p.name == "labels" } -%> + r, err = regexp.Compile(`<% if object\.properties.any\?\{ \|p\| p\.name == "labels" \} -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if 
$.HasLabelsField }}`)) + + // Replace <% if object.error_retry_predicates -%> + r, err = regexp.Compile(`<% if object.error_retry_predicates -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $.ErrorRetryPredicates }}`)) + + // Replace <% if object.error_abort_predicates -%> + r, err = regexp.Compile(`<% if object.error_abort_predicates -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $.ErrorAbortPredicates }}`)) + + // Replace <%= object.error_retry_predicates.join(',') -%> + r, err = regexp.Compile(`<%= object.error_retry_predicates.join\(','\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(` {{- join $.ErrorRetryPredicates "," -}} `)) + + // Replace <%= object.error_abort_predicates.join(',') -%> + r, err = regexp.Compile(`<%= object.error_abort_predicates.join\(','\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(` {{- join $.ErrorAbortPredicates "," -}} `)) + + // Replace <%= object.name.camelize(:lower) -%> + r, err = regexp.Compile(`<%= object.name.camelize\(:lower\) -?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{camelize $.Name "lower"}}`)) + + // Replace <%= object.name.plural.camelize(:lower) -%> + r, err = regexp.Compile(`<%= object.name.plural.camelize\(:lower\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{camelize (plural $.Name) "lower"}}`)) + + // Replace <%= id_format(object) -%> + r, err = regexp.Compile(`<%= id_format\(object\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, 
[]byte(`{{$.GetIdFormat}}`)) + + // Replace <%= prefix -%> + r, err = regexp.Compile(`<%= prefix -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.GetPrefix}}`)) + + // Replace <%= titlelize_property(property) -%> + r, err = regexp.Compile(`<%= titlelize_property\(property\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.TitlelizeProperty}}`)) + + // Replace <%= prop_path -%> + r, err = regexp.Compile(`<%= prop_path -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.PropPath}}`)) + + // Replace <%= go_literal(property.default_value) -%> + r, err = regexp.Compile(`<%= go_literal\(property.default_value\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.GoLiteral $.DefaultValue}}`)) + + // Replace <%= build_expand_resource_ref('v.(string)', property, pwd) %> + r, err = regexp.Compile(`<%= build_expand_resource_ref\('v\.\(string\)', property, pwd\) %>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ template "expandResourceRef" dict "VarName" "v.(string)" "ResourceRef" $.ResourceRef "ResourceType" $.ResourceType}}`)) + + // Replace <%= build_expand_resource_ref('raw.(string)', property.item_type, pwd) %> + r, err = regexp.Compile(`<%= build_expand_resource_ref\('raw\.\(string\)', property\.item_type, pwd\) %>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ template "expandResourceRef" dict "VarName" "raw.(string)" "ResourceRef" $.ItemType.ResourceRef "ResourceType" $.ItemType.ResourceType}}`)) + + // Replace <%- if property.is_a?(Api::Type::Integer) -%> + r, err = regexp.Compile(`<%- if 
property.is_a\?\(Api::Type::Integer\) -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $.IsA "Integer" }}`)) + + // Replace <%= property.name.underscore -%> + r, err = regexp.Compile(`<%= property.name.underscore -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{underscore $.Name}}`)) + + // Replace <%= resource_type -%> + r, err = regexp.Compile(`<%= resource_type -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{$.ResourceType}}`)) + + // Replace <% if property.is_set -%> + r, err = regexp.Compile(`<% if property.is_set -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $.IsSet }}`)) + + // Replace \n\n<% end -%> + r, err = regexp.Compile(`\n\n(\s*)<%[\s-]*end[\s-]*%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte("\n\n$1{{ end }}")) + + // Replace <% end -%>\n\n + r, err = regexp.Compile(`<%[\s-]*end[\s-]*%>\n\n`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte("{{- end }}\n\n")) + + // Replace <% end -%> + r, err = regexp.Compile(`<%[\s-]*end[\s-]*%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- end }}`)) + + copyRight := `{{/* The license inside this block applies to this file Copyright 2024 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); @@ -317,35 +450,49 @@ func convertTemplate(folder string) int { See the License for the specific language governing permissions and limitations under the License. 
*/ -}}` - // Replace copyright - r, err = regexp.Compile(`(?s)<%[-\s#]*[tT]he license inside this.*?limitations under the License..*?%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(copyRight)) + // Replace copyright + r, err = regexp.Compile(`(?s)<%[-\s#]*[tT]he license inside this.*?limitations under the License..*?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(copyRight)) - // Replace comments - r, err = regexp.Compile(`(?s)<%#-?\s?(.*?)\s?-?%>`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`{{- /* $1 */}}`)) + // Replace comments + r, err = regexp.Compile(`(?s)<%#-?\s?(.*?)\s?-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{/* $1 */ -}}`)) - // Replace .erb - r, err = regexp.Compile(`\.erb`) - if err != nil { - log.Fatalf("Cannot compile the regular expression: %v", err) - } - data = r.ReplaceAll(data, []byte(`.tmpl`)) + // Replace comments + r, err = regexp.Compile(`(?s)<%#-?\s?(.*?)\s?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{/* $1 */}}`)) - goTemplate := strings.Replace(file, "erb", "tmpl", 1) - err = ioutil.WriteFile(path.Join(goDir, goTemplate), data, 0644) - if err != nil { - glog.Exit(err) - } + // Replace <% autogen_exception -%> + r, err = regexp.Compile(`<% autogen_exception -%>\n`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) } + data = r.ReplaceAll(data, []byte(``)) - return len(templates) + // Replace <%= "-" + version unless version == 'ga' -%> + r, err = regexp.Compile(`<%= "-" \+ version unless version == 'ga'[\s-]*%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = 
r.ReplaceAll(data, []byte(`{{- if ne $.TargetVersionName "ga" -}}-{{$.TargetVersionName}}{{- end }}`)) + + // Replace .erb + r, err = regexp.Compile(`\.erb`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`.tmpl`)) + + return data } func checkExceptionList(filePath string) bool { diff --git a/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_level_test.go.tmpl b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_level_test.go.tmpl new file mode 100644 index 000000000000..65d60f762176 --- /dev/null +++ b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_level_test.go.tmpl @@ -0,0 +1,240 @@ +package accesscontextmanager_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Since each test here is acting on the same organization and only one AccessPolicy +// can exist, they need to be run serially. See AccessPolicy for the test runner. 
+ +func testAccAccessContextManagerAccessLevel_basicTest(t *testing.T) { + org := envvar.GetTestOrgFromEnv(t) + vpcName := fmt.Sprintf("test-vpc-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAccessContextManagerAccessLevelDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAccessContextManagerAccessLevel_basic(org, "my policy", "level", vpcName), + }, + { + ResourceName: "google_access_context_manager_access_level.test-access", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAccessContextManagerAccessLevel_basicUpdated(org, "my new policy", "level"), + }, + { + ResourceName: "google_access_context_manager_access_level.test-access", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAccessContextManagerAccessLevel_fullTest(t *testing.T) { + org := envvar.GetTestOrgFromEnv(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAccessContextManagerAccessLevelDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAccessContextManagerAccessLevel_full(org, "my policy", "level"), + }, + { + ResourceName: "google_access_context_manager_access_level.test-access", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAccessContextManagerAccessLevelDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_access_context_manager_access_level" { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}AccessContextManagerBasePath{{"}}"}}{{"{{"}}name{{"}}"}}") + if err 
!= nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("AccessLevel still exists at %s", url) + } + } + + return nil + } +} + +func testAccAccessContextManagerAccessLevel_customTest(t *testing.T) { + org := envvar.GetTestOrgFromEnv(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAccessContextManagerAccessLevelDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAccessContextManagerAccessLevel_custom(org, "my policy", "level"), + }, + { + ResourceName: "google_access_context_manager_access_level.test-access", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAccessContextManagerAccessLevel_basic(org, policyTitle, levelTitleName, vpcName string) string { + return fmt.Sprintf(` +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" +} + +resource "google_access_context_manager_access_level" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/%s" + title = "%s" + description = "hello" + basic { + combining_function = "AND" + conditions { + ip_subnetworks = ["192.0.4.0/24"] + } + } +} + +resource "google_compute_network" "vpc_network" { + name = "%s" +} + +resource "google_access_context_manager_access_level" "test-access2" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/%s2" + title = "%s2" + description = "hello2" + basic { + combining_function = "AND" + 
conditions { + vpc_network_sources { + vpc_subnetwork { + network = "//compute.googleapis.com/${google_compute_network.vpc_network.id}" + vpc_ip_subnetworks = ["20.0.5.0/24"] + } + } + } + } +} + +`, org, policyTitle, levelTitleName, levelTitleName, vpcName, levelTitleName, levelTitleName) +} + +func testAccAccessContextManagerAccessLevel_custom(org, policyTitle, levelTitleName string) string { + return fmt.Sprintf(` +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" +} + +resource "google_access_context_manager_access_level" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/%s" + title = "%s" + description = "hello" + custom { + expr { + expression = "device.os_type == OsType.DESKTOP_MAC" + } + } +} +`, org, policyTitle, levelTitleName, levelTitleName) +} + +func testAccAccessContextManagerAccessLevel_basicUpdated(org, policyTitle, levelTitleName string) string { + return fmt.Sprintf(` +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" +} + +resource "google_access_context_manager_access_level" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/%s" + title = "%s" + description = "hello" + basic { + combining_function = "OR" + conditions { + ip_subnetworks = ["192.0.2.0/24"] + } + } +} +`, org, policyTitle, levelTitleName, levelTitleName) +} + +func testAccAccessContextManagerAccessLevel_full(org, policyTitle, levelTitleName string) string { + return fmt.Sprintf(` +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" +} + +resource 
"google_access_context_manager_access_level" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/%s" + title = "%s" + description = "hello" + basic { + combining_function = "AND" + conditions { + ip_subnetworks = ["192.0.4.0/24"] + members = ["user:test@google.com", "user:test2@google.com"] + negate = false + device_policy { + require_screen_lock = false + require_admin_approval = false + require_corp_owned = true + os_constraints { + os_type = "DESKTOP_CHROME_OS" + require_verified_chrome_os = true + } + } + regions = [ + "IT", + "US", + ] + } + } +} +`, org, policyTitle, levelTitleName, levelTitleName) +} diff --git a/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl new file mode 100644 index 000000000000..64b32f9390a6 --- /dev/null +++ b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_access_policy_test.go.tmpl @@ -0,0 +1,145 @@ +package accesscontextmanager_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Since each test here is acting on the same organization and only one AccessPolicy +// can exist, they need to be run serially +func TestAccAccessContextManager(t *testing.T) { + testCases := map[string]func(t *testing.T){ + "access_policy": 
testAccAccessContextManagerAccessPolicy_basicTest, + "access_policy_scoped": testAccAccessContextManagerAccessPolicy_scopedTest, + "service_perimeter": testAccAccessContextManagerServicePerimeter_basicTest, + "service_perimeter_update": testAccAccessContextManagerServicePerimeter_updateTest, + "service_perimeter_resource": testAccAccessContextManagerServicePerimeterResource_basicTest, + "service_perimeter_dry_run_resource": testAccAccessContextManagerServicePerimeterResource_basicTest, + "access_level": testAccAccessContextManagerAccessLevel_basicTest, + "access_level_full": testAccAccessContextManagerAccessLevel_fullTest, + "access_level_custom": testAccAccessContextManagerAccessLevel_customTest, + "access_levels": testAccAccessContextManagerAccessLevels_basicTest, + "access_level_condition": testAccAccessContextManagerAccessLevelCondition_basicTest, + "service_perimeter_egress_policy": testAccAccessContextManagerServicePerimeterEgressPolicy_basicTest, + "service_perimeter_ingress_policy": testAccAccessContextManagerServicePerimeterIngressPolicy_basicTest, + "service_perimeters": testAccAccessContextManagerServicePerimeters_basicTest, + "gcp_user_access_binding": testAccAccessContextManagerGcpUserAccessBinding_basicTest, + "authorized_orgs_desc": testAccAccessContextManagerAuthorizedOrgsDesc_basicTest, + } + + for name, tc := range testCases { + // shadow the tc variable into scope so that when + // the loop continues, if t.Run hasn't executed tc(t) + // yet, we don't have a race condition + // see https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } +} + +func testAccAccessContextManagerAccessPolicy_basicTest(t *testing.T) { + org := envvar.GetTestOrgFromEnv(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckAccessContextManagerAccessPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAccessContextManagerAccessPolicy_basic(org, "my policy"), + }, + { + ResourceName: "google_access_context_manager_access_policy.test-access", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAccessContextManagerAccessPolicy_basic(org, "my new policy"), + }, + { + ResourceName: "google_access_context_manager_access_policy.test-access", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAccessContextManagerAccessPolicyDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_access_context_manager_access_policy" { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}AccessContextManagerBasePath{{"}}"}}accessPolicies/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("AccessPolicy still exists at %s", url) + } + } + + return nil + } +} + +func testAccAccessContextManagerAccessPolicy_basic(org, title string) string { + return fmt.Sprintf(` +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" +} +`, org, title) +} + +func testAccAccessContextManagerAccessPolicy_scopedTest(t *testing.T) { + org := envvar.GetTestOrgFromEnv(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAccessContextManagerAccessPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccAccessContextManagerAccessPolicy_scoped(org, "scoped policy"), + }, + { + ResourceName: "google_access_context_manager_access_policy.test-access", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAccessContextManagerAccessPolicy_scoped(org, title string) string { + return fmt.Sprintf(` +data "google_project" "project" { +} +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" + scopes = ["projects/${data.google_project.project.number}"] +} +`, org, title) +} diff --git a/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_service_perimeter_test.go.tmpl b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_service_perimeter_test.go.tmpl new file mode 100644 index 000000000000..d87947ff8187 --- /dev/null +++ b/mmv1/third_party/terraform/services/accesscontextmanager/go/resource_access_context_manager_service_perimeter_test.go.tmpl @@ -0,0 +1,412 @@ +package accesscontextmanager_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Since each test here is acting on the same organization and only one AccessPolicy +// can exist, they need to be run serially. See AccessPolicy for the test runner. 
+func testAccAccessContextManagerServicePerimeter_basicTest(t *testing.T) { + org := envvar.GetTestOrgFromEnv(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAccessContextManagerServicePerimeterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAccessContextManagerServicePerimeter_basic(org, "my policy", "level", "perimeter"), + }, + { + ResourceName: "google_access_context_manager_service_perimeter.test-access", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccAccessContextManagerServicePerimeter_updateTest(t *testing.T) { + org := envvar.GetTestOrgFromEnv(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckAccessContextManagerServicePerimeterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccAccessContextManagerServicePerimeter_basic(org, "my policy", "level", "perimeter"), + }, + { + ResourceName: "google_access_context_manager_service_perimeter.test-access", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAccessContextManagerServicePerimeter_update(org, "my policy", "level", "perimeter"), + }, + { + ResourceName: "google_access_context_manager_service_perimeter.test-access", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAccessContextManagerServicePerimeter_updateAllowed(org, "my policy", "level", "perimeter"), + }, + { + ResourceName: "google_access_context_manager_service_perimeter.test-access", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccAccessContextManagerServicePerimeter_updateDryrun(org, "my policy", "level", "perimeter"), + }, + { + ResourceName: "google_access_context_manager_service_perimeter.test-access", + ImportState: true, 
+ ImportStateVerify: true, + }, + { + Config: testAccAccessContextManagerServicePerimeter_updateAllowed(org, "my policy", "level", "perimeter"), + }, + { + ResourceName: "google_access_context_manager_service_perimeter.test-access", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckAccessContextManagerServicePerimeterDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_access_context_manager_service_perimeter" { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}AccessContextManagerBasePath{{"}}"}}{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("ServicePerimeter still exists at %s", url) + } + } + + return nil + } +} + +func testAccAccessContextManagerServicePerimeter_basic(org, policyTitle, levelTitleName, perimeterTitleName string) string { + return fmt.Sprintf(` +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" +} + +resource "google_access_context_manager_access_level" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/%s" + title = "%s" + description = "hello" + basic { + combining_function = "AND" + conditions { + ip_subnetworks = ["192.0.4.0/24"] + } + } +} + +resource "google_access_context_manager_service_perimeter" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = 
"accessPolicies/${google_access_context_manager_access_policy.test-access.name}/servicePerimeters/%s" + title = "%s" + perimeter_type = "PERIMETER_TYPE_REGULAR" + status { + restricted_services = ["storage.googleapis.com"] + } +} +`, org, policyTitle, levelTitleName, levelTitleName, perimeterTitleName, perimeterTitleName) +} + +func testAccAccessContextManagerServicePerimeter_update(org, policyTitle, levelTitleName, perimeterTitleName string) string { + return fmt.Sprintf(` +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" +} + +resource "google_access_context_manager_access_level" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/%s" + title = "%s" + description = "hello" + basic { + combining_function = "AND" + conditions { + ip_subnetworks = ["192.0.4.0/24"] + } + } +} + +resource "google_access_context_manager_service_perimeter" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/servicePerimeters/%s" + title = "%s" + perimeter_type = "PERIMETER_TYPE_REGULAR" + status { + restricted_services = ["bigquery.googleapis.com"] + access_levels = [google_access_context_manager_access_level.test-access.name] + } +} +`, org, policyTitle, levelTitleName, levelTitleName, perimeterTitleName, perimeterTitleName) +} + +func testAccAccessContextManagerServicePerimeter_updateAllowed(org, policyTitle, levelTitleName, perimeterTitleName string) string { + return fmt.Sprintf(` +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" +} + +resource "google_access_context_manager_access_level" "test-access" { + parent = 
"accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/%s" + title = "%s" + description = "hello" + basic { + combining_function = "AND" + conditions { + ip_subnetworks = ["192.0.4.0/24"] + } + } +} + +resource "google_access_context_manager_service_perimeter" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/servicePerimeters/%s" + title = "%s" + perimeter_type = "PERIMETER_TYPE_REGULAR" + use_explicit_dry_run_spec = true + spec { + restricted_services = ["bigquery.googleapis.com", "storage.googleapis.com"] + access_levels = [google_access_context_manager_access_level.test-access.name] + + vpc_accessible_services { + enable_restriction = true + allowed_services = ["bigquery.googleapis.com", "storage.googleapis.com"] + } + + ingress_policies { + ingress_from { + sources { + access_level = google_access_context_manager_access_level.test-access.name + } + identity_type = "ANY_IDENTITY" + } + + ingress_to { + resources = [ "*" ] + operations { + service_name = "bigquery.googleapis.com" + + method_selectors { + method = "BigQueryStorage.ReadRows" + } + + method_selectors { + method = "TableService.ListTables" + } + + method_selectors { + permission = "bigquery.jobs.get" + } + } + + operations { + service_name = "storage.googleapis.com" + + method_selectors { + method = "google.storage.objects.create" + } + } + } + } + ingress_policies { + ingress_from { + identities = ["user:test@google.com"] + } + ingress_to { + resources = ["*"] + } + } + + egress_policies { + egress_from { + identity_type = "ANY_USER_ACCOUNT" + sources { + access_level = google_access_context_manager_access_level.test-access.name + } + source_restriction = "SOURCE_RESTRICTION_ENABLED" + } + egress_to { + operations { + 
service_name = "bigquery.googleapis.com" + method_selectors { + permission = "externalResource.read" + } + } + external_resources = ["s3://bucket1"] + } + } + egress_policies { + egress_from { + identities = ["user:test@google.com"] + } + egress_to { + resources = ["*"] + } + } + } + status { + restricted_services = ["bigquery.googleapis.com", "storage.googleapis.com"] + access_levels = [google_access_context_manager_access_level.test-access.name] + + vpc_accessible_services { + enable_restriction = true + allowed_services = ["bigquery.googleapis.com", "storage.googleapis.com"] + } + + ingress_policies { + ingress_from { + sources { + access_level = google_access_context_manager_access_level.test-access.name + } + identity_type = "ANY_IDENTITY" + } + + ingress_to { + resources = [ "*" ] + operations { + service_name = "bigquery.googleapis.com" + + method_selectors { + method = "BigQueryStorage.ReadRows" + } + + method_selectors { + method = "TableService.ListTables" + } + + method_selectors { + permission = "bigquery.jobs.get" + } + } + + operations { + service_name = "storage.googleapis.com" + + method_selectors { + method = "google.storage.objects.create" + } + } + } + } + ingress_policies { + ingress_from { + identities = ["user:test@google.com"] + } + ingress_to { + resources = ["*"] + } + } + + egress_policies { + egress_from { + identity_type = "ANY_USER_ACCOUNT" + sources { + access_level = google_access_context_manager_access_level.test-access.name + } + source_restriction = "SOURCE_RESTRICTION_ENABLED" + } + egress_to { + operations { + service_name = "bigquery.googleapis.com" + method_selectors { + permission = "externalResource.read" + } + } + external_resources = ["s3://bucket1"] + } + } + egress_policies { + egress_from { + identities = ["user:test@google.com"] + } + egress_to { + resources = ["*"] + } + } + } +} +`, org, policyTitle, levelTitleName, levelTitleName, perimeterTitleName, perimeterTitleName) +} + +func 
testAccAccessContextManagerServicePerimeter_updateDryrun(org, policyTitle, levelTitleName, perimeterTitleName string) string { + return fmt.Sprintf(` +resource "google_access_context_manager_access_policy" "test-access" { + parent = "organizations/%s" + title = "%s" +} + +resource "google_access_context_manager_access_level" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/accessLevels/%s" + title = "%s" + description = "hello" + basic { + combining_function = "AND" + conditions { + ip_subnetworks = ["192.0.4.0/24"] + } + } +} + +resource "google_access_context_manager_service_perimeter" "test-access" { + parent = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}" + name = "accessPolicies/${google_access_context_manager_access_policy.test-access.name}/servicePerimeters/%s" + title = "%s" + perimeter_type = "PERIMETER_TYPE_REGULAR" + status { + restricted_services = ["bigquery.googleapis.com"] + } + + spec { + restricted_services = ["storage.googleapis.com"] + access_levels = [google_access_context_manager_access_level.test-access.name] + } + + use_explicit_dry_run_spec = true +} +`, org, policyTitle, levelTitleName, levelTitleName, perimeterTitleName, perimeterTitleName) +} + diff --git a/mmv1/third_party/terraform/services/alloydb/go/resource_alloydb_cluster_sweeper.go.tmpl b/mmv1/third_party/terraform/services/alloydb/go/resource_alloydb_cluster_sweeper.go.tmpl new file mode 100644 index 000000000000..b87f1df6a66e --- /dev/null +++ b/mmv1/third_party/terraform/services/alloydb/go/resource_alloydb_cluster_sweeper.go.tmpl @@ -0,0 +1,126 @@ +package alloydb + +import ( + "context" + "fmt" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + 
"github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("AlloydbCluster", testSweepAlloydbCluster) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepAlloydbCluster(region string) error { + resourceName := "AlloydbCluster" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + // manual patch: use aggregated list instead of sweeper-specific location. This will clear secondary clusters. 
+{{- if ne $.TargetVersionName "ga" }} + listTemplate := strings.Split("https://alloydb.googleapis.com/v1beta/projects/{{"{{"}}project{{"}}"}}/locations/-/clusters", "?")[0] +{{- else }} + listTemplate := strings.Split("https://alloydb.googleapis.com/v1/projects/{{"{{"}}project{{"}}"}}/locations/-/clusters", "?")[0] +{{- end }} + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["clusters"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + + // manual patch: use raw name for url instead of constructing it, so that resource locations are supplied through aggregated list + // manual patch: Using the force=true ensures that we delete instances as well. 
+ name := obj["name"].(string) + shortname := tpgresource.GetResourceNameFromSelfLink(name) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(shortname) { + nonPrefixCount++ + continue + } + + {{ if ne $.TargetVersionName `ga` -}} + deleteTemplate := "https://alloydb.googleapis.com/v1beta/%s?force=true" + {{- else }} + deleteTemplate := "https://alloydb.googleapis.com/v1/%s?force=true" + {{- end }} + deleteUrl := fmt.Sprintf(deleteTemplate, name) + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", name, shortname) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_api_config_test.go.tmpl b/mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_api_config_test.go.tmpl new file mode 100644 index 000000000000..6a8d4d14f407 --- /dev/null +++ b/mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_api_config_test.go.tmpl @@ -0,0 +1,106 @@ +package apigateway_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccApiGatewayApiConfig_apigatewayApiConfigBasicExampleUpdated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, 
+ ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckApiGatewayApiConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApiGatewayApiConfig_apigatewayApiConfigBasicExample(context), + }, + { + Config: testAccApiGatewayApiConfig_apigatewayApiConfigBasicExampleUpdated(context), + }, + }, + }) +} + +func TestAccApiGatewayApiConfig_generatedPrefix(t *testing.T) { + // Random generated id within resource + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckApiGatewayApiConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApiGatewayApiConfig_generatedPrefix(context), + }, + }, + }) +} + +func testAccApiGatewayApiConfig_apigatewayApiConfigBasicExampleUpdated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_api_gateway_api" "api_cfg" { + provider = google-beta + api_id = "tf-test-api-cfg%{random_suffix}" +} + +resource "google_api_gateway_api_config" "api_cfg" { + provider = google-beta + api = google_api_gateway_api.api_cfg.api_id + api_config_id = "tf-test-api-cfg%{random_suffix}" + display_name = "MM Dev API Config" + labels = { + environment = "dev" + } + + openapi_documents { + document { + path = "spec.yaml" + contents = filebase64("test-fixtures/openapi.yaml") + } + } +} +`, context) +} + +func testAccApiGatewayApiConfig_generatedPrefix(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_api_gateway_api" "api_cfg" { + provider = google-beta + api_id = "tf-test-api-cfg%{random_suffix}" +} + +resource "google_api_gateway_api_config" "api_cfg" { + provider = google-beta + api = google_api_gateway_api.api_cfg.api_id + api_config_id_prefix = "tf-test-" 
+ display_name = "MM Dev API Config" + labels = { + environment = "dev" + } + + openapi_documents { + document { + path = "spec.yaml" + contents = filebase64("test-fixtures/openapi.yaml") + } + } +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_api_test.go.tmpl b/mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_api_test.go.tmpl new file mode 100644 index 000000000000..3ceb89719d95 --- /dev/null +++ b/mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_api_test.go.tmpl @@ -0,0 +1,46 @@ +package apigateway_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccApiGatewayApi_apigatewayApiBasicExampleUpdated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckApiGatewayApiDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApiGatewayApi_apigatewayApiBasicExample(context), + }, + { + Config: testAccApiGatewayApi_apigatewayApiBasicExampleUpdated(context), + }, + }, + }) +} + +func testAccApiGatewayApi_apigatewayApiBasicExampleUpdated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_api_gateway_api" "api" { + provider = google-beta + api_id = "api%{random_suffix}" + display_name = "Magical API" + labels = { + environment = "dev" + } +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_gateway_test.go.tmpl b/mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_gateway_test.go.tmpl new file mode 100644 index 
000000000000..ea707d273b4e --- /dev/null +++ b/mmv1/third_party/terraform/services/apigateway/go/resource_api_gateway_gateway_test.go.tmpl @@ -0,0 +1,67 @@ +package apigateway_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccApiGatewayGateway_apigatewayGatewayBasicExampleUpdated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckApiGatewayGatewayDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApiGatewayGateway_apigatewayGatewayBasicExample(context), + }, + { + Config: testAccApiGatewayGateway_apigatewayGatewayBasicExampleUpdated(context), + }, + }, + }) +} + +func testAccApiGatewayGateway_apigatewayGatewayBasicExampleUpdated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_api_gateway_api" "api_gw" { + provider = google-beta + api_id = "tf-test-api-gw%{random_suffix}" +} + +resource "google_api_gateway_api_config" "api_gw" { + provider = google-beta + api = google_api_gateway_api.api_gw.api_id + api_config_id = "tf-test-api-gw%{random_suffix}" + lifecycle { + create_before_destroy = true + } + + openapi_documents { + document { + path = "spec.yaml" + contents = filebase64("test-fixtures/openapi.yaml") + } + } +} + +resource "google_api_gateway_gateway" "api_gw" { + provider = google-beta + api_config = google_api_gateway_api_config.api_gw.id + gateway_id = "tf-test-api-gw%{random_suffix}" + display_name = "MM Dev API Gateway" + labels = { + environment = "dev" + } +} +`, context) +} +{{- end }} diff --git 
a/mmv1/third_party/terraform/services/apigee/go/resource_apigee_environment_type_test.go.tmpl b/mmv1/third_party/terraform/services/apigee/go/resource_apigee_environment_type_test.go.tmpl new file mode 100644 index 000000000000..f28376428e1e --- /dev/null +++ b/mmv1/third_party/terraform/services/apigee/go/resource_apigee_environment_type_test.go.tmpl @@ -0,0 +1,177 @@ +package apigee_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccApigeeEnvironment_apigeeEnvironmentPatchUpdateTestExampleUpdate(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckApigeeEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccApigeeEnvironment_apigeeEnvironmentPatchUpdateTestExample(context), + }, + { + ResourceName: "google_apigee_environment.apigee_environment", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"org_id"}, + }, + { + Config: testAccApigeeEnvironment_apigeeEnvironmentPatchUpdateTestExampleUpdate(context), + }, + { + ResourceName: "google_apigee_environment.apigee_environment", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"org_id"}, + }, + }, + }) +} + +func testAccApigeeEnvironment_apigeeEnvironmentPatchUpdateTestExampleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + provider = google-beta + + 
project_id = "tf-test%{random_suffix}" + name = "tf-test%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} + +resource "google_project_service" "apigee" { + provider = google-beta + + project = google_project.project.project_id + service = "apigee.googleapis.com" +} + +resource "google_project_service" "compute" { + provider = google-beta + + project = google_project.project.project_id + service = "compute.googleapis.com" +} + +resource "google_project_service" "servicenetworking" { + provider = google-beta + + project = google_project.project.project_id + service = "servicenetworking.googleapis.com" +} + +resource "google_project_service" "kms" { + provider = google-beta + + project = google_project.project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_compute_network" "apigee_network" { + provider = google-beta + + name = "apigee-network" + project = google_project.project.project_id + depends_on = [google_project_service.compute] +} + +resource "google_compute_global_address" "apigee_range" { + provider = google-beta + + name = "tf-test-apigee-range%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.apigee_network.id + project = google_project.project.project_id +} + +resource "google_service_networking_connection" "apigee_vpc_connection" { + provider = google-beta + + network = google_compute_network.apigee_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.apigee_range.name] + depends_on = [google_project_service.servicenetworking] +} + +resource "google_kms_key_ring" "apigee_keyring" { + provider = google-beta + + name = "apigee-keyring" + location = "us-central1" + project = google_project.project.project_id + depends_on = [google_project_service.kms] +} + +resource "google_kms_crypto_key" "apigee_key" { + provider = google-beta + + name = "apigee-key" + 
key_ring = google_kms_key_ring.apigee_keyring.id +} + +resource "google_project_service_identity" "apigee_sa" { + provider = google-beta + + project = google_project.project.project_id + service = google_project_service.apigee.service +} + +resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { + provider = google-beta + + crypto_key_id = google_kms_crypto_key.apigee_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" +} + +resource "google_apigee_organization" "apigee_org" { + provider = google-beta + + analytics_region = "us-central1" + project_id = google_project.project.project_id + authorized_network = google_compute_network.apigee_network.id + billing_type = "PAYG" + runtime_database_encryption_key_name = google_kms_crypto_key.apigee_key.id + + depends_on = [ + google_service_networking_connection.apigee_vpc_connection, + google_project_service.apigee, + google_kms_crypto_key_iam_member.apigee_sa_keyuser, + ] +} + +resource "google_apigee_environment" "apigee_environment" { + provider = google-beta + + org_id = google_apigee_organization.apigee_org.id + name = "tf-test%{random_suffix}" + description = "Apigee Environment" + display_name = "tf-test%{random_suffix}" + type = "INTERMEDIATE" + forward_proxy_uri = "http://test:456" +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/artifactregistry/go/resource_artifact_registry_repository_test.go.tmpl b/mmv1/third_party/terraform/services/artifactregistry/go/resource_artifact_registry_repository_test.go.tmpl new file mode 100644 index 000000000000..7219203924be --- /dev/null +++ b/mmv1/third_party/terraform/services/artifactregistry/go/resource_artifact_registry_repository_test.go.tmpl @@ -0,0 +1,296 @@ +package artifactregistry_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccArtifactRegistryRepository_update(t *testing.T) { + t.Parallel() + + repositoryID := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccArtifactRegistryRepository_update(repositoryID), + }, + { + ResourceName: "google_artifact_registry_repository.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccArtifactRegistryRepository_update2(repositoryID), + }, + { + ResourceName: "google_artifact_registry_repository.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccArtifactRegistryRepository_createMvnSnapshot(t *testing.T) { + t.Parallel() + + repositoryID := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccArtifactRegistryRepository_createMvnWithVersionPolicy(repositoryID, "SNAPSHOT"), + }, + { + ResourceName: "google_artifact_registry_repository.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccArtifactRegistryRepository_createMvnRelease(t *testing.T) { + t.Parallel() + + repositoryID := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckArtifactRegistryRepositoryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccArtifactRegistryRepository_createMvnWithVersionPolicy(repositoryID, "RELEASE"), + }, + { + ResourceName: "google_artifact_registry_repository.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccArtifactRegistryRepository_kfp(t *testing.T) { + t.Parallel() + + repositoryID := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccArtifactRegistryRepository_kfp(repositoryID), + }, + { + ResourceName: "google_artifact_registry_repository.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccArtifactRegistryRepository_update(repositoryID string) string { + return fmt.Sprintf(` +resource "google_artifact_registry_repository" "test" { + repository_id = "%s" + location = "us-central1" + description = "pre-update" + format = "DOCKER" + + labels = { + my_key = "my_val" + other_key = "other_val" + } +} +`, repositoryID) +} + +func testAccArtifactRegistryRepository_update2(repositoryID string) string { + return fmt.Sprintf(` +resource "google_artifact_registry_repository" "test" { + repository_id = "%s" + location = "us-central1" + description = "post-update" + format = "DOCKER" + + labels = { + my_key = "my_val" + other_key = "new_val" + } +} +`, repositoryID) +} + +func testAccArtifactRegistryRepository_createMvnWithVersionPolicy(repositoryID string, versionPolicy string) string { + return fmt.Sprintf(` +resource "google_artifact_registry_repository" "test" { + repository_id = "%s" + location = "us-central1" + description = "post-update" + format = "MAVEN" + maven_config { + version_policy = "%s" + } +} +`, repositoryID, 
versionPolicy) +} + +func testAccArtifactRegistryRepository_kfp(repositoryID string) string { + return fmt.Sprintf(` +resource "google_artifact_registry_repository" "test" { + repository_id = "%s" + location = "us-central1" + description = "my-kfp-repository" + format = "KFP" +} +`, repositoryID) +} + +{{ if ne $.TargetVersionName `ga` -}} + +func TestAccArtifactRegistryRepository_virtual(t *testing.T) { + t.Parallel() + + upstreamRepositoryID := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + repositoryID := fmt.Sprintf("%s-virtual", upstreamRepositoryID) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccArtifactRegistryRepository_virtual(repositoryID, upstreamRepositoryID, false), + }, + { + ResourceName: "google_artifact_registry_repository.vr-test", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccArtifactRegistryRepository_virtual(repositoryID, upstreamRepositoryID, true), + }, + { + ResourceName: "google_artifact_registry_repository.vr-test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccArtifactRegistryRepository_remote(t *testing.T) { + t.Parallel() + + repositoryID := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckArtifactRegistryRepositoryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccArtifactRegistryRepository_remote(repositoryID, "upstream"), + }, + { + ResourceName: "google_artifact_registry_repository.rr-test", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccArtifactRegistryRepository_remote(repositoryID, "docker hub"), + }, + { + 
ResourceName: "google_artifact_registry_repository.rr-test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccArtifactRegistryRepository_virtual(repositoryID string, upstreamRepositoryID string, two_policies bool) string { + policy_a := ` + upstream_policies { + id = "upstream-a" + repository = google_artifact_registry_repository.test-a.id + priority = 1 + } +` + policy_b := ` + upstream_policies { + id = "upstream-b" + repository = google_artifact_registry_repository.test-b.id + priority = 2 + } +` + if !two_policies { + policy_b = "" + } + + return fmt.Sprintf(` +resource "google_artifact_registry_repository" "test-a" { + repository_id = "%s-a" + location = "us-central1" + description = "upstream repo" + format = "DOCKER" +} +resource "google_artifact_registry_repository" "test-b" { + repository_id = "%s-b" + location = "us-central1" + description = "alt upstream repo" + format = "DOCKER" +} +resource "google_artifact_registry_repository" "vr-test" { + repository_id = "%s" + location = "us-central1" + description = "virtual repo" + format = "DOCKER" + mode = "VIRTUAL_REPOSITORY" + + virtual_repository_config { +%s +%s + } +} +`,upstreamRepositoryID, upstreamRepositoryID, repositoryID, policy_a, policy_b) +} + +func testAccArtifactRegistryRepository_remote(repositoryID string, remoteDescription string) string { + return fmt.Sprintf(` +resource "google_artifact_registry_repository" "rr-test" { + repository_id = "%s" + location = "us-central1" + description = "remote repo" + format = "DOCKER" + mode = "REMOTE_REPOSITORY" + + remote_repository_config { + description = "%s" + docker_repository { + public_repository = "DOCKER_HUB" + } + } +} +`, repositoryID, remoteDescription) +} + +{{ end }} + diff --git a/mmv1/third_party/terraform/services/backupdr/go/data_source_backup_dr_management_server.go.tmpl b/mmv1/third_party/terraform/services/backupdr/go/data_source_backup_dr_management_server.go.tmpl new file mode 100644 index 
000000000000..d2eef4e2eb3e --- /dev/null +++ b/mmv1/third_party/terraform/services/backupdr/go/data_source_backup_dr_management_server.go.tmpl @@ -0,0 +1,104 @@ +package backupdr +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "strings" +) + +func DataSourceGoogleCloudBackupDRService() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceBackupDRManagementServer().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location") + + return &schema.Resource{ + Read: dataSourceGoogleCloudBackupDRServiceRead, + Schema: dsSchema, + } +} +func flattenBackupDRManagementServerName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenBackupDRManagementServerResourceResp(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) map[string]interface{} { + if v == nil { + fmt.Printf("Interface is nil: %s", v) + } + fmt.Printf("Interface is : %s", v) + l := v.([]interface{}) + for _, raw := range l { + // Management server is a singleton resource. It is only present in one location per project. Hence returning only resource present. 
+ return flattenBackupDRManagementServerResource(raw, d, config) + } + return nil +} +func flattenBackupDRManagementServerResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) map[string]interface{} { + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = flattenBackupDRManagementServerType(original["type"], d, config) + transformed["networks"] = flattenBackupDRManagementServerNetworks(original["networks"], d, config) + transformed["oauth2ClientId"] = flattenBackupDRManagementServerOauth2ClientId(original["oauth2ClientId"], d, config) + transformed["managementUri"] = flattenBackupDRManagementServerManagementUri(original["managementUri"], d, config) + transformed["name"] = flattenBackupDRManagementServerName(original["name"], d, config) + return transformed +} + +func dataSourceGoogleCloudBackupDRServiceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + billingProject := project + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}BackupDRBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/managementServers") + if err != nil { + return err + } + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + resourceResponse := flattenBackupDRManagementServerResourceResp(res["managementServers"], d, config) + if err := d.Set("project", project); err != nil { + return 
fmt.Errorf("Error reading ManagementServer: %s", err) + } + + if err := d.Set("type", resourceResponse["type"]); err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + if err := d.Set("networks", resourceResponse["networks"]); err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + if err := d.Set("oauth2_client_id", resourceResponse["oauth2ClientId"]); err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + if err := d.Set("management_uri", resourceResponse["managementUri"]); err != nil { + return fmt.Errorf("Error reading ManagementServer: %s", err) + } + + id := fmt.Sprintf("%s", resourceResponse["name"]) + d.SetId(id) + name := id[strings.LastIndex(id, "/")+1:] + d.Set("name", name) + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/backupdr/go/data_source_backup_dr_management_server_test.go.tmpl b/mmv1/third_party/terraform/services/backupdr/go/data_source_backup_dr_management_server_test.go.tmpl new file mode 100644 index 000000000000..722300dd9262 --- /dev/null +++ b/mmv1/third_party/terraform/services/backupdr/go/data_source_backup_dr_management_server_test.go.tmpl @@ -0,0 +1,101 @@ +package backupdr_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "fmt" + "strings" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + +) + +func TestAccDataSourceGoogleBackupDRManagementServer_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "backupdr-managementserver-basic"), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBackupDRManagementServerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleBackupDRManagementServer_basic(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_backup_dr_management_server.foo", "google_backup_dr_management_server.foo"), + ), + }, + }, + }) +} + +func testAccCheckBackupDRManagementServerDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_backup_dr_management_server" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}BackupDRBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/managementServers/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("BackupDRManagementServer still exists at %s", url) + } + } + + return nil + } +} + + +func testAccDataSourceGoogleBackupDRManagementServer_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_network" "default" { + name = "%{network_name}" +} + +resource "google_backup_dr_management_server" "foo" { + location = "us-central1" + name = "tf-test-management-server%{random_suffix}" + type = "BACKUP_RESTORE" + networks { + network = data.google_compute_network.default.id + peering_mode = "PRIVATE_SERVICE_ACCESS" + } +} 
+ +data "google_backup_dr_management_server" "foo" { + location = "us-central1" + depends_on = [ google_backup_dr_management_server.foo ] +} +`, context) +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl new file mode 100644 index 000000000000..e8471dd74584 --- /dev/null +++ b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_dataset_test.go.tmpl @@ -0,0 +1,867 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package bigquery_test + +import ( + "fmt" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "google.golang.org/api/bigquery/v2" +) + +func TestAccBigQueryDataset_basic(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset_withoutLabels(datasetID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "labels.%"), + resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "effective_labels.%"), + ), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBigQueryDataset(datasetID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), + 
resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "foo"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "foo"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "3600000"), + ), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + // The labels field in the state is decided by the configuration. + // During importing, the configuration is unavailable, so the labels field in the state after importing is empty. + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDatasetUpdated(datasetID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "bar"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "7200000"), + + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "bar"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "7200000"), + ), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDatasetUpdated2(datasetID), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + 
{ + Config: testAccBigQueryDataset_withoutLabels(datasetID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "labels.%"), + resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "effective_labels.%"), + ), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccBigQueryDataset_withComputedLabels(t *testing.T) { + // Skip it in VCR test because of the randomness of uuid in "labels" field + // which causes the replaying mode after recording mode failing in VCR test + acctest.SkipIfVcr(t) + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + }, + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "foo"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "foo"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "3600000"), + ), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + // The labels field in the state is decided by the configuration. 
+ // During importing, the configuration is unavailable, so the labels field in the state after importing is empty. + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDatasetUpdated_withComputedLabels(datasetID), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccBigQueryDataset_withProvider5(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + oldVersion := map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.75.0", // a version that doesn't separate user defined labels and system labels + Source: "registry.terraform.io/hashicorp/google", + }, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset_withoutLabels(datasetID), + ExternalProviders: oldVersion, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "labels.%"), + resource.TestCheckNoResourceAttr("google_bigquery_dataset.test", "effective_labels.%"), + ), + }, + { + Config: testAccBigQueryDataset(datasetID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.%", "2"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.env", "foo"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "labels.default_table_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.%", "2"), + resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.env", "foo"), + 
resource.TestCheckResourceAttr("google_bigquery_dataset.test", "effective_labels.default_table_expiration_ms", "3600000"), + ), + }, + }, + }) +} + +func TestAccBigQueryDataset_withOutOfBandLabels(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + Check: addOutOfBandLabels(t, datasetID), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDatasetUpdated(datasetID), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDatasetUpdated_withOutOfBandLabels(datasetID), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccBigQueryDataset_datasetWithContents(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDatasetDeleteContents(datasetID), + Check: testAccAddTable(t, 
datasetID, tableID), + }, + { + ResourceName: "google_bigquery_dataset.contents_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"delete_contents_on_destroy", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccBigQueryDataset_access(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_access_%s", acctest.RandString(t, 10)) + otherDatasetID := fmt.Sprintf("tf_test_other_%s", acctest.RandString(t, 10)) + otherTableID := fmt.Sprintf("tf_test_other_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDatasetWithOneAccess(datasetID), + }, + { + ResourceName: "google_bigquery_dataset.access_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDatasetWithThreeAccess(datasetID), + }, + { + ResourceName: "google_bigquery_dataset.access_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDatasetWithOneAccess(datasetID), + }, + { + ResourceName: "google_bigquery_dataset.access_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDatasetWithViewAccess(datasetID, otherDatasetID, otherTableID), + }, + { + ResourceName: "google_bigquery_dataset.access_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccBigQueryDataset_regionalLocation(t *testing.T) { + t.Parallel() + + datasetID1 := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryRegionalDataset(datasetID1, "asia-south1"), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccBigQueryDataset_cmek(t *testing.T) { + t.Parallel() + + kms := acctest.BootstrapKMSKeyInLocation(t, "us") + pid := envvar.GetTestProjectFromEnv() + datasetID1 := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset_cmek(pid, datasetID1, kms.CryptoKey.Name), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccBigQueryDataset_storageBillModel(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDatasetStorageBillingModel(datasetID), + }, + { + ResourceName: "google_bigquery_dataset.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccBigQueryDataset_invalidCharacterInID(t *testing.T) { + t.Parallel() + // Not an acceptance test. 
+ acctest.SkipIfVcr(t) + + datasetID := fmt.Sprintf("tf_test_%s-with-hyphens", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + ExpectError: regexp.MustCompile("must contain only letters.+numbers.+or underscores.+"), + }, + }, + }) +} + +func TestAccBigQueryDataset_invalidLongID(t *testing.T) { + t.Parallel() + // Not an acceptance test. + acctest.SkipIfVcr(t) + + datasetSuffix := acctest.RandString(t, 10) + datasetID := fmt.Sprintf("tf_test_%s", strings.Repeat(datasetSuffix, 200)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset(datasetID), + ExpectError: regexp.MustCompile(".+cannot be greater than 1,024 characters"), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccBigQueryDataset_bigqueryDatasetResourceTags_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckBigQueryDatasetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryDataset_bigqueryDatasetResourceTags_basic(context), + }, + { + ResourceName: "google_bigquery_dataset.dataset", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryDataset_bigqueryDatasetResourceTags_update(context), + }, + { + 
ResourceName: "google_bigquery_dataset.dataset", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +{{ end }} +func testAccAddTable(t *testing.T, datasetID string, tableID string) resource.TestCheckFunc { + // Not actually a check, but adds a table independently of terraform + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + table := &bigquery.Table{ + TableReference: &bigquery.TableReference{ + DatasetId: datasetID, + TableId: tableID, + ProjectId: config.Project, + }, + } + _, err := config.NewBigQueryClient(config.UserAgent).Tables.Insert(config.Project, datasetID, table).Do() + if err != nil { + return fmt.Errorf("Could not create table") + } + return nil + } +} + +func addOutOfBandLabels(t *testing.T, datasetID string) resource.TestCheckFunc { + // Not actually a check, but adds labels independently of terraform + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + dataset, err := config.NewBigQueryClient(config.UserAgent).Datasets.Get(config.Project, datasetID).Do() + if err != nil { + return fmt.Errorf("Could not get dataset with ID %s", datasetID) + } + + dataset.Labels["outband_key"] = "test" + _, err = config.NewBigQueryClient(config.UserAgent).Datasets.Patch(config.Project, datasetID, dataset).Do() + if err != nil { + return fmt.Errorf("Could not update labele for the dataset") + } + return nil + } +} + +func testAccBigQueryDataset_withoutLabels(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "foo" + description = "This is a foo description" + location = "EU" + default_partition_expiration_ms = 3600000 + default_table_expiration_ms = 3600000 +} +`, datasetID) +} + +func testAccBigQueryDataset(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + 
friendly_name = "foo" + description = "This is a foo description" + location = "EU" + default_partition_expiration_ms = 3600000 + default_table_expiration_ms = 3600000 + + labels = { + env = "foo" + default_table_expiration_ms = 3600000 + } +} +`, datasetID) +} + +func testAccBigQueryDatasetUpdated(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "bar" + description = "This is a bar description" + location = "EU" + default_partition_expiration_ms = 7200000 + default_table_expiration_ms = 7200000 + + labels = { + env = "bar" + default_table_expiration_ms = 7200000 + } +} +`, datasetID) +} + +func testAccBigQueryDatasetUpdated_withOutOfBandLabels(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "bar" + description = "This is a bar description" + location = "EU" + default_partition_expiration_ms = 7200000 + default_table_expiration_ms = 7200000 + + labels = { + env = "bar" + default_table_expiration_ms = 7200000 + outband_key = "test-update" + } +} +`, datasetID) +} + +func testAccBigQueryDatasetUpdated2(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + # friendly_name = "bar" + description = "This is a bar description" + location = "EU" + default_partition_expiration_ms = 7200000 + default_table_expiration_ms = 7200000 + + labels = { + env = "bar" + default_table_expiration_ms = 7200000 + } +} +`, datasetID) +} + +func testAccBigQueryDatasetUpdated_withComputedLabels(datasetID string) string { + return fmt.Sprintf(` +resource "random_uuid" "test" { +} + +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + # friendly_name = "bar" + description = "This is a bar description" + location = "EU" + default_partition_expiration_ms = 7200000 + default_table_expiration_ms = 7200000 + + labels = { + env = "${random_uuid.test.result}" 
+ default_table_expiration_ms = 7200000 + } +} +`, datasetID) +} + +func testAccBigQueryDatasetDeleteContents(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "contents_test" { + dataset_id = "%s" + friendly_name = "foo" + description = "This is a foo description" + location = "EU" + default_partition_expiration_ms = 3600000 + default_table_expiration_ms = 3600000 + delete_contents_on_destroy = true + + labels = { + env = "foo" + default_table_expiration_ms = 3600000 + } +} +`, datasetID) +} + +func testAccBigQueryRegionalDataset(datasetID string, location string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "foo" + description = "This is a foo description" + location = "%s" + default_table_expiration_ms = 3600000 + + labels = { + env = "foo" + default_table_expiration_ms = 3600000 + } +} +`, datasetID, location) +} + +func testAccBigQueryDatasetWithOneAccess(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "access_test" { + dataset_id = "%s" + + access { + role = "OWNER" + user_by_email = "Joe@example.com" + } + + labels = { + env = "foo" + default_table_expiration_ms = 3600000 + } +} +`, datasetID) +} + +func testAccBigQueryDatasetWithThreeAccess(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "access_test" { + dataset_id = "%s" + + access { + role = "OWNER" + user_by_email = "Joe@example.com" + } + access { + role = "READER" + domain = "hashicorp.com" + } + access { + role = "READER" + iam_member = "allUsers" + } + + labels = { + env = "foo" + default_table_expiration_ms = 3600000 + } +} +`, datasetID) +} + +func testAccBigQueryDatasetWithViewAccess(datasetID, otherDatasetID, otherTableID string) string { + // Note that we have to add a non-view access to prevent BQ from creating 4 default + // access entries. 
+ return fmt.Sprintf(` +resource "google_bigquery_dataset" "other_dataset" { + dataset_id = "%s" +} + +resource "google_bigquery_table" "table_with_view" { + deletion_protection = false + table_id = "%s" + dataset_id = google_bigquery_dataset.other_dataset.dataset_id + + time_partitioning { + type = "DAY" + } + + view { + query = "SELECT state FROM [lookerdata:cdc.project_tycho_reports]" + use_legacy_sql = true + } +} + +resource "google_bigquery_dataset" "access_test" { + dataset_id = "%s" + + access { + role = "OWNER" + user_by_email = "Joe@example.com" + } + access { + view { + project_id = google_bigquery_dataset.other_dataset.project + dataset_id = google_bigquery_dataset.other_dataset.dataset_id + table_id = google_bigquery_table.table_with_view.table_id + } + } + + labels = { + env = "foo" + default_table_expiration_ms = 3600000 + } +} +`, otherDatasetID, otherTableID, datasetID) +} + +func testAccBigQueryDataset_cmek(pid, datasetID, kmsKey string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_kms_crypto_key_iam_member" "kms-member" { + crypto_key_id = "%s" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:bq-${data.google_project.project.number}@bigquery-encryption.iam.gserviceaccount.com" +} + +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "test" + description = "This is a test description" + location = "US" + default_table_expiration_ms = 3600000 + + default_encryption_configuration { + kms_key_name = "%s" + } + + depends_on = [google_kms_crypto_key_iam_member.kms-member] +} +`, pid, kmsKey, datasetID, kmsKey) +} + +func testAccBigQueryDatasetStorageBillingModel(datasetID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" + friendly_name = "foo" + description = "This is a foo description" + location = "EU" + default_partition_expiration_ms = 3600000 + 
default_table_expiration_ms = 3600000 + storage_billing_model = "PHYSICAL" + + labels = { + env = "foo" + default_table_expiration_ms = 3600000 + } +} +`, datasetID) +} +{{- if ne $.TargetVersionName "ga" }} + +func testAccBigQueryDataset_bigqueryDatasetResourceTags_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + provider = "google-beta" +} + +resource "google_tags_tag_key" "tag_key1" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key1%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "tf_test_tag_value1%{random_suffix}" +} + +resource "google_tags_tag_key" "tag_key2" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key2%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value2" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" + short_name = "tf_test_tag_value2%{random_suffix}" +} + +resource "google_bigquery_dataset" "dataset" { + provider = google-beta + + dataset_id = "dataset%{random_suffix}" + friendly_name = "test" + description = "This is a test description" + location = "EU" + + resource_tags = { + "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key1.short_name}" = "${google_tags_tag_value.tag_value1.short_name}" + "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key2.short_name}" = "${google_tags_tag_value.tag_value2.short_name}" + } +} +`, context) +} + +func testAccBigQueryDataset_bigqueryDatasetResourceTags_update(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + provider = "google-beta" +} + +resource "google_tags_tag_key" "tag_key1" { + provider = google-beta + parent = 
"projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key1%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "tf_test_tag_value1%{random_suffix}" +} + +resource "google_tags_tag_key" "tag_key2" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key2%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value2" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" + short_name = "tf_test_tag_value2%{random_suffix}" +} + +resource "google_bigquery_dataset" "dataset" { + provider = google-beta + + dataset_id = "dataset%{random_suffix}" + friendly_name = "test" + description = "This is a test description" + location = "EU" + + resource_tags = { + } +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl new file mode 100644 index 000000000000..b1b80240c433 --- /dev/null +++ b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table.go.tmpl @@ -0,0 +1,2961 @@ +package bigquery + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/bigquery/v2" +) + +func bigQueryTableSortArrayByName(array []interface{}) { + sort.Slice(array, func(i, k int) bool { + return 
array[i].(map[string]interface{})["name"].(string) < array[k].(map[string]interface{})["name"].(string) + }) +} + +func bigQueryArrayToMapIndexedByName(array []interface{}) map[string]interface{} { + out := map[string]interface{}{} + for _, v := range array { + name := v.(map[string]interface{})["name"].(string) + out[name] = v + } + return out +} + +func bigQueryTablecheckNameExists(jsonList []interface{}) error { + for _, m := range jsonList { + if _, ok := m.(map[string]interface{})["name"]; !ok { + return fmt.Errorf("No name in schema %+v", m) + } + } + + return nil +} + +// Compares two json's while optionally taking in a compareMapKeyVal function. +// This function will override any comparison of a given map[string]interface{} +// on a specific key value allowing for a separate equality in specific scenarios +func jsonCompareWithMapKeyOverride(key string, a, b interface{}, compareMapKeyVal func(key string, val1, val2 map[string]interface{}) bool) (bool, error) { + switch a.(type) { + case []interface{}: + arrayA := a.([]interface{}) + arrayB, ok := b.([]interface{}) + if !ok { + return false, nil + } else if len(arrayA) != len(arrayB) { + return false, nil + } + + // Sort fields by name so reordering them doesn't cause a diff. 
+ if key == "schema" || key == "fields" { + if err := bigQueryTablecheckNameExists(arrayA); err != nil { + return false, err + } + bigQueryTableSortArrayByName(arrayA) + if err := bigQueryTablecheckNameExists(arrayB); err != nil { + return false, err + } + bigQueryTableSortArrayByName(arrayB) + } + for i := range arrayA { + eq, err := jsonCompareWithMapKeyOverride(strconv.Itoa(i), arrayA[i], arrayB[i], compareMapKeyVal) + if err != nil { + return false, err + } else if !eq { + return false, nil + } + } + return true, nil + case map[string]interface{}: + objectA := a.(map[string]interface{}) + objectB, ok := b.(map[string]interface{}) + if !ok { + return false, nil + } + + var unionOfKeys map[string]bool = make(map[string]bool) + for subKey := range objectA { + unionOfKeys[subKey] = true + } + for subKey := range objectB { + unionOfKeys[subKey] = true + } + + for subKey := range unionOfKeys { + eq := compareMapKeyVal(subKey, objectA, objectB) + if !eq { + valA, ok1 := objectA[subKey] + valB, ok2 := objectB[subKey] + if !ok1 || !ok2 { + return false, nil + } + eq, err := jsonCompareWithMapKeyOverride(subKey, valA, valB, compareMapKeyVal) + if err != nil || !eq { + return false, err + } + } + } + return true, nil + case string, float64, bool, nil: + return a == b, nil + default: + log.Printf("[DEBUG] tried to iterate through json but encountered a non native type to json deserialization... 
please ensure you are passing a json object from json.Unmarshall") + return false, errors.New("unable to compare values") + } +} + +// checks if the value is within the array, only works for generics +// because objects and arrays will take the reference comparison +func valueIsInArray(value interface{}, array []interface{}) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +func bigQueryTableMapKeyOverride(key string, objectA, objectB map[string]interface{}) bool { + // we rely on the fallback to nil if the object does not have the key + valA := objectA[key] + valB := objectB[key] + switch key { + case "mode": + eq := bigQueryTableNormalizeMode(valA) == bigQueryTableNormalizeMode(valB) + return eq + case "description": + equivalentSet := []interface{}{nil, ""} + eq := valueIsInArray(valA, equivalentSet) && valueIsInArray(valB, equivalentSet) + return eq + case "type": + if valA == nil || valB == nil { + return false + } + return bigQueryTableTypeEq(valA.(string), valB.(string)) + case "policyTags": + eq := bigQueryTableNormalizePolicyTags(valA) == nil && bigQueryTableNormalizePolicyTags(valB) == nil + return eq + } + + // otherwise rely on default behavior + return false +} + +// Compare the JSON strings are equal +func bigQueryTableSchemaDiffSuppress(name, old, new string, _ *schema.ResourceData) bool { + // The API can return an empty schema which gets encoded to "null" during read. 
+	if old == "null" {
+		old = "[]"
+	}
+	var a, b interface{}
+	if err := json.Unmarshal([]byte(old), &a); err != nil {
+		log.Printf("[DEBUG] unable to unmarshal old json - %v", err)
+	}
+	if err := json.Unmarshal([]byte(new), &b); err != nil {
+		log.Printf("[DEBUG] unable to unmarshal new json - %v", err)
+	}
+
+	eq, err := jsonCompareWithMapKeyOverride(name, a, b, bigQueryTableMapKeyOverride)
+	if err != nil {
+		log.Printf("[DEBUG] %v", err)
+		log.Printf("[DEBUG] Error comparing JSON: %v, %v", old, new)
+	}
+
+	return eq
+}
+
+func bigQueryTableConnectionIdSuppress(name, old, new string, _ *schema.ResourceData) bool {
+	// API accepts connectionId in below two formats
+	// "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" or
+	// "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/connections/{{"{{"}}connection_id{{"}}"}}".
+	// but always returns "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}"
+
+	if tpgresource.IsEmptyValue(reflect.ValueOf(old)) || tpgresource.IsEmptyValue(reflect.ValueOf(new)) {
+		return false
+	}
+
+	// Old is in the dot format, and new is in the slash format.
+	// They represent the same connection if the project, location, and IDs are
+	// the same.
+	// Location should use a case-insensitive comparison.
+	dotRe := regexp.MustCompile(`(.+)\.(.+)\.(.+)`)
+	slashRe := regexp.MustCompile("projects/(.+)/(?:locations|regions)/(.+)/connections/(.+)")
+	dotMatches := dotRe.FindStringSubmatch(old)
+	slashMatches := slashRe.FindStringSubmatch(new)
+	if dotMatches != nil && slashMatches != nil {
+		sameProject := dotMatches[1] == slashMatches[1]
+		sameLocation := strings.EqualFold(dotMatches[2], slashMatches[2])
+		sameId := dotMatches[3] == slashMatches[3]
+		return sameProject && sameLocation && sameId
+	}
+
+	return false
+}
+
+func bigQueryTableTypeEq(old, new string) bool {
+	// Do case-insensitive comparison. 
https://github.com/hashicorp/terraform-provider-google/issues/9472 + oldUpper := strings.ToUpper(old) + newUpper := strings.ToUpper(new) + + equivalentSet1 := []interface{}{"INTEGER", "INT64"} + equivalentSet2 := []interface{}{"FLOAT", "FLOAT64"} + equivalentSet3 := []interface{}{"BOOLEAN", "BOOL"} + eq0 := oldUpper == newUpper + eq1 := valueIsInArray(oldUpper, equivalentSet1) && valueIsInArray(newUpper, equivalentSet1) + eq2 := valueIsInArray(oldUpper, equivalentSet2) && valueIsInArray(newUpper, equivalentSet2) + eq3 := valueIsInArray(oldUpper, equivalentSet3) && valueIsInArray(newUpper, equivalentSet3) + eq := eq0 || eq1 || eq2 || eq3 + return eq +} + +func bigQueryTableNormalizeMode(mode interface{}) string { + if mode == nil { + return "NULLABLE" + } + // Upper-case to get case-insensitive comparisons. https://github.com/hashicorp/terraform-provider-google/issues/9472 + return strings.ToUpper(mode.(string)) +} + +func bigQueryTableModeIsForceNew(old, new string) bool { + eq := old == new + reqToNull := old == "REQUIRED" && new == "NULLABLE" + return !eq && !reqToNull +} + +func bigQueryTableNormalizePolicyTags(val interface{}) interface{} { + if val == nil { + return nil + } + if policyTags, ok := val.(map[string]interface{}); ok { + // policyTags = {} is same as nil. + if len(policyTags) == 0 { + return nil + } + // policyTags = {names = []} is same as nil. + if names, ok := policyTags["names"].([]interface{}); ok && len(names) == 0 { + return nil + } + } + return val +} + +// Compares two existing schema implementations and decides if +// it is changeable.. 
pairs with a force new on not changeable +func resourceBigQueryTableSchemaIsChangeable(old, new interface{}, isExternalTable bool, topLevel bool) (bool, error) { + switch old.(type) { + case []interface{}: + arrayOld := old.([]interface{}) + arrayNew, ok := new.([]interface{}) + sameNameColumns := 0 + droppedColumns := 0 + if !ok { + // if not both arrays not changeable + return false, nil + } + if err := bigQueryTablecheckNameExists(arrayOld); err != nil { + return false, err + } + mapOld := bigQueryArrayToMapIndexedByName(arrayOld) + if err := bigQueryTablecheckNameExists(arrayNew); err != nil { + return false, err + } + mapNew := bigQueryArrayToMapIndexedByName(arrayNew) + for key := range mapNew { + // making unchangeable if an newly added column is with REQUIRED mode + if _, ok := mapOld[key]; !ok { + items := mapNew[key].(map[string]interface{}) + for k := range items { + if k == "mode" && fmt.Sprintf("%v", items[k]) == "REQUIRED" { + return false, nil + } + } + } + } + for key := range mapOld { + // dropping top level columns can happen in-place + // but this doesn't apply to external tables + if _, ok := mapNew[key]; !ok { + if !topLevel || isExternalTable { + return false, nil + } + droppedColumns += 1 + continue + } + + isChangable, err := resourceBigQueryTableSchemaIsChangeable(mapOld[key], mapNew[key], isExternalTable, false) + if err != nil || !isChangable { + return false, err + } else if isChangable && topLevel { + // top level column that exists in the new schema + sameNameColumns += 1 + } + } + // in-place column dropping alongside column additions is not allowed + // as of now because user intention can be ambiguous (e.g. 
column renaming) + newColumns := len(arrayNew) - sameNameColumns + return (droppedColumns == 0) || (newColumns == 0), nil + case map[string]interface{}: + objectOld := old.(map[string]interface{}) + objectNew, ok := new.(map[string]interface{}) + if !ok { + // if both aren't objects + return false, nil + } + var unionOfKeys map[string]bool = make(map[string]bool) + for key := range objectOld { + unionOfKeys[key] = true + } + for key := range objectNew { + unionOfKeys[key] = true + } + for key := range unionOfKeys { + valOld := objectOld[key] + valNew := objectNew[key] + switch key { + case "name": + if valOld != valNew { + return false, nil + } + case "type": + if valOld == nil || valNew == nil { + // This is invalid, so it shouldn't require a ForceNew + return true, nil + } + if !bigQueryTableTypeEq(valOld.(string), valNew.(string)) { + return false, nil + } + case "mode": + if bigQueryTableModeIsForceNew( + bigQueryTableNormalizeMode(valOld), + bigQueryTableNormalizeMode(valNew), + ) { + return false, nil + } + case "fields": + return resourceBigQueryTableSchemaIsChangeable(valOld, valNew, isExternalTable, false) + + // other parameters: description, policyTags and + // policyTags.names[] are changeable + } + } + return true, nil + case string, float64, bool, nil: + // realistically this shouldn't hit + log.Printf("[DEBUG] comparison of generics hit... not expected") + return old == new, nil + default: + log.Printf("[DEBUG] tried to iterate through json but encountered a non native type to json deserialization... 
please ensure you are passing a json object from json.Unmarshall")
+		return false, errors.New("unable to compare values")
+	}
+}
+
+func resourceBigQueryTableSchemaCustomizeDiffFunc(d tpgresource.TerraformResourceDiff) error {
+	if _, hasSchema := d.GetOk("schema"); hasSchema {
+		oldSchema, newSchema := d.GetChange("schema")
+		oldSchemaText := oldSchema.(string)
+		newSchemaText := newSchema.(string)
+		if oldSchemaText == "null" {
+			// The API can return an empty schema which gets encoded to "null" during read.
+			oldSchemaText = "[]"
+		}
+		if newSchemaText == "null" {
+			newSchemaText = "[]"
+		}
+		var old, new interface{}
+		if err := json.Unmarshal([]byte(oldSchemaText), &old); err != nil {
+			// don't return error, it's possible we are going from no schema to schema
+			// this case will be covered by the comparison regardless.
+			log.Printf("[DEBUG] unable to unmarshal json customized diff - %v", err)
+		}
+		if err := json.Unmarshal([]byte(newSchemaText), &new); err != nil {
+			// same as above
+			log.Printf("[DEBUG] unable to unmarshal json customized diff - %v", err)
+		}
+		_, isExternalTable := d.GetOk("external_data_configuration")
+		isChangeable, err := resourceBigQueryTableSchemaIsChangeable(old, new, isExternalTable, true)
+		if err != nil {
+			return err
+		}
+		if !isChangeable {
+			if err := d.ForceNew("schema"); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+	return nil
+}
+
+func resourceBigQueryTableSchemaCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error {
+	return resourceBigQueryTableSchemaCustomizeDiffFunc(d)
+}
+
+func validateBigQueryTableSchema(v interface{}, k string) (warnings []string, errs []error) {
+	if v == nil {
+		return
+	}
+
+	if _, e := validation.StringIsJSON(v, k); e != nil {
+		errs = append(errs, e...)
+ return + } + + var jsonList []interface{} + if err := json.Unmarshal([]byte(v.(string)), &jsonList); err != nil { + errs = append(errs, fmt.Errorf("\"schema\" is not a JSON array: %s", err)) + return + } + + for _, v := range jsonList { + if v == nil { + errs = append(errs, errors.New("\"schema\" contains a nil element")) + return + } + } + + return +} + +func ResourceBigQueryTable() *schema.Resource { + return &schema.Resource{ + Create: resourceBigQueryTableCreate, + Read: resourceBigQueryTableRead, + Delete: resourceBigQueryTableDelete, + Update: resourceBigQueryTableUpdate, + Importer: &schema.ResourceImporter{ + State: resourceBigQueryTableImport, + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + resourceBigQueryTableSchemaCustomizeDiff, + tpgresource.SetLabelsDiff, + ), + Schema: map[string]*schema.Schema{ + // TableId: [Required] The ID of the table. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 1,024 characters. + "table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A unique ID for the resource. Changing this forces a new resource to be created.`, + }, + + // DatasetId: [Required] The ID of the dataset containing this table. + "dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The dataset ID to create the table in. Changing this forces a new resource to be created.`, + }, + + // ProjectId: [Required] The ID of the project containing this table. + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs.`, + }, + + // Description: [Optional] A user-friendly description of this table. 
+ "description": { + Type: schema.TypeString, + Optional: true, + Description: `The field description.`, + }, + + // ExpirationTime: [Optional] The time when this table expires, in + // milliseconds since the epoch. If not present, the table will persist + // indefinitely. Expired tables will be deleted and their storage + // reclaimed. + "expiration_time": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: `The time when this table expires, in milliseconds since the epoch. If not present, the table will persist indefinitely. Expired tables will be deleted and their storage reclaimed.`, + }, + + // ExternalDataConfiguration [Optional] Describes the data format, + // location, and other properties of a table stored outside of BigQuery. + // By defining these properties, the data source can then be queried as + // if it were a standard BigQuery table. + "external_data_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Describes the data format, location, and other properties of a table stored outside of BigQuery. By defining these properties, the data source can then be queried as if it were a standard BigQuery table.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // Autodetect : [Required] If true, let BigQuery try to autodetect the + // schema and format of the table. + "autodetect": { + Type: schema.TypeBool, + Required: true, + Description: `Let BigQuery try to autodetect the schema and format of the table.`, + }, + // SourceFormat [Required] The data format. + "source_format": { + Type: schema.TypeString, + Optional: true, + Description: `Please see sourceFormat under ExternalDataConfiguration in Bigquery's public API documentation (https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externaldataconfiguration) for supported formats. 
To use "GOOGLE_SHEETS" the scopes must include "googleapis.com/auth/drive.readonly".`, + ValidateFunc: validation.StringInSlice([]string{ + "CSV", "GOOGLE_SHEETS", "NEWLINE_DELIMITED_JSON", "AVRO", "ICEBERG", "DATASTORE_BACKUP", "PARQUET", "ORC", "BIGTABLE", + }, false), + }, + // SourceURIs [Required] The fully-qualified URIs that point to your data in Google Cloud. + "source_uris": { + Type: schema.TypeList, + Required: true, + Description: `A list of the fully-qualified URIs that point to your data in Google Cloud.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + // FileSetSpecType: [Optional] Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems. + "file_set_spec_type": { + Type: schema.TypeString, + Optional: true, + Description: `Specifies how source URIs are interpreted for constructing the file set to load. By default source URIs are expanded against the underlying storage. Other options include specifying manifest files. Only applicable to object storage systems.`, + }, + // Compression: [Optional] The compression type of the data source. + "compression": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"NONE", "GZIP"}, false), + Default: "NONE", + Description: `The compression type of the data source. Valid values are "NONE" or "GZIP".`, + }, + // Schema: [Optional] The schema for the data. + // Schema is required for CSV and JSON formats if autodetect is not on. + // Schema is disallowed for Google Cloud Bigtable, Cloud Datastore backups, Avro, Iceberg, ORC, and Parquet formats. 
+ "schema": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validateBigQueryTableSchema, + StateFunc: func(v interface{}) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, + Description: `A JSON schema for the external table. Schema is required for CSV and JSON formats and is disallowed for Google Cloud Bigtable, Cloud Datastore backups, and Avro formats when using external tables.`, + }, + // CsvOptions: [Optional] Additional properties to set if + // sourceFormat is set to CSV. + "csv_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional properties to set if source_format is set to "CSV".`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // Quote: [Required] The value that is used to quote data + // sections in a CSV file. + "quote": { + Type: schema.TypeString, + Required: true, + Description: `The value that is used to quote data sections in a CSV file. If your data does not contain quoted sections, set the property value to an empty string. If your data contains quoted newline characters, you must also set the allow_quoted_newlines property to true. The API-side default is ", specified in Terraform escaped as \". Due to limitations with Terraform default values, this value is required to be explicitly set.`, + }, + // AllowJaggedRows: [Optional] Indicates if BigQuery should + // accept rows that are missing trailing optional columns. + "allow_jagged_rows": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Indicates if BigQuery should accept rows that are missing trailing optional columns.`, + }, + // AllowQuotedNewlines: [Optional] Indicates if BigQuery + // should allow quoted data sections that contain newline + // characters in a CSV file. The default value is false. 
+ "allow_quoted_newlines": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. The default value is false.`, + }, + // Encoding: [Optional] The character encoding of the data. + // The supported values are UTF-8 or ISO-8859-1. + "encoding": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"ISO-8859-1", "UTF-8"}, false), + Default: "UTF-8", + Description: `The character encoding of the data. The supported values are UTF-8 or ISO-8859-1.`, + }, + // FieldDelimiter: [Optional] The separator for fields in a CSV file. + "field_delimiter": { + Type: schema.TypeString, + Optional: true, + Default: ",", + Description: `The separator for fields in a CSV file.`, + }, + // SkipLeadingRows: [Optional] The number of rows at the top + // of a CSV file that BigQuery will skip when reading the data. + "skip_leading_rows": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: `The number of rows at the top of a CSV file that BigQuery will skip when reading the data.`, + }, + }, + }, + }, + // jsonOptions: [Optional] Additional properties to set if sourceFormat is set to JSON. + "json_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional properties to set if sourceFormat is set to JSON.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "encoding": { + Type: schema.TypeString, + Optional: true, + Default: "UTF-8", + ValidateFunc: validation.StringInSlice([]string{"UTF-8", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE"}, false), + Description: `The character encoding of the data. The supported values are UTF-8, UTF-16BE, UTF-16LE, UTF-32BE, and UTF-32LE. 
The default value is UTF-8.`, + }, + }, + }, + }, + + "json_extension": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"GEOJSON"}, false), + Description: `Load option to be used together with sourceFormat newline-delimited JSON to indicate that a variant of JSON is being loaded. To load newline-delimited GeoJSON, specify GEOJSON (and sourceFormat must be set to NEWLINE_DELIMITED_JSON).`, + }, + + "bigtable_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional options if sourceFormat is set to BIGTABLE.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column_family": { + Type: schema.TypeList, + Optional: true, + Description: `A list of column families to expose in the table schema along with their types. This list restricts the column families that can be referenced in queries and specifies their value types. You can use this list to do type conversions - see the 'type' field for more details. If you leave this list empty, all column families are present in the table schema and their values are read as BYTES. During a query only the column families referenced in that query are read from Bigtable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "column": { + Type: schema.TypeList, + Optional: true, + Description: `A List of columns that should be exposed as individual fields as opposed to a list of (column name, value) pairs. All columns whose qualifier matches a qualifier in this list can be accessed as Other columns can be accessed as a list through column field`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "qualifier_encoded": { + Type: schema.TypeString, + Optional: true, + Description: `Qualifier of the column. Columns in the parent column family that has this exact qualifier are exposed as . field. If the qualifier is valid UTF-8 string, it can be specified in the qualifierString field. 
Otherwise, a base-64 encoded value must be set to qualifierEncoded. The column field name is the same as the column qualifier. However, if the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as fieldName.`, + }, + "qualifier_string": { + Type: schema.TypeString, + Optional: true, + Description: `Qualifier string.`, + }, + "field_name": { + Type: schema.TypeString, + Optional: true, + Description: `If the qualifier is not a valid BigQuery field identifier i.e. does not match [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field name and is used as field name in queries.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: `The type to convert the value in cells of this column. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON", Default type is "BYTES". 'type' can also be set at the column family level. However, the setting at this level takes precedence if 'type' is set at both levels.`, + }, + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: `The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. 'encoding' can also be set at the column family level. However, the setting at this level takes precedence if 'encoding' is set at both levels.`, + }, + "only_read_latest": { + Type: schema.TypeBool, + Optional: true, + Description: `If this is set, only the latest version of value in this column are exposed. 'onlyReadLatest' can also be set at the column family level. 
However, the setting at this level takes precedence if 'onlyReadLatest' is set at both levels.`, + }, + }, + }, + }, + "family_id": { + Type: schema.TypeString, + Optional: true, + Description: `Identifier of the column family.`, + }, + "type": { + Type: schema.TypeString, + Optional: true, + Description: `The type to convert the value in cells of this column family. The values are expected to be encoded using HBase Bytes.toBytes function when using the BINARY encoding value. Following BigQuery types are allowed (case-sensitive): "BYTES", "STRING", "INTEGER", "FLOAT", "BOOLEAN", "JSON". Default type is BYTES. This can be overridden for a specific column by listing that column in 'columns' and specifying a type for it.`, + }, + "encoding": { + Type: schema.TypeString, + Optional: true, + Description: `The encoding of the values when the type is not STRING. Acceptable encoding values are: TEXT - indicates values are alphanumeric text strings. BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions. This can be overridden for a specific column by listing that column in 'columns' and specifying an encoding for it.`, + }, + "only_read_latest": { + Type: schema.TypeBool, + Optional: true, + Description: `If this is set only the latest version of value are exposed for all columns in this column family. This can be overridden for a specific column by listing that column in 'columns' and specifying a different setting for that column.`, + }, + }, + }, + }, + "ignore_unspecified_column_families": { + Type: schema.TypeBool, + Optional: true, + Description: `If field is true, then the column families that are not specified in columnFamilies list are not exposed in the table schema. Otherwise, they are read with BYTES type values. The default value is false.`, + }, + "read_rowkey_as_string": { + Type: schema.TypeBool, + Optional: true, + Description: `If field is true, then the rowkey column families will be read and converted to string. 
Otherwise they are read with BYTES type values and users need to manually cast them with CAST if necessary. The default value is false.`, + }, + "output_column_families_as_json": { + Type: schema.TypeBool, + Optional: true, + Description: `If field is true, then each column family will be read as a single JSON column. Otherwise they are read as a repeated cell structure containing timestamp/value tuples. The default value is false.`, + }, + }, + }, + }, + + "parquet_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional properties to set if sourceFormat is set to PARQUET.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enum_as_string": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default.`, + }, + "enable_list_inference": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates whether to use schema inference specifically for Parquet LIST logical type.`, + }, + }, + }, + }, + // GoogleSheetsOptions: [Optional] Additional options if sourceFormat is set to GOOGLE_SHEETS. + "google_sheets_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional options if source_format is set to "GOOGLE_SHEETS".`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // Range: [Optional] Range of a sheet to query from. Only used when non-empty. + // Typical format: !: + "range": { + Type: schema.TypeString, + Optional: true, + Description: `Range of a sheet to query from. Only used when non-empty. At least one of range or skip_leading_rows must be set. 
Typical format: "sheet_name!top_left_cell_id:bottom_right_cell_id" For example: "sheet1!A1:B20`, + AtLeastOneOf: []string{ + "external_data_configuration.0.google_sheets_options.0.skip_leading_rows", + "external_data_configuration.0.google_sheets_options.0.range", + }, + }, + // SkipLeadingRows: [Optional] The number of rows at the top + // of the sheet that BigQuery will skip when reading the data. + "skip_leading_rows": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of rows at the top of the sheet that BigQuery will skip when reading the data. At least one of range or skip_leading_rows must be set.`, + AtLeastOneOf: []string{ + "external_data_configuration.0.google_sheets_options.0.skip_leading_rows", + "external_data_configuration.0.google_sheets_options.0.range", + }, + }, + }, + }, + }, + + // HivePartitioningOptions:: [Optional] Options for configuring hive partitioning detect. + "hive_partitioning_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `When set, configures hive partitioning support. Not all storage formats support hive partitioning -- requesting hive partitioning on an unsupported format will lead to an error, as will providing an invalid specification.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // Mode: [Optional] [Experimental] When set, what mode of hive partitioning to use when reading data. + // Two modes are supported. + //* AUTO: automatically infer partition key name(s) and type(s). + //* STRINGS: automatically infer partition key name(s). + "mode": { + Type: schema.TypeString, + Optional: true, + Description: `When set, what mode of hive partitioning to use when reading data.`, + }, + // RequirePartitionFilter: [Optional] If set to true, queries over this table + // require a partition filter that can be used for partition elimination to be + // specified. 
+ "require_partition_filter": { + Type: schema.TypeBool, + Optional: true, + Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, + }, + // SourceUriPrefix: [Optional] [Experimental] When hive partition detection is requested, a common for all source uris must be required. + // The prefix must end immediately before the partition key encoding begins. + "source_uri_prefix": { + Type: schema.TypeString, + Optional: true, + Description: `When hive partition detection is requested, a common for all source uris must be required. The prefix must end immediately before the partition key encoding begins.`, + }, + }, + }, + }, + // AvroOptions: [Optional] Additional options if sourceFormat is set to AVRO. + "avro_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional options if source_format is set to "AVRO"`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "use_avro_logical_types": { + Type: schema.TypeBool, + Required: true, + Description: `If sourceFormat is set to "AVRO", indicates whether to interpret logical types as the corresponding BigQuery data type (for example, TIMESTAMP), instead of using the raw type (for example, INTEGER).`, + }, + }, + }, + }, + + // IgnoreUnknownValues: [Optional] Indicates if BigQuery should + // allow extra values that are not represented in the table schema. + // If true, the extra values are ignored. If false, records with + // extra columns are treated as bad records, and if there are too + // many bad records, an invalid error is returned in the job result. + // The default value is false. + "ignore_unknown_values": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates if BigQuery should allow extra values that are not represented in the table schema. If true, the extra values are ignored. 
If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false.`, + }, + // MaxBadRecords: [Optional] The maximum number of bad records that + // BigQuery can ignore when reading data. + "max_bad_records": { + Type: schema.TypeInt, + Optional: true, + Description: `The maximum number of bad records that BigQuery can ignore when reading data.`, + }, + // ConnectionId: [Optional] The connection specifying the credentials + // to be used to read external storage, such as Azure Blob, + // Cloud Storage, or S3. The connectionId can have the form + // "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" or + // "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/connections/{{"{{"}}connection_id{{"}}"}}". + "connection_id": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: bigQueryTableConnectionIdSuppress, + Description: `The connection specifying the credentials to be used to read external storage, such as Azure Blob, Cloud Storage, or S3. The connectionId can have the form "{{"{{"}}project{{"}}"}}.{{"{{"}}location{{"}}"}}.{{"{{"}}connection_id{{"}}"}}" or "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/connections/{{"{{"}}connection_id{{"}}"}}".`, + }, + "reference_file_schema_uri": { + Type: schema.TypeString, + Optional: true, + Description: `When creating an external table, the user can provide a reference file with the table schema. This is enabled for the following formats: AVRO, PARQUET, ORC.`, + }, + "metadata_cache_mode": { + Type: schema.TypeString, + Optional: true, + Description: `Metadata Cache Mode for the table. 
Set this to enable caching of metadata from external data source.`, + ValidateFunc: validation.StringInSlice([]string{"AUTOMATIC", "MANUAL"}, false), + }, + "object_metadata": { + Type: schema.TypeString, + Optional: true, + Description: `Object Metadata is used to create Object Tables. Object Tables contain a listing of objects (with their metadata) found at the sourceUris. If ObjectMetadata is set, sourceFormat should be omitted.`, + ConflictsWith: []string{"external_data_configuration.0.source_format"}, + }, + }, + }, + }, + + // FriendlyName: [Optional] A descriptive name for this table. + "friendly_name": { + Type: schema.TypeString, + Optional: true, + Description: `A descriptive name for the table.`, + }, + + // max_staleness: [Optional] The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of sql IntervalValue type. + "max_staleness": { + Type: schema.TypeString, + Optional: true, + Description: `The maximum staleness of data that could be returned when the table (or stale MV) is queried. Staleness encoded as a string encoding of [SQL IntervalValue type](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#interval_type).`, + }, + + // Labels: [Experimental] The labels associated with this table. You can + // use these to organize and group your tables. Label keys and values + // can be no longer than 63 characters, can only contain lowercase + // letters, numeric characters, underscores and dashes. International + // characters are allowed. Label values are optional. Label keys must + // start with a letter and each label in the list must have a different + // key. + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A mapping of labels to assign to the resource. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+ Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + }, + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + // Schema: [Optional] Describes the schema of this table. + "schema": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateBigQueryTableSchema, + StateFunc: func(v interface{}) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, + DiffSuppressFunc: bigQueryTableSchemaDiffSuppress, + Description: `A JSON schema for the table.`, + }, + // View: [Optional] If specified, configures this table as a view. + "view": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `If specified, configures this table as a view.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // Query: [Required] A query that BigQuery executes when the view is + // referenced. + "query": { + Type: schema.TypeString, + Required: true, + Description: `A query that BigQuery executes when the view is referenced.`, + }, + + // UseLegacySQL: [Optional] Specifies whether to use BigQuery's + // legacy SQL for this view. The default value is true. If set to + // false, the view will use BigQuery's standard SQL: + "use_legacy_sql": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Specifies whether to use BigQuery's legacy SQL for this view. The default value is true. 
If set to false, the view will use BigQuery's standard SQL`, + }, + }, + }, + }, + + // Materialized View: [Optional] If specified, configures this table as a materialized view. + "materialized_view": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `If specified, configures this table as a materialized view.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // EnableRefresh: [Optional] Enable automatic refresh of + // the materialized view when the base table is updated. The default + // value is "true". + "enable_refresh": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Specifies if BigQuery should automatically refresh materialized view when the base table is updated. The default is true.`, + }, + + // RefreshIntervalMs: [Optional] The maximum frequency + // at which this materialized view will be refreshed. The default value + // is 1800000 (30 minutes). + "refresh_interval_ms": { + Type: schema.TypeInt, + Default: 1800000, + Optional: true, + Description: `Specifies maximum frequency at which this materialized view will be refreshed. The default is 1800000.`, + }, + + "allow_non_incremental_definition": { + Type: schema.TypeBool, + Default: false, + Optional: true, + ForceNew: true, + Description: `Allow non incremental materialized view definition. The default value is false.`, + }, + + // Query: [Required] A query whose result is persisted + "query": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A query whose result is persisted.`, + }, + }, + }, + }, + + // TimePartitioning: [Experimental] If specified, configures time-based + // partitioning for this table. 
+ "time_partitioning": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `If specified, configures time-based partitioning for this table.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // ExpirationMs: [Optional] Number of milliseconds for which to keep the storage for a + // partition. If unspecified when the table is created in a dataset that has + // `defaultPartitionExpirationMs`, it will inherit the value of + // `defaultPartitionExpirationMs` from the dataset. + // To specify a unlimited expiration, set the value to 0. + "expiration_ms": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: `Number of milliseconds for which to keep the storage for a partition.`, + }, + + // Type: [Required] The supported types are DAY, HOUR, MONTH, and YEAR, which will generate + // one partition per day, hour, month, and year, respectively. + "type": { + Type: schema.TypeString, + Required: true, + Description: `The supported types are DAY, HOUR, MONTH, and YEAR, which will generate one partition per day, hour, month, and year, respectively.`, + ValidateFunc: validation.StringInSlice([]string{"DAY", "HOUR", "MONTH", "YEAR"}, false), + }, + + // Field: [Optional] The field used to determine how to create a time-based + // partition. If time-based partitioning is enabled without this value, the + // table is partitioned based on the load time. + "field": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The field used to determine how to create a time-based partition. If time-based partitioning is enabled without this value, the table is partitioned based on the load time.`, + }, + + // RequirePartitionFilter: [Optional] If set to true, queries over this table + // require a partition filter that can be used for partition elimination to be + // specified. 
+ "require_partition_filter": { + Type: schema.TypeBool, + Optional: true, + Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, + Deprecated: `This field is deprecated and will be removed in a future major release; please use the top level field with the same name instead.`, + ConflictsWith: []string{"require_partition_filter"}, + }, + }, + }, + }, + + // RangePartitioning: [Optional] If specified, configures range-based + // partitioning for this table. + "range_partitioning": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `If specified, configures range-based partitioning for this table.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // Field: [Required] The field used to determine how to create a range-based + // partition. + "field": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The field used to determine how to create a range-based partition.`, + }, + + // Range: [Required] Information required to partition based on ranges. + "range": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: `Information required to partition based on ranges. Structure is documented below.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // Start: [Required] Start of the range partitioning, inclusive. + "start": { + Type: schema.TypeInt, + Required: true, + Description: `Start of the range partitioning, inclusive.`, + }, + + // End: [Required] End of the range partitioning, exclusive. + "end": { + Type: schema.TypeInt, + Required: true, + Description: `End of the range partitioning, exclusive.`, + }, + + // Interval: [Required] The width of each range within the partition. 
+ "interval": { + Type: schema.TypeInt, + Required: true, + Description: `The width of each range within the partition.`, + }, + }, + }, + }, + }, + }, + }, + + // RequirePartitionFilter: [Optional] If set to true, queries over this table + // require a partition filter that can be used for partition elimination to be + // specified. + "require_partition_filter": { + Type: schema.TypeBool, + Optional: true, + Description: `If set to true, queries over this table require a partition filter that can be used for partition elimination to be specified.`, + ConflictsWith: []string{"time_partitioning.0.require_partition_filter"}, + }, + + // Clustering: [Optional] Specifies column names to use for data clustering. Up to four + // top-level columns are allowed, and should be specified in descending priority order. + "clustering": { + Type: schema.TypeList, + Optional: true, + MaxItems: 4, + Description: `Specifies column names to use for data clustering. Up to four top-level columns are allowed, and should be specified in descending priority order.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "encryption_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `Specifies how the table should be encrypted. If left blank, the table will be encrypted with a Google-managed key; that process is transparent to the user.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `The self link or full name of a key which should be used to encrypt this table. 
Note that the default bigquery service account will need to have encrypt/decrypt permissions on this key - you may want to see the google_bigquery_default_service_account datasource and the google_kms_crypto_key_iam_binding resource.`, + }, + "kms_key_version": { + Type: schema.TypeString, + Computed: true, + Description: `The self link or full name of the kms key version used to encrypt this table.`, + }, + }, + }, + }, + + // CreationTime: [Output-only] The time when this table was created, in + // milliseconds since the epoch. + "creation_time": { + Type: schema.TypeInt, + Computed: true, + Description: `The time when this table was created, in milliseconds since the epoch.`, + }, + + // Etag: [Output-only] A hash of this resource. + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `A hash of the resource.`, + }, + + // LastModifiedTime: [Output-only] The time when this table was last + // modified, in milliseconds since the epoch. + "last_modified_time": { + Type: schema.TypeInt, + Computed: true, + Description: `The time when this table was last modified, in milliseconds since the epoch.`, + }, + + // Location: [Output-only] The geographic location where the table + // resides. This value is inherited from the dataset. + "location": { + Type: schema.TypeString, + Computed: true, + Description: `The geographic location where the table resides. This value is inherited from the dataset.`, + }, + + // NumBytes: [Output-only] The size of this table in bytes, excluding + // any data in the streaming buffer. + "num_bytes": { + Type: schema.TypeInt, + Computed: true, + Description: `The geographic location where the table resides. This value is inherited from the dataset.`, + }, + + // NumLongTermBytes: [Output-only] The number of bytes in the table that + // are considered "long-term storage". 
+ "num_long_term_bytes": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of bytes in the table that are considered "long-term storage".`, + }, + + // NumRows: [Output-only] The number of rows of data in this table, + // excluding any data in the streaming buffer. + "num_rows": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of rows of data in this table, excluding any data in the streaming buffer.`, + }, + + // SelfLink: [Output-only] A URL that can be used to access this + // resource again. + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + + // Type: [Output-only] Describes the table type. The following values + // are supported: TABLE: A normal BigQuery table. VIEW: A virtual table + // defined by a SQL query. EXTERNAL: A table that references data stored + // in an external storage system, such as Google Cloud Storage. The + // default value is TABLE. + "type": { + Type: schema.TypeString, + Computed: true, + Description: `Describes the table type.`, + }, + + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Whether Terraform will be prevented from destroying the instance. When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the table will fail. When the field is set to false, deleting the table is allowed.`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "allow_resource_tags_on_deletion": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether or not to allow table deletion when there are still resource tags attached.`, + }, + + {{ end }} + // TableConstraints: [Optional] Defines the primary key and foreign keys. 
+ "table_constraints": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Defines the primary key and foreign keys.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // PrimaryKey: [Optional] Represents the primary key constraint + // on a table's columns. Present only if the table has a primary key. + // The primary key is not enforced. + "primary_key": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Represents a primary key constraint on a table's columns. Present only if the table has a primary key. The primary key is not enforced.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + //Columns: [Required] The columns that are composed of the primary key constraint. + "columns": { + Type: schema.TypeList, + Required: true, + Description: `The columns that are composed of the primary key constraint.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + // ForeignKeys: [Optional] Present only if the table has a foreign key. + // The foreign key is not enforced. + "foreign_keys": { + Type: schema.TypeList, + Optional: true, + Description: `Present only if the table has a foreign key. The foreign key is not enforced.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // Name: [Optional] Set only if the foreign key constraint is named. + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Set only if the foreign key constraint is named.`, + }, + + // ReferencedTable: [Required] The table that holds the primary key + // and is referenced by this foreign key. + "referenced_table": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: `The table that holds the primary key and is referenced by this foreign key.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // ProjectId: [Required] The ID of the project containing this table. 
+ "project_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the project containing this table.`, + }, + + // DatasetId: [Required] The ID of the dataset containing this table. + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the dataset containing this table.`, + }, + + // TableId: [Required] The ID of the table. The ID must contain only + // letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum + // length is 1,024 characters. Certain operations allow suffixing of + // the table ID with a partition decorator, such as + // sample_table$20190123. + "table_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the table. The ID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). The maximum length is 1,024 characters. Certain operations allow suffixing of the table ID with a partition decorator, such as sample_table$20190123.`, + }, + }, + }, + }, + + // ColumnReferences: [Required] The pair of the foreign key column and primary key column. + "column_references": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: `The pair of the foreign key column and primary key column.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // ReferencingColumn: [Required] The column that composes the foreign key. 
+ "referencing_column": { + Type: schema.TypeString, + Required: true, + Description: `The column that composes the foreign key.`, + }, + + // ReferencedColumn: [Required] The column in the primary key that are + // referenced by the referencingColumn + "referenced_column": { + Type: schema.TypeString, + Required: true, + Description: `The column in the primary key that are referenced by the referencingColumn.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + // TableReplicationInfo: [Optional] Replication info of a table created using `AS REPLICA` DDL like: `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`. + "table_replication_info": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `Replication info of a table created using "AS REPLICA" DDL like: "CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv".`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the source project.`, + }, + "source_dataset_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the source dataset.`, + }, + "source_table_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the source materialized view.`, + }, + "replication_interval_ms": { + Type: schema.TypeInt, + Default: 300000, + Optional: true, + ForceNew: true, + Description: `The interval at which the source materialized view is polled for updates. The default is 300000.`, + }, + }, + }, + }, + {{- if ne $.TargetVersionName "ga" }} + "resource_tags": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The tags attached to this table. Tag keys are globally unique. 
Tag key is expected to be in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production".`, + }, + {{- end }} + }, + UseJSONNumber: true, + } +} + +func resourceTable(d *schema.ResourceData, meta interface{}) (*bigquery.Table, error) { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + table := &bigquery.Table{ + TableReference: &bigquery.TableReference{ + DatasetId: d.Get("dataset_id").(string), + TableId: d.Get("table_id").(string), + ProjectId: project, + }, + } + + if v, ok := d.GetOk("view"); ok { + table.View = expandView(v) + } + + if v, ok := d.GetOk("materialized_view"); ok { + table.MaterializedView = expandMaterializedView(v) + } + + if v, ok := d.GetOk("description"); ok { + table.Description = v.(string) + } + + if v, ok := d.GetOk("expiration_time"); ok { + table.ExpirationTime = int64(v.(int)) + } + + if v, ok := d.GetOk("external_data_configuration"); ok { + externalDataConfiguration, err := expandExternalDataConfiguration(v) + if err != nil { + return nil, err + } + + table.ExternalDataConfiguration = externalDataConfiguration + } + + if v, ok := d.GetOk("friendly_name"); ok { + table.FriendlyName = v.(string) + } + + if v, ok := d.GetOk("max_staleness"); ok { + table.MaxStaleness = v.(string) + } + + if v, ok := d.GetOk("encryption_configuration.0.kms_key_name"); ok { + table.EncryptionConfiguration = &bigquery.EncryptionConfiguration{ + KmsKeyName: v.(string), + } + } + + if v, ok := d.GetOk("effective_labels"); ok { + labels := map[string]string{} + + for k, v := range v.(map[string]interface{}) { + labels[k] = v.(string) + } + + table.Labels = labels + } + + if v, ok := d.GetOk("schema"); ok { + _, viewPresent := d.GetOk("view") + _, materializedViewPresent := d.GetOk("materialized_view") + 
managePolicyTags := !viewPresent && !materializedViewPresent + schema, err := expandSchema(v, managePolicyTags) + if err != nil { + return nil, err + } + table.Schema = schema + } + + if v, ok := d.GetOk("time_partitioning"); ok { + table.TimePartitioning = expandTimePartitioning(v) + } + + if v, ok := d.GetOk("range_partitioning"); ok { + rangePartitioning, err := expandRangePartitioning(v) + if err != nil { + return nil, err + } + + table.RangePartitioning = rangePartitioning + } + + if v, ok := d.GetOk("require_partition_filter"); ok { + table.RequirePartitionFilter = v.(bool) + } + + if v, ok := d.GetOk("clustering"); ok { + table.Clustering = &bigquery.Clustering{ + Fields: tpgresource.ConvertStringArr(v.([]interface{})), + ForceSendFields: []string{"Fields"}, + } + } + + if v, ok := d.GetOk("table_constraints"); ok { + tableConstraints, err := expandTableConstraints(v) + if err != nil { + return nil, err + } + + table.TableConstraints = tableConstraints + } + + {{ if ne $.TargetVersionName `ga` -}} + table.ResourceTags = tpgresource.ExpandStringMap(d, "resource_tags") + + {{ end }} + return table, nil +} + +func resourceBigQueryTableCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + table, err := resourceTable(d, meta) + if err != nil { + return err + } + + datasetID := d.Get("dataset_id").(string) + + if v, ok := d.GetOk("table_replication_info"); ok { + if table.Schema != nil || table.View != nil || table.MaterializedView != nil { + return errors.New("Schema, view, or materialized view cannot be specified when table replication info is present") + } + + replicationDDL := fmt.Sprintf("CREATE MATERIALIZED VIEW %s.%s.%s", d.Get("project").(string), d.Get("dataset_id").(string), d.Get("table_id").(string)) + + 
tableReplicationInfo := expandTableReplicationInfo(v) + replicationIntervalMs := tableReplicationInfo["replication_interval_ms"].(int64) + if replicationIntervalMs > 0 { + replicationIntervalSeconds := replicationIntervalMs / 1000 + replicationDDL = fmt.Sprintf("%s OPTIONS(replication_interval_seconds=%d)", replicationDDL, replicationIntervalSeconds) + } + + replicationDDL = fmt.Sprintf("%s AS REPLICA OF %s.%s.%s", replicationDDL, tableReplicationInfo["source_project_id"], tableReplicationInfo["source_dataset_id"], tableReplicationInfo["source_table_id"]) + useLegacySQL := false + + req := &bigquery.QueryRequest{ + Query: replicationDDL, + UseLegacySql: &useLegacySQL, + } + + log.Printf("[INFO] Creating a replica materialized view with DDL: '%s'", replicationDDL) + + _, err := config.NewBigQueryClient(userAgent).Jobs.Query(project, req).Do() + + id := fmt.Sprintf("projects/%s/datasets/%s/tables/%s", project, datasetID, d.Get("table_id").(string)) + if err != nil { + if deleteErr := resourceBigQueryTableDelete(d, meta); deleteErr != nil { + log.Printf("[INFO] Unable to clean up table %s: %s", id, deleteErr) + } + return err + } + + log.Printf("[INFO] BigQuery table %s has been created", id) + d.SetId(id) + + return resourceBigQueryTableRead(d, meta) + } + + if table.View != nil && table.Schema != nil { + + log.Printf("[INFO] Removing schema from table definition because BigQuery does not support setting schema on view creation") + schemaBack := table.Schema + table.Schema = nil + + log.Printf("[INFO] Creating BigQuery table: %s without schema", table.TableReference.TableId) + + res, err := config.NewBigQueryClient(userAgent).Tables.Insert(project, datasetID, table).Do() + if err != nil { + return err + } + + log.Printf("[INFO] BigQuery table %s has been created", res.Id) + d.SetId(fmt.Sprintf("projects/%s/datasets/%s/tables/%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) + + table.Schema = schemaBack + log.Printf("[INFO] 
Updating BigQuery table: %s with schema", table.TableReference.TableId) + if _, err = config.NewBigQueryClient(userAgent).Tables.Update(project, datasetID, res.TableReference.TableId, table).Do(); err != nil { + return err + } + + log.Printf("[INFO] BigQuery table %s has been updated with schema", res.Id) + } else { + log.Printf("[INFO] Creating BigQuery table: %s", table.TableReference.TableId) + + res, err := config.NewBigQueryClient(userAgent).Tables.Insert(project, datasetID, table).Do() + if err != nil { + return err + } + + log.Printf("[INFO] BigQuery table %s has been created", res.Id) + d.SetId(fmt.Sprintf("projects/%s/datasets/%s/tables/%s", res.TableReference.ProjectId, res.TableReference.DatasetId, res.TableReference.TableId)) + } + + return resourceBigQueryTableRead(d, meta) +} + +func resourceBigQueryTableRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + log.Printf("[INFO] Reading BigQuery table: %s", d.Id()) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + datasetID := d.Get("dataset_id").(string) + tableID := d.Get("table_id").(string) + + res, err := config.NewBigQueryClient(userAgent).Tables.Get(project, datasetID, tableID).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("BigQuery table %q", tableID)) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("description", res.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("expiration_time", res.ExpirationTime); err != nil { + return fmt.Errorf("Error setting expiration_time: %s", err) + } + if err := d.Set("friendly_name", res.FriendlyName); err != nil { + return fmt.Errorf("Error setting friendly_name: %s", err) + } + if 
err := d.Set("max_staleness", res.MaxStaleness); err != nil { + return fmt.Errorf("Error setting max_staleness: %s", err) + } + if err := tpgresource.SetLabels(res.Labels, d, "labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := tpgresource.SetLabels(res.Labels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("Error setting effective_labels: %s", err) + } + if err := d.Set("creation_time", res.CreationTime); err != nil { + return fmt.Errorf("Error setting creation_time: %s", err) + } + if err := d.Set("etag", res.Etag); err != nil { + return fmt.Errorf("Error setting etag: %s", err) + } + if err := d.Set("last_modified_time", res.LastModifiedTime); err != nil { + return fmt.Errorf("Error setting last_modified_time: %s", err) + } + if err := d.Set("location", res.Location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("num_bytes", res.NumBytes); err != nil { + return fmt.Errorf("Error setting num_bytes: %s", err) + } + if err := d.Set("table_id", res.TableReference.TableId); err != nil { + return fmt.Errorf("Error setting table_id: %s", err) + } + if err := d.Set("dataset_id", res.TableReference.DatasetId); err != nil { + return fmt.Errorf("Error setting dataset_id: %s", err) + } + if err := d.Set("num_long_term_bytes", res.NumLongTermBytes); err != nil { + return fmt.Errorf("Error setting num_long_term_bytes: %s", err) + } + if err := d.Set("num_rows", res.NumRows); err != nil { + return fmt.Errorf("Error setting num_rows: %s", err) + } + if err := d.Set("self_link", res.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("type", res.Type); err != nil { + return fmt.Errorf("Error setting type: %s", err) + } + + // determine whether the deprecated require_partition_filter field is used + 
use_old_rpf := false + if _, ok := d.GetOk("time_partitioning.0.require_partition_filter"); ok { + use_old_rpf = true + } else if err := d.Set("require_partition_filter", res.RequirePartitionFilter); err != nil { + return fmt.Errorf("Error setting require_partition_filter: %s", err) + } + + if res.ExternalDataConfiguration != nil { + externalDataConfiguration, err := flattenExternalDataConfiguration(res.ExternalDataConfiguration) + if err != nil { + return err + } + + if v, ok := d.GetOk("external_data_configuration"); ok { + // The API response doesn't return the `external_data_configuration.schema` + // used when creating the table and it cannot be queried. + // After creation, a computed schema is stored in the toplevel `schema`, + // which combines `external_data_configuration.schema` + // with any hive partioning fields found in the `source_uri_prefix`. + // So just assume the configured schema has been applied after successful + // creation, by copying the configured value back into the resource schema. + // This avoids that reading back this field will be identified as a change. + // The `ForceNew=true` on `external_data_configuration.schema` will ensure + // the users' expectation that changing the configured input schema will + // recreate the resource. 
+ edc := v.([]interface{})[0].(map[string]interface{}) + if edc["schema"] != nil { + externalDataConfiguration[0]["schema"] = edc["schema"] + } + } + + if err := d.Set("external_data_configuration", externalDataConfiguration); err != nil { + return fmt.Errorf("Error setting external_data_configuration: %s", err) + } + } + + if res.TimePartitioning != nil { + if err := d.Set("time_partitioning", flattenTimePartitioning(res.TimePartitioning, use_old_rpf)); err != nil { + return err + } + } + + if res.RangePartitioning != nil { + if err := d.Set("range_partitioning", flattenRangePartitioning(res.RangePartitioning)); err != nil { + return err + } + } + + if res.Clustering != nil { + if err := d.Set("clustering", res.Clustering.Fields); err != nil { + return fmt.Errorf("Error setting clustering: %s", err) + } + } + if res.EncryptionConfiguration != nil { + if err := d.Set("encryption_configuration", flattenEncryptionConfiguration(res.EncryptionConfiguration)); err != nil { + return err + } + } + + if res.Schema != nil { + schema, err := flattenSchema(res.Schema) + if err != nil { + return err + } + if err := d.Set("schema", schema); err != nil { + return fmt.Errorf("Error setting schema: %s", err) + } + } + + if res.View != nil { + view := flattenView(res.View) + if err := d.Set("view", view); err != nil { + return fmt.Errorf("Error setting view: %s", err) + } + } + + if res.MaterializedView != nil { + materialized_view := flattenMaterializedView(res.MaterializedView) + + if err := d.Set("materialized_view", materialized_view); err != nil { + return fmt.Errorf("Error setting materialized view: %s", err) + } + } + + if res.TableConstraints != nil { + table_constraints := flattenTableConstraints(res.TableConstraints) + + if err := d.Set("table_constraints", table_constraints); err != nil { + return fmt.Errorf("Error setting table constraints: %s", err) + } + } + + {{ if ne $.TargetVersionName `ga` -}} + if err := d.Set("resource_tags", res.ResourceTags); err != nil { + 
return fmt.Errorf("Error setting resource tags: %s", err) + } + + {{ end }} + // TODO: Update when the Get API fields for TableReplicationInfo are available in the client library. + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}BigQueryBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}") + if err != nil { + return err + } + + log.Printf("[INFO] Reading BigQuery table through API: %s", url) + + getRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + + if v, ok := getRes["tableReplicationInfo"]; ok { + tableReplicationInfo := flattenTableReplicationInfo(v.(map[string]interface{})) + + if err := d.Set("table_replication_info", tableReplicationInfo); err != nil { + return fmt.Errorf("Error setting table replication info: %s", err) + } + } + + return nil +} + +type TableReference struct { + project string + datasetID string + tableID string +} + +func resourceBigQueryTableUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + table, err := resourceTable(d, meta) + if err != nil { + return err + } + + log.Printf("[INFO] Updating BigQuery table: %s", d.Id()) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + datasetID := d.Get("dataset_id").(string) + tableID := d.Get("table_id").(string) + + tableReference := &TableReference{ + project: project, + datasetID: datasetID, + tableID: tableID, + } + + if err = resourceBigQueryTableColumnDrop(config, userAgent, table, tableReference); err != nil { + return err + } + + if _, err = config.NewBigQueryClient(userAgent).Tables.Update(project, datasetID, tableID, table).Do(); err != nil { + return err + } + + return 
resourceBigQueryTableRead(d, meta) +} + +func resourceBigQueryTableColumnDrop(config *transport_tpg.Config, userAgent string, table *bigquery.Table, tableReference *TableReference) error { + oldTable, err := config.NewBigQueryClient(userAgent).Tables.Get(tableReference.project, tableReference.datasetID, tableReference.tableID).Do() + if err != nil { + return err + } + + if table.Schema == nil { + return nil + } + + newTableFields := map[string]bool{} + for _, field := range table.Schema.Fields { + newTableFields[field.Name] = true + } + + droppedColumns := []string{} + for _, field := range oldTable.Schema.Fields { + if !newTableFields[field.Name] { + droppedColumns = append(droppedColumns, field.Name) + } + } + + if len(droppedColumns) > 0 { + droppedColumnsString := strings.Join(droppedColumns, ", DROP COLUMN ") + + dropColumnsDDL := fmt.Sprintf("ALTER TABLE `%s.%s.%s` DROP COLUMN %s", tableReference.project, tableReference.datasetID, tableReference.tableID, droppedColumnsString) + log.Printf("[INFO] Dropping columns in-place: %s", dropColumnsDDL) + + useLegacySQL := false + req := &bigquery.QueryRequest{ + Query: dropColumnsDDL, + UseLegacySql: &useLegacySQL, + } + + _, err = config.NewBigQueryClient(userAgent).Jobs.Query(tableReference.project, req).Do() + if err != nil { + return err + } + } + + return nil +} + +func resourceBigQueryTableDelete(d *schema.ResourceData, meta interface{}) error { + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("cannot destroy table %v without setting deletion_protection=false and running `terraform apply`", d.Id()) + } + {{- if ne $.TargetVersionName "ga" }} + if v, ok := d.GetOk("resource_tags"); ok { + if !d.Get("allow_resource_tags_on_deletion").(bool) { + var resourceTags []string + + for k, v := range v.(map[string]interface{}) { + resourceTags = append(resourceTags, fmt.Sprintf("%s:%s", k, v.(string))) + } + + return fmt.Errorf("cannot destroy table %v without unsetting the following resource tags or setting 
allow_resource_tags_on_deletion=true: %v", d.Id(), resourceTags) + } + } + + {{ end }} + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + log.Printf("[INFO] Deleting BigQuery table: %s", d.Id()) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + datasetID := d.Get("dataset_id").(string) + tableID := d.Get("table_id").(string) + + if err := config.NewBigQueryClient(userAgent).Tables.Delete(project, datasetID, tableID).Do(); err != nil { + return err + } + + d.SetId("") + + return nil +} + +func expandExternalDataConfiguration(cfg interface{}) (*bigquery.ExternalDataConfiguration, error) { + raw := cfg.([]interface{})[0].(map[string]interface{}) + + edc := &bigquery.ExternalDataConfiguration{ + Autodetect: raw["autodetect"].(bool), + } + + sourceUris := []string{} + for _, rawSourceUri := range raw["source_uris"].([]interface{}) { + sourceUris = append(sourceUris, rawSourceUri.(string)) + } + if len(sourceUris) > 0 { + edc.SourceUris = sourceUris + } + + if v, ok := raw["file_set_spec_type"]; ok { + edc.FileSetSpecType = v.(string) + } + + if v, ok := raw["compression"]; ok { + edc.Compression = v.(string) + } + + if v, ok := raw["json_extension"]; ok { + edc.JsonExtension = v.(string) + } + + if v, ok := raw["csv_options"]; ok { + edc.CsvOptions = expandCsvOptions(v) + } + if v, ok := raw["json_options"]; ok { + edc.JsonOptions = expandJsonOptions(v) + } + if v, ok := raw["bigtable_options"]; ok { + edc.BigtableOptions = expandBigtableOptions(v) + } + if v, ok := raw["google_sheets_options"]; ok { + edc.GoogleSheetsOptions = expandGoogleSheetsOptions(v) + } + if v, ok := raw["hive_partitioning_options"]; ok { + edc.HivePartitioningOptions = expandHivePartitioningOptions(v) + } + if v, ok := raw["avro_options"]; ok { + edc.AvroOptions = expandAvroOptions(v) + } + if v, ok := raw["parquet_options"]; ok { + 
edc.ParquetOptions = expandParquetOptions(v) + } + + if v, ok := raw["ignore_unknown_values"]; ok { + edc.IgnoreUnknownValues = v.(bool) + } + if v, ok := raw["max_bad_records"]; ok { + edc.MaxBadRecords = int64(v.(int)) + } + if v, ok := raw["schema"]; ok { + managePolicyTags := true + schema, err := expandSchema(v, managePolicyTags) + if err != nil { + return nil, err + } + edc.Schema = schema + } + if v, ok := raw["source_format"]; ok { + edc.SourceFormat = v.(string) + } + if v, ok := raw["connection_id"]; ok { + edc.ConnectionId = v.(string) + } + if v, ok := raw["reference_file_schema_uri"]; ok { + edc.ReferenceFileSchemaUri = v.(string) + } + if v, ok := raw["metadata_cache_mode"]; ok { + edc.MetadataCacheMode = v.(string) + } + if v, ok := raw["object_metadata"]; ok { + edc.ObjectMetadata = v.(string) + } + + return edc, nil + +} + +func flattenExternalDataConfiguration(edc *bigquery.ExternalDataConfiguration) ([]map[string]interface{}, error) { + result := map[string]interface{}{} + + result["autodetect"] = edc.Autodetect + result["source_uris"] = edc.SourceUris + + if edc.FileSetSpecType != "" { + result["file_set_spec_type"] = edc.FileSetSpecType + } + + if edc.Compression != "" { + result["compression"] = edc.Compression + } + + if edc.JsonExtension != "" { + result["json_extension"] = edc.JsonExtension + } + + if edc.CsvOptions != nil { + result["csv_options"] = flattenCsvOptions(edc.CsvOptions) + } + + if edc.GoogleSheetsOptions != nil { + result["google_sheets_options"] = flattenGoogleSheetsOptions(edc.GoogleSheetsOptions) + } + + if edc.HivePartitioningOptions != nil { + result["hive_partitioning_options"] = flattenHivePartitioningOptions(edc.HivePartitioningOptions) + } + + if edc.AvroOptions != nil { + result["avro_options"] = flattenAvroOptions(edc.AvroOptions) + } + + if edc.ParquetOptions != nil { + result["parquet_options"] = flattenParquetOptions(edc.ParquetOptions) + } + + if edc.JsonOptions != nil { + result["json_options"] = 
flattenJsonOptions(edc.JsonOptions) + } + + if edc.BigtableOptions != nil { + result["bigtable_options"] = flattenBigtableOptions(edc.BigtableOptions) + } + + if edc.IgnoreUnknownValues { + result["ignore_unknown_values"] = edc.IgnoreUnknownValues + } + if edc.MaxBadRecords != 0 { + result["max_bad_records"] = edc.MaxBadRecords + } + + if edc.SourceFormat != "" { + result["source_format"] = edc.SourceFormat + } + + if edc.ConnectionId != "" { + result["connection_id"] = edc.ConnectionId + } + + if edc.ReferenceFileSchemaUri != "" { + result["reference_file_schema_uri"] = edc.ReferenceFileSchemaUri + } + if edc.MetadataCacheMode != "" { + result["metadata_cache_mode"] = edc.MetadataCacheMode + } + + if edc.ObjectMetadata != "" { + result["object_metadata"] = edc.ObjectMetadata + } + + return []map[string]interface{}{result}, nil +} + +func expandCsvOptions(configured interface{}) *bigquery.CsvOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.CsvOptions{} + + if v, ok := raw["allow_jagged_rows"]; ok { + opts.AllowJaggedRows = v.(bool) + opts.ForceSendFields = append(opts.ForceSendFields, "allow_jagged_rows") + } + + if v, ok := raw["allow_quoted_newlines"]; ok { + opts.AllowQuotedNewlines = v.(bool) + opts.ForceSendFields = append(opts.ForceSendFields, "allow_quoted_newlines") + } + + if v, ok := raw["encoding"]; ok { + opts.Encoding = v.(string) + } + + if v, ok := raw["field_delimiter"]; ok { + opts.FieldDelimiter = v.(string) + } + + if v, ok := raw["skip_leading_rows"]; ok { + opts.SkipLeadingRows = int64(v.(int)) + } + + if v, ok := raw["quote"]; ok { + quote := v.(string) + opts.Quote = &quote + } + + opts.ForceSendFields = []string{"Quote"} + + return opts +} + +func flattenCsvOptions(opts *bigquery.CsvOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.AllowJaggedRows { + result["allow_jagged_rows"] = opts.AllowJaggedRows + } 
+ + if opts.AllowQuotedNewlines { + result["allow_quoted_newlines"] = opts.AllowQuotedNewlines + } + + if opts.Encoding != "" { + result["encoding"] = opts.Encoding + } + + if opts.FieldDelimiter != "" { + result["field_delimiter"] = opts.FieldDelimiter + } + + if opts.SkipLeadingRows != 0 { + result["skip_leading_rows"] = opts.SkipLeadingRows + } + + if opts.Quote != nil { + result["quote"] = *opts.Quote + } + + return []map[string]interface{}{result} +} + +func expandGoogleSheetsOptions(configured interface{}) *bigquery.GoogleSheetsOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.GoogleSheetsOptions{} + + if v, ok := raw["range"]; ok { + opts.Range = v.(string) + } + + if v, ok := raw["skip_leading_rows"]; ok { + opts.SkipLeadingRows = int64(v.(int)) + } + return opts +} + +func flattenGoogleSheetsOptions(opts *bigquery.GoogleSheetsOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.Range != "" { + result["range"] = opts.Range + } + + if opts.SkipLeadingRows != 0 { + result["skip_leading_rows"] = opts.SkipLeadingRows + } + + return []map[string]interface{}{result} +} + +func expandHivePartitioningOptions(configured interface{}) *bigquery.HivePartitioningOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.HivePartitioningOptions{} + + if v, ok := raw["mode"]; ok { + opts.Mode = v.(string) + } + + if v, ok := raw["require_partition_filter"]; ok { + opts.RequirePartitionFilter = v.(bool) + } + + if v, ok := raw["source_uri_prefix"]; ok { + opts.SourceUriPrefix = v.(string) + } + + return opts +} + +func flattenHivePartitioningOptions(opts *bigquery.HivePartitioningOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.Mode != "" { + result["mode"] = opts.Mode + } + + if 
opts.RequirePartitionFilter { + result["require_partition_filter"] = opts.RequirePartitionFilter + } + + if opts.SourceUriPrefix != "" { + result["source_uri_prefix"] = opts.SourceUriPrefix + } + + return []map[string]interface{}{result} +} + +func expandAvroOptions(configured interface{}) *bigquery.AvroOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.AvroOptions{} + + if v, ok := raw["use_avro_logical_types"]; ok { + opts.UseAvroLogicalTypes = v.(bool) + } + + return opts +} + +func flattenAvroOptions(opts *bigquery.AvroOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.UseAvroLogicalTypes { + result["use_avro_logical_types"] = opts.UseAvroLogicalTypes + } + + return []map[string]interface{}{result} +} + +func expandParquetOptions(configured interface{}) *bigquery.ParquetOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.ParquetOptions{} + + if v, ok := raw["enum_as_string"]; ok { + opts.EnumAsString = v.(bool) + } + + if v, ok := raw["enable_list_inference"]; ok { + opts.EnableListInference = v.(bool) + } + + return opts +} + +func flattenParquetOptions(opts *bigquery.ParquetOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.EnumAsString { + result["enum_as_string"] = opts.EnumAsString + } + + if opts.EnableListInference { + result["enable_list_inference"] = opts.EnableListInference + } + + return []map[string]interface{}{result} +} + +func expandBigtableOptions(configured interface{}) *bigquery.BigtableOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.BigtableOptions{} + + crs := []*bigquery.BigtableColumnFamily{} + if v, ok := raw["column_family"]; ok { + for _, columnFamily := 
range v.([]interface{}) { + crs = append(crs, expandBigtableColumnFamily(columnFamily)) + } + + if len(crs) > 0 { + opts.ColumnFamilies = crs + } + } + + if v, ok := raw["ignore_unspecified_column_families"]; ok { + opts.IgnoreUnspecifiedColumnFamilies = v.(bool) + } + + if v, ok := raw["read_rowkey_as_string"]; ok { + opts.ReadRowkeyAsString = v.(bool) + } + + if v, ok := raw["output_column_families_as_json"]; ok { + opts.OutputColumnFamiliesAsJson = v.(bool) + } + + return opts +} + +func flattenBigtableOptions(opts *bigquery.BigtableOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.ColumnFamilies != nil { + result["column_family"] = flattenBigtableColumnFamily(opts.ColumnFamilies) + } + + if opts.IgnoreUnspecifiedColumnFamilies { + result["ignore_unspecified_column_families"] = opts.IgnoreUnspecifiedColumnFamilies + } + + if opts.ReadRowkeyAsString { + result["read_rowkey_as_string"] = opts.ReadRowkeyAsString + } + + if opts.OutputColumnFamiliesAsJson { + result["output_column_families_as_json"] = opts.OutputColumnFamiliesAsJson + } + + return []map[string]interface{}{result} +} + +func expandBigtableColumnFamily(configured interface{}) *bigquery.BigtableColumnFamily { + raw := configured.(map[string]interface{}) + + opts := &bigquery.BigtableColumnFamily{} + + crs := []*bigquery.BigtableColumn{} + if v, ok := raw["column"]; ok { + for _, column := range v.([]interface{}) { + crs = append(crs, expandBigtableColumn(column)) + } + + if len(crs) > 0 { + opts.Columns = crs + } + } + + if v, ok := raw["family_id"]; ok { + opts.FamilyId = v.(string) + } + + if v, ok := raw["type"]; ok { + opts.Type = v.(string) + } + + if v, ok := raw["encoding"]; ok { + opts.Encoding = v.(string) + } + + if v, ok := raw["only_read_latest"]; ok { + opts.OnlyReadLatest = v.(bool) + } + + return opts +} + +func flattenBigtableColumnFamily(edc []*bigquery.BigtableColumnFamily) []map[string]interface{} { + results := []map[string]interface{}{} + + for _, 
fr := range edc { + result := map[string]interface{}{} + if fr.Columns != nil { + result["column"] = flattenBigtableColumn(fr.Columns) + } + result["family_id"] = fr.FamilyId + result["type"] = fr.Type + result["encoding"] = fr.Encoding + result["only_read_latest"] = fr.OnlyReadLatest + results = append(results, result) + } + + return results +} + +func expandBigtableColumn(configured interface{}) *bigquery.BigtableColumn { + raw := configured.(map[string]interface{}) + + opts := &bigquery.BigtableColumn{} + + if v, ok := raw["qualifier_encoded"]; ok { + opts.QualifierEncoded = v.(string) + } + + if v, ok := raw["qualifier_string"]; ok { + opts.QualifierString = v.(string) + } + + if v, ok := raw["field_name"]; ok { + opts.FieldName = v.(string) + } + + if v, ok := raw["type"]; ok { + opts.Type = v.(string) + } + + if v, ok := raw["encoding"]; ok { + opts.Encoding = v.(string) + } + + if v, ok := raw["only_read_latest"]; ok { + opts.OnlyReadLatest = v.(bool) + } + + return opts +} + +func flattenBigtableColumn(edc []*bigquery.BigtableColumn) []map[string]interface{} { + results := []map[string]interface{}{} + + for _, fr := range edc { + result := map[string]interface{}{} + result["qualifier_encoded"] = fr.QualifierEncoded + result["qualifier_string"] = fr.QualifierString + result["field_name"] = fr.FieldName + result["type"] = fr.Type + result["encoding"] = fr.Encoding + result["only_read_latest"] = fr.OnlyReadLatest + results = append(results, result) + } + + return results +} + +func expandJsonOptions(configured interface{}) *bigquery.JsonOptions { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + opts := &bigquery.JsonOptions{} + + if v, ok := raw["encoding"]; ok { + opts.Encoding = v.(string) + } + + return opts +} + +func flattenJsonOptions(opts *bigquery.JsonOptions) []map[string]interface{} { + result := map[string]interface{}{} + + if opts.Encoding != "" { + result["encoding"] = 
opts.Encoding + } + + return []map[string]interface{}{result} +} + +func expandSchema(raw interface{}, managePolicyTags bool) (*bigquery.TableSchema, error) { + var fields []*bigquery.TableFieldSchema + + if len(raw.(string)) == 0 { + return nil, nil + } + + if err := json.Unmarshal([]byte(raw.(string)), &fields); err != nil { + return nil, err + } + + if managePolicyTags { + for _, field := range fields { + setEmptyPolicyTagsInSchema(field) + } + } + + return &bigquery.TableSchema{Fields: fields}, nil +} + +func flattenSchema(tableSchema *bigquery.TableSchema) (string, error) { + schema, err := json.Marshal(tableSchema.Fields) + if err != nil { + return "", err + } + + return string(schema), nil +} + +// Explicitly set empty PolicyTags unless the PolicyTags field is specified in the schema. +func setEmptyPolicyTagsInSchema(field *bigquery.TableFieldSchema) { + // Field has children fields. + if len(field.Fields) > 0 { + for _, subField := range field.Fields { + setEmptyPolicyTagsInSchema(subField) + } + return + } + // Field is a leaf. 
+ if field.PolicyTags == nil { + field.PolicyTags = &bigquery.TableFieldSchemaPolicyTags{Names: []string{}} + } +} + +func expandTimePartitioning(configured interface{}) *bigquery.TimePartitioning { + raw := configured.([]interface{})[0].(map[string]interface{}) + tp := &bigquery.TimePartitioning{Type: raw["type"].(string)} + + if v, ok := raw["field"]; ok { + tp.Field = v.(string) + } + + if v, ok := raw["expiration_ms"]; ok { + tp.ExpirationMs = int64(v.(int)) + } + + if v, ok := raw["require_partition_filter"]; ok { + tp.RequirePartitionFilter = v.(bool) + } + + return tp +} + +func expandRangePartitioning(configured interface{}) (*bigquery.RangePartitioning, error) { + if configured == nil { + return nil, nil + } + + rpList := configured.([]interface{}) + if len(rpList) == 0 || rpList[0] == nil { + return nil, errors.New("Error casting range partitioning interface to expected structure") + } + + rangePartJson := rpList[0].(map[string]interface{}) + rp := &bigquery.RangePartitioning{ + Field: rangePartJson["field"].(string), + } + + if v, ok := rangePartJson["range"]; ok && v != nil { + rangeLs := v.([]interface{}) + if len(rangeLs) != 1 || rangeLs[0] == nil { + return nil, errors.New("Non-empty range must be given for range partitioning") + } + + rangeJson := rangeLs[0].(map[string]interface{}) + rp.Range = &bigquery.RangePartitioningRange{ + Start: int64(rangeJson["start"].(int)), + End: int64(rangeJson["end"].(int)), + Interval: int64(rangeJson["interval"].(int)), + ForceSendFields: []string{"Start"}, + } + } + + return rp, nil +} + +func flattenEncryptionConfiguration(ec *bigquery.EncryptionConfiguration) []map[string]interface{} { + re := regexp.MustCompile(`(projects/.*/locations/.*/keyRings/.*/cryptoKeys/.*)/cryptoKeyVersions/.*`) + paths := re.FindStringSubmatch(ec.KmsKeyName) + + if len(ec.KmsKeyName) == 0 { + return nil + } + + if len(paths) > 0 { + return []map[string]interface{}{ + { + "kms_key_name": paths[1], + "kms_key_version": ec.KmsKeyName, + 
}, + } + } + + // The key name was returned, no need to set the version + return []map[string]interface{}{{"{{"}}"kms_key_name": ec.KmsKeyName, "kms_key_version": ""{{"}}"}} +} + +func flattenTimePartitioning(tp *bigquery.TimePartitioning, use_old_rpf bool) []map[string]interface{} { + result := map[string]interface{}{"type": tp.Type} + + if tp.Field != "" { + result["field"] = tp.Field + } + + if tp.ExpirationMs != 0 { + result["expiration_ms"] = tp.ExpirationMs + } + + if tp.RequirePartitionFilter && use_old_rpf { + result["require_partition_filter"] = tp.RequirePartitionFilter + } + + return []map[string]interface{}{result} +} + +func flattenRangePartitioning(rp *bigquery.RangePartitioning) []map[string]interface{} { + result := map[string]interface{}{ + "field": rp.Field, + "range": []map[string]interface{}{ + { + "start": rp.Range.Start, + "end": rp.Range.End, + "interval": rp.Range.Interval, + }, + }, + } + + return []map[string]interface{}{result} +} + +func expandView(configured interface{}) *bigquery.ViewDefinition { + raw := configured.([]interface{})[0].(map[string]interface{}) + vd := &bigquery.ViewDefinition{Query: raw["query"].(string)} + + if v, ok := raw["use_legacy_sql"]; ok { + vd.UseLegacySql = v.(bool) + vd.ForceSendFields = append(vd.ForceSendFields, "UseLegacySql") + } + + return vd +} + +func flattenView(vd *bigquery.ViewDefinition) []map[string]interface{} { + result := map[string]interface{}{"query": vd.Query} + result["use_legacy_sql"] = vd.UseLegacySql + + return []map[string]interface{}{result} +} + +func expandMaterializedView(configured interface{}) *bigquery.MaterializedViewDefinition { + raw := configured.([]interface{})[0].(map[string]interface{}) + mvd := &bigquery.MaterializedViewDefinition{Query: raw["query"].(string)} + + if v, ok := raw["enable_refresh"]; ok { + mvd.EnableRefresh = v.(bool) + mvd.ForceSendFields = append(mvd.ForceSendFields, "EnableRefresh") + } + + if v, ok := raw["refresh_interval_ms"]; ok { + 
mvd.RefreshIntervalMs = int64(v.(int)) + mvd.ForceSendFields = append(mvd.ForceSendFields, "RefreshIntervalMs") + } + + if v, ok := raw["allow_non_incremental_definition"]; ok { + mvd.AllowNonIncrementalDefinition = v.(bool) + mvd.ForceSendFields = append(mvd.ForceSendFields, "AllowNonIncrementalDefinition") + } + + return mvd +} + +func flattenMaterializedView(mvd *bigquery.MaterializedViewDefinition) []map[string]interface{} { + result := map[string]interface{}{"query": mvd.Query} + result["enable_refresh"] = mvd.EnableRefresh + result["refresh_interval_ms"] = mvd.RefreshIntervalMs + result["allow_non_incremental_definition"] = mvd.AllowNonIncrementalDefinition + + return []map[string]interface{}{result} +} + +func expandPrimaryKey(configured interface{}) *bigquery.TableConstraintsPrimaryKey { + if len(configured.([]interface{})) == 0 { + return nil + } + + raw := configured.([]interface{})[0].(map[string]interface{}) + pk := &bigquery.TableConstraintsPrimaryKey{} + + columns := []string{} + for _, rawColumn := range raw["columns"].([]interface{}) { + if rawColumn == nil { + // Terraform reads "" as nil, which ends up crashing when we cast below + // sending "" to the API triggers a 400, which is okay. 
+ rawColumn = "" + } + columns = append(columns, rawColumn.(string)) + } + if len(columns) > 0 { + pk.Columns = columns + } + + return pk +} + +func flattenPrimaryKey(edc *bigquery.TableConstraintsPrimaryKey) []map[string]interface{} { + result := map[string]interface{}{} + + if edc.Columns != nil { + result["columns"] = edc.Columns + } + + return []map[string]interface{}{result} +} + +func expandReferencedTable(configured interface{}) *bigquery.TableConstraintsForeignKeysReferencedTable { + raw := configured.([]interface{})[0].(map[string]interface{}) + rt := &bigquery.TableConstraintsForeignKeysReferencedTable{} + + if v, ok := raw["project_id"]; ok { + rt.ProjectId = v.(string) + } + if v, ok := raw["dataset_id"]; ok { + rt.DatasetId = v.(string) + } + if v, ok := raw["table_id"]; ok { + rt.TableId = v.(string) + } + + return rt +} + +func flattenReferencedTable(edc *bigquery.TableConstraintsForeignKeysReferencedTable) []map[string]interface{} { + result := map[string]interface{}{} + + result["project_id"] = edc.ProjectId + result["dataset_id"] = edc.DatasetId + result["table_id"] = edc.TableId + + return []map[string]interface{}{result} +} + +func expandColumnReference(configured interface{}) *bigquery.TableConstraintsForeignKeysColumnReferences { + raw := configured.(map[string]interface{}) + + cr := &bigquery.TableConstraintsForeignKeysColumnReferences{} + + if v, ok := raw["referencing_column"]; ok { + cr.ReferencingColumn = v.(string) + } + if v, ok := raw["referenced_column"]; ok { + cr.ReferencedColumn = v.(string) + } + + return cr +} + +func flattenColumnReferences(edc []*bigquery.TableConstraintsForeignKeysColumnReferences) []map[string]interface{} { + results := []map[string]interface{}{} + + for _, cr := range edc { + result := map[string]interface{}{} + result["referenced_column"] = cr.ReferencedColumn + result["referencing_column"] = cr.ReferencingColumn + results = append(results, result) + } + + return results +} + +func 
expandForeignKey(configured interface{}) *bigquery.TableConstraintsForeignKeys { + raw := configured.(map[string]interface{}) + + fk := &bigquery.TableConstraintsForeignKeys{} + if v, ok := raw["name"]; ok { + fk.Name = v.(string) + } + if v, ok := raw["referenced_table"]; ok { + fk.ReferencedTable = expandReferencedTable(v) + } + crs := []*bigquery.TableConstraintsForeignKeysColumnReferences{} + if v, ok := raw["column_references"]; ok { + for _, rawColumnReferences := range v.([]interface{}) { + crs = append(crs, expandColumnReference(rawColumnReferences)) + } + } + + if len(crs) > 0 { + fk.ColumnReferences = crs + } + + return fk +} + +func flattenForeignKeys(edc []*bigquery.TableConstraintsForeignKeys) []map[string]interface{} { + results := []map[string]interface{}{} + + for _, fr := range edc { + result := map[string]interface{}{} + result["name"] = fr.Name + result["column_references"] = flattenColumnReferences(fr.ColumnReferences) + result["referenced_table"] = flattenReferencedTable(fr.ReferencedTable) + results = append(results, result) + } + + return results +} + +func expandTableConstraints(cfg interface{}) (*bigquery.TableConstraints, error) { + raw := cfg.([]interface{})[0].(map[string]interface{}) + + edc := &bigquery.TableConstraints{} + + if v, ok := raw["primary_key"]; ok { + edc.PrimaryKey = expandPrimaryKey(v) + } + + fks := []*bigquery.TableConstraintsForeignKeys{} + + if v, ok := raw["foreign_keys"]; ok { + for _, rawForeignKey := range v.([]interface{}) { + fks = append(fks, expandForeignKey(rawForeignKey)) + } + } + + if len(fks) > 0 { + edc.ForeignKeys = fks + } + + return edc, nil + +} + +func flattenTableConstraints(edc *bigquery.TableConstraints) []map[string]interface{} { + result := map[string]interface{}{} + + if edc.PrimaryKey != nil { + result["primary_key"] = flattenPrimaryKey(edc.PrimaryKey) + } + if edc.ForeignKeys != nil { + result["foreign_keys"] = flattenForeignKeys(edc.ForeignKeys) + } + + return 
[]map[string]interface{}{result} +} + +func expandTableReplicationInfo(cfg interface{}) map[string]interface{} { + raw := cfg.([]interface{})[0].(map[string]interface{}) + + result := map[string]interface{}{} + + if v, ok := raw["source_project_id"]; ok { + result["source_project_id"] = v.(string) + } + + if v, ok := raw["source_dataset_id"]; ok { + result["source_dataset_id"] = v.(string) + } + + if v, ok := raw["source_table_id"]; ok { + result["source_table_id"] = v.(string) + } + + if v, ok := raw["replication_interval_ms"]; ok { + result["replication_interval_ms"] = int64(v.(int)) + } + + return result +} + +func flattenTableReplicationInfo(tableReplicationInfo map[string]interface{}) []map[string]interface{} { + result := map[string]interface{}{} + + if v, ok := tableReplicationInfo["sourceTable"]; ok { + sourceTable := v.(map[string]interface{}) + if v, ok := sourceTable["projectId"]; ok { + result["source_project_id"] = v.(string) + } + if v, ok := sourceTable["datasetId"]; ok { + result["source_dataset_id"] = v.(string) + } + if v, ok := sourceTable["tableId"]; ok { + result["source_table_id"] = v.(string) + } + } + + if v, ok := tableReplicationInfo["replicationIntervalMs"]; ok { + replicationIntervalMs := v.(string) + if i, err := strconv.Atoi(replicationIntervalMs); err == nil { + result["replication_interval_ms"] = int64(i) + } + } + + return []map[string]interface{}{result} +} + +func resourceBigQueryTableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P<project>[^/]+)/datasets/(?P<dataset_id>[^/]+)/tables/(?P<table_id>[^/]+)", + "(?P<project>[^/]+)/(?P<dataset_id>[^/]+)/(?P<table_id>[^/]+)", + "(?P<dataset_id>[^/]+)/(?P<table_id>[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Explicitly set virtual fields to default values on import + if err := d.Set("deletion_protection", true); err != nil { + return nil, fmt.Errorf("Error setting deletion_protection: %s", err) + } + {{- if ne 
$.TargetVersionName "ga" }} + if err := d.Set("allow_resource_tags_on_deletion", false); err != nil { + return nil, fmt.Errorf("Error setting allow_resource_tags_on_deletion: %s", err) + } + {{- end }} + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/datasets/{{"{{"}}dataset_id{{"}}"}}/tables/{{"{{"}}table_id{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl new file mode 100644 index 000000000000..30b7e6c34804 --- /dev/null +++ b/mmv1/third_party/terraform/services/bigquery/go/resource_bigquery_table_test.go.tmpl @@ -0,0 +1,4261 @@ +package bigquery_test + +import ( + "fmt" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccBigQueryTable_Basic(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "DAY"), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableUpdated(datasetID, 
tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_DropColumns(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableTimePartitioningDropColumns(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableTimePartitioningDropColumnsUpdate(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_Kms(t *testing.T) { + t.Parallel() + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + kms := acctest.BootstrapKMSKey(t) + cryptoKeyName := kms.CryptoKey.Name + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableKms(cryptoKeyName, datasetID, tableID), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func 
TestAccBigQueryTable_HourlyTimePartitioning(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "HOUR"), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableUpdated(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_MonthlyTimePartitioning(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "MONTH"), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableUpdated(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_YearlyTimePartitioning(t *testing.T) { + t.Parallel() + 
+ datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableTimePartitioning(datasetID, tableID, "YEAR"), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableUpdated(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_HivePartitioning(t *testing.T) { + t.Parallel() + bucketName := acctest.TestBucketName(t) + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableHivePartitioning(bucketName, datasetID, tableID), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_HivePartitioningCustomSchema(t *testing.T) { + t.Parallel() + bucketName := acctest.TestBucketName(t) + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableHivePartitioningCustomSchema(bucketName, datasetID, tableID), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"external_data_configuration.0.schema", "deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_AvroPartitioning(t *testing.T) { + t.Parallel() + bucketName := acctest.TestBucketName(t) + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + avroFilePath := "./test-fixtures/avro-generated.avro" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableAvroPartitioning(bucketName, avroFilePath, datasetID, tableID), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_json(t *testing.T) { + t.Parallel() + bucketName := acctest.TestBucketName(t) + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccBigQueryTableJson(datasetID, tableID, bucketName, "UTF-8"), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"external_data_configuration.0.schema", "deletion_protection"}, + }, + { + Config: testAccBigQueryTableJson(datasetID, tableID, bucketName, "UTF-16BE"), + }, + }, + }) +} + +func TestAccBigQueryTable_RangePartitioning(t *testing.T) { + t.Parallel() + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableRangePartitioning(datasetID, tableID), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_PrimaryKey(t *testing.T) { + t.Parallel() + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTablePrimaryKey(datasetID, tableID), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_ForeignKey(t *testing.T) { + t.Parallel() + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", 
acctest.RandString(t, 10)) + tableID_pk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID_fk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + projectID := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableForeignKeys(projectID, datasetID, tableID_pk, tableID_fk), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_updateTableConstraints(t *testing.T) { + t.Parallel() + resourceName := "google_bigquery_table.test" + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID_pk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID_fk := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + projectID := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableForeignKeys(projectID, datasetID, tableID_pk, tableID_fk), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableTableConstraintsUpdate(projectID, datasetID, tableID_pk, tableID_fk), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_View(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 
10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithView(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_updateView(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithView(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableWithNewSqlView(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_WithViewAndSchema(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccBigQueryTableWithViewAndSchema(datasetID, tableID, "table description1"), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableWithViewAndSchema(datasetID, tableID, "table description2"), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_MaterializedView_DailyTimePartioning_Basic(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + materialized_viewID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) + queryNew := strings.ReplaceAll(query, "2019", "2020") + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, query), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + { + ResourceName: "google_bigquery_table.mv_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + { + Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, queryNew), + }, + { + ResourceName: 
"google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + { + ResourceName: "google_bigquery_table.mv_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_MaterializedView_DailyTimePartioning_Update(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + materialized_viewID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) + + enable_refresh := "false" + refresh_interval_ms := "3600000" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithMatViewDailyTimePartitioning_basic(datasetID, tableID, materialized_viewID, query), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + { + ResourceName: "google_bigquery_table.mv_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + { + Config: testAccBigQueryTableWithMatViewDailyTimePartitioning(datasetID, tableID, materialized_viewID, enable_refresh, refresh_interval_ms, query), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", 
"last_modified_time", "deletion_protection"}, + }, + { + ResourceName: "google_bigquery_table.mv_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_MaterializedView_NonIncremental_basic(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + materialized_viewID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + query := fmt.Sprintf("SELECT count(some_string) as count, some_int, ts FROM `%s.%s` WHERE DATE(ts) = '2019-01-01' GROUP BY some_int, ts", datasetID, tableID) + maxStaleness := "0-0 0 10:0:0" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithMatViewNonIncremental_basic(datasetID, tableID, materialized_viewID, query, maxStaleness), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "require_partition_filter", "time_partitioning.0.require_partition_filter"}, + }, + { + ResourceName: "google_bigquery_table.mv_test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "require_partition_filter", "time_partitioning.0.require_partition_filter"}, + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_parquet(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.gz.parquet", acctest.RandString(t, 10)) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := 
fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSParquet(datasetID, tableID, bucketName, objectName), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_parquetOptions(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.gz.parquet", acctest.RandString(t, 10)) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSParquetOptions(datasetID, tableID, bucketName, objectName, true, true), + }, + { + Config: testAccBigQueryTableFromGCSParquetOptions(datasetID, tableID, bucketName, objectName, false, false), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_iceberg(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSIceberg(datasetID, tableID, bucketName), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_parquetFileSetSpecType(t *testing.T) { + t.Parallel() + + bucketName := 
acctest.TestBucketName(t) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + parquetFileName := "test.parquet" + manifestName := fmt.Sprintf("tf_test_%s.manifest.json", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSParquetManifest(datasetID, tableID, bucketName, manifestName, parquetFileName), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_queryAcceleration(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.gz.parquet", acctest.RandString(t, 10)) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + metadataCacheMode := "AUTOMATIC" + // including an optional field. Should work without specifiying. 
+ // Has to follow google sql IntervalValue encoding + maxStaleness := "0-0 0 10:0:0" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSParquetWithQueryAcceleration(connectionID, datasetID, tableID, bucketName, objectName, metadataCacheMode, maxStaleness), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_objectTable(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + // including an optional field. Should work without specifiying. + // Has to follow google sql IntervalValue encoding + maxStaleness := "0-0 0 10:0:0" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSObjectTable(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness), + }, + { + Config: testAccBigQueryTableFromGCSObjectTableMetadata(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness), + }, + { + Config: testAccBigQueryTableFromGCSObjectTable(connectionID, datasetID, tableID, bucketName, objectName, maxStaleness), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseNameReference(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := 
fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. + location := "US" + connection_id_reference := "google_bigquery_connection.test.name" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. 
+ location := "US" + connection_id_reference := "google_bigquery_connection.test.id" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_UsCentral1LowerCase(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. + location := "us-central1" + connection_id_reference := "google_bigquery_connection.test.id" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_UsEast1(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. 
+ location := "US-EAST1" + connection_id_reference := "google_bigquery_connection.test.id" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_connectionIdDiff_UseIdReference_EuropeWest8(t *testing.T) { + t.Parallel() + // Setup + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + // Feature Under Test. + location := "EUROPE-WEST8" + connection_id_reference := "google_bigquery_connection.test.id" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableExternalDataConfigurationConnectionID(location, connectionID, datasetID, tableID, bucketName, objectName, connection_id_reference), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_CSV(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCS(datasetID, tableID, bucketName, objectName, TEST_CSV, "CSV", "\\\""), + Check: testAccCheckBigQueryExtData(t, "\""), + }, + { + Config: testAccBigQueryTableFromGCS(datasetID, tableID, bucketName, objectName, TEST_CSV, "CSV", ""), + Check: testAccCheckBigQueryExtData(t, ""), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_CSV_WithSchema_InvalidSchemas(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSWithExternalDataConfigSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_INVALID_SCHEMA_NOT_JSON), + ExpectError: regexp.MustCompile("contains an invalid JSON"), + }, + { + Config: testAccBigQueryTableFromGCSWithExternalDataConfigSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_INVALID_SCHEMA_NOT_JSON_LIST), + ExpectError: regexp.MustCompile("\"schema\" is not a JSON array"), + }, + { + Config: testAccBigQueryTableFromGCSWithExternalDataConfigSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_INVALID_SCHEMA_JSON_LIST_WITH_NULL_ELEMENT), + ExpectError: regexp.MustCompile("\"schema\" contains a nil element"), + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_CSV_WithSchemaAndConnectionID_UpdateNoConnectionID(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + objectName := 
fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + projectID := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSWithSchemaWithConnectionId(datasetID, tableID, connectionID, projectID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + { + Config: testAccBigQueryTableFromGCSWithSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_CSV_WithSchema_UpdateToConnectionID(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + connectionID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + projectID := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSWithSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + { + Config: testAccBigQueryTableFromGCSWithSchemaWithConnectionId(datasetID, tableID, connectionID, projectID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + { + Config: testAccBigQueryTableFromGCSWithSchemaWithConnectionId2(datasetID, tableID, connectionID, projectID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryExternalDataTable_CSV_WithSchema_UpdateAllowQuotedNewlines(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + objectName := fmt.Sprintf("tf_test_%s.csv", acctest.RandString(t, 10)) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromGCSWithSchema(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + { + Config: testAccBigQueryTableFromGCSWithSchema_UpdatAllowQuotedNewlines(datasetID, tableID, bucketName, objectName, TEST_SIMPLE_CSV, TEST_SIMPLE_CSV_SCHEMA), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryDataTable_bigtable(t *testing.T) { + // bigtable instance does not use the shared HTTP client, this test creates an instance + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 8), + "project": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromBigtable(context), + }, + { + ResourceName: "google_bigquery_table.table", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryDataTable_bigtable_options(t *testing.T) { + // bigtable instance does not use the shared HTTP client, this test creates an instance + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 8), + "project": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromBigtableOptions(context), + }, + { + ResourceName: "google_bigquery_table.table", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableFromBigtable(context), + }, + }, + }) +} + +func TestAccBigQueryDataTable_sheet(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableFromSheet(context), + }, + { + ResourceName: "google_bigquery_table.table", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryDataTable_jsonEquivalency(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTable_jsonEq(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryTable_jsonEqModeRemoved(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccBigQueryDataTable_canReorderParameters(t *testing.T) { + t.Parallel() + + datasetID := 
fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + // we don't run any checks because the resource will error out if + // it attempts to destroy/tear down. + Config: testAccBigQueryTable_jsonPreventDestroy(datasetID, tableID), + }, + { + Config: testAccBigQueryTable_jsonPreventDestroyOrderChanged(datasetID, tableID), + }, + { + Config: testAccBigQueryTable_jsonEq(datasetID, tableID), + }, + }, + }) +} + +func TestAccBigQueryDataTable_expandArray(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTable_arrayInitial(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryTable_arrayExpanded(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "last_modified_time", "deletion_protection", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccBigQueryTable_allowDestroy(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", 
acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTable_noAllowDestroy(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "labels", "terraform_labels"}, + }, + { + Config: testAccBigQueryTable_noAllowDestroy(datasetID, tableID), + Destroy: true, + ExpectError: regexp.MustCompile("deletion_protection"), + }, + { + Config: testAccBigQueryTable_noAllowDestroyUpdated(datasetID, tableID), + }, + }, + }) +} + +func TestAccBigQueryTable_emptySchema(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTable_mimicCreateFromConsole(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTable_emptySchema(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_Update_SchemaWithoutPolicyTagsToWithPolicyTags(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + projectID 
:= envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableBasicSchema(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_Update_SchemaWithPolicyTagsToNoPolicyTag(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + projectID := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableBasicSchema(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_Update_SchemaWithPolicyTagsToEmptyPolicyTag(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 
10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + projectID := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableBasicSchemaWithEmptyPolicyTags(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_Update_SchemaWithPolicyTagsToEmptyPolicyTagNames(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + projectID := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableBasicSchemaWithPolicyTags(datasetID, tableID, projectID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccBigQueryTableBasicSchemaWithEmptyPolicyTagNames(datasetID, tableID), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func 
TestAccBigQueryTable_invalidSchemas(t *testing.T) { + t.Parallel() + // Pending VCR support in https://github.com/hashicorp/terraform-provider-google/issues/15427. + acctest.SkipIfVcr(t) + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithSchema(datasetID, tableID, TEST_INVALID_SCHEMA_NOT_JSON), + ExpectError: regexp.MustCompile("contains an invalid JSON"), + }, + { + Config: testAccBigQueryTableWithSchema(datasetID, tableID, TEST_INVALID_SCHEMA_NOT_JSON_LIST), + ExpectError: regexp.MustCompile("\"schema\" is not a JSON array"), + }, + { + Config: testAccBigQueryTableWithSchema(datasetID, tableID, TEST_INVALID_SCHEMA_JSON_LIST_WITH_NULL_ELEMENT), + ExpectError: regexp.MustCompile("\"schema\" contains a nil element"), + }, + }, + }) +} + +func TestAccBigQueryTable_TableReplicationInfo_ConflictsWithView(t *testing.T) { + t.Parallel() + + datasetID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + tableID := fmt.Sprintf("tf_test_%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithReplicationInfoAndView(datasetID, tableID), + ExpectError: regexp.MustCompile("Schema, view, or materialized view cannot be specified when table replication info is present"), + }, + }, + }) +} + +func TestAccBigQueryTable_TableReplicationInfo_WithoutReplicationInterval(t *testing.T) { + t.Parallel() + + projectID := envvar.GetTestProjectFromEnv() + + 
sourceDatasetID := fmt.Sprintf("tf_test_source_dataset_%s", acctest.RandString(t, 10)) + sourceTableID := fmt.Sprintf("tf_test_source_table_%s", acctest.RandString(t, 10)) + sourceMVID := fmt.Sprintf("tf_test_source_mv_%s", acctest.RandString(t, 10)) + replicaDatasetID := fmt.Sprintf("tf_test_replica_dataset_%s", acctest.RandString(t, 10)) + replicaMVID := fmt.Sprintf("tf_test_replica_mv_%s", acctest.RandString(t, 10)) + sourceMVJobID := fmt.Sprintf("tf_test_create_source_mv_job_%s", acctest.RandString(t, 10)) + dropMVJobID := fmt.Sprintf("tf_test_drop_source_mv_job_%s", acctest.RandString(t, 10)) + replicationIntervalExpr := "" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithReplicationInfo(projectID, sourceDatasetID, sourceTableID, sourceMVID, replicaDatasetID, replicaMVID, sourceMVJobID, dropMVJobID, replicationIntervalExpr), + }, + { + ResourceName: "google_bigquery_table.replica_mv", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccBigQueryTable_TableReplicationInfo_WithReplicationInterval(t *testing.T) { + t.Parallel() + + projectID := envvar.GetTestProjectFromEnv() + + sourceDatasetID := fmt.Sprintf("tf_test_source_dataset_%s", acctest.RandString(t, 10)) + sourceTableID := fmt.Sprintf("tf_test_source_table_%s", acctest.RandString(t, 10)) + sourceMVID := fmt.Sprintf("tf_test_source_mv_%s", acctest.RandString(t, 10)) + replicaDatasetID := fmt.Sprintf("tf_test_replica_dataset_%s", acctest.RandString(t, 10)) + replicaMVID := fmt.Sprintf("tf_test_replica_mv_%s", acctest.RandString(t, 10)) + sourceMVJobID := fmt.Sprintf("tf_test_create_source_mv_job_%s", 
acctest.RandString(t, 10)) + dropMVJobID := fmt.Sprintf("tf_test_drop_source_mv_job_%s", acctest.RandString(t, 10)) + replicationIntervalExpr := "replication_interval_ms = 600000" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithReplicationInfo(projectID, sourceDatasetID, sourceTableID, sourceMVID, replicaDatasetID, replicaMVID, sourceMVJobID, dropMVJobID, replicationIntervalExpr), + }, + { + ResourceName: "google_bigquery_table.replica_mv", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccBigQueryTable_ResourceTags(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "dataset_id": fmt.Sprintf("tf_test_dataset_%s", acctest.RandString(t, 10)), + "table_id" : fmt.Sprintf("tf_test_table_%s", acctest.RandString(t, 10)), + "tag_key_name1": fmt.Sprintf("tf_test_tag_key1_%s", acctest.RandString(t, 10)), + "tag_value_name1": fmt.Sprintf("tf_test_tag_value1_%s", acctest.RandString(t, 10)), + "tag_key_name2": fmt.Sprintf("tf_test_tag_key2_%s", acctest.RandString(t, 10)), + "tag_value_name2": fmt.Sprintf("tf_test_tag_value2_%s", acctest.RandString(t, 10)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckBigQueryTableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigQueryTableWithResourceTags(context), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"deletion_protection", "allow_resource_tags_on_deletion"}, + }, + { + Config: testAccBigQueryTableWithResourceTagsUpdate(context), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "allow_resource_tags_on_deletion"}, + }, + // testAccBigQueryTableWithResourceTagsDestroy must be called at the end of this test to clear the resource tag bindings of the table before deletion. + { + Config: testAccBigQueryTableWithResourceTagsDestroy(context), + }, + { + ResourceName: "google_bigquery_table.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection", "allow_resource_tags_on_deletion"}, + }, + }, + }) +} + +{{ end }} +func testAccCheckBigQueryExtData(t *testing.T, expectedQuoteChar string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_bigquery_table" { + continue + } + + config := acctest.GoogleProviderConfig(t) + dataset := rs.Primary.Attributes["dataset_id"] + table := rs.Primary.Attributes["table_id"] + res, err := config.NewBigQueryClient(config.UserAgent).Tables.Get(config.Project, dataset, table).Do() + if err != nil { + return err + } + + if res.Type != "EXTERNAL" { + return fmt.Errorf("Table \"%s.%s\" is of type \"%s\", expected EXTERNAL.", dataset, table, res.Type) + } + edc := res.ExternalDataConfiguration + cvsOpts := edc.CsvOptions + if cvsOpts == nil || *cvsOpts.Quote != expectedQuoteChar { + return fmt.Errorf("Table \"%s.%s\" quote should be '%s' but was '%s'", dataset, table, expectedQuoteChar, *cvsOpts.Quote) + } + } + return nil + } +} + +func testAccCheckBigQueryTableDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_bigquery_table" { + 
continue + } + + config := acctest.GoogleProviderConfig(t) + _, err := config.NewBigQueryClient(config.UserAgent).Tables.Get(config.Project, rs.Primary.Attributes["dataset_id"], rs.Primary.Attributes["table_id"]).Do() + if err == nil { + return fmt.Errorf("Table still present") + } + } + + return nil + } +} + +func testAccBigQueryTableBasicSchema(datasetID, tableID string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "test" { + dataset_id = "%s" +} + +resource "google_bigquery_table" "test" { + deletion_protection = false + table_id = "%s" + dataset_id = google_bigquery_dataset.test.dataset_id + + schema = < %q expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + +func TestAccBinaryAuthorizationAttestor_basic(t *testing.T) { + t.Parallel() + + name := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBinaryAuthorizationAttestorDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBinaryAuthorizationAttestorBasic(name), + }, + { + ResourceName: "google_binary_authorization_attestor.attestor", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccBinaryAuthorizationAttestor_full(t *testing.T) { + t.Parallel() + + name := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBinaryAuthorizationAttestorDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBinaryAuthorizationAttestorFull(name), + }, + { + ResourceName: "google_binary_authorization_attestor.attestor", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccBinaryAuthorizationAttestor_kms(t *testing.T) { + t.Parallel() + + kms := 
acctest.BootstrapKMSKeyWithPurpose(t, "ASYMMETRIC_SIGN") + attestorName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBinaryAuthorizationAttestorDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBinaryAuthorizationAttestorKms(attestorName, kms.CryptoKey.Name), + }, + { + ResourceName: "google_binary_authorization_attestor.attestor", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccBinaryAuthorizationAttestor_update(t *testing.T) { + t.Parallel() + + name := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBinaryAuthorizationAttestorDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBinaryAuthorizationAttestorBasic(name), + }, + { + ResourceName: "google_binary_authorization_attestor.attestor", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBinaryAuthorizationAttestorFull(name), + }, + { + ResourceName: "google_binary_authorization_attestor.attestor", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccBinaryAuthorizationAttestorBasic(name), + }, + { + ResourceName: "google_binary_authorization_attestor.attestor", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccBinaryAuthorizationAttestorBasic(name string) string { + return fmt.Sprintf(` +resource "google_container_analysis_note" "note" { + name = "tf-test-%s" + attestation_authority { + hint { + human_readable_name = "My Attestor" + } + } +} + +resource "google_binary_authorization_attestor" "attestor" { + name = "tf-test-%s" + attestation_authority_note { + note_reference = 
google_container_analysis_note.note.name + } +} +`, name, name) +} + +func testAccBinaryAuthorizationAttestorFull(name string) string { + return fmt.Sprintf(` +resource "google_container_analysis_note" "note" { + name = "tf-test-%s" + attestation_authority { + hint { + human_readable_name = "My Attestor" + } + } +} + +resource "google_binary_authorization_attestor" "attestor" { + name = "tf-test-%s" + description = "my description" + attestation_authority_note { + note_reference = google_container_analysis_note.note.name + public_keys { + ascii_armored_pgp_public_key = < +<%# load bearing - the erb compiler will interpret the %s as ruby otherwise -%> <%= "%s" %> EOF diff --git a/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_resources_search_all.go.tmpl b/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_resources_search_all.go.tmpl new file mode 100644 index 000000000000..03de0caaf356 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_resources_search_all.go.tmpl @@ -0,0 +1,212 @@ +package cloudasset + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleCloudAssetResourcesSearchAll() *schema.Resource { + return &schema.Resource{ + Read: datasourceGoogleCloudAssetResourcesSearchAllRead, + Schema: map[string]*schema.Schema{ + "scope": { + Type: schema.TypeString, + Required: true, + }, + "query": { + Type: schema.TypeString, + Optional: true, + }, + "asset_types": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Optional: true, + }, + "results": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: 
schema.TypeString, + Computed: true, + }, + "asset_type": { + Type: schema.TypeString, + Computed: true, + }, + "project": { + Type: schema.TypeString, + Computed: true, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "additional_attributes": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "location": { + Type: schema.TypeString, + Computed: true, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network_tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + } +} + +func datasourceGoogleCloudAssetResourcesSearchAllRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + params := make(map[string]string) + results := make([]map[string]interface{}, 0) + + scope := d.Get("scope").(string) + query := d.Get("query").(string) + assetTypes := d.Get("asset_types").([]interface{}) + + url := fmt.Sprintf("https://cloudasset.googleapis.com/v1p1beta1/%s/resources:searchAll", scope) + params["query"] = query + + url, err = transport_tpg.AddArrayQueryParams(url, "asset_types", assetTypes) + if err != nil { + return fmt.Errorf("Error setting asset_types: %s", err) + } + + for { + url, err := transport_tpg.AddQueryParams(url, params) + if err != nil { + return err + } + + var project string + if config.UserProjectOverride && config.BillingProject != "" { + project = config.BillingProject + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Project: project, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error searching resources: 
%s", err) + } + + pageResults := vflattenDatasourceGoogleCloudAssetResourcesSearchAllList(res["results"]) + results = append(results, pageResults...) + + pToken, ok := res["nextPageToken"] + if ok && pToken != nil && pToken.(string) != "" { + params["pageToken"] = pToken.(string) + } else { + break + } + } + + if err := d.Set("results", results); err != nil { + return fmt.Errorf("Error searching resources: %s", err) + } + + if err := d.Set("query", query); err != nil { + return fmt.Errorf("Error setting query: %s", err) + } + + if err := d.Set("asset_types", assetTypes); err != nil { + return fmt.Errorf("Error setting asset_types: %s", err) + } + + d.SetId(scope) + + return nil +} + +func vflattenDatasourceGoogleCloudAssetResourcesSearchAllList(v interface{}) []map[string]interface{} { + if v == nil { + return make([]map[string]interface{}, 0) + } + + ls := v.([]interface{}) + results := make([]map[string]interface{}, 0, len(ls)) + for _, raw := range ls { + p := raw.(map[string]interface{}) + + var mName, mAssetType, mProject, mDisplayName, mDescription, mAdditionalAttributes, mLocation, mLabels, mNetworkTags interface{} + if pName, ok := p["name"]; ok { + mName = pName + } + if pAssetType, ok := p["assetType"]; ok { + mAssetType = pAssetType + } + if pProject, ok := p["project"]; ok { + mProject = pProject + } + if pDisplayName, ok := p["displayName"]; ok { + mDisplayName = pDisplayName + } + if pDescription, ok := p["description"]; ok { + mDescription = pDescription + } + if pAdditionalAttributes, ok := p["additionalAttributes"]; ok { + mAdditionalAttributes = pAdditionalAttributes + } + if pLocation, ok := p["location"]; ok { + mLocation = pLocation + } + if pLabels, ok := p["labels"]; ok { + mLabels = pLabels + } + if pNetworkTags, ok := p["networkTags"]; ok { + mNetworkTags = pNetworkTags + } + results = append(results, map[string]interface{}{ + "name": mName, + "asset_type": mAssetType, + "project": mProject, + "display_name": mDisplayName, + "description": 
mDescription, + "additional_attributes": mAdditionalAttributes, + "location": mLocation, + "labels": mLabels, + "network_tags": mNetworkTags, + }) + } + + return results +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_resources_search_all_test.go.tmpl b/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_resources_search_all_test.go.tmpl new file mode 100644 index 000000000000..a5a66df751c8 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_resources_search_all_test.go.tmpl @@ -0,0 +1,51 @@ +package cloudasset_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceGoogleCloudAssetResourcesSearchAll_basic(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCheckGoogleCloudAssetProjectResources(project), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("data.google_cloud_asset_resources_search_all.resources", + "results.0.asset_type", regexp.MustCompile("cloudresourcemanager.googleapis.com/Project")), + resource.TestMatchResourceAttr("data.google_cloud_asset_resources_search_all.resources", + "results.0.display_name", regexp.MustCompile(project)), + resource.TestMatchResourceAttr("data.google_cloud_asset_resources_search_all.resources", + "results.0.name", regexp.MustCompile(fmt.Sprintf("//cloudresourcemanager.googleapis.com/projects/%s", project))), + 
resource.TestCheckResourceAttrSet("data.google_cloud_asset_resources_search_all.resources", "results.0.location"), + resource.TestCheckResourceAttrSet("data.google_cloud_asset_resources_search_all.resources", "results.0.project"), + ), + }, + }, + }) +} + +func testAccCheckGoogleCloudAssetProjectResources(project string) string { + return fmt.Sprintf(` +data google_cloud_asset_resources_search_all resources { + scope = "projects/%s" + asset_types = [ + "cloudresourcemanager.googleapis.com/Project" + ] +} +`, project) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_search_all_resources.go b/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_search_all_resources.go new file mode 100644 index 000000000000..79a2b86d5a0f --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_search_all_resources.go @@ -0,0 +1,269 @@ +package cloudasset + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleCloudAssetSearchAllResources() *schema.Resource { + return &schema.Resource{ + Read: datasourceGoogleCloudAssetSearchAllResourcesRead, + Schema: map[string]*schema.Schema{ + "scope": { + Type: schema.TypeString, + Required: true, + }, + "query": { + Type: schema.TypeString, + Optional: true, + }, + "asset_types": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Optional: true, + }, + "results": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + "asset_type": { + Type: schema.TypeString, + Computed: true, + }, + "project": { + Type: schema.TypeString, + Computed: true, + }, + "folders": { + 
Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "organization": { + Type: schema.TypeString, + Computed: true, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "location": { + Type: schema.TypeString, + Computed: true, + }, + "labels": { + Type: schema.TypeMap, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network_tags": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "kms_keys": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + }, + "state": { + Type: schema.TypeString, + Computed: true, + }, + "parent_full_resource_name": { + Type: schema.TypeString, + Computed: true, + }, + "parent_asset_type": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func datasourceGoogleCloudAssetSearchAllResourcesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + params := make(map[string]string) + results := make([]map[string]interface{}, 0) + + scope := d.Get("scope").(string) + query := d.Get("query").(string) + assetTypes := d.Get("asset_types").([]interface{}) + + url := fmt.Sprintf("https://cloudasset.googleapis.com/v1/%s:searchAllResources", scope) + params["query"] = query + + url, err = transport_tpg.AddArrayQueryParams(url, "asset_types", assetTypes) + if err != nil { + return fmt.Errorf("Error setting asset_types: %s", err) + } + + for { + url, err := transport_tpg.AddQueryParams(url, params) + if err != nil { + return err + } + + var project string + if 
config.UserProjectOverride && config.BillingProject != "" { + project = config.BillingProject + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Project: project, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("Error searching resources: %s", err) + } + + pageResults := flattenDatasourceGoogleCloudAssetSearchAllResources(res["results"]) + results = append(results, pageResults...) + + pToken, ok := res["nextPageToken"] + if ok && pToken != nil && pToken.(string) != "" { + params["pageToken"] = pToken.(string) + } else { + break + } + } + + if err := d.Set("results", results); err != nil { + return fmt.Errorf("Error searching resources: %s", err) + } + + if err := d.Set("query", query); err != nil { + return fmt.Errorf("Error setting query: %s", err) + } + + if err := d.Set("asset_types", assetTypes); err != nil { + return fmt.Errorf("Error setting asset_types: %s", err) + } + + d.SetId(scope) + + return nil +} + +func flattenDatasourceGoogleCloudAssetSearchAllResources(v interface{}) []map[string]interface{} { + if v == nil { + return make([]map[string]interface{}, 0) + } + + ls := v.([]interface{}) + results := make([]map[string]interface{}, 0, len(ls)) + for _, raw := range ls { + p := raw.(map[string]interface{}) + + var mName, mAssetType, mProject, mFolders, mOrganization, mDisplayName, mDescription, mLocation, mLabels, mNetworkTags, mKmsKeys, mCreateTime, mUpdateTime, mState, mParentFullResourceName, mParentAssetType interface{} + if pName, ok := p["name"]; ok { + mName = pName + } + if pAssetType, ok := p["assetType"]; ok { + mAssetType = pAssetType + } + if pProject, ok := p["project"]; ok { + mProject = pProject + } + if pFolders, ok := p["folders"]; ok { + mFolders = pFolders + } + if pOrganization, ok := p["organization"]; ok { + mOrganization = pOrganization + } + if pDisplayName, ok := p["displayName"]; ok { + mDisplayName = pDisplayName + } + if 
pDescription, ok := p["description"]; ok { + mDescription = pDescription + } + if pLocation, ok := p["location"]; ok { + mLocation = pLocation + } + if pLabels, ok := p["labels"]; ok { + mLabels = pLabels + } + if pNetworkTags, ok := p["networkTags"]; ok { + mNetworkTags = pNetworkTags + } + if pKmsKeys, ok := p["kmsKeys"]; ok { + mKmsKeys = pKmsKeys + } + if pCreateTime, ok := p["createTime"]; ok { + mCreateTime = pCreateTime + } + if pUpdateTime, ok := p["updateTime"]; ok { + mUpdateTime = pUpdateTime + } + if pState, ok := p["state"]; ok { + mState = pState + } + if pParentFullResourceName, ok := p["parentFullResourceName"]; ok { + mParentFullResourceName = pParentFullResourceName + } + if pParentAssetType, ok := p["parentAssetType"]; ok { + mParentAssetType = pParentAssetType + } + results = append(results, map[string]interface{}{ + "name": mName, + "asset_type": mAssetType, + "project": mProject, + "folders": mFolders, + "organization": mOrganization, + "display_name": mDisplayName, + "description": mDescription, + "location": mLocation, + "labels": mLabels, + "network_tags": mNetworkTags, + "kms_keys": mKmsKeys, + "create_time": mCreateTime, + "update_time": mUpdateTime, + "state": mState, + "parent_full_resource_name": mParentFullResourceName, + "parent_asset_type": mParentAssetType, + }) + } + + return results +} + diff --git a/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_search_all_resources_test.go b/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_search_all_resources_test.go new file mode 100644 index 000000000000..fe5fe3e53e45 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudasset/go/data_source_google_cloud_asset_search_all_resources_test.go @@ -0,0 +1,49 @@ +package cloudasset_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceGoogleCloudAssetSearchAllResources_basic(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCheckGoogleCloudAssetProjectResourcesList(project), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("data.google_cloud_asset_search_all_resources.resources", + "results.0.asset_type", regexp.MustCompile("cloudresourcemanager.googleapis.com/Project")), + resource.TestMatchResourceAttr("data.google_cloud_asset_search_all_resources.resources", + "results.0.display_name", regexp.MustCompile(project)), + resource.TestMatchResourceAttr("data.google_cloud_asset_search_all_resources.resources", + "results.0.name", regexp.MustCompile(fmt.Sprintf("//cloudresourcemanager.googleapis.com/projects/%s", project))), + resource.TestCheckResourceAttrSet("data.google_cloud_asset_search_all_resources.resources", "results.0.location"), + resource.TestCheckResourceAttrSet("data.google_cloud_asset_search_all_resources.resources", "results.0.project"), + ), + }, + }, + }) +} + +func testAccCheckGoogleCloudAssetProjectResourcesList(project string) string { + return fmt.Sprintf(` +data google_cloud_asset_search_all_resources resources { + scope = "projects/%s" + asset_types = [ + "cloudresourcemanager.googleapis.com/Project" + ] +} +`, project) +} + diff --git a/mmv1/third_party/terraform/services/cloudbuild/go/resource_cloudbuild_worker_pool_test.go.tmpl b/mmv1/third_party/terraform/services/cloudbuild/go/resource_cloudbuild_worker_pool_test.go.tmpl new file mode 100644 index 000000000000..91678db8c888 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudbuild/go/resource_cloudbuild_worker_pool_test.go.tmpl @@ -0,0 +1,249 @@ +package 
cloudbuild_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccCloudbuildWorkerPool_withComputedAnnotations(t *testing.T) { + // Skip it in VCR test because of the randomness of uuid in "annotations" field + // which causes the replaying mode after recording mode failing in VCR test + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "project": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + }, + CheckDestroy: funcAccTestCloudbuildWorkerPoolCheckDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudbuildWorkerPool_updated(context), + }, + { + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + ResourceName: "google_cloudbuild_worker_pool.pool", + }, + { + Config: testAccCloudbuildWorkerPool_withComputedAnnotations(context), + }, + { + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + ResourceName: "google_cloudbuild_worker_pool.pool", + }, + }, + }) +} + +func TestAccCloudbuildWorkerPool_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "project": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: funcAccTestCloudbuildWorkerPoolCheckDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudbuildWorkerPool_basic(context), + }, + { + ImportState: true, + ImportStateVerify: true, + ResourceName: "google_cloudbuild_worker_pool.pool", + }, + { + Config: testAccCloudbuildWorkerPool_updated(context), + }, + { + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + ResourceName: "google_cloudbuild_worker_pool.pool", + }, + { + Config: testAccCloudbuildWorkerPool_noWorkerConfig(context), + }, + { + ImportState: true, + ImportStateVerify: true, + ResourceName: "google_cloudbuild_worker_pool.pool", + }, + }, + }) +} + +func testAccCloudbuildWorkerPool_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloudbuild_worker_pool" "pool" { + name = "pool%{random_suffix}" + location = "europe-west1" + worker_config { + disk_size_gb = 100 + machine_type = "e2-standard-8" + no_external_ip = true + } +} +`, context) +} + +func testAccCloudbuildWorkerPool_updated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloudbuild_worker_pool" "pool" { + name = "pool%{random_suffix}" + location = "europe-west1" + worker_config { + disk_size_gb = 101 + machine_type = "e2-standard-4" + no_external_ip = false + } + + annotations = { + env = "foo" + default_expiration_ms = 3600000 + } +} +`, context) +} + +func testAccCloudbuildWorkerPool_withComputedAnnotations(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "random_uuid" "test" { +} + +resource "google_cloudbuild_worker_pool" "pool" { + name = "pool%{random_suffix}" + location = "europe-west1" + worker_config { + disk_size_gb = 101 + machine_type = "e2-standard-4" + no_external_ip = false + } + + annotations = { + env = "${random_uuid.test.result}" + default_expiration_ms = 3600000 + } +} +`, context) 
+} + +func testAccCloudbuildWorkerPool_noWorkerConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloudbuild_worker_pool" "pool" { + name = "pool%{random_suffix}" + location = "europe-west1" +} +`, context) +} + +func TestAccCloudbuildWorkerPool_withNetwork(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "project": envvar.GetTestProjectFromEnv(), + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "cloudbuild-workerpool-1"), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: funcAccTestCloudbuildWorkerPoolCheckDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudbuildWorkerPool_withNetwork(context), + }, + { + ImportState: true, + ImportStateVerify: true, + ResourceName: "google_cloudbuild_worker_pool.pool", + }, + }, + }) +} + +func testAccCloudbuildWorkerPool_withNetwork(context map[string]interface{}) string { + return acctest.Nprintf(` + +data "google_compute_network" "network" { + name = "%{network_name}" +} + +resource "google_cloudbuild_worker_pool" "pool" { + name = "pool%{random_suffix}" + location = "europe-west1" + worker_config { + disk_size_gb = 101 + machine_type = "e2-standard-4" + no_external_ip = false + } + network_config { + peered_network = data.google_compute_network.network.id + peered_network_ip_range = "/29" + } +} +`, context) +} + +func funcAccTestCloudbuildWorkerPoolCheckDestroy(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_cloudbuild_worker_pool" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, 
"{{"{{"}}CloudBuildBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/workerPools/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("CloudbuildWorkerPool still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/cloudfunctions/go/resource_cloudfunctions_function_test.go.tmpl b/mmv1/third_party/terraform/services/cloudfunctions/go/resource_cloudfunctions_function_test.go.tmpl new file mode 100644 index 000000000000..25dede0f1569 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudfunctions/go/resource_cloudfunctions_function_test.go.tmpl @@ -0,0 +1,1293 @@ +package cloudfunctions_test + +import ( + "fmt" + "os" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + tpgcloudfunctions "github.com/hashicorp/terraform-provider-google/google/services/cloudfunctions" + "google.golang.org/api/cloudfunctions/v1" +) + +const ( + FUNCTION_TRIGGER_HTTP = iota +) + +const testHTTPTriggerPath = "./test-fixtures/http_trigger.js" +const testHTTPTriggerUpdatePath = "./test-fixtures/http_trigger_update.js" +const testPubSubTriggerPath = "./test-fixtures/pubsub_trigger.js" +const testBucketTriggerPath = "./test-fixtures/bucket_trigger.js" +const testFirestoreTriggerPath = "./test-fixtures/firestore_trigger.js" +const testSecretEnvVarFunctionPath = "./test-fixtures/secret_environment_variables.js" +const testSecretVolumesMountFunctionPath = 
"./test-fixtures/secret_volumes_mount.js" + +func TestAccCloudFunctionsFunction_basic(t *testing.T) { + t.Parallel() + + var function cloudfunctions.CloudFunction + + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testHTTPTriggerPath) + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_basic(functionName, bucketName, zipFilePath), + Check: resource.ComposeTestCheckFunc( + testAccCloudFunctionsFunctionExists( + t, funcResourceName, &function), + resource.TestCheckResourceAttr(funcResourceName, + "name", functionName), + resource.TestCheckResourceAttr(funcResourceName, + "description", "test function"), + resource.TestCheckResourceAttr(funcResourceName, + "docker_registry", "ARTIFACT_REGISTRY"), + resource.TestCheckResourceAttr(funcResourceName, + "available_memory_mb", "128"), + resource.TestCheckResourceAttr(funcResourceName, + "max_instances", "10"), + resource.TestCheckResourceAttr(funcResourceName, + "min_instances", "3"), + resource.TestCheckResourceAttr(funcResourceName, + "ingress_settings", "ALLOW_INTERNAL_ONLY"), + resource.TestCheckResourceAttr(funcResourceName, + "status", "ACTIVE"), + testAccCloudFunctionsFunctionSource(fmt.Sprintf("gs://%s/index.zip", bucketName), &function), + testAccCloudFunctionsFunctionTrigger(FUNCTION_TRIGGER_HTTP, &function), + resource.TestCheckResourceAttr(funcResourceName, + "timeout", "61"), + resource.TestCheckResourceAttr(funcResourceName, + "entry_point", "helloGET"), + resource.TestCheckResourceAttr(funcResourceName, 
+ "trigger_http", "true"), + resource.TestCheckResourceAttr(funcResourceName, + "version_id", "1"), + testAccCloudFunctionsFunctionHasLabel("my-label", "my-label-value", &function), + testAccCloudFunctionsFunctionHasEnvironmentVariable("TEST_ENV_VARIABLE", + "test-env-variable-value", &function), + ), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccCloudFunctionsFunction_update(t *testing.T) { + t.Parallel() + + var function cloudfunctions.CloudFunction + + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testHTTPTriggerPath) + zipFileUpdatePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testHTTPTriggerUpdatePath) + random_suffix := acctest.RandString(t, 10) + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_basic(functionName, bucketName, zipFilePath), + Check: resource.ComposeTestCheckFunc( + testAccCloudFunctionsFunctionExists( + t, funcResourceName, &function), + resource.TestCheckResourceAttr(funcResourceName, + "available_memory_mb", "128"), + resource.TestCheckResourceAttr(funcResourceName, + "version_id", "1"), + testAccCloudFunctionsFunctionHasLabel("my-label", "my-label-value", &function), + ), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables", "labels", "terraform_labels"}, + }, + { + Config: 
testAccCloudFunctionsFunction_updated(functionName, bucketName, zipFileUpdatePath, random_suffix), + Check: resource.ComposeTestCheckFunc( + testAccCloudFunctionsFunctionExists( + t, funcResourceName, &function), + resource.TestCheckResourceAttr(funcResourceName, + "available_memory_mb", "256"), + resource.TestCheckResourceAttr(funcResourceName, + "description", "test function updated"), + resource.TestCheckResourceAttr(funcResourceName, + "docker_registry", "ARTIFACT_REGISTRY"), + resource.TestCheckResourceAttr(funcResourceName, + "timeout", "91"), + resource.TestCheckResourceAttr(funcResourceName, + "max_instances", "15"), + resource.TestCheckResourceAttr(funcResourceName, + "min_instances", "5"), + resource.TestCheckResourceAttr(funcResourceName, + "ingress_settings", "ALLOW_ALL"), + resource.TestCheckResourceAttr(funcResourceName, + "version_id", "2"), + testAccCloudFunctionsFunctionHasLabel("my-label", "my-updated-label-value", &function), + testAccCloudFunctionsFunctionHasLabel("a-new-label", "a-new-label-value", &function), + testAccCloudFunctionsFunctionHasEnvironmentVariable("TEST_ENV_VARIABLE", + "test-env-variable-value", &function), + testAccCloudFunctionsFunctionHasEnvironmentVariable("NEW_ENV_VARIABLE", + "new-env-variable-value", &function), + ), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccCloudFunctionsFunction_buildworkerpool(t *testing.T) { + t.Parallel() + + var function cloudfunctions.CloudFunction + + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + location := "us-central1" + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testHTTPTriggerPath) + proj := envvar.GetTestProjectFromEnv() + + defer 
os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_buildworkerpool(functionName, bucketName, zipFilePath, location), + Check: resource.ComposeTestCheckFunc( + testAccCloudFunctionsFunctionExists( + t, funcResourceName, &function), + resource.TestCheckResourceAttr(funcResourceName, + "name", functionName), + resource.TestCheckResourceAttr(funcResourceName, + "build_worker_pool", fmt.Sprintf("projects/%s/locations/%s/workerPools/pool-%s", proj, location, functionName)), + ), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} + +func TestAccCloudFunctionsFunction_pubsub(t *testing.T) { + t.Parallel() + + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + topicName := fmt.Sprintf("tf-test-sub-%s", acctest.RandString(t, 10)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testPubSubTriggerPath) + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_pubsub(functionName, bucketName, + topicName, zipFilePath), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(funcResourceName, + "max_instances", "3000"), + ), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} + +func TestAccCloudFunctionsFunction_bucket(t *testing.T) { + t.Parallel() + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testBucketTriggerPath) + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_bucket(functionName, bucketName, zipFilePath), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + { + Config: testAccCloudFunctionsFunction_bucketNoRetry(functionName, bucketName, zipFilePath), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccCloudFunctionsFunction_dockerRepository(t *testing.T) { + t.Parallel() + funcResourceName := "google_cloudfunctions_function.function" + arRepoName := fmt.Sprintf("tf-ar-test-docker-repository-%s", acctest.RandString(t, 10)) + functionName := fmt.Sprintf("tf-ar-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-ar-test-bucket-%d", acctest.RandInt(t)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testHTTPTriggerPath) + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_docker_repository(arRepoName, functionName, bucketName, zipFilePath), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccCloudFunctionsFunction_cmek(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + kmsKey := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + funcResourceName := "google_cloudfunctions_function.function" + arRepoName := fmt.Sprintf("tf-cmek-test-docker-repository-%s", acctest.RandString(t, 10)) + functionName := fmt.Sprintf("tf-cmek-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-cmek-test-bucket-%d", acctest.RandInt(t)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testHTTPTriggerPath) + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_cmek(kmsKey.CryptoKey.Name, arRepoName, functionName, bucketName, zipFilePath), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} +{{- end }} + +func TestAccCloudFunctionsFunction_firestore(t *testing.T) { + t.Parallel() + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, 
testFirestoreTriggerPath) + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_firestore(functionName, bucketName, zipFilePath), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} + +func TestAccCloudFunctionsFunction_sourceRepo(t *testing.T) { + t.Parallel() + + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + proj := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_sourceRepo(functionName, proj), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} + +func TestAccCloudFunctionsFunction_serviceAccountEmail(t *testing.T) { + t.Parallel() + + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testHTTPTriggerPath) + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_serviceAccountEmail(functionName, bucketName, zipFilePath), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} + +func TestAccCloudFunctionsFunction_vpcConnector(t *testing.T) { + t.Parallel() + + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + networkName := fmt.Sprintf("tf-test-net-%d", acctest.RandInt(t)) + vpcConnectorName := fmt.Sprintf("tf-test-conn-%s", acctest.RandString(t, 5)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testHTTPTriggerPath) + projectNumber := os.Getenv("GOOGLE_PROJECT_NUMBER") + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_vpcConnector(projectNumber, networkName, functionName, bucketName, zipFilePath, "10.10.0.0/28", vpcConnectorName), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables", "labels", "terraform_labels"}, + }, + { + Config: testAccCloudFunctionsFunction_vpcConnector(projectNumber, networkName, functionName, bucketName, zipFilePath, "10.20.0.0/28", vpcConnectorName+"-update"), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables", "labels", "terraform_labels"}, + }, + }, + }) +} + +func 
TestAccCloudFunctionsFunction_secretEnvVar(t *testing.T) { + t.Parallel() + + randomSecretSuffix := acctest.RandString(t, 10) + accountId := fmt.Sprintf("tf-test-account-%s", randomSecretSuffix) + secretName := fmt.Sprintf("tf-test-secret-%s", randomSecretSuffix) + versionName1 := fmt.Sprintf("tf-test-version1-%s", randomSecretSuffix) + versionName2 := fmt.Sprintf("tf-test-version2-%s", randomSecretSuffix) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + functionName := fmt.Sprintf("tf-test-%s", randomSecretSuffix) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testSecretEnvVarFunctionPath) + funcResourceName := "google_cloudfunctions_function.function" + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_secretEnvVar(secretName, versionName1, bucketName, functionName, "1", zipFilePath, accountId), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + { + Config: testAccCloudFunctionsFunction_secretEnvVar(secretName, versionName2, bucketName+"-update", functionName, "2", zipFilePath, accountId), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} + +func TestAccCloudFunctionsFunction_secretMount(t *testing.T) { + t.Parallel() + + projectNumber := os.Getenv("GOOGLE_PROJECT_NUMBER") + randomSecretSuffix := acctest.RandString(t, 10) + accountId := fmt.Sprintf("tf-test-account-%s", randomSecretSuffix) + secretName := fmt.Sprintf("tf-test-secret-%s", randomSecretSuffix) + versionName1 := 
fmt.Sprintf("tf-test-version1-%s", randomSecretSuffix) + versionName2 := fmt.Sprintf("tf-test-version2-%s", randomSecretSuffix) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + functionName := fmt.Sprintf("tf-test-%s", randomSecretSuffix) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testSecretVolumesMountFunctionPath) + funcResourceName := "google_cloudfunctions_function.function" + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_secretMount(projectNumber, secretName, versionName1, bucketName, functionName, "1", zipFilePath, accountId), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + { + Config: testAccCloudFunctionsFunction_secretMount(projectNumber, secretName, versionName2, bucketName, functionName, "2", zipFilePath, accountId), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} + +func testAccCheckCloudFunctionsFunctionDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_cloudfunctions_function" { + continue + } + + name := rs.Primary.Attributes["name"] + project := rs.Primary.Attributes["project"] + region := rs.Primary.Attributes["region"] + cloudFuncId := &tpgcloudfunctions.CloudFunctionId{ + Project: project, + Region: region, + Name: name, + } + _, err := 
config.NewCloudFunctionsClient(config.UserAgent).Projects.Locations.Functions.Get(cloudFuncId.CloudFunctionId()).Do() + if err == nil { + return fmt.Errorf("Function still exists") + } + + } + + return nil + } +} + +func testAccCloudFunctionsFunctionExists(t *testing.T, n string, function *cloudfunctions.CloudFunction) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := acctest.GoogleProviderConfig(t) + name := rs.Primary.Attributes["name"] + project := rs.Primary.Attributes["project"] + region := rs.Primary.Attributes["region"] + cloudFuncId := &tpgcloudfunctions.CloudFunctionId{ + Project: project, + Region: region, + Name: name, + } + found, err := config.NewCloudFunctionsClient(config.UserAgent).Projects.Locations.Functions.Get(cloudFuncId.CloudFunctionId()).Do() + if err != nil { + return fmt.Errorf("CloudFunctions Function not present") + } + + *function = *found + + return nil + } +} + +func testAccCloudFunctionsFunctionSource(n string, function *cloudfunctions.CloudFunction) resource.TestCheckFunc { + return func(s *terraform.State) error { + if n != function.SourceArchiveUrl { + return fmt.Errorf("Expected source to be %v, got %v", n, function.EntryPoint) + } + return nil + } +} + +func testAccCloudFunctionsFunctionTrigger(n int, function *cloudfunctions.CloudFunction) resource.TestCheckFunc { + return func(s *terraform.State) error { + switch n { + case FUNCTION_TRIGGER_HTTP: + if function.HttpsTrigger == nil { + return fmt.Errorf("Expected HttpsTrigger to be set") + } + default: + return fmt.Errorf("testAccCloudFunctionsFunctionTrigger expects only FUNCTION_TRIGGER_HTTP, ") + } + return nil + } +} + +func testAccCloudFunctionsFunctionHasLabel(key, value string, + function *cloudfunctions.CloudFunction) resource.TestCheckFunc { + return func(s *terraform.State) error 
{ + val, ok := function.Labels[key] + if !ok { + return fmt.Errorf("Label with key %s not found", key) + } + + if val != value { + return fmt.Errorf("Label value did not match for key %s: expected %s but found %s", key, value, val) + } + return nil + } +} + +func testAccCloudFunctionsFunctionHasEnvironmentVariable(key, value string, + function *cloudfunctions.CloudFunction) resource.TestCheckFunc { + return func(s *terraform.State) error { + if val, ok := function.EnvironmentVariables[key]; ok { + if val != value { + return fmt.Errorf("Environment Variable value did not match for key %s: expected %s but found %s", + key, value, val) + } + } else { + return fmt.Errorf("Environment Variable with key %s not found", key) + } + return nil + } +} + +func testAccCloudFunctionsFunction_basic(functionName string, bucketName string, zipFilePath string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + runtime = "nodejs10" + description = "test function" + docker_registry = "ARTIFACT_REGISTRY" + available_memory_mb = 128 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + trigger_http = true + timeout = 61 + entry_point = "helloGET" + ingress_settings = "ALLOW_INTERNAL_ONLY" + labels = { + my-label = "my-label-value" + } + environment_variables = { + TEST_ENV_VARIABLE = "test-env-variable-value" + } + build_environment_variables = { + TEST_ENV_VARIABLE = "test-build-env-variable-value" + } + max_instances = 10 + min_instances = 3 +} +`, bucketName, zipFilePath, functionName) +} + +func testAccCloudFunctionsFunction_updated(functionName string, bucketName string, zipFilePath string, 
randomSuffix string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index_update.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + description = "test function updated" + docker_registry = "ARTIFACT_REGISTRY" + docker_repository = google_artifact_registry_repository.my-repo.id + available_memory_mb = 256 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + trigger_http = true + https_trigger_security_level = "SECURE_ALWAYS" + runtime = "nodejs10" + timeout = 91 + entry_point = "helloGET" + ingress_settings = "ALLOW_ALL" + labels = { + my-label = "my-updated-label-value" + a-new-label = "a-new-label-value" + } + environment_variables = { + TEST_ENV_VARIABLE = "test-env-variable-value" + NEW_ENV_VARIABLE = "new-env-variable-value" + } + build_environment_variables = { + TEST_ENV_VARIABLE = "test-build-env-variable-value" + NEW_ENV_VARIABLE = "new-build-env-variable-value" + } + max_instances = 15 + min_instances = 5 + region = "us-central1" +} + +resource "google_artifact_registry_repository" "my-repo" { + location = "us-central1" + repository_id = "tf-test-my-repository%s" + description = "example docker repository with cmek" + format = "DOCKER" +} +`, bucketName, zipFilePath, functionName, randomSuffix) +} + +func testAccCloudFunctionsFunction_buildworkerpool(functionName string, bucketName string, zipFilePath string, location string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource 
"google_cloudbuild_worker_pool" "pool" { + name = "pool-%[3]s" + location = "%s" + worker_config { + disk_size_gb = 100 + machine_type = "e2-standard-4" + no_external_ip = false + } +} + +resource "google_cloudfunctions_function" "function" { + name = "%[3]s" + runtime = "nodejs10" + description = "test function" + docker_registry = "ARTIFACT_REGISTRY" + available_memory_mb = 128 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + trigger_http = true + timeout = 61 + entry_point = "helloGET" + build_worker_pool = google_cloudbuild_worker_pool.pool.id +}`, bucketName, zipFilePath, functionName, location) +} + +func testAccCloudFunctionsFunction_pubsub(functionName string, bucketName string, + topic string, zipFilePath string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_pubsub_topic" "sub" { + name = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + runtime = "nodejs10" + available_memory_mb = 128 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + timeout = 61 + entry_point = "helloPubSub" + event_trigger { + event_type = "providers/cloud.pubsub/eventTypes/topic.publish" + resource = google_pubsub_topic.sub.name + failure_policy { + retry = false + } + } +} +`, bucketName, zipFilePath, topic, functionName) +} + +func testAccCloudFunctionsFunction_bucket(functionName string, bucketName string, + zipFilePath string) string { + return fmt.Sprintf(` +data "google_client_config" "current" { +} + +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource 
"google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + runtime = "nodejs10" + available_memory_mb = 128 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + timeout = 61 + entry_point = "helloGCS" + event_trigger { + event_type = "google.storage.object.finalize" + resource = "projects/${data.google_client_config.current.project}/buckets/${google_storage_bucket.bucket.name}" + failure_policy { + retry = true + } + } +} +`, bucketName, zipFilePath, functionName) +} + +func testAccCloudFunctionsFunction_bucketNoRetry(functionName string, bucketName string, + zipFilePath string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + runtime = "nodejs10" + available_memory_mb = 128 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + timeout = 61 + entry_point = "helloGCS" + event_trigger { + event_type = "google.storage.object.finalize" + resource = google_storage_bucket.bucket.name + } +} +`, bucketName, zipFilePath, functionName) +} + +func testAccCloudFunctionsFunction_firestore(functionName string, bucketName string, + zipFilePath string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_cloudfunctions_function" 
"function" { + name = "%s" + runtime = "nodejs10" + available_memory_mb = 128 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + timeout = 61 + entry_point = "helloFirestore" + event_trigger { + event_type = "providers/cloud.firestore/eventTypes/document.write" + resource = "messages/{messageId}" + } +} +`, bucketName, zipFilePath, functionName) +} + +func testAccCloudFunctionsFunction_sourceRepo(functionName, project string) string { + return fmt.Sprintf(` +resource "google_cloudfunctions_function" "function" { + name = "%s" + runtime = "nodejs10" + + source_repository { + // There isn't yet an API that'll allow us to create a source repository and + // put code in it, so we created this repository outside the test to be used + // here. If this test is run outside of CI, you may need to create your own + // source repo. + url = "https://source.developers.google.com/projects/%s/repos/cloudfunctions-test-do-not-delete/moveable-aliases/master/paths/" + } + + trigger_http = true + entry_point = "helloGET" +} +`, functionName, project) +} + +func testAccCloudFunctionsFunction_serviceAccountEmail(functionName, bucketName, zipFilePath string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +data "google_compute_default_service_account" "default" { +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + runtime = "nodejs10" + + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + + service_account_email = data.google_compute_default_service_account.default.email + + trigger_http = true + entry_point = "helloGET" +} +`, bucketName, zipFilePath, functionName) +} + 
+func testAccCloudFunctionsFunction_vpcConnector(projectNumber, networkName, functionName, bucketName, zipFilePath, vpcIp, vpcConnectorName string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_project_iam_member" "gcfadmin" { + project = data.google_project.project.project_id + role = "roles/editor" + member = "serviceAccount:service-%s@gcf-admin-robot.iam.gserviceaccount.com" +} + +resource "google_compute_network" "vpc" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_vpc_access_connector" "%s" { + name = "%s" + region = "us-central1" + ip_cidr_range = "%s" + network = google_compute_network.vpc.name +} + +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + runtime = "nodejs10" + + description = "test function" + available_memory_mb = 128 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + trigger_http = true + timeout = 61 + entry_point = "helloGET" + labels = { + my-label = "my-label-value" + } + environment_variables = { + TEST_ENV_VARIABLE = "test-env-variable-value" + } + max_instances = 10 + min_instances = 3 + vpc_connector = google_vpc_access_connector.%s.self_link + vpc_connector_egress_settings = "PRIVATE_RANGES_ONLY" + + depends_on = [google_project_iam_member.gcfadmin] +} +`, projectNumber, networkName, vpcConnectorName, vpcConnectorName, vpcIp, bucketName, zipFilePath, functionName, vpcConnectorName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCloudFunctionsFunction_docker_repository(arRepoName, functionName, bucketName, zipFilePath string) string { + return fmt.Sprintf(` +data "google_project" "project" { +} + +resource 
"google_artifact_registry_repository" "test-ar-repo" { + repository_id = "%s" + location = "us-central1" + format = "DOCKER" +} + +resource "google_artifact_registry_repository_iam_binding" "binding" { + location = google_artifact_registry_repository.test-ar-repo.location + repository = google_artifact_registry_repository.test-ar-repo.name + role = "roles/artifactregistry.admin" + members = [ + "serviceAccount:service-${data.google_project.project.number}@gcf-admin-robot.iam.gserviceaccount.com", + ] +} + +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + description = "Function deployed to customer-provided Artifact Registry" + runtime = "nodejs10" + available_memory_mb = 128 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + docker_repository = google_artifact_registry_repository.test-ar-repo.id + trigger_http = true + timeout = 61 + entry_point = "helloGET" +} +`, arRepoName, bucketName, zipFilePath, functionName) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCloudFunctionsFunction_cmek(kmsKey, arRepoName, functionName, bucketName, zipFilePath string) string { + return fmt.Sprintf(` +data "google_project" "project" { +} + +resource "google_artifact_registry_repository" "unencoded-ar-repo" { + repository_id = "unencoded-ar-repo-to-generate-p4sa" + location = "us-central1" + format = "DOCKER" +} + +resource "google_artifact_registry_repository_iam_binding" "binding" { + location = google_artifact_registry_repository.encoded-ar-repo.location + repository = google_artifact_registry_repository.encoded-ar-repo.name + role = "roles/artifactregistry.admin" + members = [ + 
"serviceAccount:service-${data.google_project.project.number}@gcf-admin-robot.iam.gserviceaccount.com", + ] +} + +resource "google_kms_crypto_key_iam_member" "gcf_cmek_keyuser_1" { + crypto_key_id = "%s" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:service-${data.google_project.project.number}@gcf-admin-robot.iam.gserviceaccount.com" +} + +resource "google_kms_crypto_key_iam_member" "gcf_cmek_keyuser_2" { + crypto_key_id = "%s" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-artifactregistry.iam.gserviceaccount.com" +} + +resource "google_kms_crypto_key_iam_member" "gcf_cmek_keyuser_3" { + crypto_key_id = "%s" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:service-${data.google_project.project.number}@gs-project-accounts.iam.gserviceaccount.com" +} + + +resource "google_artifact_registry_repository" "encoded-ar-repo" { + repository_id = "%s" + kms_key_name = "%s" + location = "us-central1" + format = "DOCKER" + depends_on = [ + google_kms_crypto_key_iam_member.gcf_cmek_keyuser_1, + google_kms_crypto_key_iam_member.gcf_cmek_keyuser_2, + google_kms_crypto_key_iam_member.gcf_cmek_keyuser_3, + ] +} + +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + description = "CMEK function" + runtime = "nodejs10" + available_memory_mb = 128 + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + docker_repository = google_artifact_registry_repository.encoded-ar-repo.id + kms_key_name = "%s" + trigger_http = true + timeout = 61 + entry_point = "helloGET" +} +`, kmsKey, 
kmsKey, kmsKey, arRepoName, kmsKey, bucketName, zipFilePath, functionName, kmsKey) +} +{{- end }} + +func testAccCloudFunctionsFunction_secretEnvVar(secretName, versionName, bucketName, functionName, versionNumber, zipFilePath, accountId string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_service_account" "cloud_function_runner" { + account_id = "%s" + display_name = "Testing Cloud Function Secrets integration" +} + +resource "google_secret_manager_secret" "test_secret" { + secret_id = "%s" + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} + +resource "google_secret_manager_secret_version" "%s" { + secret = google_secret_manager_secret.test_secret.id + secret_data = "This is my secret data." +} + +resource "google_secret_manager_secret_iam_member" "cloud_function_iam_member" { + secret_id = google_secret_manager_secret.test_secret.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${google_service_account.cloud_function_runner.email}" +} + +resource "google_storage_bucket" "cloud_functions" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "cloud_function_zip_object" { + name = "cloud-function.zip" + bucket = google_storage_bucket.cloud_functions.name + source = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + runtime = "nodejs14" + service_account_email = google_service_account.cloud_function_runner.email + entry_point = "echoSecret" + source_archive_bucket = google_storage_bucket.cloud_functions.id + source_archive_object = google_storage_bucket_object.cloud_function_zip_object.name + trigger_http = true + secret_environment_variables { + key = "MY_SECRET" + secret = google_secret_manager_secret.test_secret.secret_id + version = "%s" + } + +} +`, accountId, secretName, versionName, bucketName, zipFilePath, 
functionName, versionNumber) +} + +func testAccCloudFunctionsFunction_secretMount(projectNumber, secretName, versionName, bucketName, functionName, versionNumber, zipFilePath, accountId string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_service_account" "cloud_function_runner" { + account_id = "%s" + display_name = "Testing Cloud Function Secrets integration" +} + +resource "google_secret_manager_secret" "test_secret" { + secret_id = "%s" + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} + +resource "google_secret_manager_secret_version" "%s" { + secret = google_secret_manager_secret.test_secret.id + secret_data = "This is my secret data." +} + +resource "google_secret_manager_secret_iam_member" "cloud_function_iam_member" { + secret_id = google_secret_manager_secret.test_secret.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${google_service_account.cloud_function_runner.email}" +} + +resource "google_storage_bucket" "cloud_functions" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "cloud_function_zip_object" { + name = "cloud-function.zip" + bucket = google_storage_bucket.cloud_functions.name + source = "%s" +} + +resource "google_cloudfunctions_function" "function" { + name = "%s" + runtime = "nodejs14" + service_account_email = google_service_account.cloud_function_runner.email + entry_point = "echoSecret" + source_archive_bucket = google_storage_bucket.cloud_functions.id + source_archive_object = google_storage_bucket_object.cloud_function_zip_object.name + trigger_http = true + secret_volumes { + secret = google_secret_manager_secret.test_secret.secret_id + mount_path = "/etc/secrets" + project_id = "%s" + versions { + version = "%s" + path = "/test-secret" + } + } + +} +`, accountId, secretName, versionName, bucketName, zipFilePath, 
functionName, projectNumber, versionNumber) +} diff --git a/mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_lookup.go b/mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_lookup.go new file mode 100644 index 000000000000..fd1f470d71f7 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_lookup.go @@ -0,0 +1,101 @@ +package cloudidentity + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleCloudIdentityGroupLookup() *schema.Resource { + + return &schema.Resource{ + Read: dataSourceGoogleCloudIdentityGroupLookupRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The [resource name](https://cloud.google.com/apis/design/resource_names) of the looked-up Group.`, + }, + "group_key": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Description: `The EntityKey of the Group to lookup. A unique identifier for an entity in the Cloud Identity Groups API. +An entity can represent either a group with an optional namespace or a user without a namespace. +The combination of id and namespace must be unique; however, the same id can be used with different namespaces.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of the entity. For Google-managed entities, the id should be the email address of an existing group or user. +For external-identity-mapped entities, the id must be a string conforming to the Identity Source's requirements. 
+Must be unique within a namespace.`, + }, + "namespace": { + Type: schema.TypeString, + Optional: true, + Description: `The namespace in which the entity exists. If not specified, the EntityKey represents a Google-managed entity such as a Google user or a Google Group. +If specified, the EntityKey represents an external-identity-mapped group. The namespace must correspond to an identity source created in Admin Console and must be in the form of identitysources/{identity_source}.`, + }, + }, + }, + }, + }, + } +} + +func dataSourceGoogleCloudIdentityGroupLookupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + gkId, ok := d.GetOk("group_key.0.id") + if !ok { + return fmt.Errorf("error getting group key id") + } + id := gkId.(string) + + groupsLookupCall := config.NewCloudIdentityClient(userAgent).Groups.Lookup().GroupKeyId(id) + + gkNamespace, ok := d.GetOk("group_key.0.namespace") + if ok { + // If optional namespace argument provided, add as param to API call + namespace := gkNamespace.(string) + groupsLookupCall = groupsLookupCall.GroupKeyNamespace(namespace) + } + + if config.UserProjectOverride { + billingProject := "" + // err may be nil - project isn't required for this resource + if project, err := tpgresource.GetProject(d, config); err == nil { + billingProject = project + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if billingProject != "" { + groupsLookupCall.Header().Set("X-Goog-User-Project", billingProject) + } + } + resp, err := groupsLookupCall.Do() + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroups %q", d.Id()), "Groups") + } + + if err := d.Set("name", resp.Name); err != nil { + return fmt.Errorf("error 
setting group lookup name: %s", err) + } + d.SetId(time.Now().UTC().String()) + return nil +} diff --git a/mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_memberships.go.tmpl b/mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_memberships.go.tmpl new file mode 100644 index 000000000000..39dd1fcf61b0 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_group_memberships.go.tmpl @@ -0,0 +1,106 @@ +package cloudidentity + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +{{- if ne $.TargetVersionName "ga" }} + cloudidentity "google.golang.org/api/cloudidentity/v1beta1" +{{- else }} + "google.golang.org/api/cloudidentity/v1" +{{- end }} +) + +func DataSourceGoogleCloudIdentityGroupMemberships() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCloudIdentityGroupMembership().Schema) + + return &schema.Resource{ + Read: dataSourceGoogleCloudIdentityGroupMembershipsRead, + + Schema: map[string]*schema.Schema{ + "memberships": { + Type: schema.TypeList, + Computed: true, + Description: `List of Cloud Identity group memberships.`, + Elem: &schema.Resource{ + Schema: dsSchema, + }, + }, + "group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Group to get memberships from.`, + }, + }, + } +} + +func dataSourceGoogleCloudIdentityGroupMembershipsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + result := 
[]map[string]interface{}{} + membershipsCall := config.NewCloudIdentityClient(userAgent).Groups.Memberships.List(d.Get("group").(string)).View("FULL") + if config.UserProjectOverride { + billingProject := "" + // err may be nil - project isn't required for this resource + if project, err := tpgresource.GetProject(d, config); err == nil { + billingProject = project + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if billingProject != "" { + membershipsCall.Header().Set("X-Goog-User-Project", billingProject) + } + } + + err = membershipsCall.Pages(config.Context, func(resp *cloudidentity.ListMembershipsResponse) error { + for _, member := range resp.Memberships { + result = append(result, map[string]interface{}{ + "name": member.Name, + "type": member.Type, + "roles": flattenCloudIdentityGroupMembershipsRoles(member.Roles), +{{- if ne $.TargetVersionName "ga" }} + "member_key": flattenCloudIdentityGroupsEntityKey(member.MemberKey), +{{- end }} + "preferred_member_key": flattenCloudIdentityGroupsEntityKey(member.PreferredMemberKey), + }) + } + + return nil + }) + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroupMemberships %q", d.Id()), "") + } + + if err := d.Set("memberships", result); err != nil { + return fmt.Errorf("Error setting memberships: %s", err) + } + d.SetId(time.Now().UTC().String()) + return nil +} + +func flattenCloudIdentityGroupMembershipsRoles(roles []*cloudidentity.MembershipRole) []interface{} { + transformed := []interface{}{} + + for _, role := range roles { + transformed = append(transformed, map[string]interface{}{ + "name": role.Name, + }) + } + return transformed +} diff --git a/mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_groups.go.tmpl 
b/mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_groups.go.tmpl new file mode 100644 index 000000000000..e814cbb37c65 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudidentity/go/data_source_cloud_identity_groups.go.tmpl @@ -0,0 +1,103 @@ +package cloudidentity + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +{{- if ne $.TargetVersionName "ga" }} + cloudidentity "google.golang.org/api/cloudidentity/v1beta1" +{{- else }} + "google.golang.org/api/cloudidentity/v1" +{{- end }} +) + +func DataSourceGoogleCloudIdentityGroups() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceCloudIdentityGroup().Schema) + + return &schema.Resource{ + Read: dataSourceGoogleCloudIdentityGroupsRead, + + Schema: map[string]*schema.Schema{ + "groups": { + Type: schema.TypeList, + Computed: true, + Description: `List of Cloud Identity groups.`, + Elem: &schema.Resource{ + Schema: dsSchema, + }, + }, + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the entity under which this Group resides in the +Cloud Identity resource hierarchy. 
+ +Must be of the form identitysources/{identity_source_id} for external-identity-mapped +groups or customers/{customer_id} for Google Groups.`, + }, + }, + } +} + +func dataSourceGoogleCloudIdentityGroupsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + result := []map[string]interface{}{} + groupsCall := config.NewCloudIdentityClient(userAgent).Groups.List().Parent(d.Get("parent").(string)).View("FULL") + if config.UserProjectOverride { + billingProject := "" + // err may be nil - project isn't required for this resource + if project, err := tpgresource.GetProject(d, config); err == nil { + billingProject = project + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + if billingProject != "" { + groupsCall.Header().Set("X-Goog-User-Project", billingProject) + } + } + err = groupsCall.Pages(config.Context, func(resp *cloudidentity.ListGroupsResponse) error { + for _, group := range resp.Groups { + result = append(result, map[string]interface{}{ + "name": group.Name, + "display_name": group.DisplayName, + "labels": group.Labels, + "description": group.Description, + "group_key": flattenCloudIdentityGroupsEntityKey(group.GroupKey), + }) + } + + return nil + }) + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("CloudIdentityGroups %q", d.Id()), "Groups") + } + + if err := d.Set("groups", result); err != nil { + return fmt.Errorf("Error setting groups: %s", err) + } + d.SetId(time.Now().UTC().String()) + return nil +} + +func flattenCloudIdentityGroupsEntityKey(entityKey *cloudidentity.EntityKey) []interface{} { + transformed := map[string]interface{}{ + "id": entityKey.Id, + "namespace": entityKey.Namespace, + } + return []interface{}{transformed} +} 
diff --git a/mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_membership_test.go.tmpl b/mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_membership_test.go.tmpl new file mode 100644 index 000000000000..5e999415ed14 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_membership_test.go.tmpl @@ -0,0 +1,563 @@ +package cloudidentity_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + "google.golang.org/api/iam/v1" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func testAccCloudIdentityGroupMembership_updateTest(t *testing.T) { + context := map[string]interface{}{ + "org_domain": envvar.GetTestOrgDomainFromEnv(t), + "cust_id": envvar.GetTestCustIdFromEnv(t), + "identity_user": envvar.GetTestIdentityUserFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudIdentityGroupMembershipDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudIdentityGroupMembership_update1(context), + }, + { + ResourceName: "google_cloud_identity_group_membership.basic", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccCloudIdentityGroupMembership_update2(context), + }, + { + ResourceName: "google_cloud_identity_group_membership.basic", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccCloudIdentityGroupMembership_update1(context), + }, + { + 
ResourceName: "google_cloud_identity_group_membership.basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCloudIdentityGroupMembership_update1(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "group" { + display_name = "tf-test-my-identity-group%{random_suffix}" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group_membership" "basic" { + group = google_cloud_identity_group.group.id + + preferred_member_key { + id = "%{identity_user}@%{org_domain}" + } + + roles { + name = "MEMBER" + expiry_detail { + expire_time = "2215-10-02T15:01:23Z" + } + } + +} +`, context) +} + +func testAccCloudIdentityGroupMembership_update2(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "group" { + display_name = "tf-test-my-identity-group%{random_suffix}" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group_membership" "basic" { + group = google_cloud_identity_group.group.id + + preferred_member_key { + id = "%{identity_user}@%{org_domain}" + } + + roles { + name = "MEMBER" + } + + roles { + name = "MANAGER" + } +} +`, context) +} + +func testAccCloudIdentityGroupMembership_importTest(t *testing.T) { + context := map[string]interface{}{ + "org_domain": envvar.GetTestOrgDomainFromEnv(t), + "cust_id": envvar.GetTestCustIdFromEnv(t), + "identity_user": envvar.GetTestIdentityUserFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudIdentityGroupMembershipDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudIdentityGroupMembership_import(context), + }, + { + ResourceName: "google_cloud_identity_group_membership.basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCloudIdentityGroupMembership_import(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "group" { + display_name = "tf-test-my-identity-group%{random_suffix}" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group_membership" "basic" { + group = google_cloud_identity_group.group.id + + preferred_member_key { + id = "%{identity_user}@%{org_domain}" + } + + roles { + name = "MEMBER" + } + + roles { + name = "MANAGER" + } +} +`, context) +} + +func testAccCloudIdentityGroupMembership_membershipDoesNotExistTest(t *testing.T) { + // Skip VCR because the service account needs to be created/deleted out of + // band, and so those calls aren't recorded + acctest.SkipIfVcr(t) + + context := map[string]interface{}{ + "org_domain": envvar.GetTestOrgDomainFromEnv(t), + "cust_id": envvar.GetTestCustIdFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + saId := "tf-test-sa-" + acctest.RandString(t, 10) + project := envvar.GetTestProjectFromEnv() + config := acctest.BootstrapConfig(t) + + r := &iam.CreateServiceAccountRequest{ + AccountId: saId, + ServiceAccount: &iam.ServiceAccount{}, + } + + sa, err := config.NewIamClient(config.UserAgent).Projects.ServiceAccounts.Create("projects/" + project, r).Do() + if err != nil { + t.Fatalf("Error creating service account: %s", err) + } + + context["member_id"] = sa.Email + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: 
func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudIdentityGroupMembershipDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudIdentityGroupMembership_dne(context), + }, + { + PreConfig: func() { + config := acctest.GoogleProviderConfig(t) + + _, err := config.NewIamClient(config.UserAgent).Projects.ServiceAccounts.Delete(sa.Name).Do() + if err != nil { + t.Errorf("cannot delete service account %s: %v", sa.Name, err) + return + } + }, + Config: testAccCloudIdentityGroupMembership_dne(context), + PlanOnly: true, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCloudIdentityGroupMembership_dne(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "group" { + display_name = "tf-test-my-identity-group-%{random_suffix}" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group-%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group_membership" "basic" { + group = google_cloud_identity_group.group.id + + preferred_member_key { + id = "%{member_id}" + } + + roles { + name = "MEMBER" + } +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipWithMemberKeyTest(t *testing.T) { + context := map[string]interface{}{ + "org_domain": envvar.GetTestOrgDomainFromEnv(t), + "cust_id": envvar.GetTestCustIdFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudIdentityGroupMembershipDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipWithMemberKey(context), + }, + { + ResourceName: "google_cloud_identity_group_membership.cloud_identity_group_membership_basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipWithMemberKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "group" { + display_name = "tf-test-my-identity-group%{random_suffix}" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group" "child-group" { + display_name = "tf-test-my-identity-group%{random_suffix}-child" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}-child@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group_membership" "cloud_identity_group_membership_basic" { + group = google_cloud_identity_group.group.id + + member_key { + id = google_cloud_identity_group.child-group.group_key[0].id + } + + roles { + name = "MEMBER" + } +} +`, context) +} + +func testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipUserWithMemberKeyTest(t *testing.T) { + context := map[string]interface{}{ + "org_domain": envvar.GetTestOrgDomainFromEnv(t), + "cust_id": envvar.GetTestCustIdFromEnv(t), + "identity_user": envvar.GetTestIdentityUserFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudIdentityGroupMembershipDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipUserWithMemberKey(context), + }, + { + ResourceName: "google_cloud_identity_group_membership.cloud_identity_group_membership_basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipUserWithMemberKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "group" { + display_name = "tf-test-my-identity-group%{random_suffix}" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group_membership" "cloud_identity_group_membership_basic" { + group = google_cloud_identity_group.group.id + + member_key { + id = "%{identity_user}@%{org_domain}" + } + + roles { + name = "MEMBER" + } + + roles { + name = "MANAGER" + } +} +`, context) +} +{{- end }} + +func testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipExampleTest(t *testing.T) { + context := map[string]interface{}{ + "org_domain": envvar.GetTestOrgDomainFromEnv(t), + "cust_id": envvar.GetTestCustIdFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudIdentityGroupMembershipDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipExample(context), + }, + { + ResourceName: "google_cloud_identity_group_membership.cloud_identity_group_membership_basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"group"}, + }, + }, + }) +} + +func testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipExample(context 
map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "group" { + display_name = "tf-test-my-identity-group%{random_suffix}" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group" "child-group" { + display_name = "tf-test-my-identity-group%{random_suffix}-child" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}-child@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group_membership" "cloud_identity_group_membership_basic" { + group = google_cloud_identity_group.group.id + + preferred_member_key { + id = google_cloud_identity_group.child-group.group_key[0].id + } + + roles { + name = "MEMBER" + } +} +`, context) +} + +func testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipUserExampleTest(t *testing.T) { + context := map[string]interface{}{ + "org_domain": envvar.GetTestOrgDomainFromEnv(t), + "cust_id": envvar.GetTestCustIdFromEnv(t), + "identity_user": envvar.GetTestIdentityUserFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudIdentityGroupMembershipDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipUserExample(context), + }, + { + ResourceName: "google_cloud_identity_group_membership.cloud_identity_group_membership_basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"group"}, + }, + }, + }) +} + +func 
testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipUserExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "group" { + display_name = "tf-test-my-identity-group%{random_suffix}" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} + +resource "google_cloud_identity_group_membership" "cloud_identity_group_membership_basic" { + group = google_cloud_identity_group.group.id + + preferred_member_key { + id = "%{identity_user}@%{org_domain}" + } + + roles { + name = "MEMBER" + } + + roles { + name = "MANAGER" + } +} +`, context) +} + +func testAccCheckCloudIdentityGroupMembershipDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_cloud_identity_group_membership" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}CloudIdentityBasePath{{"}}"}}{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("CloudIdentityGroupMembership still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_sweeper.go.tmpl b/mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_sweeper.go.tmpl new file mode 100644 index 000000000000..37edadb41ee4 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_sweeper.go.tmpl @@ -0,0 +1,131 @@ +package cloudidentity + +import ( + "context" + "fmt" + "log" + "net/url" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("CloudIdentityGroup", testSweepCloudIdentityGroup) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudIdentityGroup(region string) error { + resourceName := "CloudIdentityGroup" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + custId := envvar.GetTestCustIdFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "parent": url.PathEscape(fmt.Sprintf("customers/%s", custId)), + }, + } + +{{ if ne $.TargetVersionName `ga` -}} + listTemplate := "https://cloudidentity.googleapis.com/v1beta1/groups?parent={{"{{"}}parent{{"}}"}}" +{{- else }} + listTemplate := "https://cloudidentity.googleapis.com/v1/groups?parent={{"{{"}}parent{{"}}"}}" +{{- end }} + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := 
transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["groups"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["displayName"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := obj["name"].(string) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(obj["displayName"].(string)) { + nonPrefixCount++ + continue + } + +{{ if ne $.TargetVersionName `ga` -}} + deleteTemplate := "https://cloudidentity.googleapis.com/v1beta1/{{"{{"}}name{{"}}"}}" +{{- else }} + deleteTemplate := "https://cloudidentity.googleapis.com/v1/{{"{{"}}name{{"}}"}}" +{{- end }} + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d 
items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_test.go.tmpl b/mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_test.go.tmpl new file mode 100644 index 000000000000..8c0d001b0727 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudidentity/go/resource_cloud_identity_group_test.go.tmpl @@ -0,0 +1,178 @@ +package cloudidentity_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Intended to fix https://github.com/hashicorp/terraform-provider-google/issues/10001 +// These are all of the tests that use a cloud_identity_group, except for +// testAccAccessContextManagerGcpUserAccessBinding_basicTest. The theory is that they sometimes +// fail with a 409 because of concurrent roster mutations, so running them serially should prevent +// the error. 
+func TestAccCloudIdentityGroup(t *testing.T) { + testCases := map[string]func(t *testing.T){ + "basic": testAccCloudIdentityGroup_cloudIdentityGroupsBasicExampleTest, + "update": testAccCloudIdentityGroup_updateTest, + "membership_basic": testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipExampleTest, + "membership_update": testAccCloudIdentityGroupMembership_updateTest, + "membership_import": testAccCloudIdentityGroupMembership_importTest, + "membership_dne": testAccCloudIdentityGroupMembership_membershipDoesNotExistTest, + "membership_user": testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipUserExampleTest, +{{- if ne $.TargetVersionName "ga" }} + "membership_with_member_key": testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipWithMemberKeyTest, + "membership_user_with_member_key": testAccCloudIdentityGroupMembership_cloudIdentityGroupMembershipUserWithMemberKeyTest, +{{- end }} + "data_source_basic": testAccDataSourceCloudIdentityGroups_basicTest, + "data_source_membership_basic": testAccDataSourceCloudIdentityGroupMemberships_basicTest, + "data_source_group_lookup": testAccDataSourceCloudIdentityGroupLookup_basicTest, + } + + for name, tc := range testCases { + // shadow the tc variable into scope so that when + // the loop continues, if t.Run hasn't executed tc(t) + // yet, we don't have a race condition + // see https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } +} + +func testAccCloudIdentityGroup_updateTest(t *testing.T) { + context := map[string]interface{}{ + "org_domain": envvar.GetTestOrgDomainFromEnv(t), + "cust_id": envvar.GetTestCustIdFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckCloudIdentityGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudIdentityGroup_cloudIdentityGroupsBasicExample(context), + }, + { + Config: testAccCloudIdentityGroup_update(context), + }, + }, + }) +} + +func testAccCloudIdentityGroup_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "cloud_identity_group_basic" { + display_name = "tf-test-my-identity-group%{random_suffix}-update" + description = "my-description" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + "cloudidentity.googleapis.com/groups.security" = "" + } +} +`, context) +} + +func testAccCloudIdentityGroup_cloudIdentityGroupsBasicExampleTest(t *testing.T) { + context := map[string]interface{}{ + "org_domain": envvar.GetTestOrgDomainFromEnv(t), + "cust_id": envvar.GetTestCustIdFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudIdentityGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudIdentityGroup_cloudIdentityGroupsBasicExample(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_cloud_identity_group.cloud_identity_group_basic", + "additional_group_keys.#"), + ), + }, + { + ResourceName: "google_cloud_identity_group.cloud_identity_group_basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_group_config"}, + }, + }, + }) +} + +func testAccCloudIdentityGroup_cloudIdentityGroupsBasicExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_identity_group" "cloud_identity_group_basic" { + 
display_name = "tf-test-my-identity-group%{random_suffix}" + initial_group_config = "WITH_INITIAL_OWNER" + + parent = "customers/%{cust_id}" + + group_key { + id = "tf-test-my-identity-group%{random_suffix}@%{org_domain}" + } + + labels = { + "cloudidentity.googleapis.com/groups.discussion_forum" = "" + } +} +`, context) +} + +func testAccCheckCloudIdentityGroupDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_cloud_identity_group" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}CloudIdentityBasePath{{"}}"}}{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("CloudIdentityGroup still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/cloudrun/go/resource_cloud_run_service_test.go.tmpl b/mmv1/third_party/terraform/services/cloudrun/go/resource_cloud_run_service_test.go.tmpl new file mode 100644 index 000000000000..031cf395a22f --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudrun/go/resource_cloud_run_service_test.go.tmpl @@ -0,0 +1,1438 @@ +package cloudrun_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccCloudRunService_cloudRunServiceUpdate(t *testing.T) { + t.Parallel() + + project := 
envvar.GetTestProjectFromEnv() + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceUpdate(name, project, "10", "600"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdate(name, project, "50", "300"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) +} + +// test that the status fields are propagated correctly +func TestAccCloudRunService_cloudRunServiceCreateHasStatus(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceUpdate(name, project, "10", "600"), + Check: resource.TestCheckResourceAttrSet("google_cloud_run_service.default", "status.0.traffic.0.revision_name"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "status.0.conditions", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels"}, + }, + }, + }) +} + +// this test checks that 
Terraform does not fail with a 409 recreating the same service +func TestAccCloudRunService_foregroundDeletion(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceUpdate(name, project, "10", "600"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: " ", // very explicitly add a space, as the test runner fails if this is just "" + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdate(name, project, "10", "600"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceUpdate(name, project, concurrency, timeoutSeconds string) string { + return fmt.Sprintf(` +resource "google_cloud_run_service" "default" { + name = "%s" + location = "us-central1" + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + } + labels = { + env = "foo" + default_expiration_ms = 3600000 + } + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + ports { + container_port = 8080 + } + } + container_concurrency = %s + timeout_seconds = %s + } + } + + traffic { + percent = 100 + latest_revision = true + tag = "magic-module" + } + + lifecycle { + ignore_changes = [ + metadata.0.annotations, + ] + } 
+} +`, name, project, concurrency, timeoutSeconds) +} + +func TestAccCloudRunService_secretVolume(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceUpdateWithSecretVolume(name, project, "secret-"+acctest.RandString(t, 5), "secret-"+acctest.RandString(t, 6), "google_secret_manager_secret.secret1.secret_id"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdateWithSecretVolume(name, project, "secret-"+acctest.RandString(t, 10), "secret-"+acctest.RandString(t, 11), "google_secret_manager_secret.secret2.secret_id"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceUpdateWithSecretVolume(name, project, secretName1, secretName2, secretRef string) string { + return fmt.Sprintf(` +data "google_project" "project" { +} + +resource "google_secret_manager_secret" "secret1" { + secret_id = "%s" + replication { + auto {} + } +} + +resource "google_secret_manager_secret" "secret2" { + secret_id = "%s" + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "secret1-version-data" { + secret = google_secret_manager_secret.secret1.name + secret_data = "secret-data1" 
+} + +resource "google_secret_manager_secret_version" "secret2-version-data" { + secret = google_secret_manager_secret.secret2.name + secret_data = "secret-data2" +} + +resource "google_secret_manager_secret_iam_member" "secret1-access" { + secret_id = google_secret_manager_secret.secret1.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${data.google_project.project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret.secret1] +} + +resource "google_secret_manager_secret_iam_member" "secret2-access" { + secret_id = google_secret_manager_secret.secret2.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${data.google_project.project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret.secret2] +} + +resource "google_cloud_run_service" "default" { + name = "%s" + location = "us-central1" + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + volume_mounts { + name = "a-volume" + mount_path = "/secrets" + } + } + volumes { + name = "a-volume" + secret { + secret_name = %s + items { + key = "1" + path = "my-secret" + } + } + } + } + } + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + } + } + + traffic { + percent = 100 + latest_revision = true + } + + lifecycle { + ignore_changes = [ + metadata.0.annotations, + ] + } + + depends_on = [google_secret_manager_secret_version.secret1-version-data, google_secret_manager_secret_version.secret2-version-data] +} +`, secretName1, secretName2, name, secretRef, project) +} + +func TestAccCloudRunService_secretEnvironmentVariable(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { 
+ Config: testAccCloudRunService_cloudRunServiceUpdateWithSecretEnvVar(name, project, "secret-"+acctest.RandString(t, 5), "secret-"+acctest.RandString(t, 6), "google_secret_manager_secret.secret1.secret_id"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdateWithSecretEnvVar(name, project, "secret-"+acctest.RandString(t, 10), "secret-"+acctest.RandString(t, 11), "google_secret_manager_secret.secret2.secret_id"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) +} + +func testAccCloudRunService_cloudRunServiceUpdateWithSecretEnvVar(name, project, secretName1, secretName2, secretRef string) string { + return fmt.Sprintf(` +data "google_project" "project" { +} + +resource "google_secret_manager_secret" "secret1" { + secret_id = "%s" + replication { + auto {} + } +} + +resource "google_secret_manager_secret" "secret2" { + secret_id = "%s" + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "secret1-version-data" { + secret = google_secret_manager_secret.secret1.name + secret_data = "secret-data1" +} + +resource "google_secret_manager_secret_version" "secret2-version-data" { + secret = google_secret_manager_secret.secret2.name + secret_data = "secret-data2" +} + +resource "google_secret_manager_secret_iam_member" "secret1-access" { + secret_id = google_secret_manager_secret.secret1.id + role = "roles/secretmanager.secretAccessor" + member = 
"serviceAccount:${data.google_project.project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret.secret1] +} + +resource "google_secret_manager_secret_iam_member" "secret2-access" { + secret_id = google_secret_manager_secret.secret2.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${data.google_project.project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret.secret2] +} + +resource "google_cloud_run_service" "default" { + name = "%s" + location = "us-central1" + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + env { + name = "SECRET_ENV_VAR" + value_from { + secret_key_ref { + name = %s + key = "1" + } + } + } + } + } + } + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + } + } + + traffic { + percent = 100 + latest_revision = true + } + + lifecycle { + ignore_changes = [ + metadata.0.annotations, + ] + } + + depends_on = [google_secret_manager_secret_version.secret1-version-data, google_secret_manager_secret_version.secret2-version-data] +} +`, secretName1, secretName2, name, secretRef, project) +} + +func TestAccCloudRunService_withProviderDefaultLabels(t *testing.T) { + // The test failed if VCR testing is enabled, because the cached provider config is used. + // With the cached provider config, any changes in the provider default labels will not be applied. 
+ acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_withProviderDefaultLabels(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.env", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "3"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.default_key1", "default_value1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.env", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "4"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.annotations.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.annotations.generated-by", "magic-modules"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_annotations.%", "6"), + ), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", 
"status.0.conditions"}, + }, + { + Config: testAccCloudRunService_resourceLabelsOverridesProviderDefaultLabels(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%", "3"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.env", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.default_expiration_ms", "3600000"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.default_key1", "value1"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "3"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.default_key1", "value1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.env", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "4"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.annotations.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.annotations.generated-by", "magic-modules-update"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_annotations.%", "6"), + ), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_moveResourceLabelToProviderDefaultLabels(context), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.default_expiration_ms", "3600000"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.default_key1", "value1"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "3"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.default_key1", "value1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.env", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "4"), + ), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_resourceLabelsOverridesProviderDefaultLabels(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%", "3"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.env", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.default_expiration_ms", "3600000"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.default_key1", "value1"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "3"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", 
"metadata.0.terraform_labels.default_key1", "value1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.env", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "4"), + ), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceBasic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "1"), + + resource.TestCheckNoResourceAttr("google_cloud_run_service.default", "metadata.0.annotations.%"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_annotations.%", "5"), + ), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) +} + +func TestAccCloudRunServiceMigration_withLabels(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + project := envvar.GetTestProjectFromEnv() + oldVersion := map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.83.0", // a version that doesn't separate user defined labels and system labels + Source: "registry.terraform.io/hashicorp/google", + }, + } + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceUpdate(name, project, "10", "600"), + ExternalProviders: oldVersion, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdate(name, project, "10", "600"), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "3"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.annotations.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_annotations.%", "6"), + ), + }, + }, + }) +} + +func TestAccCloudRunService_withComputedLabels(t *testing.T) { + // Skip it in VCR test because of the randomness of uuid in "labels" field + // which causes the replaying mode after recording mode failing in VCR test + acctest.SkipIfVcr(t) + t.Parallel() + + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + project := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_withComputedLabels(name, project, "10", "600"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) +} + +func testAccCloudRunService_withComputedLabels(name, project, concurrency, timeoutSeconds string) string { + return 
fmt.Sprintf(` +resource "random_uuid" "test" { +} + +resource "google_cloud_run_service" "default" { + name = "%s" + location = "us-central1" + + metadata { + namespace = "%s" + annotations = { + env = "${random_uuid.test.result}" + } + labels = { + key1 = "${random_uuid.test.result}" + } + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + ports { + container_port = 8080 + } + } + container_concurrency = %s + timeout_seconds = %s + } + } + + traffic { + percent = 100 + latest_revision = true + tag = "magic-module" + } +} +`, name, project, concurrency, timeoutSeconds) +} + +func testAccCloudRunService_withProviderDefaultLabels(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + } +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-cloudrun-srv%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + metadata { + namespace = "%{project}" + annotations = { + generated-by = "magic-modules" + } + labels = { + env = "foo" + default_expiration_ms = 3600000 + } + } + + traffic { + percent = 100 + latest_revision = true + } +} +`, context) +} + +func testAccCloudRunService_resourceLabelsOverridesProviderDefaultLabels(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + } +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-cloudrun-srv%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + metadata { + namespace = "%{project}" + annotations = { + generated-by = "magic-modules-update" + } + labels = { + env = "foo" + default_expiration_ms = 3600000 + default_key1 = "value1" + } + } + + traffic { + percent = 100 + latest_revision = 
true + } +} +`, context) +} + +func testAccCloudRunService_moveResourceLabelToProviderDefaultLabels(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + env = "foo" + } +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-cloudrun-srv%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + metadata { + namespace = "%{project}" + annotations = { + generated-by = "magic-modules" + } + labels = { + default_expiration_ms = 3600000 + default_key1 = "value1" + } + } + + traffic { + percent = 100 + latest_revision = true + } +} +`, context) +} + +func testAccCloudRunService_cloudRunServiceBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_service" "default" { + name = "tf-test-cloudrun-srv%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + metadata { + namespace = "%{project}" + } + + traffic { + percent = 100 + latest_revision = true + } +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} + +func TestAccCloudRunService_probes(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceWithEmptyTCPStartupProbeAndHTTPLivenessProbe(name, project), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", 
"status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdateWithTCPStartupProbeAndHTTPLivenessProbe(name, project, "2", "1", "5", "2"), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdateWithEmptyHTTPStartupProbe(name, project), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdateWithHTTPStartupProbe(name, project), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdateWithEmptyGRPCLivenessProbe(name, project), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdateWithGRPCLivenessProbe(name, project), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) +} + +func 
testAccCloudRunService_cloudRunServiceWithEmptyTCPStartupProbeAndHTTPLivenessProbe(name, project string) string {
+	return fmt.Sprintf(`
+resource "google_cloud_run_service" "default" {
+  name     = "%s"
+  location = "us-central1"
+
+  metadata {
+    namespace = "%s"
+    annotations = {
+      generated-by = "magic-modules"
+    }
+  }
+
+  template {
+    spec {
+      containers {
+        image = "gcr.io/cloudrun/hello"
+        ports {
+          container_port = 8080
+        }
+        startup_probe {
+          tcp_socket {}
+        }
+        liveness_probe {
+          http_get {}
+        }
+      }
+    }
+  }
+
+  lifecycle {
+    ignore_changes = [
+      metadata.0.annotations,
+    ]
+  }
+}
+`, name, project)
+}
+
+func testAccCloudRunService_cloudRunServiceUpdateWithTCPStartupProbeAndHTTPLivenessProbe(name, project, delay, timeout, period, failure_threshold string) string {
+	return fmt.Sprintf(`
+resource "google_cloud_run_service" "default" {
+  name     = "%s"
+  location = "us-central1"
+
+  metadata {
+    namespace = "%s"
+    annotations = {
+      generated-by = "magic-modules"
+    }
+  }
+
+  template {
+    spec {
+      containers {
+        image = "gcr.io/cloudrun/hello"
+        ports {
+          container_port = 8080
+        }
+        startup_probe {
+          initial_delay_seconds = %s
+          period_seconds = %s
+          timeout_seconds = %s
+          failure_threshold = %s
+          tcp_socket {
+            port = 8080
+          }
+        }
+        liveness_probe {
+          initial_delay_seconds = %s
+          period_seconds = %s
+          timeout_seconds = %s
+          failure_threshold = %s
+          http_get {
+            path = "/some-path"
+            port = 8080
+            http_headers {
+              name = "User-Agent"
+              value = "magic-modules"
+            }
+            http_headers {
+              name = "Some-Name"
+            }
+          }
+        }
+      }
+    }
+  }
+
+  lifecycle {
+    ignore_changes = [
+      metadata.0.annotations,
+    ]
+  }
+}
+`, name, project, delay, period, timeout, failure_threshold, delay, period, timeout, failure_threshold)
+}
+
+func testAccCloudRunService_cloudRunServiceUpdateWithEmptyHTTPStartupProbe(name, project string) string {
+	return fmt.Sprintf(`
+resource "google_cloud_run_service" "default" {
+  name     = "%s"
+  location = "us-central1"
+
+  metadata {
+    namespace = "%s"
+    
annotations = { + generated-by = "magic-modules" + } + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + startup_probe { + http_get {} + } + } + } + } + + lifecycle { + ignore_changes = [ + metadata.0.annotations, + ] + } +} +`, name, project) +} + +func testAccCloudRunService_cloudRunServiceUpdateWithHTTPStartupProbe(name, project string) string { + return fmt.Sprintf(` +resource "google_cloud_run_service" "default" { + name = "%s" + location = "us-central1" + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + } + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + startup_probe { + http_get { + path = "/some-path" + port = 8080 + http_headers { + name = "User-Agent" + value = "magic-modules" + } + http_headers { + name = "Some-Name" + } + } + } + } + } + } + + lifecycle { + ignore_changes = [ + metadata.0.annotations, + ] + } +} +`, name, project) +} + +func testAccCloudRunService_cloudRunServiceUpdateWithEmptyGRPCLivenessProbe(name, project string) string { + return fmt.Sprintf(` +resource "google_cloud_run_service" "default" { + name = "%s" + location = "us-central1" + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + } + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + liveness_probe { + grpc {} + } + } + } + } + + lifecycle { + ignore_changes = [ + metadata.0.annotations, + ] + } +} +`, name, project) +} + +func testAccCloudRunService_cloudRunServiceUpdateWithGRPCLivenessProbe(name, project string) string { + return fmt.Sprintf(` +resource "google_cloud_run_service" "default" { + name = "%s" + location = "us-central1" + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + } + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + liveness_probe { + grpc { + port = 8080 + service = "health" + } + } + } + } + } + + lifecycle { + ignore_changes = [ + 
metadata.0.annotations, + ] + } +} +`, name, project) +} + +{{ end }} + +func TestAccCloudRunService_withCreationOnlyAttribution(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "add_attribution": "true", + "attribution_strategy": "CREATION_ONLY", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_withAttributionLabelCreate(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.user_label", "foo"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.user_label", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.goog-terraform-provisioned", "true"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "3"), // Includes one label generated by Cloud Run + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.user_label", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.goog-terraform-provisioned", "true"), + ), + }, + { + Config: testAccCloudRunService_withAttributionLabelUpdate(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.user_label", "bar"), + + 
resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.user_label", "bar"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.goog-terraform-provisioned", "true"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "3"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.user_label", "bar"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.goog-terraform-provisioned", "true"), + ), + }, + { + Config: testAccCloudRunService_withAttributionLabelClear(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.goog-terraform-provisioned", "true"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.goog-terraform-provisioned", "true"), + ), + }, + }, + }) +} + +func TestAccCloudRunService_withProactiveAttribution(t *testing.T) { + // VCR tests cache provider configuration between steps, this test changes provider configuration and fails under VCR. 
+ acctest.SkipIfVcr(t) + t.Parallel() + + suffix := acctest.RandString(t, 10) + createContext := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": suffix, + "add_attribution": "false", + "attribution_strategy": "PROACTIVE", + } + updateContext := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": suffix, + "add_attribution": "true", + "attribution_strategy": "PROACTIVE", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_withAttributionLabelCreate(createContext), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.user_label", "foo"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.user_label", "foo"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "2"), // Includes one label generated by Cloud Run + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.user_label", "foo"), + ), + }, + { + Config: testAccCloudRunService_withAttributionLabelUpdate(updateContext), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.labels.user_label", "bar"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", 
"metadata.0.terraform_labels.user_label", "bar"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.goog-terraform-provisioned", "true"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "3"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.user_label", "bar"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.goog-terraform-provisioned", "true"), + ), + }, + { + Config: testAccCloudRunService_withAttributionLabelClear(updateContext), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_cloud_run_service.default", "metadata.0.labels.%"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.terraform_labels.goog-terraform-provisioned", "true"), + + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_service.default", "metadata.0.effective_labels.goog-terraform-provisioned", "true"), + ), + }, + }, + }) +} + +func testAccCloudRunService_withAttributionLabelCreate(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + add_terraform_attribution_label = %{add_attribution} + terraform_attribution_label_addition_strategy = "%{attribution_strategy}" +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-cloudrun-srv%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + metadata { + namespace = "%{project}" + labels = { + user_label = "foo" + } + } + + traffic { + percent = 100 + latest_revision = true + } +} +`, context) +} + +func 
testAccCloudRunService_withAttributionLabelUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + add_terraform_attribution_label = %{add_attribution} + terraform_attribution_label_addition_strategy = "%{attribution_strategy}" +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-cloudrun-srv%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + metadata { + namespace = "%{project}" + labels = { + user_label = "bar" + } + } + + traffic { + percent = 100 + latest_revision = true + } +} +`, context) +} + +func testAccCloudRunService_withAttributionLabelClear(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + add_terraform_attribution_label = %{add_attribution} + terraform_attribution_label_addition_strategy = "%{attribution_strategy}" +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-cloudrun-srv%{random_suffix}" + location = "us-central1" + + template { + spec { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } + + metadata { + namespace = "%{project}" + } + + traffic { + percent = 100 + latest_revision = true + } +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} + +func TestAccCloudRunService_csiVolume(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + name := "tftest-cloudrun-" + acctest.RandString(t, 6) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", 
"metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + { + Config: testAccCloudRunService_cloudRunServiceUpdateWithGcsVolume(name, project,), + }, + { + ResourceName: "google_cloud_run_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"metadata.0.resource_version", "metadata.0.annotations", "metadata.0.labels", "metadata.0.terraform_labels", "status.0.conditions"}, + }, + }, + }) + } + + +func testAccCloudRunService_cloudRunServiceWithEmptyDirVolume(name, project string) string { + return fmt.Sprintf(` +resource "google_cloud_run_service" "default" { + provider = google-beta + name = "%s" + location = "us-central1" + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + "run.googleapis.com/launch-stage" = "BETA" + } + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + volume_mounts { + name = "vol1" + mount_path = "/mnt/vol1" + } + } + volumes { + name = "vol1" + empty_dir { size_limit = "256Mi" } + } + } + } + + lifecycle { + ignore_changes = [ + metadata.0.annotations, + ] + } +} +`, name, project) +} + +func testAccCloudRunService_cloudRunServiceUpdateWithGcsVolume(name, project string) string { + return fmt.Sprintf(` +resource "google_cloud_run_service" "default" { + provider = google-beta + name = "%s" + location = "us-central1" + + metadata { + namespace = "%s" + annotations = { + generated-by = "magic-modules" + "run.googleapis.com/launch-stage" = "BETA" + } + } + + template { + metadata { + annotations = { + "run.googleapis.com/execution-environment" = "gen2" + } + } + spec { + containers { + image = "gcr.io/cloudrun/hello" + volume_mounts { + name = "vol1" + mount_path = "/mnt/vol1" + } + } + volumes { + name = "vol1" + csi { + driver = "gcsfuse.run.googleapis.com" + read_only = true + volume_attributes = { + bucketName = "gcp-public-data-landsat" + } + } + } + } + } + + lifecycle { + ignore_changes = [ + 
metadata.0.annotations, + ] + } +} +`, name, project) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_job_test.go.tmpl b/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_job_test.go.tmpl new file mode 100644 index 000000000000..44b1e1ab979a --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_job_test.go.tmpl @@ -0,0 +1,566 @@ +package cloudrunv2_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccCloudRunV2Job_cloudrunv2JobFullUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobFull(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage", "labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobFullUpdate(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage", "labels", "terraform_labels", "annotations"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobFull(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + labels = { + label-1 = "value-1" + } + annotations = { + job-annotation-1 = "job-value-1" + } + client = 
"client-1" + client_version = "client-version-1" + + template { + labels = { + label-1 = "value-1" + } + annotations = { + temp-annotation-1 = "temp-value-1" + } + parallelism = 4 + task_count = 4 + template { + timeout = "300s" + service_account = google_service_account.service_account.email + execution_environment = "EXECUTION_ENVIRONMENT_GEN2" + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + args = ["https://cloud.google.com/run", "www.google.com"] + command = ["/bin/echo"] + env { + name = "SOURCE" + value = "remote" + } + env { + name = "TARGET" + value = "home" + } + ports { + name = "h2c" + container_port = 8080 + } + resources { + limits = { + cpu = "4" + memory = "2Gi" + } + } + } + max_retries = 5 + } + } + + lifecycle { + ignore_changes = [ + launch_stage, + ] + } + } + resource "google_service_account" "service_account" { + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Test Service Account" + } +`, context) +} + +func testAccCloudRunV2Job_cloudrunv2JobFullUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + binary_authorization { + use_default = true + breakglass_justification = "Some justification" + } + labels = { + label-1 = "value-update" + } + annotations = { + job-annotation-1 = "job-value-update" + } + client = "client-update" + client_version = "client-version-update" + + template { + labels = { + label-1 = "value-update" + } + annotations = { + temp-annotation-1 = "temp-value-update" + } + parallelism = 2 + task_count = 8 + template { + timeout = "500s" + service_account = google_service_account.service_account.email + execution_environment = "EXECUTION_ENVIRONMENT_GEN2" + containers { + name = "container-update" + image = "us-docker.pkg.dev/cloudrun/container/hello" + args = ["https://cloud.google.com/run"] + command = ["printenv"] 
+ env { + name = "SOURCE_UPDATE" + value = "remote-update" + } + env { + name = "TARGET_UPDATE" + value = "home-update" + } + ports { + name = "h2c" + container_port = 8080 + } + resources { + limits = { + cpu = "2" + memory = "8Gi" + } + } + } + vpc_access{ + connector = google_vpc_access_connector.connector.id + egress = "ALL_TRAFFIC" + } + max_retries = 0 + } + } + + lifecycle { + ignore_changes = [ + launch_stage, + ] + } +} +resource "google_service_account" "service_account" { + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Test Service Account" +} +resource "google_vpc_access_connector" "connector" { + name = "tf-test-run-vpc%{random_suffix}" + subnet { + name = google_compute_subnetwork.custom_test.name + } + machine_type = "e2-standard-4" + min_instances = 2 + max_instances = 3 + region = "us-central1" +} +resource "google_compute_subnetwork" "custom_test" { + name = "tf-test-run-subnetwork%{random_suffix}" + ip_cidr_range = "10.2.0.0/28" + region = "us-central1" + network = google_compute_network.custom_test.id +} +resource "google_compute_network" "custom_test" { + name = "tf-test-run-network%{random_suffix}" + auto_create_subnetworks = false +} +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobWithDirectVPCUpdate(t *testing.T) { + t.Parallel() + + jobName := fmt.Sprintf("tf-test-cloudrun-service%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "job_name": jobName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithDirectVPC(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + { + Config: 
testAccCloudRunV2Job_cloudrunv2JobWithDirectVPCUpdate(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobWithDirectVPC(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "%{job_name}" + location = "us-central1" + launch_stage = "BETA" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + } + vpc_access { + network_interfaces { + network = "default" + } + } + } + } + + lifecycle { + ignore_changes = [ + launch_stage, + ] + } + } +`, context) +} + +func testAccCloudRunV2Job_cloudrunv2JobWithDirectVPCUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "%{job_name}" + location = "us-central1" + launch_stage = "BETA" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + } + vpc_access { + network_interfaces { + network = "my-network" + subnetwork = "my-network" + tags = ["tag1", "tag2", "tag3"] + } + } + } + } + + lifecycle { + ignore_changes = [ + launch_stage, + ] + } + } +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccCloudRunV2Job_cloudrunv2JobWithGcsUpdate(t *testing.T) { + t.Parallel() + + jobName := fmt.Sprintf("tf-test-cloudrun-service%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "job_name": jobName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithNoVolume(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: 
true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithGcsVolume(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobWithNoVolume(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "%{job_name}" + location = "us-central1" + launch_stage = "BETA" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + } + } + } + + lifecycle { + ignore_changes = [ + launch_stage, + ] + } + } +`, context) +} + +func testAccCloudRunV2Job_cloudrunv2JobWithGcsVolume(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "%{job_name}" + location = "us-central1" + launch_stage = "BETA" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + volume_mounts { + name = "gcs" + mount_path = "/mnt/gcs" + } + } + volumes { + name = "gcs" + gcs { + bucket = "gcp-public-data-landsat" + read_only = true + } + } + } + } + } +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobWithNfsUpdate(t *testing.T) { + t.Parallel() + + jobName := fmt.Sprintf("tf-test-cloudrun-service%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "job_name": jobName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithNoVolume(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithNfsVolume(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobWithNfsVolume(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "%{job_name}" + location = "us-central1" + launch_stage = "BETA" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + volume_mounts { + name = "nfs" + mount_path = "/mnt/nfs" + } + } + volumes { + name = "nfs" + nfs { + server = "10.0.10.10" + path = "/" + read_only = true + } + } + } + } + } +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobWithStartExecutionTokenUpdate(t *testing.T) { + t.Parallel() + + jobName := fmt.Sprintf("tf-test-cloudrun-service%s", acctest.RandString(t, 10)) + context1 := map[string]interface{}{ + "job_name": jobName, + "token": "token1", + } + context2 := map[string]interface{}{ + "job_name": jobName, + "token": "token2", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithStartExecutionToken(context1), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithStartExecutionToken(context2), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + 
}, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobWithStartExecutionToken(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "%{job_name}" + location = "us-central1" + start_execution_token = "%{token}" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + } + } + } + } +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobWithRunExecutionTokenUpdate(t *testing.T) { + t.Parallel() + + jobName := fmt.Sprintf("tf-test-cloudrun-service%s", acctest.RandString(t, 10)) + context1 := map[string]interface{}{ + "job_name": jobName, + "token": "token1", + } + context2 := map[string]interface{}{ + "job_name": jobName, + "token": "token2", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithRunExecutionToken(context1), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithRunExecutionToken(context2), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "launch_stage"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobWithRunExecutionToken(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "%{job_name}" + location = "us-central1" + run_execution_token = "%{token}" + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/job" + } + } + } + } +`, context) +} +{{- end }} diff --git 
a/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_service_test.go.tmpl b/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_service_test.go.tmpl new file mode 100644 index 000000000000..987729a706f9 --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudrunv2/go/resource_cloud_run_v2_service_test.go.tmpl @@ -0,0 +1,1051 @@ +package cloudrunv2_test + +import ( + "fmt" + "regexp" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/cloudrunv2" +) + +func TestAccCloudRunV2Service_cloudrunv2ServiceFullUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceFull(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels"}, + }, + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceFullUpdate(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels"}, + }, + }, + }) +} + + +func testAccCloudRunV2Service_cloudrunv2ServiceFull(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + description = "description creating" + location = "us-central1" + 
annotations = { + generated-by = "magic-modules" + } + ingress = "INGRESS_TRAFFIC_ALL" + labels = { + label-1 = "value-1" + } + client = "client-1" + client_version = "client-version-1" + template { + labels = { + label-1 = "value-1" + } + timeout = "300s" + service_account = google_service_account.service_account.email + execution_environment = "EXECUTION_ENVIRONMENT_GEN2" + scaling { + max_instance_count = 3 + min_instance_count = 1 + } + annotations = { + generated-by = "magic-modules" + } + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + env { + name = "SOURCE" + value = "remote" + } + env { + name = "TARGET" + value = "home" + } + ports { + name = "h2c" + container_port = 8080 + } + resources { + cpu_idle = true + startup_cpu_boost = true + limits = { + cpu = "4" + memory = "2Gi" + } + } + } + session_affinity = false + } +} + +resource "google_service_account" "service_account" { + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Test Service Account" +} +`, context) +} +func testAccCloudRunV2Service_cloudrunv2ServiceFullUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + description = "description updating" + location = "us-central1" + annotations = { + generated-by = "magic-modules-files" + } + ingress = "INGRESS_TRAFFIC_ALL" + binary_authorization { + use_default = true + breakglass_justification = "Some justification" + } + labels = { + label-1 = "value-update" + } + client = "client-update" + client_version = "client-version-update" + + template { + labels = { + label-1 = "value-update" + } + timeout = "500s" + service_account = google_service_account.service_account.email + execution_environment = "EXECUTION_ENVIRONMENT_GEN1" + scaling { + max_instance_count = 2 + min_instance_count = 1 + } + annotations = { + generated-by = "magic-modules" + } + containers { + 
name = "container-update" + image = "us-docker.pkg.dev/cloudrun/container/hello" + env { + name = "SOURCE_UPDATE" + value = "remote-update" + } + env { + name = "TARGET_UPDATE" + value = "home-update" + } + ports { + name = "h2c" + container_port = 8080 + } + resources { + cpu_idle = true + startup_cpu_boost = false + limits = { + cpu = "2" + memory = "8Gi" + } + } + } + vpc_access{ + connector = google_vpc_access_connector.connector.id + egress = "ALL_TRAFFIC" + } + session_affinity = true + } + traffic { + type = "TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST" + percent = 100 + tag = "tt-update" + } +} + +resource "google_service_account" "service_account" { + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Test Service Account" +} + +resource "google_vpc_access_connector" "connector" { + name = "tf-test-run-vpc%{random_suffix}" + subnet { + name = google_compute_subnetwork.custom_test.name + } + machine_type = "e2-standard-4" + min_instances = 2 + max_instances = 3 + region = "us-central1" +} +resource "google_compute_subnetwork" "custom_test" { + name = "tf-test-run-subnetwork%{random_suffix}" + ip_cidr_range = "10.2.0.0/28" + region = "us-central1" + network = google_compute_network.custom_test.id +} +resource "google_compute_network" "custom_test" { + name = "tf-test-run-network%{random_suffix}" + auto_create_subnetworks = false +} +`, context) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccCloudRunV2Service_cloudrunv2ServiceGcsVolume(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceGcsVolume(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels", "launch_stage"}, + }, + }, + }) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceGcsVolume(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + description = "description creating" + location = "us-central1" + launch_stage = "BETA" + annotations = { + generated-by = "magic-modules" + } + ingress = "INGRESS_TRAFFIC_ALL" + labels = { + label-1 = "value-1" + } + client = "client-1" + client_version = "client-version-1" + template { + labels = { + label-1 = "value-1" + } + timeout = "300s" + service_account = google_service_account.service_account.email + execution_environment = "EXECUTION_ENVIRONMENT_GEN2" + scaling { + max_instance_count = 3 + min_instance_count = 1 + } + annotations = { + generated-by = "magic-modules" + } + volumes { + name = "gcs" + gcs { + bucket = "gcp-public-data-landsat" + read_only = true + } + } + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + env { + name = "SOURCE" + value = "remote" + } + env { + name = "TARGET" + value = "home" + } + ports { + name = "h2c" + container_port = 8080 + } + volume_mounts { + name = "gcs" + mount_path = "/mnt/landsat" + } + resources { + cpu_idle = true + startup_cpu_boost = true + limits = { + cpu = "4" + memory = "2Gi" + } + } + } + session_affinity = false + } +} + +resource "google_service_account" "service_account" { + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Test Service Account" +} +`, context) +} +{{- end }} +func TestAccCloudRunV2Service_cloudrunv2ServiceTCPProbesUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceWithEmptyTCPStartupProbeAndHTTPLivenessProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations"}, + }, + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceUpdateWithTCPStartupProbeAndHTTPLivenessProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations"}, + }, + }, + }) +} + +func TestAccCloudRunV2Service_cloudrunv2ServiceHTTPProbesUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceUpdateWithEmptyHTTPStartupProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations"}, + }, + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceUpdateWithHTTPStartupProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations"}, + }, + }, + }) +} + +func TestAccCloudRunV2Service_cloudrunv2ServiceGRPCProbesUpdate(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-cloudrun-service%s", 
acctest.RandString(t, 10)) + context := map[string]interface{}{ + "service_name": serviceName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudRunServiceUpdateWithEmptyGRPCLivenessProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations"}, + }, + { + Config: testAccCloudRunV2Service_cloudRunServiceUpdateWithGRPCLivenessProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations"}, + }, + // The following test steps of gRPC startup probe are expected to fail with startup probe check failures. + // This is because, due to the unavailability of ready-to-use container images of a gRPC service that + // implements the standard gRPC health check protocol, we compromise and use a container image of an + // ordinary HTTP service to deploy the gRPC service, which never passes startup probes. + // So we only check that the `startup.grpc {}` block and its properties are accepted by the APIs. + { + Config: testAccCloudRunV2Service_cloudRunServiceUpdateWithEmptyGRPCStartupProbe(context), + ExpectError: regexp.MustCompile(fmt.Sprintf(`Revision '%s-.*' is not ready and cannot serve traffic\. The user-provided container failed the configured startup probe checks\.`, serviceName)), + }, + { + PreConfig: testAccCheckCloudRunV2ServiceDestroyByNameProducer(t, serviceName), + Config: testAccCloudRunV2Service_cloudRunServiceUpdateWithGRPCStartupProbe(context), + ExpectError: regexp.MustCompile(fmt.Sprintf(`Revision '%s-.*' is not ready and cannot serve traffic\. 
The user-provided container failed the configured startup probe checks\.`, serviceName)), + }, + { + PreConfig: testAccCheckCloudRunV2ServiceDestroyByNameProducer(t, serviceName), + Config: testAccCloudRunV2Service_cloudRunServiceUpdateWithGRPCLivenessAndStartupProbes(context), + ExpectError: regexp.MustCompile(fmt.Sprintf(`Revision '%s-.*' is not ready and cannot serve traffic\. The user-provided container failed the configured startup probe checks\.`, serviceName)), + }, + { + PreConfig: testAccCheckCloudRunV2ServiceDestroyByNameProducer(t, serviceName), + Config: testAccCloudRunV2Service_cloudRunServiceUpdateWithGRPCLivenessAndStartupProbes(context), + PlanOnly: true, + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckCloudRunV2ServiceDestroyByNameProducer(t *testing.T, serviceName string) func() { + return func() { + config := acctest.GoogleProviderConfig(t) + service := config.NewCloudRunV2Client(config.UserAgent).Projects.Locations.Services + qualifiedServiceName := fmt.Sprintf("projects/%s/locations/%s/services/%s", config.Project, config.Region, serviceName) + op, err := service.Delete(qualifiedServiceName).Do() + if err != nil { + t.Errorf("Error while deleting the Cloud Run service: %s", err) + return + } + err = cloudrunv2.RunAdminV2OperationWaitTime(config, op, config.Project, "Waiting for Cloud Run service to be deleted", config.UserAgent, 5*time.Minute) + if err != nil { + t.Errorf("Error while waiting for Cloud Run service delete operation to complete: %s", err.Error()) + } + } +} + +func testAccCloudRunV2Service_cloudrunv2ServiceWithEmptyTCPStartupProbeAndHTTPLivenessProbe(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + startup_probe { + tcp_socket {} + } + 
liveness_probe { + http_get {} + } + } + } +} +`, context) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceUpdateWithTCPStartupProbeAndHTTPLivenessProbe(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + startup_probe { + initial_delay_seconds = 2 + period_seconds = 1 + timeout_seconds = 5 + failure_threshold = 2 + tcp_socket { + port = 8080 + } + } + liveness_probe { + initial_delay_seconds = 2 + period_seconds = 1 + timeout_seconds = 5 + failure_threshold = 2 + http_get { + path = "/some-path" + port = 8080 + http_headers { + name = "User-Agent" + value = "magic-modules" + } + http_headers { + name = "Some-Name" + } + } + } + } + } +} +`, context) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceUpdateWithEmptyHTTPStartupProbe(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + startup_probe { + http_get {} + } + } + } +} +`, context) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceUpdateWithHTTPStartupProbe(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + startup_probe { + initial_delay_seconds = 3 + period_seconds = 2 + timeout_seconds = 6 + failure_threshold = 3 + http_get { + path = "/some-path" + port = 8080 + http_headers { + name = "User-Agent" + value = "magic-modules" + } + http_headers { + name = "Some-Name" + 
} + } + } + } + } +} +`, context) +} + +func testAccCloudRunV2Service_cloudRunServiceUpdateWithEmptyGRPCLivenessProbe(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name ="%{service_name}" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + liveness_probe { + grpc {} + } + } + } +} +`, context) +} + +func testAccCloudRunV2Service_cloudRunServiceUpdateWithGRPCLivenessProbe(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "%{service_name}" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + liveness_probe { + grpc { + port = 8080 + service = "grpc.health.v1.Health" + } + } + } + } +} +`, context) +} + +func testAccCloudRunV2Service_cloudRunServiceUpdateWithEmptyGRPCStartupProbe(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "%{service_name}" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + startup_probe { + grpc {} + } + } + } +} +`, context) +} + +func testAccCloudRunV2Service_cloudRunServiceUpdateWithGRPCStartupProbe(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "%{service_name}" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + startup_probe { + grpc { + port = 8080 + service = "grpc.health.v1.Health" + } + } + } + } +} +`, context) +} + +func testAccCloudRunV2Service_cloudRunServiceUpdateWithGRPCLivenessAndStartupProbes(context map[string]interface{}) 
string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "%{service_name}" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + liveness_probe { + grpc { + port = 8080 + service = "grpc.health.v1.Health" + } + } + startup_probe { + grpc { + port = 8080 + service = "grpc.health.v1.Health" + } + } + } + } +} +`, context) +} + +func TestAccCloudRunV2Service_cloudrunv2ServiceWithDirectVPCUpdate(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-cloudrun-service%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "service_name": serviceName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudRunServiceWithDirectVPC(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + { + Config: testAccCloudRunV2Service_cloudRunServiceWithDirectVPCUpdate(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunV2Service_cloudRunServiceWithDirectVPC(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "%{service_name}" + location = "us-central1" + launch_stage = "GA" + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + vpc_access { + network_interfaces { + network = "default" + } + } + } +} +`, context) +} + +func 
testAccCloudRunV2Service_cloudRunServiceWithDirectVPCUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "%{service_name}" + location = "us-central1" + launch_stage = "GA" + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + vpc_access { + network_interfaces { + subnetwork = "default" + tags = ["foo", "bar"] + } + } + } +} +`, context) +} + +func TestAccCloudRunV2Service_cloudrunv2ServiceCustomAudienceUpdate(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-cloudrun-service%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudRunServiceUpdateWithCustomAudience(serviceName, "test"), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "launch_stage"}, + }, + { + Config: testAccCloudRunV2Service_cloudRunServiceUpdateWithCustomAudience(serviceName, "test_update"), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "launch_stage"}, + }, + { + Config: testAccCloudRunV2Service_cloudRunServiceUpdateWithoutCustomAudience(serviceName), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "launch_stage"}, + }, + }, + }) +} + +func testAccCloudRunV2Service_cloudRunServiceUpdateWithoutCustomAudience(serviceName string) string { + return fmt.Sprintf(` +resource 
"google_cloud_run_v2_service" "default" { + name = "%s" + location = "us-central1" + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + } + } +} +`, serviceName) +} + +func testAccCloudRunV2Service_cloudRunServiceUpdateWithCustomAudience(serviceName string, customAudience string) string { + return fmt.Sprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "%s" + location = "us-central1" + custom_audiences = ["%s"] + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + } + } +} +`, serviceName, customAudience) +} + +func TestAccCloudRunV2Service_cloudrunv2ServiceAttributionLabel(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "add_attribution": "true", + "attribution_strategy": "CREATION_ONLY", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceWithAttributionLabel(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "labels.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "labels.user_label", "foo"), + + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "terraform_labels.user_label", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "terraform_labels.goog-terraform-provisioned", "true"), + + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "effective_labels.%", "2"), + 
resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "effective_labels.user_label", "foo"), + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "effective_labels.goog-terraform-provisioned", "true"), + ), + }, + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceWithAttributionLabelUpdate(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "labels.%", "1"), + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "labels.user_label", "bar"), + + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "terraform_labels.user_label", "bar"), + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "terraform_labels.goog-terraform-provisioned", "true"), + + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "effective_labels.%", "2"), + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "effective_labels.user_label", "bar"), + resource.TestCheckResourceAttr("google_cloud_run_v2_service.default", "effective_labels.goog-terraform-provisioned", "true"), + ), + }, + }, + }) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceWithAttributionLabel(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + add_terraform_attribution_label = %{add_attribution} + terraform_attribution_label_addition_strategy = "%{attribution_strategy}" +} + +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + + labels = { + user_label = "foo" + } + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + } + } +} +`, context) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceWithAttributionLabelUpdate(context 
map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + add_terraform_attribution_label = %{add_attribution} + terraform_attribution_label_addition_strategy = "%{attribution_strategy}" +} + +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + location = "us-central1" + + labels = { + user_label = "bar" + } + + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + } + } +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccCloudRunV2Service_cloudrunv2ServiceWithServiceMinInstances(t *testing.T) { + t.Parallel() + context := map[string]interface{} { + "random_suffix" : acctest.RandString(t, 10), + } + acctest.VcrTest(t, resource.TestCase { + PreCheck: func() { acctest.AccTestPreCheck(t)}, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudRunV2ServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceWithMinInstancesAndDefaultUriDisabled(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels", "launch_stage"}, + }, + { + Config: testAccCloudRunV2Service_cloudrunv2ServiceWithNoMinInstances(context), + }, + { + ResourceName: "google_cloud_run_v2_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location", "annotations", "labels", "terraform_labels", "launch_stage"}, + }, + + }, + }) +} + +func testAccCloudRunV2Service_cloudrunv2ServiceWithNoMinInstances(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + description = "description creating" + location = 
"us-central1" + launch_stage = "BETA" + annotations = { + generated-by = "magic-modules" + } + ingress = "INGRESS_TRAFFIC_ALL" + labels = { + label-1 = "value-1" + } + client = "client-1" + client_version = "client-version-1" + template { + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + lifecycle { + ignore_changes = [ + launch_stage, + ] + } +} + +`, context) +} +func testAccCloudRunV2Service_cloudrunv2ServiceWithMinInstancesAndDefaultUriDisabled(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_cloud_run_v2_service" "default" { + name = "tf-test-cloudrun-service%{random_suffix}" + description = "description creating" + location = "us-central1" + launch_stage = "BETA" + annotations = { + generated-by = "magic-modules" + } + ingress = "INGRESS_TRAFFIC_ALL" + labels = { + label-1 = "value-1" + } + client = "client-1" + client_version = "client-version-1" + scaling { + min_instance_count = 1 + } + default_uri_disabled = true + template { + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + lifecycle { + ignore_changes = [ + launch_stage, + ] + } +} + +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/cloudtasks/go/resource_cloud_tasks_queue_test.go b/mmv1/third_party/terraform/services/cloudtasks/go/resource_cloud_tasks_queue_test.go new file mode 100644 index 000000000000..e7cda3d2045a --- /dev/null +++ b/mmv1/third_party/terraform/services/cloudtasks/go/resource_cloud_tasks_queue_test.go @@ -0,0 +1,226 @@ +package cloudtasks_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccCloudTasksQueue_update(t *testing.T) { + t.Parallel() + + name := "cloudtasksqueuetest-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudTasksQueue_full(name), + }, + { + ResourceName: "google_cloud_tasks_queue.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_engine_routing_override.0.service", "app_engine_routing_override.0.version", "app_engine_routing_override.0.instance"}, + }, + { + Config: testAccCloudTasksQueue_update(name), + }, + { + ResourceName: "google_cloud_tasks_queue.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_engine_routing_override.0.service", "app_engine_routing_override.0.version", "app_engine_routing_override.0.instance"}, + }, + }, + }) +} + +func TestAccCloudTasksQueue_update2Basic(t *testing.T) { + t.Parallel() + + name := "cloudtasksqueuetest-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudTasksQueue_full(name), + }, + { + ResourceName: "google_cloud_tasks_queue.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_engine_routing_override.0.service", "app_engine_routing_override.0.version", "app_engine_routing_override.0.instance"}, + }, + { + Config: testAccCloudTasksQueue_basic(name), + }, + { + ResourceName: "google_cloud_tasks_queue.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_engine_routing_override.0.service", "app_engine_routing_override.0.version", "app_engine_routing_override.0.instance"}, + }, + }, + }) +} + +func TestAccCloudTasksQueue_MaxRetryDiffSuppress0s(t *testing.T) { + t.Parallel() + testID := acctest.RandString(t, 10) + cloudTaskName := fmt.Sprintf("tf-test-%s", testID) + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudtasksQueueMaxRetry0s(cloudTaskName), + }, + { + ResourceName: "google_cloud_tasks_queue.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// Make sure the diff suppression function handles the situation where an +// unexpected time unit is used, e.g., 2.0s instead of 2s or 2.0s instead of +// 2.000s +func TestAccCloudTasksQueue_TimeUnitDiff(t *testing.T) { + t.Parallel() + testID := acctest.RandString(t, 10) + cloudTaskName := fmt.Sprintf("tf-test-%s", testID) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudtasksQueueTimeUnitDiff(cloudTaskName), + }, + { + ResourceName: "google_cloud_tasks_queue.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCloudTasksQueue_basic(name string) string { + return fmt.Sprintf(` +resource "google_cloud_tasks_queue" "default" { + name = "%s" + location = "us-central1" + + retry_config { + max_attempts = 5 + } + +} +`, name) +} + +func testAccCloudTasksQueue_full(name string) string { + return fmt.Sprintf(` +resource "google_cloud_tasks_queue" "default" { + name = "%s" + location = "us-central1" + + app_engine_routing_override { + service = "worker" + version = "1.0" + instance = "test" + } + + rate_limits { + max_concurrent_dispatches = 3 + max_dispatches_per_second = 2 + } + + retry_config { + max_attempts = 5 + max_retry_duration = "4s" + max_backoff = "3s" + min_backoff = "2s" + max_doublings = 1 + } + + stackdriver_logging_config { + sampling_ratio = 0.9 + } +} +`, name) +} + +func testAccCloudTasksQueue_update(name string) string { + return fmt.Sprintf(` +resource "google_cloud_tasks_queue" "default" { + name = "%s" + location = 
"us-central1" + + app_engine_routing_override { + service = "main" + version = "2.0" + instance = "beta" + } + + rate_limits { + max_concurrent_dispatches = 4 + max_dispatches_per_second = 3 + } + + retry_config { + max_attempts = 6 + max_retry_duration = "5s" + max_backoff = "4s" + min_backoff = "3s" + max_doublings = 2 + } + + stackdriver_logging_config { + sampling_ratio = 0.1 + } +} +`, name) +} + +func testAccCloudtasksQueueMaxRetry0s(cloudTaskName string) string { + return fmt.Sprintf(` + resource "google_cloud_tasks_queue" "default" { + name = "%s" + location = "us-central1" + + retry_config { + max_attempts = -1 + max_backoff = "3600s" + max_doublings = 16 + max_retry_duration = "0s" + min_backoff = "0.100s" + } + } +`, cloudTaskName) +} + +func testAccCloudtasksQueueTimeUnitDiff(cloudTaskName string) string { + return fmt.Sprintf(` +resource "google_cloud_tasks_queue" "default" { + name = "%s" + location = "us-central1" + + retry_config { + max_attempts = -1 + max_backoff = "5.000s" + max_doublings = 16 + max_retry_duration = "1.0s" + min_backoff = "0.10s" + } +} +`, cloudTaskName) +} diff --git a/mmv1/third_party/terraform/services/composer/go/composer_operation.go.tmpl b/mmv1/third_party/terraform/services/composer/go/composer_operation.go.tmpl new file mode 100644 index 000000000000..b31b9bd88023 --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/composer_operation.go.tmpl @@ -0,0 +1,37 @@ +package composer + +import ( + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/composer/v1" +{{- else }} + "google.golang.org/api/composer/v1beta1" +{{- end }} +) + +type ComposerOperationWaiter struct { + Service *composer.ProjectsLocationsService + tpgresource.CommonOperationWaiter +} + +func (w *ComposerOperationWaiter) QueryOp() (interface{}, error) { + if w == 
nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + return w.Service.Operations.Get(w.Op.Name).Do() +} + +func ComposerOperationWaitTime(config *transport_tpg.Config, op *composer.Operation, project, activity, userAgent string, timeout time.Duration) error { + w := &ComposerOperationWaiter{ + Service: config.NewComposerClient(userAgent).Projects.Locations, + } + if err := w.SetOp(op); err != nil { + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_config_map.go.tmpl b/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_config_map.go.tmpl new file mode 100644 index 000000000000..2f9ad0137118 --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_config_map.go.tmpl @@ -0,0 +1,51 @@ +package composer + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComposerUserWorkloadsConfigMap() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComposerUserWorkloadsConfigMap().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "environment", "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "region") + + return &schema.Resource{ + Read: dataSourceGoogleComposerUserWorkloadsConfigMapRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComposerUserWorkloadsConfigMapRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}region{{"}}"}}/environments/{{"{{"}}environment{{"}}"}}/userWorkloadsConfigMaps/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = resourceComposerUserWorkloadsConfigMapRead(d, meta) + if err != nil { + return err + } + + if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_config_map_test.go.tmpl b/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_config_map_test.go.tmpl new file mode 100644 index 000000000000..ee92a7e5501f --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_config_map_test.go.tmpl @@ -0,0 +1,59 @@ +package composer_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceComposerUserWorkloadsConfigMap_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "env_name": fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)), + "config_map_name": fmt.Sprintf("tf-test-composer-config-map-%d", acctest.RandInt(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceComposerUserWorkloadsConfigMap_basic(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_composer_user_workloads_config_map.test", + "google_composer_user_workloads_config_map.test"), + ), + }, + }, + }) +} + 
+func testAccDataSourceComposerUserWorkloadsConfigMap_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "test" { + name = "%{env_name}" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} +resource "google_composer_user_workloads_config_map" "test" { + environment = google_composer_environment.test.name + name = "%{config_map_name}" + data = { + db_host: "dbhost:5432", + api_host: "apihost:443", + } +} +data "google_composer_user_workloads_config_map" "test" { + name = google_composer_user_workloads_config_map.test.name + environment = google_composer_environment.test.name +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_secret.go.tmpl b/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_secret.go.tmpl new file mode 100644 index 000000000000..0acf758493bb --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_secret.go.tmpl @@ -0,0 +1,63 @@ +package composer + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleComposerUserWorkloadsSecret() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComposerUserWorkloadsSecret().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "environment", "name") + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project", "region") + + return &schema.Resource{ + Read: dataSourceGoogleComposerUserWorkloadsSecretRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComposerUserWorkloadsSecretRead(d 
*schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}region{{"}}"}}/environments/{{"{{"}}environment{{"}}"}}/userWorkloadsSecrets/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // retrieve "data" in advance, because Read function won't do it. + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.UserWorkloadsSecrets.Get(id).Do() + if err != nil { + return err + } + + if err := d.Set("data", res.Data); err != nil { + return fmt.Errorf("Error setting UserWorkloadsSecret Data: %s", err) + } + + err = resourceComposerUserWorkloadsSecretRead(d, meta) + if err != nil { + return err + } + + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_secret_test.go.tmpl b/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_secret_test.go.tmpl new file mode 100644 index 000000000000..91bbb1141d5f --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/data_source_google_composer_user_workloads_secret_test.go.tmpl @@ -0,0 +1,102 @@ +package composer_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceComposerUserWorkloadsSecret_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "env_name": fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, 
acctest.RandInt(t)), + "secret_name": fmt.Sprintf("%s-%d", testComposerUserWorkloadsSecretPrefix, acctest.RandInt(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceComposerUserWorkloadsSecret_basic(context), + Check: resource.ComposeTestCheckFunc( + checkSecretDataSourceMatchesResource(), + ), + }, + }, + }) +} + +func checkSecretDataSourceMatchesResource() resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources["data.google_composer_user_workloads_secret.test"] + if !ok { + return fmt.Errorf("can't find %s in state", "data.google_composer_user_workloads_secret.test") + } + rs, ok := s.RootModule().Resources["google_composer_user_workloads_secret.test"] + if !ok { + return fmt.Errorf("can't find %s in state", "google_composer_user_workloads_secret.test") + } + + dsAttr := ds.Primary.Attributes + rsAttr := rs.Primary.Attributes + errMsg := "" + + for k := range rsAttr { + if k == "%" || k == "data.%" { + continue + } + // ignore diff if it's due to secrets being masked. 
+ if strings.HasPrefix(k, "data.") { + if _, ok := dsAttr[k]; !ok{ + errMsg += fmt.Sprintf("%s is defined in resource and not in datasource\n", k) + } + if dsAttr[k] == "**********" { + continue + } + } + if dsAttr[k] != rsAttr[k] { + errMsg += fmt.Sprintf("%s is %s; want %s\n", k, dsAttr[k], rsAttr[k]) + } + } + + if errMsg != "" { + return errors.New(errMsg) + } + + return nil + } +} + +func testAccDataSourceComposerUserWorkloadsSecret_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "test" { + name = "%{env_name}" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} +resource "google_composer_user_workloads_secret" "test" { + environment = google_composer_environment.test.name + name = "%{secret_name}" + data = { + username: base64encode("username"), + password: base64encode("password"), + } +} +data "google_composer_user_workloads_secret" "test" { + name = google_composer_user_workloads_secret.test.name + environment = google_composer_environment.test.name +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/composer/go/resource_composer_environment.go.tmpl b/mmv1/third_party/terraform/services/composer/go/resource_composer_environment.go.tmpl new file mode 100644 index 000000000000..203fe392f526 --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/resource_composer_environment.go.tmpl @@ -0,0 +1,3121 @@ +package composer + +import ( + "fmt" + "net" + "log" + "regexp" + "strings" + "time" + "context" + + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + 
"github.com/hashicorp/terraform-provider-google/google/verify" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/composer/v1" +{{- else }} + composer "google.golang.org/api/composer/v1beta1" +{{- end }} +) + +const ( + composerEnvironmentEnvVariablesRegexp = "[a-zA-Z_][a-zA-Z0-9_]*." + composerEnvironmentReservedAirflowEnvVarRegexp = "AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+" + composerEnvironmentVersionRegexp = `composer-(([0-9]+)(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-(([0-9]+)((\.[0-9]+)(\.[0-9]+)?)?(-build\.[0-9]+)?)` +) + +var composerEnvironmentReservedEnvVar = map[string]struct{}{ + "AIRFLOW_HOME": {}, + "C_FORCE_ROOT": {}, + "CONTAINER_NAME": {}, + "DAGS_FOLDER": {}, + "GCP_PROJECT": {}, + "GCS_BUCKET": {}, + "GKE_CLUSTER_NAME": {}, + "SQL_DATABASE": {}, + "SQL_INSTANCE": {}, + "SQL_PASSWORD": {}, + "SQL_PROJECT": {}, + "SQL_REGION": {}, + "SQL_USER": {}, +} + +var ( + composerSoftwareConfigKeys = []string{ + "config.0.software_config.0.airflow_config_overrides", + "config.0.software_config.0.pypi_packages", + "config.0.software_config.0.env_variables", + "config.0.software_config.0.image_version", + "config.0.software_config.0.python_version", + "config.0.software_config.0.scheduler_count", +{{- if ne $.TargetVersionName "ga" }} + "config.0.software_config.0.cloud_data_lineage_integration", + "config.0.software_config.0.web_server_plugins_mode", +{{- end }} + } + + composerConfigKeys = []string{ + "config.0.node_count", + "config.0.node_config", + "config.0.software_config", + "config.0.recovery_config", + "config.0.private_environment_config", + "config.0.web_server_network_access_control", + "config.0.database_config", + "config.0.web_server_config", + "config.0.encryption_config", + "config.0.maintenance_window", + "config.0.workloads_config", + "config.0.environment_size", + "config.0.master_authorized_networks_config", + "config.0.resilience_mode", +{{- if ne $.TargetVersionName "ga" }} + "config.0.enable_private_environment", + 
"config.0.enable_private_builds_only", +{{- end }} + "config.0.data_retention_config", + } + + recoveryConfigKeys = []string{ + "config.0.recovery_config.0.scheduled_snapshots_config", + } + + workloadsConfigKeys = []string{ + "config.0.workloads_config.0.scheduler", + "config.0.workloads_config.0.triggerer", + "config.0.workloads_config.0.web_server", + "config.0.workloads_config.0.worker", +{{- if ne $.TargetVersionName "ga" }} + "config.0.workloads_config.0.dag_processor", +{{- end }} + } + + composerPrivateEnvironmentConfig = []string{ + "config.0.private_environment_config.0.connection_type", + "config.0.private_environment_config.0.enable_private_endpoint", + "config.0.private_environment_config.0.master_ipv4_cidr_block", + "config.0.private_environment_config.0.cloud_sql_ipv4_cidr_block", + "config.0.private_environment_config.0.web_server_ipv4_cidr_block", + "config.0.private_environment_config.0.cloud_composer_network_ipv4_cidr_block", + "config.0.private_environment_config.0.enable_privately_used_public_ips", + "config.0.private_environment_config.0.cloud_composer_connection_subnetwork", + } + + composerIpAllocationPolicyKeys = []string{ + "config.0.node_config.0.ip_allocation_policy.0.use_ip_aliases", + "config.0.node_config.0.ip_allocation_policy.0.cluster_secondary_range_name", + "config.0.node_config.0.ip_allocation_policy.0.services_secondary_range_name", + "config.0.node_config.0.ip_allocation_policy.0.cluster_ipv4_cidr_block", + "config.0.node_config.0.ip_allocation_policy.0.services_ipv4_cidr_block", + } + + allowedIpRangesConfig = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": { + Type: schema.TypeString, + Required: true, + Description: `IP address or range, defined using CIDR notation, of requests that this rule applies to. Examples: 192.168.1.1 or 192.168.0.0/16 or 2001:db8::/32 or 2001:0db8:0000:0042:0000:8a2e:0370:7334. IP range prefixes should be properly truncated. 
For example, 1.2.3.4/24 should be truncated to 1.2.3.0/24. Similarly, for IPv6, 2001:db8::1/32 should be truncated to 2001:db8::/32.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A description of this ip range.`, + }, + }, + } + + cidrBlocks = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `display_name is a field for users to identify CIDR blocks.`, + }, + "cidr_block": { + Type: schema.TypeString, + Required: true, + Description: `cidr_block must be specified in CIDR notation.`, + }, + }, + } +) + +func ResourceComposerEnvironment() *schema.Resource { + return &schema.Resource{ + Create: resourceComposerEnvironmentCreate, + Read: resourceComposerEnvironmentRead, + Update: resourceComposerEnvironmentUpdate, + Delete: resourceComposerEnvironmentDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComposerEnvironmentImport, + }, + + Timeouts: &schema.ResourceTimeout{ + // Composer takes <= 1 hr for create/update. 
+ Create: schema.DefaultTimeout(120 * time.Minute), + Update: schema.DefaultTimeout(120 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.DefaultProviderRegion, + tpgresource.SetLabelsDiff, + customdiff.Sequence( + customdiff.ValidateChange("config.0.software_config.0.image_version", imageVersionChangeValidationFunc), + versionValidationCustomizeDiffFunc, + ), +{{- if ne $.TargetVersionName "ga" }} + customdiff.ForceNewIf("config.0.node_config.0.network", forceNewCustomDiff("config.0.node_config.0.network")), + customdiff.ForceNewIf("config.0.node_config.0.subnetwork", forceNewCustomDiff("config.0.node_config.0.subnetwork")), +{{- end }} + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the environment.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The location or Compute Engine region for the environment.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + "config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration parameters for this environment.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + AtLeastOneOf: composerConfigKeys, + ValidateFunc: validation.IntAtLeast(3), + Description: `The number of nodes in the Kubernetes Engine cluster that will be used to run this environment. 
This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + }, + "node_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `The configuration used for the Kubernetes Engine cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Compute Engine zone in which to deploy the VMs running the Apache Airflow software, specified as the zone name or relative resource name (e.g. "projects/{project}/zones/{zone}"). Must belong to the enclosing environment's project and region. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + }, + "machine_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Compute Engine machine type used for cluster instances, specified as a name or relative resource name. For example: "projects/{project}/zones/{zone}/machineTypes/{machineType}". Must belong to the enclosing environment's project and region/zone. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + }, + "network": { + Type: schema.TypeString, + Computed: true, + Optional: true, +{{- if eq $.TargetVersionName "ga" }} + ForceNew: true, +{{- else }} + ForceNew: false, + ConflictsWith: []string{"config.0.node_config.0.composer_network_attachment"}, +{{- end }} + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Compute Engine machine type used for cluster instances, specified as a name or relative resource name. For example: "projects/{project}/zones/{zone}/machineTypes/{machineType}". 
Must belong to the enclosing environment's project and region/zone. The network must belong to the environment's project. If unspecified, the "default" network ID in the environment's project is used. If a Custom Subnet Network is provided, subnetwork must also be provided.`, + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, +{{- if eq $.TargetVersionName "ga" }} + ForceNew: true, +{{- else }} + ForceNew: false, + Computed: true, + ConflictsWith: []string{"config.0.node_config.0.composer_network_attachment"}, +{{- end }} + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The Compute Engine subnetwork to be used for machine communications, specified as a self-link, relative resource name (e.g. "projects/{project}/regions/{region}/subnetworks/{subnetwork}"), or by name. If subnetwork is provided, network must also be provided and the subnetwork must belong to the enclosing environment's project and region.`, + }, +{{- if ne $.TargetVersionName "ga" }} + "composer_network_attachment": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: false, + Description: `PSC (Private Service Connect) Network entry point. Customers can pre-create the Network Attachment and point Cloud Composer environment to use. It is possible to share network attachment among many environments, provided enough IP addresses are available.`, + }, +{{- end }} + "disk_size_gb": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The disk size in GB used for node VMs. Minimum size is 20GB. If unspecified, defaults to 100GB. Cannot be updated. 
This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + }, + "oauth_scopes": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + Description: `The set of Google API scopes to be made available on all node VMs. Cannot be updated. If empty, defaults to ["https://www.googleapis.com/auth/cloud-platform"]. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validateServiceAccountRelativeNameOrEmail, + DiffSuppressFunc: compareServiceAccountEmailToLink, + Description: `The Google Cloud Platform Service Account to be used by the node VMs. If a service account is not specified, the "default" Compute Engine service account is used. Cannot be updated. If given, note that the service account must have roles/composer.worker for any GCP resources created under the Cloud Composer Environment.`, + }, +{{- if ne $.TargetVersionName "ga" }} + "max_pods_per_node": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(8, 110), + Description: `The maximum pods per node in the GKE cluster allocated during environment creation. Lowering this value reduces IP address consumption by the Cloud Composer Kubernetes cluster. This value can only be set during environment creation, and only if the environment is VPC-Native. The range of possible values is 8-110, and the default is 32. Cannot be updated. 
This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + }, +{{- end }} + "enable_ip_masq_agent": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Deploys 'ip-masq-agent' daemon set in the GKE cluster and defines nonMasqueradeCIDRs equals to pod IP range so IP masquerading is used for all destination addresses, except between pods traffic. See: https://cloud.google.com/kubernetes-engine/docs/how-to/ip-masquerade-agent`, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + Description: `The list of instance tags applied to all node VMs. Tags are used to identify valid sources or targets for network firewalls. Each tag within the list must comply with RFC1035. Cannot be updated.`, + }, + "ip_allocation_policy": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + ConfigMode: schema.SchemaConfigModeAttr, + MaxItems: 1, + Description: `Configuration for controlling how IPs are allocated in the GKE cluster. Cannot be updated.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "use_ip_aliases": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: composerIpAllocationPolicyKeys, + Description: `Whether or not to enable Alias IPs in the GKE cluster. If true, a VPC-native cluster is created. Defaults to true if the ip_allocation_policy block is present in config. This field is only supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use VPC-native GKE clusters.`, + }, + "cluster_secondary_range_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: composerIpAllocationPolicyKeys, + Description: `The name of the cluster's secondary range used to allocate IP addresses to pods. 
Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true.`, + ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.cluster_ipv4_cidr_block"}, + }, + "services_secondary_range_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: composerIpAllocationPolicyKeys, + Description: `The name of the services' secondary range used to allocate IP addresses to the cluster. Specify either services_secondary_range_name or services_ipv4_cidr_block but not both. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true.`, + ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.services_ipv4_cidr_block"}, + }, + "cluster_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: composerIpAllocationPolicyKeys, + Description: `The IP address range used to allocate IP addresses to pods in the cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. 
Specify either cluster_secondary_range_name or cluster_ipv4_cidr_block but not both.`, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, + ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.cluster_secondary_range_name"}, + }, + "services_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: composerIpAllocationPolicyKeys, + Description: `The IP address range used to allocate IP addresses in this cluster. For Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*, this field is applicable only when use_ip_aliases is true. Set to blank to have GKE choose a range with the default size. Set to /netmask (e.g. /14) to have GKE choose a range with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use. Specify either services_secondary_range_name or services_ipv4_cidr_block but not both.`, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, + ConflictsWith: []string{"config.0.node_config.0.ip_allocation_policy.0.services_secondary_range_name"}, + }, + }, + }, + }, +{{- if ne $.TargetVersionName "ga" }} + "composer_internal_ipv4_cidr_block": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + ValidateFunc: validateComposerInternalIpv4CidrBlock, + Description: `IPv4 cidr range that will be used by Composer internal components.`, + }, +{{- end }} + }, + }, + }, + "recovery_config": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `The recovery configuration settings for the Cloud Composer environment`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scheduled_snapshots_config": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: recoveryConfigKeys, + Description: `The configuration settings for scheduled snapshots.`, + MaxItems: 1, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `When enabled, Cloud Composer periodically saves snapshots of your environment to a Cloud Storage bucket.`, + }, + "snapshot_location": { + Type: schema.TypeString, + Optional: true, + Description: `the URI of a bucket folder where to save the snapshot.`, + }, + "snapshot_creation_schedule": { + Type: schema.TypeString, + Optional: true, + Description: `Snapshot schedule, in the unix-cron format.`, + }, + "time_zone": { + Type: schema.TypeString, + Optional: true, + Description: `A time zone for the schedule. This value is a time offset and does not take into account daylight saving time changes. Valid values are from UTC-12 to UTC+12. Examples: UTC, UTC-01, UTC+03.`, + }, + }, + }, + }, + }, + }, + }, + "software_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `The configuration settings for software inside the environment.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "airflow_config_overrides": { + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Apache Airflow configuration properties to override. Property keys contain the section and property names, separated by a hyphen, for example "core-dags_are_paused_at_creation". Section names must not contain hyphens ("-"), opening square brackets ("["), or closing square brackets ("]"). The property name must not be empty and cannot contain "=" or ";". Section and property names cannot contain characters: "." Apache Airflow configuration property names must be written in snake_case. Property values can contain any character, and can be written in any lower/upper case format. 
Certain Apache Airflow configuration property values are blacklisted, and cannot be overridden.`, + }, + "pypi_packages": { + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, + Elem: &schema.Schema{Type: schema.TypeString}, + ValidateFunc: validateComposerEnvironmentPypiPackages, + Description: `Custom Python Package Index (PyPI) packages to be installed in the environment. Keys refer to the lowercase package name (e.g. "numpy"). Values are the lowercase extras and version specifier (e.g. "==1.12.0", "[devel,gcp_api]", "[devel]>=1.8.2, <1.9.2"). To specify a package without pinning it to a version specifier, use the empty string as the value.`, + }, + "env_variables": { + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, + Elem: &schema.Schema{Type: schema.TypeString}, + ValidateFunc: validateComposerEnvironmentEnvVariables, + Description: `Additional environment variables to provide to the Apache Airflow scheduler, worker, and webserver processes. Environment variable names must match the regular expression [a-zA-Z_][a-zA-Z0-9_]*. They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+), and they cannot match any of the following reserved names: AIRFLOW_HOME C_FORCE_ROOT CONTAINER_NAME DAGS_FOLDER GCP_PROJECT GCS_BUCKET GKE_CLUSTER_NAME SQL_DATABASE SQL_INSTANCE SQL_PASSWORD SQL_PROJECT SQL_REGION SQL_USER.`, + }, + "image_version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, + ValidateFunc: verify.ValidateRegexp(composerEnvironmentVersionRegexp), + DiffSuppressFunc: composerImageVersionDiffSuppress, + Description: `The version of the software running in the environment. This encapsulates both the version of Cloud Composer functionality and the version of Apache Airflow. 
It must match the regular expression composer-([0-9]+(\.[0-9]+\.[0-9]+(-preview\.[0-9]+)?)?|latest)-airflow-([0-9]+(\.[0-9]+(\.[0-9]+)?)?). The Cloud Composer portion of the image version is a full semantic version, or an alias in the form of major version number or 'latest'. The Apache Airflow portion of the image version is a full semantic version that points to one of the supported Apache Airflow versions, or an alias in the form of only major or major.minor versions specified. See documentation for more details and version list.`, + }, + "python_version": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, + Computed: true, + ForceNew: true, + Description: `The major version of Python used to run the Apache Airflow scheduler, worker, and webserver processes. Can be set to '2' or '3'. If not specified, the default is '2'. Cannot be updated. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*. Environments in newer versions always use Python major version 3.`, + }, + "scheduler_count": { + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: composerSoftwareConfigKeys, + Computed: true, + Description: `The number of schedulers for Airflow. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-2.*.*.`, + }, +{{- if ne $.TargetVersionName "ga" }} + "cloud_data_lineage_integration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerSoftwareConfigKeys, + MaxItems: 1, + Description: `The configuration for Cloud Data Lineage integration. 
Supported for Cloud Composer environments in versions composer-2.1.2-airflow-*.*.* and newer`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not Cloud Data Lineage integration is enabled.`, + }, + }, + }, + }, + "web_server_plugins_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: false, + AtLeastOneOf: composerSoftwareConfigKeys, + ValidateFunc: validation.StringInSlice([]string{"ENABLED", "DISABLED"}, false), + Description: `Should be either 'ENABLED' or 'DISABLED'. Defaults to 'ENABLED'. Used in Composer 3.`, + }, +{{- end }} + }, + }, + }, + "private_environment_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + ForceNew: true, + Description: `The configuration used for the Private IP Cloud Composer environment.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "connection_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"VPC_PEERING", "PRIVATE_SERVICE_CONNECT"}, false), + Description: `Mode of internal communication within the Composer environment. Must be one of "VPC_PEERING" or "PRIVATE_SERVICE_CONNECT".`, + }, + "enable_private_endpoint": { + Type: schema.TypeBool, + Optional: true, + Default: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + Description: `If true, access to the public endpoint of the GKE cluster is denied. 
If this field is set to true, ip_allocation_policy.use_ip_aliases must be set to true for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + }, + "master_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + Description: `The IP range in CIDR notation to use for the hosted master network. This range is used for assigning internal IP addresses to the cluster master or set of masters and to the internal load balancer virtual IP. This range must not overlap with any other ranges in use within the cluster's network. If left blank, the default value of '172.16.0.0/28' is used.`, + }, + "web_server_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + Description: `The CIDR block from which IP range for web server will be reserved. Needs to be disjoint from master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + }, + "cloud_sql_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + Description: `The CIDR block from which IP range in tenant project will be reserved for Cloud SQL. Needs to be disjoint from web_server_ipv4_cidr_block.`, + }, + "cloud_composer_network_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + Description: `The CIDR block from which IP range for Cloud Composer Network in tenant project will be reserved. Needs to be disjoint from private_cluster_config.master_ipv4_cidr_block and cloud_sql_ipv4_cidr_block. 
This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, + }, + "enable_privately_used_public_ips": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + Description: `When enabled, IPs from public (non-RFC1918) ranges can be used for ip_allocation_policy.cluster_ipv4_cidr_block and ip_allocation_policy.service_ipv4_cidr_block.`, + }, + "cloud_composer_connection_subnetwork": { + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: composerPrivateEnvironmentConfig, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `When specified, the environment will use Private Service Connect instead of VPC peerings to connect to Cloud SQL in the Tenant Project, and the PSC endpoint in the Customer Project will use an IP address from this subnetwork. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, + }, + }, + }, + }, +{{- if ne $.TargetVersionName "ga" }} + "enable_private_environment": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: false, + AtLeastOneOf: composerConfigKeys, + Description: `Optional. If true, a private Composer environment will be created.`, + }, + "enable_private_builds_only": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: false, + AtLeastOneOf: composerConfigKeys, + Description: `Optional. If true, builds performed during operations that install Python packages have only private connectivity to Google services. 
If false, the builds also have access to the internet.`, + }, +{{- end }} + "web_server_network_access_control": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `Network-level access control policy for the Airflow web server.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "allowed_ip_range": { + Type: schema.TypeSet, + Computed: true, + Optional: true, + Elem: allowedIpRangesConfig, + Description: `A collection of allowed IP ranges with descriptions.`, + }, + }, + }, + }, + "database_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `The configuration of Cloud SQL instance that is used by the Apache Airflow software. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_type": { + Type: schema.TypeString, + Optional: true, + Description: `Optional. Cloud SQL machine type used by Airflow database. It has to be one of: db-n1-standard-2, db-n1-standard-4, db-n1-standard-8 or db-n1-standard-16. If not specified, db-n1-standard-2 will be used.`, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Optional. Cloud SQL database preferred zone.`, + }, + }, + }, + }, + "web_server_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `The configuration settings for the Airflow web server App Engine instance. This field is supported for Cloud Composer environments in versions composer-1.*.*-airflow-*.*.*.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "machine_type": { + Type: schema.TypeString, + Required: true, + Description: `Optional. Machine type on which Airflow web server is running. 
It has to be one of: composer-n1-webserver-2, composer-n1-webserver-4 or composer-n1-webserver-8. If not specified, composer-n1-webserver-2 will be used. Value custom is returned only in response, if Airflow web server parameters were manually changed to a non-standard values.`, + }, + }, + }, + }, + "encryption_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `The encryption options for the Composer environment and its dependencies.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Optional. Customer-managed Encryption Key available through Google's Key Management Service. Cannot be updated.`, + }, + }, + }, + }, + "maintenance_window": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `The configuration for Cloud Composer maintenance window.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time": { + Type: schema.TypeString, + Required: true, + ForceNew: false, + Description: `Start time of the first recurrence of the maintenance window.`, + }, + "end_time": { + Type: schema.TypeString, + Required: true, + ForceNew: false, + Description: `Maintenance window end time. It is used only to calculate the duration of the maintenance window. The value for end-time must be in the future, relative to 'start_time'.`, + }, + "recurrence": { + Type: schema.TypeString, + Required: true, + ForceNew: false, + Description: `Maintenance window recurrence. Format is a subset of RFC-5545 (https://tools.ietf.org/html/rfc5545) 'RRULE'. The only allowed values for 'FREQ' field are 'FREQ=DAILY' and 'FREQ=WEEKLY;BYDAY=...'. 
Example values: 'FREQ=WEEKLY;BYDAY=TU,WE', 'FREQ=DAILY'.`, + }, + }, + }, + }, + + "data_retention_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `The configuration setting for Airflow data retention mechanism. This field is supported for Cloud Composer environments in versions composer-2.0.32-airflow-2.1.4. or newer`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "task_logs_retention_config": { + Type: schema.TypeList, + Description: `Optional. The configuration setting for Task Logs.`, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"CLOUD_LOGGING_ONLY", "CLOUD_LOGGING_AND_CLOUD_STORAGE"}, false), + Description: `Whether logs in cloud logging only is enabled or not. This field is supported for Cloud Composer environments in versions composer-2.0.32-airflow-2.1.4 and newer.`, + }, + }, + }, + }, + }, + }, + }, + "workloads_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `The workloads configuration settings for the GKE cluster associated with the Cloud Composer environment. 
Supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scheduler": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: workloadsConfigKeys, + ForceNew: false, + Computed: true, + Description: `Configuration for resources used by Airflow schedulers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `CPU request and limit for a single Airflow scheduler replica`, + }, + "memory_gb": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `Memory (GB) request and limit for a single Airflow scheduler replica.`, + }, + "storage_gb": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `Storage (GB) request and limit for a single Airflow scheduler replica.`, + }, + "count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The number of schedulers.`, + }, + }, + }, + }, + "triggerer": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: workloadsConfigKeys, + Computed: true, + Description: `Configuration for resources used by Airflow triggerers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeFloat, + Required: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `CPU request and limit for a single Airflow triggerer replica.`, + }, + "memory_gb": { + Type: schema.TypeFloat, + Required: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `Memory (GB) request and limit for a single Airflow triggerer replica.`, + }, + "count": { + 
Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The number of triggerers.`, + }, + }, + }, + }, + "web_server": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: workloadsConfigKeys, + ForceNew: false, + Computed: true, + Description: `Configuration for resources used by Airflow web server.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `CPU request and limit for Airflow web server.`, + }, + "memory_gb": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `Memory (GB) request and limit for Airflow web server.`, + }, + "storage_gb": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `Storage (GB) request and limit for Airflow web server.`, + }, + }, + }, + }, + "worker": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: workloadsConfigKeys, + ForceNew: false, + Computed: true, + Description: `Configuration for resources used by Airflow workers.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `CPU request and limit for a single Airflow worker replica.`, + }, + "memory_gb": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `Memory (GB) request and limit for a single Airflow worker replica.`, + }, + "storage_gb": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `Storage (GB) 
request and limit for a single Airflow worker replica.`, + }, + "min_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Minimum number of workers for autoscaling.`, + }, + "max_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Maximum number of workers for autoscaling.`, + }, + }, + }, + }, +{{- if ne $.TargetVersionName "ga" }} + "dag_processor": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: workloadsConfigKeys, + ForceNew: false, + Computed: true, + Description: `Configuration for resources used by DAG processor.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `CPU request and limit for DAG processor.`, + }, + "memory_gb": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `Memory (GB) request and limit for DAG processor.`, + }, + "storage_gb": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.FloatAtLeast(0), + Description: `Storage (GB) request and limit for DAG processor.`, + }, + "count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + Computed: true, + ValidateFunc: validation.IntBetween(0, 3), + Description: `Number of DAG processors.`, + }, + }, + }, + }, +{{- end }} + }, + }, + }, + "environment_size": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: false, + AtLeastOneOf: composerConfigKeys, + ValidateFunc: validation.StringInSlice([]string{"ENVIRONMENT_SIZE_SMALL", "ENVIRONMENT_SIZE_MEDIUM", "ENVIRONMENT_SIZE_LARGE"}, false), + Description: `The size of the Cloud Composer 
environment. This field is supported for Cloud Composer environments in versions composer-2.*.*-airflow-*.*.* and newer.`, + }, + "resilience_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: false, + AtLeastOneOf: composerConfigKeys, + ValidateFunc: validation.StringInSlice([]string{"STANDARD_RESILIENCE", "HIGH_RESILIENCE"}, false), + Description: `Whether high resilience is enabled or not. This field is supported for Cloud Composer environments in versions composer-2.1.15-airflow-*.*.* and newer.`, + }, + "master_authorized_networks_config": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: composerConfigKeys, + MaxItems: 1, + Description: `Configuration options for the master authorized networks feature. Enabled master authorized networks will disallow all external traffic to access Kubernetes master through HTTPS except traffic from the given CIDR blocks, Google Compute Engine Public IPs and Google Prod IPs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not master authorized networks is enabled.`, + }, + "cidr_blocks": { + Type: schema.TypeSet, + Optional: true, + Elem: cidrBlocks, + Description: `cidr_blocks define up to 50 external networks that could access Kubernetes master through HTTPS.`, + }, + }, + }, + }, + "airflow_uri": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the Apache Airflow Web UI hosted within this environment.`, + }, + "dag_gcs_prefix": { + Type: schema.TypeString, + Computed: true, + Description: `The Cloud Storage prefix of the DAGs for this environment. Although Cloud Storage objects reside in a flat namespace, a hierarchical file tree can be simulated using '/'-delimited object name prefixes. 
DAG objects for this environment reside in a simulated directory with this prefix.`, + }, + "gke_cluster": { + Type: schema.TypeString, + Computed: true, + Description: `The Kubernetes Engine cluster used to run this environment.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `User-defined labels for this environment. The labels map can contain no more than 64 entries. Entries of the labels map are UTF8 strings that comply with the following restrictions: Label keys must be between 1 and 63 characters long and must conform to the following regular expression: [a-z]([-a-z0-9]*[a-z0-9])?. Label values must be between 0 and 63 characters long and must conform to the regular expression ([a-z]([-a-z0-9]*[a-z0-9])?)?. No more than 64 labels can be associated with a given environment. Both keys and values must be <= 128 bytes in size. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
// resourceComposerEnvironmentCreate creates a new Composer environment:
// it expands the Terraform config into an API Environment object, issues the
// Create call, waits for the long-running operation, and then applies any
// fields that the API only accepts post-creation (see
// getComposerEnvironmentPostCreateUpdateObj) via a follow-up PATCH.
func resourceComposerEnvironmentCreate(d *schema.ResourceData, meta interface{}) error {
	config := meta.(*transport_tpg.Config)
	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
	if err != nil {
		return err
	}

	envName, err := resourceComposerEnvironmentName(d, config)
	if err != nil {
		return err
	}

	transformedConfig, err := expandComposerEnvironmentConfig(d.Get("config"), d, config)
	if err != nil {
		return err
	}

	transformedStorageConfig, err := expandComposerStorageConfig(d.Get("storage_config"), d, config)
	if err != nil {
		return err
	}

	env := &composer.Environment{
		Name:          envName.ResourceName(),
		Labels:        tpgresource.ExpandEffectiveLabels(d),
		Config:        transformedConfig,
		StorageConfig: transformedStorageConfig,
	}

	// Some fields cannot be specified during create and must be updated post-creation.
	updateOnlyEnv := getComposerEnvironmentPostCreateUpdateObj(env)

	log.Printf("[DEBUG] Creating new Environment %q", envName.ParentName())
	op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Create(envName.ParentName(), env).Do()
	if err != nil {
		return err
	}

	// Store the ID now, before waiting: the cleanup path below needs it, and a
	// partially-created environment should be visible in state until removed.
	id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}region{{"}}"}}/environments/{{"{{"}}name{{"}}"}}")
	if err != nil {
		return fmt.Errorf("Error constructing id: %s", err)
	}
	d.SetId(id)

	waitErr := ComposerOperationWaitTime(
		config, op, envName.Project, "Creating Environment", userAgent,
		d.Timeout(schema.TimeoutCreate))

	if waitErr != nil {
		// The resource didn't actually get created, remove from state.
		d.SetId("")

		// Best-effort cleanup of the half-created environment; surface both the
		// original wait error and any cleanup failure to the user.
		errMsg := fmt.Sprintf("Error waiting to create Environment: %s", waitErr)
		if err := handleComposerEnvironmentCreationOpFailure(id, envName, d, config); err != nil {
			return fmt.Errorf("Error waiting to create Environment: %s. An initial "+
				"environment was or is still being created, and clean up failed with "+
				"error: %s.", errMsg, err)
		}

		return fmt.Errorf("Error waiting to create Environment: %s", waitErr)
	}

	log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), op)

	// Apply update-only fields (e.g. PyPI packages) now that the environment exists.
	if err := resourceComposerEnvironmentPostCreateUpdate(updateOnlyEnv, d, config, userAgent); err != nil {
		return err
	}

	return resourceComposerEnvironmentRead(d, meta)
}
An initial "+ + "environment was or is still being created, and clean up failed with "+ + "error: %s.", errMsg, err) + } + + return fmt.Errorf("Error waiting to create Environment: %s", waitErr) + } + + log.Printf("[DEBUG] Finished creating Environment %q: %#v", d.Id(), op) + + if err := resourceComposerEnvironmentPostCreateUpdate(updateOnlyEnv, d, config, userAgent); err != nil { + return err + } + + return resourceComposerEnvironmentRead(d, meta) +} + +func resourceComposerEnvironmentRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + envName, err := resourceComposerEnvironmentName(d, config) + if err != nil { + return err + } + + res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.ResourceName()).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComposerEnvironment %q", d.Id())) + } + + // Set from GetProject(d) + if err := d.Set("project", envName.Project); err != nil { + return fmt.Errorf("Error setting Environment: %s", err) + } + // Set from GetRegion(d) + if err := d.Set("region", envName.Region); err != nil { + return fmt.Errorf("Error setting Environment: %s", err) + } + if err := d.Set("name", tpgresource.GetResourceNameFromSelfLink(res.Name)); err != nil { + return fmt.Errorf("Error setting Environment: %s", err) + } + if err := d.Set("config", flattenComposerEnvironmentConfig(res.Config)); err != nil { + return fmt.Errorf("Error setting Environment: %s", err) + } + if err := tpgresource.SetLabels(res.Labels, d, "labels"); err != nil { + return fmt.Errorf("Error setting Environment labels: %s", err) + } + if err := tpgresource.SetLabels(res.Labels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", res.Labels); err != nil { + 
return fmt.Errorf("Error setting Environment effective_labels: %s", err) + } + if err := d.Set("storage_config", flattenComposerStorageConfig(res.StorageConfig)); err != nil { + return fmt.Errorf("Error setting Storage: %s", err) + } + return nil +} + +func resourceComposerEnvironmentUpdate(d *schema.ResourceData, meta interface{}) error { + tfConfig := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, tfConfig.UserAgent) + if err != nil { + return err + } + + d.Partial(true) + + // Composer only allows PATCHing one field at a time, so for each updatable field, we + // 1. determine if it needs to be updated + // 2. construct a PATCH object with only that field populated + // 3. call resourceComposerEnvironmentPatchField(...)to update that single field. + if d.HasChange("config") { + config, err := expandComposerEnvironmentConfig(d.Get("config"), d, tfConfig) + if err != nil { + return err + } + +{{ if ne $.TargetVersionName `ga` -}} + noChangeErrorMessage := "Update request does not result in any change to the environment's configuration" + if d.HasChange("config.0.node_config.0.network") || d.HasChange("config.0.node_config.0.subnetwork"){ + // step 1: update with empty network and subnetwork + patchObjEmpty := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + NodeConfig: &composer.NodeConfig{}, + }, + } + err = resourceComposerEnvironmentPatchField("config.nodeConfig.network,config.nodeConfig.subnetwork", userAgent, patchObjEmpty, d, tfConfig) + if err != nil && !strings.Contains(err.Error(), noChangeErrorMessage){ + return err + } + + // step 2: update with new network and subnetwork, if new values are not empty + if (config.NodeConfig.Network != "" && config.NodeConfig.Subnetwork != ""){ + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + NodeConfig: &composer.NodeConfig{}, + }, + } + if config != nil && config.NodeConfig != nil { + patchObj.Config.NodeConfig.Network = 
config.NodeConfig.Network + patchObj.Config.NodeConfig.Subnetwork = config.NodeConfig.Subnetwork + } + err = resourceComposerEnvironmentPatchField("config.nodeConfig.network,config.nodeConfig.subnetwork", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + } + + if d.HasChange("config.0.node_config.0.composer_network_attachment") { + // step 1: update with empty composer_network_attachment + patchObjEmpty := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + NodeConfig: &composer.NodeConfig{}, + }, + } + err = resourceComposerEnvironmentPatchField("config.nodeConfig.composerNetworkAttachment", userAgent, patchObjEmpty, d, tfConfig) + if err != nil && !strings.Contains(err.Error(), noChangeErrorMessage){ + return err + } + + // step 2: update with new composer_network_attachment + if (config.NodeConfig.ComposerNetworkAttachment != ""){ + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + NodeConfig: &composer.NodeConfig{}, + }, + } + if config != nil && config.NodeConfig != nil { + patchObj.Config.NodeConfig.ComposerNetworkAttachment = config.NodeConfig.ComposerNetworkAttachment + } + err = resourceComposerEnvironmentPatchField("config.nodeConfig.composerNetworkAttachment", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + } +{{- end }} + + if d.HasChange("config.0.software_config.0.image_version") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{}, + }, + } + if config != nil && config.SoftwareConfig != nil { + patchObj.Config.SoftwareConfig.ImageVersion = config.SoftwareConfig.ImageVersion + } + err = resourceComposerEnvironmentPatchField("config.softwareConfig.imageVersion", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + if d.HasChange("config.0.software_config.0.scheduler_count") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: 
&composer.SoftwareConfig{}, + }, + } + if config != nil && config.SoftwareConfig != nil { + patchObj.Config.SoftwareConfig.SchedulerCount = config.SoftwareConfig.SchedulerCount + } + err = resourceComposerEnvironmentPatchField("config.softwareConfig.schedulerCount", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + +{{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("config.0.software_config.0.cloud_data_lineage_integration") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{}, + }, + } + if config != nil && config.SoftwareConfig != nil { + patchObj.Config.SoftwareConfig.CloudDataLineageIntegration = config.SoftwareConfig.CloudDataLineageIntegration + } + err = resourceComposerEnvironmentPatchField("config.softwareConfig.cloudDataLineageIntegration", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } +{{- end }} + + if d.HasChange("config.0.software_config.0.airflow_config_overrides") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{ + AirflowConfigOverrides: make(map[string]string), + }, + }, + } + + if config != nil && config.SoftwareConfig != nil && len(config.SoftwareConfig.AirflowConfigOverrides) > 0 { + patchObj.Config.SoftwareConfig.AirflowConfigOverrides = config.SoftwareConfig.AirflowConfigOverrides + } + + err = resourceComposerEnvironmentPatchField("config.softwareConfig.airflowConfigOverrides", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + if d.HasChange("config.0.software_config.0.env_variables") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{ + EnvVariables: make(map[string]string), + }, + }, + } + if config != nil && config.SoftwareConfig != nil && len(config.SoftwareConfig.EnvVariables) > 0 { + patchObj.Config.SoftwareConfig.EnvVariables = 
config.SoftwareConfig.EnvVariables + } + + err = resourceComposerEnvironmentPatchField("config.softwareConfig.envVariables", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + if d.HasChange("config.0.software_config.0.pypi_packages") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{ + PypiPackages: make(map[string]string), + }, + }, + } + if config != nil && config.SoftwareConfig != nil && config.SoftwareConfig.PypiPackages != nil { + patchObj.Config.SoftwareConfig.PypiPackages = config.SoftwareConfig.PypiPackages + } + + err = resourceComposerEnvironmentPatchField("config.softwareConfig.pypiPackages", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + +{{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("config.0.enable_private_environment") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + PrivateEnvironmentConfig: &composer.PrivateEnvironmentConfig{}, + }, + } + if config != nil && config.PrivateEnvironmentConfig != nil { + patchObj.Config.PrivateEnvironmentConfig.EnablePrivateEnvironment = config.PrivateEnvironmentConfig.EnablePrivateEnvironment + } + err = resourceComposerEnvironmentPatchField("config.PrivateEnvironmentConfig.EnablePrivateEnvironment", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + if d.HasChange("config.0.enable_private_builds_only") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + PrivateEnvironmentConfig: &composer.PrivateEnvironmentConfig{}, + }, + } + if config != nil && config.PrivateEnvironmentConfig != nil { + patchObj.Config.PrivateEnvironmentConfig.EnablePrivateBuildsOnly = config.PrivateEnvironmentConfig.EnablePrivateBuildsOnly + } + err = resourceComposerEnvironmentPatchField("config.PrivateEnvironmentConfig.EnablePrivateBuildsOnly", userAgent, patchObj, d, tfConfig) + } + + if 
d.HasChange("config.0.software_config.0.web_server_plugins_mode") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{}, + }, + } + if config != nil && config.SoftwareConfig != nil { + patchObj.Config.SoftwareConfig.WebServerPluginsMode = config.SoftwareConfig.WebServerPluginsMode + } + err = resourceComposerEnvironmentPatchField("config.softwareConfig.webServerPluginsMode", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } +{{- end }} + + if d.HasChange("config.0.node_count") { + patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}} + if config != nil { + patchObj.Config.NodeCount = config.NodeCount + } + err = resourceComposerEnvironmentPatchField("config.nodeCount", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + // If web_server_network_access_control has more fields added it may require changes here. + // This is scoped specifically to allowed_ip_range due to https://github.com/hashicorp/terraform-plugin-sdk/issues/98 + if d.HasChange("config.0.web_server_network_access_control.0.allowed_ip_range") { + patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}} + if config != nil { + patchObj.Config.WebServerNetworkAccessControl = config.WebServerNetworkAccessControl + } + err = resourceComposerEnvironmentPatchField("config.webServerNetworkAccessControl", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + if d.HasChange("config.0.database_config.0.machine_type") { + patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}} + if config != nil { + patchObj.Config.DatabaseConfig = config.DatabaseConfig + } + err = resourceComposerEnvironmentPatchField("config.databaseConfig.machineType", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + if d.HasChange("config.0.web_server_config.0.machine_type") { + patchObj := &composer.Environment{Config: 
&composer.EnvironmentConfig{}} + if config != nil { + patchObj.Config.WebServerConfig = config.WebServerConfig + } + err = resourceComposerEnvironmentPatchField("config.webServerConfig.machineType", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + if d.HasChange("config.0.maintenance_window") { + patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}} + if config != nil { + patchObj.Config.MaintenanceWindow = config.MaintenanceWindow + } + err = resourceComposerEnvironmentPatchField("config.maintenanceWindow", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + if d.HasChange("config.0.workloads_config") { + patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}} + if config != nil { + patchObj.Config.WorkloadsConfig = config.WorkloadsConfig + } + err = resourceComposerEnvironmentPatchField("config.WorkloadsConfig", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + if d.HasChange("config.0.data_retention_config.0.task_logs_retention_config.0.storage_mode") { + patchObj := &composer.Environment{ + Config: &composer.EnvironmentConfig{ + DataRetentionConfig: &composer.DataRetentionConfig{ + TaskLogsRetentionConfig: &composer.TaskLogsRetentionConfig{}, + }, + }, + } + if config != nil && config.DataRetentionConfig != nil && config.DataRetentionConfig.TaskLogsRetentionConfig != nil { + patchObj.Config.DataRetentionConfig.TaskLogsRetentionConfig.StorageMode = config.DataRetentionConfig.TaskLogsRetentionConfig.StorageMode + } + err = resourceComposerEnvironmentPatchField("config.DataRetentionConfig.TaskLogsRetentionConfig.StorageMode", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + if d.HasChange("config.0.recovery_config.0.scheduled_snapshots_config") { + patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}} + if config != nil { + patchObj.Config.RecoveryConfig = config.RecoveryConfig + } + err = 
resourceComposerEnvironmentPatchField("config.RecoveryConfig.ScheduledSnapshotsConfig", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + + if d.HasChange("config.0.environment_size") { + patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}} + if config != nil { + patchObj.Config.EnvironmentSize = config.EnvironmentSize + } + err = resourceComposerEnvironmentPatchField("config.EnvironmentSize", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + if d.HasChange("config.0.resilience_mode") { + patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}} + if config != nil { + if config.ResilienceMode == "STANDARD_RESILIENCE" { + patchObj.Config.ResilienceMode = "RESILIENCE_MODE_UNSPECIFIED" + } else { + patchObj.Config.ResilienceMode = config.ResilienceMode + } + } + err = resourceComposerEnvironmentPatchField("config.ResilienceMode", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + if d.HasChange("config.0.master_authorized_networks_config") { + patchObj := &composer.Environment{Config: &composer.EnvironmentConfig{}} + if config != nil { + patchObj.Config.MasterAuthorizedNetworksConfig = config.MasterAuthorizedNetworksConfig + } + err = resourceComposerEnvironmentPatchField("config.MasterAuthorizedNetworksConfig", userAgent, patchObj, d, tfConfig) + if err != nil { + return err + } + } + } + + if d.HasChange("effective_labels") { + patchEnv := &composer.Environment{Labels: tpgresource.ExpandEffectiveLabels(d)} + err := resourceComposerEnvironmentPatchField("labels", userAgent, patchEnv, d, tfConfig) + if err != nil { + return err + } + } + + d.Partial(false) + return resourceComposerEnvironmentRead(d, tfConfig) +} + +func resourceComposerEnvironmentPostCreateUpdate(updateEnv *composer.Environment, d *schema.ResourceData, cfg *transport_tpg.Config, userAgent string) error { + if updateEnv == nil { + return nil + } + + if updateEnv.Config != nil && 
updateEnv.Config.SoftwareConfig != nil && len(updateEnv.Config.SoftwareConfig.PypiPackages) > 0 { + log.Printf("[DEBUG] Running post-create update for Environment %q", d.Id()) + err := resourceComposerEnvironmentPatchField("config.softwareConfig.pypiPackages", userAgent, updateEnv, d, cfg) + if err != nil { + return err + } + + log.Printf("[DEBUG] Finish update to Environment %q post create for update only fields", d.Id()) + } + + return resourceComposerEnvironmentRead(d, cfg) +} + +func resourceComposerEnvironmentPatchField(updateMask, userAgent string, env *composer.Environment, d *schema.ResourceData, config *transport_tpg.Config) error { + envJson, _ := env.MarshalJSON() + log.Printf("[DEBUG] Updating Environment %q (updateMask = %q): %s", d.Id(), updateMask, string(envJson)) + envName, err := resourceComposerEnvironmentName(d, config) + if err != nil { + return err + } + + op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments. + Patch(envName.ResourceName(), env). + UpdateMask(updateMask).Do() + if err != nil { + return err + } + + waitErr := ComposerOperationWaitTime( + config, op, envName.Project, "Updating newly created Environment", userAgent, + d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + // The resource didn't actually update. 
+ return fmt.Errorf("Error waiting to update Environment: %s", waitErr) + } + + log.Printf("[DEBUG] Finished updating Environment %q (updateMask = %q)", d.Id(), updateMask) + return nil +} + +func resourceComposerEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + envName, err := resourceComposerEnvironmentName(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting Environment %q", d.Id()) + op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.ResourceName()).Do() + if err != nil { + return err + } + + err = ComposerOperationWaitTime( + config, op, envName.Project, "Deleting Environment", userAgent, + d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Environment %q: %#v", d.Id(), op) + return nil +} + +func resourceComposerEnvironmentImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}region{{"}}"}}/environments/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenComposerStorageConfig(storageConfig *composer.StorageConfig) interface{} { + if storageConfig == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformed["bucket"] = storageConfig.Bucket + + return []interface{}{transformed} +} + +func 
flattenComposerEnvironmentConfig(envCfg *composer.EnvironmentConfig) interface{} { + if envCfg == nil { + return nil + } + transformed := make(map[string]interface{}) + transformed["gke_cluster"] = envCfg.GkeCluster + transformed["dag_gcs_prefix"] = envCfg.DagGcsPrefix + transformed["node_count"] = envCfg.NodeCount + transformed["airflow_uri"] = envCfg.AirflowUri + transformed["node_config"] = flattenComposerEnvironmentConfigNodeConfig(envCfg.NodeConfig) + transformed["software_config"] = flattenComposerEnvironmentConfigSoftwareConfig(envCfg.SoftwareConfig) + imageVersion := envCfg.SoftwareConfig.ImageVersion + if !isComposer3(imageVersion){ + transformed["private_environment_config"] = flattenComposerEnvironmentConfigPrivateEnvironmentConfig(envCfg.PrivateEnvironmentConfig) + } +{{- if ne $.TargetVersionName "ga" }} + if isComposer3(imageVersion) && envCfg.PrivateEnvironmentConfig != nil { + transformed["enable_private_environment"] = envCfg.PrivateEnvironmentConfig.EnablePrivateEnvironment + transformed["enable_private_builds_only"] = envCfg.PrivateEnvironmentConfig.EnablePrivateBuildsOnly + } +{{- end }} + transformed["web_server_network_access_control"] = flattenComposerEnvironmentConfigWebServerNetworkAccessControl(envCfg.WebServerNetworkAccessControl) + transformed["database_config"] = flattenComposerEnvironmentConfigDatabaseConfig(envCfg.DatabaseConfig) + transformed["web_server_config"] = flattenComposerEnvironmentConfigWebServerConfig(envCfg.WebServerConfig) + transformed["encryption_config"] = flattenComposerEnvironmentConfigEncryptionConfig(envCfg.EncryptionConfig) + transformed["maintenance_window"] = flattenComposerEnvironmentConfigMaintenanceWindow(envCfg.MaintenanceWindow) + transformed["data_retention_config"] = flattenComposerEnvironmentConfigDataRetentionConfig(envCfg.DataRetentionConfig) + transformed["workloads_config"] = flattenComposerEnvironmentConfigWorkloadsConfig(envCfg.WorkloadsConfig) + transformed["recovery_config"] = 
flattenComposerEnvironmentConfigRecoveryConfig(envCfg.RecoveryConfig) + transformed["environment_size"] = envCfg.EnvironmentSize + if envCfg.ResilienceMode == "RESILIENCE_MODE_UNSPECIFIED" || envCfg.ResilienceMode == "" { + transformed["resilience_mode"] = "STANDARD_RESILIENCE" + } else { + transformed["resilience_mode"] = envCfg.ResilienceMode + } + transformed["master_authorized_networks_config"] = flattenComposerEnvironmentConfigMasterAuthorizedNetworksConfig(envCfg.MasterAuthorizedNetworksConfig) + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigWebServerNetworkAccessControl(accessControl *composer.WebServerNetworkAccessControl) interface{} { + if accessControl == nil || accessControl.AllowedIpRanges == nil { + return nil + } + + transformed := make([]interface{}, 0, len(accessControl.AllowedIpRanges)) + for _, ipRange := range accessControl.AllowedIpRanges { + data := map[string]interface{}{ + "value": ipRange.Value, + "description": ipRange.Description, + } + transformed = append(transformed, data) + } + + webServerNetworkAccessControl := make(map[string]interface{}) + + webServerNetworkAccessControl["allowed_ip_range"] = schema.NewSet(schema.HashResource(allowedIpRangesConfig), transformed) + + return []interface{}{webServerNetworkAccessControl} +} + +func flattenComposerEnvironmentConfigDatabaseConfig(databaseCfg *composer.DatabaseConfig) interface{} { + if databaseCfg == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformed["machine_type"] = databaseCfg.MachineType + transformed["zone"] = databaseCfg.Zone + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigWebServerConfig(webServerCfg *composer.WebServerConfig) interface{} { + if webServerCfg == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformed["machine_type"] = webServerCfg.MachineType + + return []interface{}{transformed} +} + +func 
flattenComposerEnvironmentConfigEncryptionConfig(encryptionCfg *composer.EncryptionConfig) interface{} { + if encryptionCfg == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = encryptionCfg.KmsKeyName + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigRecoveryConfig(recoveryCfg *composer.RecoveryConfig) interface{} { + if recoveryCfg == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformedScheduledSnapshotsConfig := make(map[string]interface{}) + + scheduledSnapshotsConfig := recoveryCfg.ScheduledSnapshotsConfig + + if scheduledSnapshotsConfig == nil { + transformedScheduledSnapshotsConfig = nil + } else { + transformedScheduledSnapshotsConfig["enabled"] = scheduledSnapshotsConfig.Enabled + transformedScheduledSnapshotsConfig["snapshot_location"] = scheduledSnapshotsConfig.SnapshotLocation + transformedScheduledSnapshotsConfig["time_zone"] = scheduledSnapshotsConfig.TimeZone + transformedScheduledSnapshotsConfig["snapshot_creation_schedule"] = scheduledSnapshotsConfig.SnapshotCreationSchedule + } + + transformed["scheduled_snapshots_config"] = []interface{}{transformedScheduledSnapshotsConfig} + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigMaintenanceWindow(maintenanceWindow *composer.MaintenanceWindow) interface{} { + if maintenanceWindow == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformed["start_time"] = maintenanceWindow.StartTime + transformed["end_time"] = maintenanceWindow.EndTime + transformed["recurrence"] = maintenanceWindow.Recurrence + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigDataRetentionConfig(dataRetentionConfig *composer.DataRetentionConfig) interface{} { + if dataRetentionConfig == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformed["task_logs_retention_config"] = 
flattenComposerEnvironmentConfigDataRetentionConfigTaskLogsRetentionConfig(dataRetentionConfig.TaskLogsRetentionConfig) + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigDataRetentionConfigTaskLogsRetentionConfig(taskLogsRetentionConfig *composer.TaskLogsRetentionConfig) interface{} { + if taskLogsRetentionConfig == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformed["storage_mode"] = taskLogsRetentionConfig.StorageMode + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigWorkloadsConfig(workloadsConfig *composer.WorkloadsConfig) interface{} { + if workloadsConfig == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformedScheduler := make(map[string]interface{}) + transformedTriggerer := make(map[string]interface{}) + transformedWebServer := make(map[string]interface{}) + transformedWorker := make(map[string]interface{}) +{{- if ne $.TargetVersionName "ga" }} + transformedDagProcessor := make(map[string]interface{}) +{{- end }} + + wlCfgScheduler := workloadsConfig.Scheduler + wlCfgTriggerer := workloadsConfig.Triggerer + wlCfgWebServer := workloadsConfig.WebServer + wlCfgWorker := workloadsConfig.Worker +{{- if ne $.TargetVersionName "ga" }} + wlCfgDagProcessor := workloadsConfig.DagProcessor +{{- end }} + + if wlCfgScheduler == nil { + transformedScheduler = nil + } else { + transformedScheduler["cpu"] = wlCfgScheduler.Cpu + transformedScheduler["memory_gb"] = wlCfgScheduler.MemoryGb + transformedScheduler["storage_gb"] = wlCfgScheduler.StorageGb + transformedScheduler["count"] = wlCfgScheduler.Count + } + + if wlCfgTriggerer == nil { + transformedTriggerer = nil + } else { + transformedTriggerer["cpu"] = wlCfgTriggerer.Cpu + transformedTriggerer["memory_gb"] = wlCfgTriggerer.MemoryGb + transformedTriggerer["count"] = wlCfgTriggerer.Count + } + + if wlCfgWebServer == nil { + transformedWebServer = nil + } else { + transformedWebServer["cpu"] = 
wlCfgWebServer.Cpu + transformedWebServer["memory_gb"] = wlCfgWebServer.MemoryGb + transformedWebServer["storage_gb"] = wlCfgWebServer.StorageGb + } + + if wlCfgWorker == nil { + transformedWorker = nil + } else { + transformedWorker["cpu"] = wlCfgWorker.Cpu + transformedWorker["memory_gb"] = wlCfgWorker.MemoryGb + transformedWorker["storage_gb"] = wlCfgWorker.StorageGb + transformedWorker["min_count"] = wlCfgWorker.MinCount + transformedWorker["max_count"] = wlCfgWorker.MaxCount + } + +{{ if ne $.TargetVersionName `ga` -}} + if wlCfgDagProcessor == nil { + transformedDagProcessor = nil + } else { + transformedDagProcessor["cpu"] = wlCfgDagProcessor.Cpu + transformedDagProcessor["memory_gb"] = wlCfgDagProcessor.MemoryGb + transformedDagProcessor["storage_gb"] = wlCfgDagProcessor.StorageGb + transformedDagProcessor["count"] = wlCfgDagProcessor.Count + } +{{- end }} + + transformed["scheduler"] = []interface{}{transformedScheduler} + if transformedTriggerer != nil { + transformed["triggerer"] = []interface{}{transformedTriggerer} + } + transformed["web_server"] = []interface{}{transformedWebServer} + transformed["worker"] = []interface{}{transformedWorker} +{{- if ne $.TargetVersionName "ga" }} + if transformedDagProcessor != nil { + transformed["dag_processor"] = []interface{}{transformedDagProcessor} + } +{{- end }} + + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigPrivateEnvironmentConfig(envCfg *composer.PrivateEnvironmentConfig) interface{} { + if envCfg == nil { + return nil + } + + transformed := make(map[string]interface{}) + if envCfg.NetworkingConfig != nil{ + transformed["connection_type"] = envCfg.NetworkingConfig.ConnectionType + } + if envCfg.PrivateClusterConfig != nil{ + transformed["enable_private_endpoint"] = envCfg.PrivateClusterConfig.EnablePrivateEndpoint + transformed["master_ipv4_cidr_block"] = envCfg.PrivateClusterConfig.MasterIpv4CidrBlock + } + transformed["cloud_sql_ipv4_cidr_block"] = 
envCfg.CloudSqlIpv4CidrBlock + transformed["web_server_ipv4_cidr_block"] = envCfg.WebServerIpv4CidrBlock + transformed["cloud_composer_network_ipv4_cidr_block"] = envCfg.CloudComposerNetworkIpv4CidrBlock + transformed["enable_privately_used_public_ips"] = envCfg.EnablePrivatelyUsedPublicIps + transformed["cloud_composer_connection_subnetwork"] = envCfg.CloudComposerConnectionSubnetwork + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigNodeConfig(nodeCfg *composer.NodeConfig) interface{} { + if nodeCfg == nil { + return nil + } + transformed := make(map[string]interface{}) + transformed["zone"] = nodeCfg.Location + transformed["machine_type"] = nodeCfg.MachineType + transformed["network"] = nodeCfg.Network + transformed["subnetwork"] = nodeCfg.Subnetwork +{{- if ne $.TargetVersionName "ga" }} + transformed["composer_network_attachment"] = nodeCfg.ComposerNetworkAttachment +{{- end }} + transformed["disk_size_gb"] = nodeCfg.DiskSizeGb + transformed["service_account"] = nodeCfg.ServiceAccount + transformed["oauth_scopes"] = flattenComposerEnvironmentConfigNodeConfigOauthScopes(nodeCfg.OauthScopes) +{{- if ne $.TargetVersionName "ga" }} + transformed["max_pods_per_node"] = nodeCfg.MaxPodsPerNode +{{- end }} + transformed["enable_ip_masq_agent"] = nodeCfg.EnableIpMasqAgent + transformed["tags"] = flattenComposerEnvironmentConfigNodeConfigTags(nodeCfg.Tags) + transformed["ip_allocation_policy"] = flattenComposerEnvironmentConfigNodeConfigIPAllocationPolicy(nodeCfg.IpAllocationPolicy) +{{- if ne $.TargetVersionName "ga" }} + transformed["composer_internal_ipv4_cidr_block"] = nodeCfg.ComposerInternalIpv4CidrBlock +{{- end }} + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigNodeConfigIPAllocationPolicy(ipPolicy *composer.IPAllocationPolicy) interface{} { + if ipPolicy == nil { + return nil + } + transformed := make(map[string]interface{}) + transformed["use_ip_aliases"] = ipPolicy.UseIpAliases + 
transformed["cluster_ipv4_cidr_block"] = ipPolicy.ClusterIpv4CidrBlock + transformed["cluster_secondary_range_name"] = ipPolicy.ClusterSecondaryRangeName + transformed["services_ipv4_cidr_block"] = ipPolicy.ServicesIpv4CidrBlock + transformed["services_secondary_range_name"] = ipPolicy.ServicesSecondaryRangeName + + return []interface{}{transformed} +} + +func flattenComposerEnvironmentConfigNodeConfigOauthScopes(v interface{}) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(v.([]string))) +} + +func flattenComposerEnvironmentConfigNodeConfigTags(v interface{}) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(v.([]string))) +} + +func flattenComposerEnvironmentConfigSoftwareConfig(softwareCfg *composer.SoftwareConfig) interface{} { + if softwareCfg == nil { + return nil + } + transformed := make(map[string]interface{}) + transformed["image_version"] = softwareCfg.ImageVersion + transformed["python_version"] = softwareCfg.PythonVersion + transformed["airflow_config_overrides"] = softwareCfg.AirflowConfigOverrides + transformed["pypi_packages"] = softwareCfg.PypiPackages + transformed["env_variables"] = softwareCfg.EnvVariables + transformed["scheduler_count"] = softwareCfg.SchedulerCount +{{- if ne $.TargetVersionName "ga" }} + transformed["cloud_data_lineage_integration"] = flattenComposerEnvironmentConfigSoftwareConfigCloudDataLineageIntegration(softwareCfg.CloudDataLineageIntegration) + if softwareCfg.WebServerPluginsMode == "PLUGINS_DISABLED"{ + transformed["web_server_plugins_mode"] = "DISABLED" + } else if softwareCfg.WebServerPluginsMode == "PLUGINS_ENABLED"{ + transformed["web_server_plugins_mode"] = "ENABLED" + } else { + transformed["web_server_plugins_mode"] = softwareCfg.WebServerPluginsMode + } +{{- end }} + return []interface{}{transformed} +} + +{{ if ne $.TargetVersionName `ga` -}} +func 
flattenComposerEnvironmentConfigSoftwareConfigCloudDataLineageIntegration(cloudDataLineageIntegration *composer.CloudDataLineageIntegration) interface{} { + if cloudDataLineageIntegration == nil { + return nil + } + + transformed := make(map[string]interface{}) + transformed["enabled"] = cloudDataLineageIntegration.Enabled + + return []interface{}{transformed} +} +{{- end }} + +func flattenComposerEnvironmentConfigMasterAuthorizedNetworksConfig(masterAuthNetsCfg *composer.MasterAuthorizedNetworksConfig) interface{} { + if masterAuthNetsCfg == nil { + return nil + } + + transformed := make([]interface{}, 0, len(masterAuthNetsCfg.CidrBlocks)) + for _, cidrBlock := range masterAuthNetsCfg.CidrBlocks { + data := map[string]interface{}{ + "display_name": cidrBlock.DisplayName, + "cidr_block": cidrBlock.CidrBlock, + } + transformed = append(transformed, data) + } + + masterAuthorizedNetworksConfig := make(map[string]interface{}) + masterAuthorizedNetworksConfig["enabled"] = masterAuthNetsCfg.Enabled + masterAuthorizedNetworksConfig["cidr_blocks"] = schema.NewSet(schema.HashResource(cidrBlocks), transformed) + + return []interface{}{masterAuthorizedNetworksConfig} +} + +func expandComposerEnvironmentConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.EnvironmentConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + original := l[0].(map[string]interface{}) + transformed := &composer.EnvironmentConfig{} + + if nodeCountRaw, ok := original["node_count"]; ok { + transformedNodeCount, err := expandComposerEnvironmentConfigNodeCount(nodeCountRaw, d, config) + if err != nil { + return nil, err + } + transformed.NodeCount = transformedNodeCount + } + + transformedNodeConfig, err := expandComposerEnvironmentConfigNodeConfig(original["node_config"], d, config) + if err != nil { + return nil, err + } + transformed.NodeConfig = transformedNodeConfig + + transformedSoftwareConfig, err := 
expandComposerEnvironmentConfigSoftwareConfig(original["software_config"], d, config) + if err != nil { + return nil, err + } + transformed.SoftwareConfig = transformedSoftwareConfig + + transformedPrivateEnvironmentConfig, err := expandComposerEnvironmentConfigPrivateEnvironmentConfig(original["private_environment_config"], d, config) + if err != nil { + return nil, err + } + transformed.PrivateEnvironmentConfig = transformedPrivateEnvironmentConfig + +{{ if ne $.TargetVersionName `ga` -}} + /* + config.enable_private_environment in terraform maps to + composer.PrivateEnvironmentConfig.EnablePrivateEnvironment in API. + Check image version to avoid overriding EnablePrivateEnvironment in case of other versions. + */ + imageVersion := d.Get("config.0.software_config.0.image_version").(string) + if isComposer3(imageVersion) { + transformed.PrivateEnvironmentConfig = &composer.PrivateEnvironmentConfig{} + if enablePrivateEnvironmentRaw, ok := original["enable_private_environment"]; ok { + transformed.PrivateEnvironmentConfig.EnablePrivateEnvironment = enablePrivateEnvironmentRaw.(bool) + } + if enablePrivateBuildsOnlyRaw, ok := original["enable_private_builds_only"]; ok { + transformed.PrivateEnvironmentConfig.EnablePrivateBuildsOnly = enablePrivateBuildsOnlyRaw.(bool) + } + } +{{- end }} + + transformedWebServerNetworkAccessControl, err := expandComposerEnvironmentConfigWebServerNetworkAccessControl(original["web_server_network_access_control"], d, config) + if err != nil { + return nil, err + } + transformed.WebServerNetworkAccessControl = transformedWebServerNetworkAccessControl + + transformedDatabaseConfig, err := expandComposerEnvironmentConfigDatabaseConfig(original["database_config"], d, config) + if err != nil { + return nil, err + } + transformed.DatabaseConfig = transformedDatabaseConfig + + transformedWebServerConfig, err := expandComposerEnvironmentConfigWebServerConfig(original["web_server_config"], d, config) + if err != nil { + return nil, err + } + 
transformed.WebServerConfig = transformedWebServerConfig + + transformedEncryptionConfig, err := expandComposerEnvironmentConfigEncryptionConfig(original["encryption_config"], d, config) + if err != nil { + return nil, err + } + transformed.EncryptionConfig = transformedEncryptionConfig + + transformedMaintenanceWindow, err := expandComposerEnvironmentConfigMaintenanceWindow(original["maintenance_window"], d, config) + if err != nil { + return nil, err + } + transformed.MaintenanceWindow = transformedMaintenanceWindow + + transformedDataRetentionConfig, err := expandComposerEnvironmentConfigDataRetentionConfig(original["data_retention_config"], d, config) + if err != nil { + return nil, err + } + transformed.DataRetentionConfig = transformedDataRetentionConfig + + transformedWorkloadsConfig, err := expandComposerEnvironmentConfigWorkloadsConfig(original["workloads_config"], d, config) + if err != nil { + return nil, err + } + transformed.WorkloadsConfig = transformedWorkloadsConfig + + transformedEnvironmentSize, err := expandComposerEnvironmentConfigEnvironmentSize(original["environment_size"], d, config) + if err != nil { + return nil, err + } + transformed.EnvironmentSize = transformedEnvironmentSize + + transformedResilienceMode, err := expandComposerEnvironmentConfigResilienceMode(original["resilience_mode"], d, config) + if err != nil { + return nil, err + } + if transformedResilienceMode == "STANDARD_RESILIENCE" { + transformed.ResilienceMode = "RESILIENCE_MODE_UNSPECIFIED" + } else { + transformed.ResilienceMode = transformedResilienceMode + } + + + transformedMasterAuthorizedNetworksConfig, err := expandComposerEnvironmentConfigMasterAuthorizedNetworksConfig(original["master_authorized_networks_config"], d, config) + if err != nil { + return nil, err + } + transformed.MasterAuthorizedNetworksConfig = transformedMasterAuthorizedNetworksConfig + + transformedRecoveryConfig, err := expandComposerEnvironmentConfigRecoveryConfig(original["recovery_config"], d, 
config) + if err != nil { + return nil, err + } + transformed.RecoveryConfig = transformedRecoveryConfig + + return transformed, nil +} + +func expandComposerEnvironmentConfigNodeCount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (int64, error) { + if v == nil { + return 0, nil + } + return int64(v.(int)), nil +} + +func expandComposerEnvironmentConfigWebServerNetworkAccessControl(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.WebServerNetworkAccessControl, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + + allowedIpRangesRaw := original["allowed_ip_range"].(*schema.Set).List() + if len(allowedIpRangesRaw) == 0 { + return nil, nil + } + + transformed := &composer.WebServerNetworkAccessControl{} + allowedIpRanges := make([]*composer.AllowedIpRange, 0, len(original)) + + for _, originalIpRange := range allowedIpRangesRaw { + originalRangeRaw := originalIpRange.(map[string]interface{}) + transformedRange := &composer.AllowedIpRange{Value: originalRangeRaw["value"].(string)} + if v, ok := originalRangeRaw["description"]; ok { + transformedRange.Description = v.(string) + } + allowedIpRanges = append(allowedIpRanges, transformedRange) + } + + transformed.AllowedIpRanges = allowedIpRanges + return transformed, nil +} + +func expandComposerEnvironmentConfigMasterAuthorizedNetworksConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.MasterAuthorizedNetworksConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + + cidrBlocksRaw := original["cidr_blocks"].(*schema.Set).List() + + transformed := &composer.MasterAuthorizedNetworksConfig{} + cidrBlocks := make([]*composer.CidrBlock, 0, len(original)) + + for _, originalCidrBlock := range cidrBlocksRaw { + originalCidrBlockRaw := originalCidrBlock.(map[string]interface{}) 
+ transformedCidrBlock := &composer.CidrBlock{} + if v, ok := originalCidrBlockRaw["display_name"]; ok { + transformedCidrBlock.DisplayName = v.(string) + } + transformedCidrBlock.CidrBlock = originalCidrBlockRaw["cidr_block"].(string) + cidrBlocks = append(cidrBlocks, transformedCidrBlock) + } + transformed.Enabled = original["enabled"].(bool) + transformed.CidrBlocks = cidrBlocks + return transformed, nil +} + + +func expandComposerEnvironmentConfigDatabaseConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.DatabaseConfig, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + + transformed := &composer.DatabaseConfig{} + transformed.MachineType = original["machine_type"].(string) + transformed.Zone = original["zone"].(string) + + return transformed, nil +} + +func expandComposerEnvironmentConfigWebServerConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.WebServerConfig, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + + transformed := &composer.WebServerConfig{} + transformed.MachineType = original["machine_type"].(string) + + return transformed, nil +} + +func expandComposerEnvironmentConfigEncryptionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.EncryptionConfig, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + + transformed := &composer.EncryptionConfig{} + transformed.KmsKeyName = original["kms_key_name"].(string) + + return transformed, nil +} + +func expandComposerEnvironmentConfigMaintenanceWindow(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.MaintenanceWindow, error) { + l := v.([]interface{}) + if len(l) == 0 { + return 
nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.MaintenanceWindow{} + + if v, ok := original["start_time"]; ok { + transformed.StartTime = v.(string) + } + + if v, ok := original["end_time"]; ok { + transformed.EndTime = v.(string) + } + + if v, ok := original["recurrence"]; ok { + transformed.Recurrence = v.(string) + } + + return transformed, nil +} + +func expandComposerEnvironmentConfigDataRetentionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.DataRetentionConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.DataRetentionConfig{} + + if taskLogsRetentionConfig, ok := original["task_logs_retention_config"]; ok { + transformedTaskLogsRetentionConfig, err := expandComposerEnvironmentConfigDataRetentionConfigTaskLogsRetentionConfig(taskLogsRetentionConfig, d, config) + if err != nil { + return nil, err + } + transformed.TaskLogsRetentionConfig = transformedTaskLogsRetentionConfig + } + + return transformed, nil +} + +func expandComposerEnvironmentConfigDataRetentionConfigTaskLogsRetentionConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.TaskLogsRetentionConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.TaskLogsRetentionConfig{} + + if v, ok := original["storage_mode"]; ok { + transformed.StorageMode = v.(string) + } + + return transformed, nil +} + +func expandComposerEnvironmentConfigWorkloadsConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.WorkloadsConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.WorkloadsConfig{} + + if v, ok := original["scheduler"]; ok { + 
if len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + transformedScheduler := &composer.SchedulerResource{} + originalSchedulerRaw := v.([]interface{})[0].(map[string]interface{}) + transformedScheduler.Count = int64(originalSchedulerRaw["count"].(int)) + transformedScheduler.Cpu = originalSchedulerRaw["cpu"].(float64) + transformedScheduler.MemoryGb = originalSchedulerRaw["memory_gb"].(float64) + transformedScheduler.StorageGb = originalSchedulerRaw["storage_gb"].(float64) + transformed.Scheduler = transformedScheduler + } + } + + if v, ok := original["triggerer"]; ok { + if len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + transformedTriggerer := &composer.TriggererResource{} + originalTriggererRaw := v.([]interface{})[0].(map[string]interface{}) + transformedTriggerer.Count = int64(originalTriggererRaw["count"].(int)) + transformedTriggerer.Cpu = originalTriggererRaw["cpu"].(float64) + transformedTriggerer.MemoryGb = originalTriggererRaw["memory_gb"].(float64) + transformed.Triggerer = transformedTriggerer + } + } + + if v, ok := original["web_server"]; ok { + if len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + transformedWebServer := &composer.WebServerResource{} + originalWebServerRaw := v.([]interface{})[0].(map[string]interface{}) + transformedWebServer.Cpu = originalWebServerRaw["cpu"].(float64) + transformedWebServer.MemoryGb = originalWebServerRaw["memory_gb"].(float64) + transformedWebServer.StorageGb = originalWebServerRaw["storage_gb"].(float64) + transformed.WebServer = transformedWebServer + } + } + + if v, ok := original["worker"]; ok { + if len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + transformedWorker := &composer.WorkerResource{} + originalWorkerRaw := v.([]interface{})[0].(map[string]interface{}) + transformedWorker.Cpu = originalWorkerRaw["cpu"].(float64) + transformedWorker.MemoryGb = originalWorkerRaw["memory_gb"].(float64) + transformedWorker.StorageGb = 
originalWorkerRaw["storage_gb"].(float64) + transformedWorker.MinCount = int64(originalWorkerRaw["min_count"].(int)) + transformedWorker.MaxCount = int64(originalWorkerRaw["max_count"].(int)) + transformed.Worker = transformedWorker + } + } + +{{ if ne $.TargetVersionName `ga` -}} + if v, ok := original["dag_processor"]; ok { + if len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + transformedDagProcessor := &composer.DagProcessorResource{} + originalDagProcessorRaw := v.([]interface{})[0].(map[string]interface{}) + transformedDagProcessor.Cpu = originalDagProcessorRaw["cpu"].(float64) + transformedDagProcessor.MemoryGb = originalDagProcessorRaw["memory_gb"].(float64) + transformedDagProcessor.StorageGb = originalDagProcessorRaw["storage_gb"].(float64) + transformedDagProcessor.Count = int64(originalDagProcessorRaw["count"].(int)) + transformed.DagProcessor = transformedDagProcessor + } + } +{{- end }} + + return transformed, nil +} + +func expandComposerEnvironmentConfigRecoveryConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.RecoveryConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.RecoveryConfig{} + + if v, ok := original["scheduled_snapshots_config"]; ok { + if len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + transformedScheduledSnapshotsConfig := &composer.ScheduledSnapshotsConfig{} + originalScheduledSnapshotsConfigRaw := v.([]interface{})[0].(map[string]interface{}) + transformedScheduledSnapshotsConfig.Enabled = originalScheduledSnapshotsConfigRaw["enabled"].(bool) + transformedScheduledSnapshotsConfig.SnapshotLocation = originalScheduledSnapshotsConfigRaw["snapshot_location"].(string) + transformedScheduledSnapshotsConfig.TimeZone = originalScheduledSnapshotsConfigRaw["time_zone"].(string) + transformedScheduledSnapshotsConfig.SnapshotCreationSchedule = 
originalScheduledSnapshotsConfigRaw["snapshot_creation_schedule"].(string) + transformed.ScheduledSnapshotsConfig = transformedScheduledSnapshotsConfig + } + } + + return transformed, nil +} + +func expandComposerEnvironmentConfigEnvironmentSize(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + if v == nil { + return "", nil + } + return v.(string), nil +} + +func expandComposerEnvironmentConfigResilienceMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + if v == nil { + return "", nil + } + return v.(string), nil +} + +func expandComposerEnvironmentConfigPrivateEnvironmentConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.PrivateEnvironmentConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.PrivateEnvironmentConfig{ + EnablePrivateEnvironment: true, + } + + subBlock := &composer.PrivateClusterConfig{} + networkConfig := &composer.NetworkingConfig{} + + if v, ok := original["connection_type"]; ok { + networkConfig.ConnectionType = v.(string) + } + + if v, ok := original["enable_private_endpoint"]; ok { + subBlock.EnablePrivateEndpoint = v.(bool) + } + + if v, ok := original["master_ipv4_cidr_block"]; ok { + subBlock.MasterIpv4CidrBlock = v.(string) + } + + if v, ok := original["cloud_sql_ipv4_cidr_block"]; ok { + transformed.CloudSqlIpv4CidrBlock = v.(string) + } + + if v, ok := original["web_server_ipv4_cidr_block"]; ok { + transformed.WebServerIpv4CidrBlock = v.(string) + } + + if v, ok := original["cloud_composer_network_ipv4_cidr_block"]; ok { + transformed.CloudComposerNetworkIpv4CidrBlock = v.(string) + } + if v, ok := original["enable_privately_used_public_ips"]; ok { + transformed.EnablePrivatelyUsedPublicIps = v.(bool) + } + if v, ok := original["cloud_composer_connection_subnetwork"]; ok { + 
transformed.CloudComposerConnectionSubnetwork = v.(string) + } + + transformed.PrivateClusterConfig = subBlock + transformed.NetworkingConfig = networkConfig + + return transformed, nil +} + +func expandComposerEnvironmentConfigNodeConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.NodeConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.NodeConfig{} + + if transformedDiskSizeGb, ok := original["disk_size_gb"]; ok { + transformed.DiskSizeGb = int64(transformedDiskSizeGb.(int)) + } + + if v, ok := original["service_account"]; ok { + transformedServiceAccount, err := expandComposerEnvironmentServiceAccount(v, d, config) + if err != nil { + return nil, err + } + transformed.ServiceAccount = transformedServiceAccount + } + +{{ if ne $.TargetVersionName `ga` -}} + if transformedMaxPodsPerNode, ok := original["max_pods_per_node"]; ok { + transformed.MaxPodsPerNode = int64(transformedMaxPodsPerNode.(int)) + } +{{- end }} + if transformedEnableIpMasqAgent, ok := original["enable_ip_masq_agent"]; ok { + transformed.EnableIpMasqAgent = transformedEnableIpMasqAgent.(bool) + } + + var nodeConfigZone string + if v, ok := original["zone"]; ok { + transformedZone, err := expandComposerEnvironmentZone(v, d, config) + if err != nil { + return nil, err + } + transformed.Location = transformedZone + nodeConfigZone = transformedZone + } + + if v, ok := original["machine_type"]; ok { + transformedMachineType, err := expandComposerEnvironmentMachineType(v, d, config, nodeConfigZone) + if err != nil { + return nil, err + } + transformed.MachineType = transformedMachineType + } + + if v, ok := original["network"]; ok { + transformedNetwork, err := expandComposerEnvironmentNetwork(v, d, config) + if err != nil { + return nil, err + } + transformed.Network = transformedNetwork + } + + if v, ok := original["subnetwork"]; ok { + 
transformedSubnetwork, err := expandComposerEnvironmentSubnetwork(v, d, config) + if err != nil { + return nil, err + } + transformed.Subnetwork = transformedSubnetwork + } + +{{ if ne $.TargetVersionName `ga` -}} + if v, ok := original["composer_network_attachment"]; ok { + transformed.ComposerNetworkAttachment = v.(string) + } +{{- end }} + + transformedIPAllocationPolicy, err := expandComposerEnvironmentIPAllocationPolicy(original["ip_allocation_policy"], d, config) + if err != nil { + return nil, err + } + transformed.IpAllocationPolicy = transformedIPAllocationPolicy + + transformedOauthScopes, err := expandComposerEnvironmentSetList(original["oauth_scopes"], d, config) + if err != nil { + return nil, err + } + transformed.OauthScopes = transformedOauthScopes + + transformedTags, err := expandComposerEnvironmentSetList(original["tags"], d, config) + if err != nil { + return nil, err + } + transformed.Tags = transformedTags + +{{ if ne $.TargetVersionName `ga` -}} + if transformedComposerInternalIpv4CidrBlock, ok := original["composer_internal_ipv4_cidr_block"]; ok { + transformed.ComposerInternalIpv4CidrBlock = transformedComposerInternalIpv4CidrBlock.(string) + } +{{- end }} + + return transformed, nil +} + +func expandComposerEnvironmentIPAllocationPolicy(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.IPAllocationPolicy, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.IPAllocationPolicy{} + + if v, ok := original["use_ip_aliases"]; ok { + transformed.UseIpAliases = v.(bool) + } + + if v, ok := original["cluster_ipv4_cidr_block"]; ok { + transformed.ClusterIpv4CidrBlock = v.(string) + } + + if v, ok := original["cluster_secondary_range_name"]; ok { + transformed.ClusterSecondaryRangeName = v.(string) + } + + if v, ok := original["services_ipv4_cidr_block"]; ok { + transformed.ServicesIpv4CidrBlock = 
v.(string) + } + + if v, ok := original["services_secondary_range_name"]; ok { + transformed.ServicesSecondaryRangeName = v.(string) + } + return transformed, nil +} + +func expandComposerStorageConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.StorageConfig, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.StorageConfig{} + + if v, ok := original["bucket"]; ok { + transformed.Bucket = v.(string) + } + + return transformed, nil +} + +func expandComposerEnvironmentServiceAccount(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + serviceAccount := v.(string) + if len(serviceAccount) == 0 { + return "", nil + } + + return tpgresource.GetResourceNameFromSelfLink(serviceAccount), nil +} + +func expandComposerEnvironmentZone(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + zone := v.(string) + if len(zone) == 0 { + return zone, nil + } + if !strings.Contains(zone, "/") { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return "", err + } + return fmt.Sprintf("projects/%s/zones/%s", project, zone), nil + } + + return tpgresource.GetRelativePath(zone) +} + +func expandComposerEnvironmentMachineType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config, nodeCfgZone string) (string, error) { + machineType := v.(string) + requiredZone := tpgresource.GetResourceNameFromSelfLink(nodeCfgZone) + + fv, err := tpgresource.ParseMachineTypesFieldValue(v.(string), d, config) + if err != nil { + + // Try to construct machine type with zone/project given in config. 
+ project, err := tpgresource.GetProject(d, config) + if err != nil { + return "", err + } + + fv = &tpgresource.ZonalFieldValue{ + Project: project, + Zone: requiredZone, + Name: tpgresource.GetResourceNameFromSelfLink(machineType), + ResourceType: "machineTypes", + } + } + + // Make sure zone in node_config.machineType matches node_config.zone if + // given. + if requiredZone != "" && fv.Zone != requiredZone { + return "", fmt.Errorf("node_config machine_type %q must be in node_config zone %q", machineType, requiredZone) + } + return fv.RelativeLink(), nil +} + +func expandComposerEnvironmentNetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + fv, err := tpgresource.ParseNetworkFieldValue(v.(string), d, config) + if err != nil { + return "", err + } + return fv.RelativeLink(), nil +} + +func expandComposerEnvironmentSubnetwork(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (string, error) { + fv, err := tpgresource.ParseSubnetworkFieldValue(v.(string), d, config) + if err != nil { + return "", err + } + return fv.RelativeLink(), nil +} + +func expandComposerEnvironmentSetList(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) ([]string, error) { + if v == nil { + return nil, nil + } + return tpgresource.ConvertStringArr(v.(*schema.Set).List()), nil +} + +func expandComposerEnvironmentConfigSoftwareConfig(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.SoftwareConfig, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := &composer.SoftwareConfig{} + + transformed.ImageVersion = original["image_version"].(string) + transformed.PythonVersion = original["python_version"].(string) + transformed.AirflowConfigOverrides = expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "airflow_config_overrides") + transformed.PypiPackages = 
expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "pypi_packages") + transformed.EnvVariables = expandComposerEnvironmentConfigSoftwareConfigStringMap(original, "env_variables") + transformed.SchedulerCount = int64(original["scheduler_count"].(int)) + +{{ if ne $.TargetVersionName `ga` -}} + transformedCloudDataLineageIntegration, err := expandComposerEnvironmentConfigSoftwareConfigCloudDataLineageIntegration(original["cloud_data_lineage_integration"], d, config) + if err != nil { + return nil, err + } + transformed.CloudDataLineageIntegration = transformedCloudDataLineageIntegration + + if original["web_server_plugins_mode"].(string) == "DISABLED"{ + transformed.WebServerPluginsMode = "PLUGINS_DISABLED" + } else if original["web_server_plugins_mode"].(string) == "ENABLED"{ + transformed.WebServerPluginsMode = "PLUGINS_ENABLED" + } else { + transformed.WebServerPluginsMode = original["web_server_plugins_mode"].(string) + } +{{- end }} + + return transformed, nil +} + +func expandComposerEnvironmentConfigSoftwareConfigStringMap(softwareConfig map[string]interface{}, k string) map[string]string { + v, ok := softwareConfig[k] + if ok && v != nil { + return tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + return map[string]string{} +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandComposerEnvironmentConfigSoftwareConfigCloudDataLineageIntegration(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) (*composer.CloudDataLineageIntegration, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + + transformed := &composer.CloudDataLineageIntegration{} + transformed.Enabled = original["enabled"].(bool) + + return transformed, nil +} +{{- end }} + +func validateComposerEnvironmentPypiPackages(v interface{}, k string) (ws []string, errors []error) { + if v == nil { + return ws, errors + } + for pkgName := range v.(map[string]interface{}) { + if 
pkgName != strings.ToLower(pkgName) { + errors = append(errors, + fmt.Errorf("PYPI package %q can only contain lowercase characters", pkgName)) + } + } + + return ws, errors +} + +func validateComposerEnvironmentEnvVariables(v interface{}, k string) (ws []string, errors []error) { + if v == nil { + return ws, errors + } + + reEnvVarName := regexp.MustCompile(composerEnvironmentEnvVariablesRegexp) + reAirflowReserved := regexp.MustCompile(composerEnvironmentReservedAirflowEnvVarRegexp) + + for envVarName := range v.(map[string]interface{}) { + if !reEnvVarName.MatchString(envVarName) { + errors = append(errors, + fmt.Errorf("env_variable %q must match regexp %q", envVarName, composerEnvironmentEnvVariablesRegexp)) + } else if _, ok := composerEnvironmentReservedEnvVar[envVarName]; ok { + errors = append(errors, + fmt.Errorf("env_variable %q is a reserved name and cannot be used", envVarName)) + } else if reAirflowReserved.MatchString(envVarName) { + errors = append(errors, + fmt.Errorf("env_variable %q cannot match reserved Airflow variable names with regexp %q", + envVarName, composerEnvironmentReservedAirflowEnvVarRegexp)) + } + } + + return ws, errors +} + +func handleComposerEnvironmentCreationOpFailure(id string, envName *ComposerEnvironmentName, d *schema.ResourceData, config *transport_tpg.Config) error { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + log.Printf("[WARNING] Creation operation for Composer Environment %q failed, check Environment isn't still running", id) + // Try to get possible created but invalid environment. + env, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Get(envName.ResourceName()).Do() + if err != nil { + // If error is 401, we don't have to clean up environment, return nil. + // Otherwise, we encountered another error. 
+ return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Composer Environment %q", envName.ResourceName())) + } + + if env.State == "CREATING" { + return fmt.Errorf( + "Getting creation operation state failed while waiting for environment to finish creating, "+ + "but environment seems to still be in 'CREATING' state. Wait for operation to finish and either "+ + "manually delete environment or import %q into your state", id) + } + + log.Printf("[WARNING] Environment %q from failed creation operation was created, deleting.", id) + op, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.Delete(envName.ResourceName()).Do() + if err != nil { + return fmt.Errorf("Could not delete the invalid created environment with state %q: %s", env.State, err) + } + + waitErr := ComposerOperationWaitTime( + config, op, envName.Project, + fmt.Sprintf("Deleting invalid created Environment with state %q", env.State), userAgent, + d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + return fmt.Errorf("Error waiting to delete invalid Environment with state %q: %s", env.State, waitErr) + } + + return nil +} + +func getComposerEnvironmentPostCreateUpdateObj(env *composer.Environment) (updateEnv *composer.Environment) { + // pypiPackages can only be added via update + if env != nil && env.Config != nil && env.Config.SoftwareConfig != nil { + if len(env.Config.SoftwareConfig.PypiPackages) > 0 { + updateEnv = &composer.Environment{ + Config: &composer.EnvironmentConfig{ + SoftwareConfig: &composer.SoftwareConfig{ + PypiPackages: env.Config.SoftwareConfig.PypiPackages, + }, + }, + } + // Clear PYPI packages - otherwise, API will return error + // that the create request is invalid. 
+ env.Config.SoftwareConfig.PypiPackages = make(map[string]string) + } + } + + return updateEnv +} + +func resourceComposerEnvironmentName(d *schema.ResourceData, config *transport_tpg.Config) (*ComposerEnvironmentName, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return nil, err + } + + return &ComposerEnvironmentName{ + Project: project, + Region: region, + Environment: d.Get("name").(string), + }, nil +} + +type ComposerEnvironmentName struct { + Project string + Region string + Environment string +} + +func (n *ComposerEnvironmentName) ResourceName() string { + return fmt.Sprintf("projects/%s/locations/%s/environments/%s", n.Project, n.Region, n.Environment) +} + +func (n *ComposerEnvironmentName) ParentName() string { + return fmt.Sprintf("projects/%s/locations/%s", n.Project, n.Region) +} + +// The value we store (i.e. `old` in this method), might be only the service account email, +// but we expect either the email or the name (projects/.../serviceAccounts/...) +func compareServiceAccountEmailToLink(_, old, new string, _ *schema.ResourceData) bool { + // old is the service account email returned from the server. 
+ if !strings.HasPrefix("projects/", old) { + return old == tpgresource.GetResourceNameFromSelfLink(new) + } + return tpgresource.CompareSelfLinkRelativePaths("", old, new, nil) +} + +func validateServiceAccountRelativeNameOrEmail(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + serviceAccountRe := "(" + strings.Join(verify.PossibleServiceAccountNames, "|") + ")" + if strings.HasPrefix(value, "projects/") { + serviceAccountRe = fmt.Sprintf("projects/(.+)/serviceAccounts/%s", serviceAccountRe) + } + r := regexp.MustCompile(serviceAccountRe) + if !r.MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q (%q) doesn't match regexp %q", k, value, serviceAccountRe)) + } + + return +} + +func composerImageVersionDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + versionRe := regexp.MustCompile(composerEnvironmentVersionRegexp) + oldVersions := versionRe.FindStringSubmatch(old) + newVersions := versionRe.FindStringSubmatch(new) + if oldVersions == nil || len(oldVersions) < 11 { + // Somehow one of the versions didn't match the regexp or didn't + // have values in the capturing groups. In that case, fall back to + // an equality check. + if old != "" { + log.Printf("[WARN] Image version didn't match regexp: %s", old) + } + return old == new + } + if newVersions == nil || len(newVersions) < 11 { + // Somehow one of the versions didn't match the regexp or didn't + // have values in the capturing groups. In that case, fall back to + // an equality check. 
+ if new != "" { + log.Printf("[WARN] Image version didn't match regexp: %s", new) + } + return old == new + } + + oldAirflow := oldVersions[5] + oldAirflowMajor := oldVersions[6] + oldAirflowMajorMinor := oldVersions[6] + oldVersions[8] + oldAirflowMajorMinorPatch := oldVersions[6] + oldVersions[8] + oldVersions[9] + newAirflow := newVersions[5] + newAirflowMajor := newVersions[6] + newAirflowMajorMinor := newVersions[6] + newVersions[8] + newAirflowMajorMinorPatch := newVersions[6] + newVersions[8] + newVersions[9] + // Check Airflow versions. + if oldAirflow == oldAirflowMajor || newAirflow == newAirflowMajor { + // If one of the Airflow versions specifies only major version + // (like 1), we can only compare major versions. + eq, err := versionsEqual(oldAirflowMajor, newAirflowMajor) + if err != nil { + log.Printf("[WARN] Could not parse airflow version, %s", err) + } + if !eq { + return false + } + } else if oldAirflow == oldAirflowMajorMinor || newAirflow == newAirflowMajorMinor { + // If one of the Airflow versions specifies only major and minor version + // (like 1.10), we can only compare major and minor versions. + eq, err := versionsEqual(oldAirflowMajorMinor, newAirflowMajorMinor) + if err != nil { + log.Printf("[WARN] Could not parse airflow version, %s", err) + } + if !eq { + return false + } + } else if oldAirflow == oldAirflowMajorMinorPatch || newAirflow == newAirflowMajorMinorPatch { + // If one of the Airflow versions specifies only major, minor and patch version + // (like 1.10.15), we can only compare major, minor and patch versions. + eq, err := versionsEqual(oldAirflowMajorMinorPatch, newAirflowMajorMinorPatch) + if err != nil { + log.Printf("[WARN] Could not parse airflow version, %s", err) + } + if !eq { + return false + } + } else { + // Otherwise, we compare the full Airflow versions (like 1.10.15-build.5). 
+ eq, err := versionsEqual(oldAirflow, newAirflow) + if err != nil { + log.Printf("[WARN] Could not parse airflow version, %s", err) + } + if !eq { + return false + } + } + + oldComposer := oldVersions[1] + oldComposerMajor := oldVersions[2] + newComposer := newVersions[1] + newComposerMajor := newVersions[2] + // Check Composer versions. + if oldComposer == "latest" || newComposer == "latest" { + // We don't know what the latest version is so we suppress the diff. + return true + } else if oldComposer == oldComposerMajor || newComposer == newComposerMajor { + // If one of the Composer versions specifies only major version + // (like 1), we can only compare major versions. + eq, err := versionsEqual(oldComposerMajor, newComposerMajor) + if err != nil { + log.Printf("[WARN] Could not parse composer version, %s", err) + } + return eq + } else { + // Otherwise, we compare the full Composer versions (like 1.18.1). + eq, err := versionsEqual(oldComposer, newComposer) + if err != nil { + log.Printf("[WARN] Could not parse composer version, %s", err) + } + return eq + } +} + +func versionsEqual(old, new string) (bool, error) { + o, err := version.NewVersion(old) + if err != nil { + return false, err + } + n, err := version.NewVersion(new) + if err != nil { + return false, err + } + return o.Equal(n), nil +} + +func isComposer3(imageVersion string) bool { + return strings.Contains(imageVersion, "composer-3") +} + +func forceNewCustomDiff(key string) customdiff.ResourceConditionFunc { + return func(ctx context.Context, d *schema.ResourceDiff, meta interface{}) bool { + old, new := d.GetChange(key) + imageVersion := d.Get("config.0.software_config.0.image_version").(string) + if isComposer3(imageVersion) || tpgresource.CompareSelfLinkRelativePaths("", old.(string), new.(string), nil) { + return false + } + return true + } +} + +func imageVersionChangeValidationFunc(ctx context.Context, old, new, meta any) error { + if old.(string) != "" && !isComposer3(old.(string)) && 
isComposer3(new.(string)) { + return fmt.Errorf("upgrade to composer 3 is not yet supported") + } + return nil +} + +func validateComposer3FieldUsage(d *schema.ResourceDiff, key string, requireComposer3 bool) error { + _, ok := d.GetOk(key) + imageVersion := d.Get("config.0.software_config.0.image_version").(string) + if ok && ( isComposer3(imageVersion) != requireComposer3 ) { + if requireComposer3 { + return fmt.Errorf("error in configuration, %s should only be used in Composer 3", key) + } else { + return fmt.Errorf("error in configuration, %s should not be used in Composer 3", key) + } + } + return nil +} + +func versionValidationCustomizeDiffFunc(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + composer3FieldUsagePolicy := map[string]bool{ + "config.0.node_config.0.max_pods_per_node": false, // not allowed in composer 3 + "config.0.node_config.0.enable_ip_masq_agent": false, + "config.0.node_config.0.config.0.node_config.0.ip_allocation_policy": false, + "config.0.private_environment_config": false, + "config.0.master_authorized_networks_config": false, + "config.0.node_config.0.composer_network_attachment": true, // allowed only in composer 3 + "config.0.node_config.0.composer_internal_ipv4_cidr_block": true, + "config.0.software_config.0.web_server_plugins_mode": true, + "config.0.enable_private_environment": true, + "config.0.enable_private_builds_only": true, + "config.0.workloads_config.0.dag_processor": true, + } + for key, allowed := range composer3FieldUsagePolicy { + if err := validateComposer3FieldUsage(d, key, allowed); err != nil { + return err + } + } + return nil +} + +func validateComposerInternalIpv4CidrBlock(v any, k string) (warns []string, errs []error) { + cidr_range := v.(string) + _, ip_net, err := net.ParseCIDR(cidr_range) + if err != nil { + errs = append(errs, fmt.Errorf("Invalid CIDR range: %s", err)) + return + } + ones, _ := ip_net.Mask.Size() + if ones != 20 { + errs = append(errs, fmt.Errorf("Composer 
Internal IPv4 CIDR range must have size /20")) + } + return +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/composer/go/resource_composer_environment_test.go.tmpl b/mmv1/third_party/terraform/services/composer/go/resource_composer_environment_test.go.tmpl new file mode 100644 index 000000000000..8bb76ae8a560 --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/resource_composer_environment_test.go.tmpl @@ -0,0 +1,3536 @@ +package composer_test + +import ( + "fmt" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/services/composer" + "testing" + + "log" + "regexp" + "strings" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const testComposerEnvironmentPrefix = "tf-test-composer-env" +const testComposerNetworkPrefix = "tf-test-composer-net" +const testComposerBucketPrefix = "tf-test-composer-bucket" +const testComposerNetworkAttachmentPrefix = "tf-test-composer-nta" + +func allComposerServiceAgents() []string { + return []string{ + "cloudcomposer-accounts", + "compute-system", + "container-engine-robot", + "gcp-sa-artifactregistry", + "gcp-sa-pubsub", + } +} + +// Checks environment creation with minimum required information. 
+func TestAccComposerEnvironment_basic(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_basic(envName, network, subnetwork), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_composer_environment.test", "config.0.airflow_uri"), + resource.TestCheckResourceAttrSet("google_composer_environment.test", "config.0.gke_cluster"), + resource.TestCheckResourceAttrSet("google_composer_environment.test", "config.0.node_count"), + resource.TestCheckResourceAttrSet("google_composer_environment.test", "config.0.node_config.0.zone"), + resource.TestCheckResourceAttrSet("google_composer_environment.test", "config.0.node_config.0.machine_type")), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/environments/%s", envvar.GetTestProjectFromEnv(), "us-central1", envName), + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_basic(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +// Checks that all updatable fields can be updated in one apply +// (PATCH for Environments only is per-field) +func TestAccComposerEnvironment_update(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_basic(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_update(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. + { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_update(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +// Checks private environment creation for composer 1 and 2. 
+func TestAccComposerEnvironmentComposer1_private(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer1_private(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/environments/%s", envvar.GetTestProjectFromEnv(), "us-central1", envName), + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentComposer1_private(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironmentComposer2_private(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer2_private(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/environments/%s", envvar.GetTestProjectFromEnv(), "us-central1", envName), + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. + { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentComposer2_private(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +// Checks environment creation with minimum required information. 
+func TestAccComposerEnvironment_privateWithWebServerControl(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_privateWithWebServerControl(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComposerEnvironment_privateWithWebServerControlUpdated(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/environments/%s", envvar.GetTestProjectFromEnv(), "us-central1", envName), + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_privateWithWebServerControlUpdated(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_withDatabaseConfig(t *testing.T) { + t.Parallel() + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_databaseCfg(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_databaseCfgUpdated(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_databaseCfgUpdated(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_withWebServerConfig(t *testing.T) { + t.Parallel() + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + + grantServiceAgentsRole(t, "service-", []string{"gcp-sa-cloudbuild"}, "roles/cloudbuild.builds.builder") + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_webServerCfg(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_webServerCfgUpdated(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_webServerCfgUpdated(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_withEncryptionConfigComposer1(t *testing.T) { + t.Parallel() + + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + pid := envvar.GetTestProjectFromEnv() + grantServiceAgentsRole(t, "service-", allComposerServiceAgents(), "roles/cloudkms.cryptoKeyEncrypterDecrypter") + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_encryptionCfg(pid, "1", "1", envName, kms.CryptoKey.Name, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_encryptionCfg(pid, "1", "1", envName, kms.CryptoKey.Name, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_withEncryptionConfigComposer2(t *testing.T) { + t.Parallel() + + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + pid := envvar.GetTestProjectFromEnv() + grantServiceAgentsRole(t, "service-", allComposerServiceAgents(), "roles/cloudkms.cryptoKeyEncrypterDecrypter") + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_encryptionCfg(pid, "2", "2", envName, kms.CryptoKey.Name, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_encryptionCfg(pid, "2", "2", envName, kms.CryptoKey.Name, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_withMaintenanceWindow(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_maintenanceWindow(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_maintenanceWindow(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_maintenanceWindowUpdate(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_maintenanceWindow(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_maintenanceWindowUpdate(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_maintenanceWindowUpdate(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_ComposerV2(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_composerV2(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_composerV2(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_UpdateComposerV2ImageVersion(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_composerV250(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_composerV260(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_composerV260(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_UpdateComposerV2ResilienceMode(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_composerV2HighResilience(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_updateComposerV2StandardResilience(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_updateComposerV2StandardResilience(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + + + +func TestAccComposerEnvironment_ComposerV2HighResilience(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_composerV2HighResilience(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_composerV2HighResilience(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_UpdateComposerV2WithTriggerer(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_composerV2(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_composerV2WithDisabledTriggerer(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_composerV2WithDisabledTriggerer(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_UpdateComposerV2(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_composerV2(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_updateComposerV2(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_updateComposerV2(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_composerV2PrivateServiceConnect(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_composerV2PrivateServiceConnect(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_composerV2PrivateServiceConnect(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_composerV1MasterAuthNetworks(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_MasterAuthNetworks("1", "1", envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_MasterAuthNetworks("1", "1", envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_composerV2MasterAuthNetworks(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_MasterAuthNetworks("2", "2", envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_MasterAuthNetworks("2", "2", envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_composerV1MasterAuthNetworksUpdate(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_MasterAuthNetworks("1", "1", envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_MasterAuthNetworksUpdate("1", "1", envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_MasterAuthNetworksUpdate("1", "1", envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_composerV2MasterAuthNetworksUpdate(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_MasterAuthNetworks("2", "2", envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironment_MasterAuthNetworksUpdate("2", "2", envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO(dzarmola): Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_MasterAuthNetworksUpdate("2", "2", envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposer1Environment_withNodeConfig(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + serviceAccount := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposer1Environment_nodeCfg(envName, network, subnetwork, serviceAccount), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposer1Environment_nodeCfg(envName, network, subnetwork, serviceAccount), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposer2Environment_withNodeConfig(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + serviceAccount := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposer2Environment_nodeCfg(envName, network, subnetwork, serviceAccount), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposer2Environment_nodeCfg(envName, network, subnetwork, serviceAccount), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironmentAirflow2_withRecoveryConfig(t *testing.T) { + t.Parallel() + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_airflow2RecoveryCfg(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComposerEnvironmentUpdate_airflow2RecoveryCfg(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentUpdate_airflow2RecoveryCfg(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_withSoftwareConfig(t *testing.T) { + t.Parallel() + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_softwareCfg(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_softwareCfg(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironmentAirflow2_withSoftwareConfig(t *testing.T) { + t.Parallel() + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_airflow2SoftwareCfg(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComposerEnvironmentUpdate_airflow2SoftwareCfg(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. + { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentUpdate_airflow2SoftwareCfg(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +// Checks behavior of config for creation for attributes that must +// be updated during create. 
+func TestAccComposerEnvironment_withUpdateOnCreate(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_updateOnlyFields(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. + { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_updateOnlyFields(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironment_fixPyPiPackages(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + serviceAccount := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_fixPyPiPackages(envName, network, subnetwork, serviceAccount), + ExpectError: regexp.MustCompile("Failed to install pypi packages"), + }, + { + Config: 
testAccComposerEnvironment_fixPyPiPackagesUpdate(envName, network, subnetwork, serviceAccount), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// This bootstraps the IAM roles needed for the service agents. +func grantServiceAgentsRole(t *testing.T, prefix string, agentNames []string, role string) { + if acctest.BootstrapAllPSARole(t, prefix, agentNames, role) { + // Fail this test run because the policy needs time to reconcile. + t.Fatal("Stopping test because permissions were added.") + } +} + +func testAccComposerEnvironmentDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_composer_environment" { + continue + } + + idTokens := strings.Split(rs.Primary.ID, "/") + if len(idTokens) != 6 { + return fmt.Errorf("Invalid ID %q, expected format projects/{project}/regions/{region}/environments/{environment}", rs.Primary.ID) + } + envName := &composer.ComposerEnvironmentName{ + Project: idTokens[1], + Region: idTokens[3], + Environment: idTokens[5], + } + + _, err := config.NewComposerClient(config.UserAgent).Projects.Locations.Environments.Get(envName.ResourceName()).Do() + if err == nil { + return fmt.Errorf("environment %s still exists", envName.ResourceName()) + } + } + + return nil + } +} + +// Checks environment creation with custom bucket +func TestAccComposerEnvironment_customBucket(t *testing.T) { + t.Parallel() + + bucketName := fmt.Sprintf("%s-%d", testComposerBucketPrefix, acctest.RandInt(t)) + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironment_customBucket(bucketName, envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. + { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironment_customBucket(bucketName, envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +// Checks Composer 3 environment creation with new fields. +func TestAccComposerEnvironmentComposer3_basic(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer3_basic(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentComposer3_basic(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +// Checks Composer 3 specific updatable fields. +func TestAccComposerEnvironmentComposer3_update(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer3_basic(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironmentComposer3_update(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentComposer3_update(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironmentComposer3_withNetworkSubnetworkAndAttachment_expectError(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + networkAttachment := fmt.Sprintf("%s-%d", testComposerNetworkAttachmentPrefix, acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer3_withNetworkSubnetworkAndAttachment_expectError(envName, networkAttachment, network, subnetwork), + ExpectError: regexp.MustCompile("Conflicting configuration arguments"), + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: true, + Config: testAccComposerEnvironmentComposer3_basic(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironmentComposer3_withNetworkAttachment(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + networkAttachment := fmt.Sprintf("%s-%d", testComposerNetworkAttachmentPrefix, acctest.RandInt(t)) + fullFormNetworkAttachmentName := fmt.Sprintf("projects/%s/regions/%s/networkAttachments/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), networkAttachment) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer3_withNetworkAttachment(envName, networkAttachment, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + Config: testAccComposerEnvironmentComposer3_withNetworkAttachment(envName, fullFormNetworkAttachmentName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccComposerEnvironmentComposer3_updateWithNetworkAttachment(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + networkAttachment := fmt.Sprintf("%s-%d", testComposerNetworkAttachmentPrefix, acctest.RandInt(t)) + fullFormNetworkAttachmentName := fmt.Sprintf("projects/%s/regions/%s/networkAttachments/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), networkAttachment) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer3_withNetworkAndSubnetwork(envName, networkAttachment, network, subnetwork), + }, + { + Config: testAccComposerEnvironmentComposer3_withNetworkAttachment(envName, networkAttachment, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + Config: testAccComposerEnvironmentComposer3_withNetworkAttachment(envName, fullFormNetworkAttachmentName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccComposerEnvironmentComposer3_updateWithNetworkAndSubnetwork(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + networkAttachment := fmt.Sprintf("%s-%d", testComposerNetworkAttachmentPrefix, acctest.RandInt(t)) + fullFormNetworkAttachmentName := fmt.Sprintf("projects/%s/regions/%s/networkAttachments/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), networkAttachment) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer3_withNetworkAttachment(envName, networkAttachment, network, subnetwork), + }, + { + Config: testAccComposerEnvironmentComposer3_withNetworkAndSubnetwork(envName, networkAttachment, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + Config: testAccComposerEnvironmentComposer3_withNetworkAttachment(envName, fullFormNetworkAttachmentName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +// Checks Composer 3 specific updatable fields. +func TestAccComposerEnvironmentComposer3_updateToEmpty(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer3_basic(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironmentComposer3_empty(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. + { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentComposer3_empty(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +// Checks Composer 3 specific updatable fields. 
+func TestAccComposerEnvironmentComposer3_updateFromEmpty(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer3_empty(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironmentComposer3_update(envName, network, subnetwork), + }, + { + ResourceName: "google_composer_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentComposer3_update(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironmentComposer3_upgrade_expectError(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + network := fmt.Sprintf("%s-%d", testComposerNetworkPrefix, acctest.RandInt(t)) + subnetwork := network + "-1" + errorRegExp, _ := regexp.Compile(".*upgrade to composer 3 is not yet supported.*") + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer2_empty(envName, network, subnetwork), + }, + { + Config: testAccComposerEnvironmentComposer3_empty(envName, network, subnetwork), + ExpectError: errorRegExp, + }, + // This is a terrible clean-up step in order to get destroy to succeed, + // due to dangling firewall rules left by the Composer Environment blocking network deletion. + // TODO: Remove this check if firewall rules bug gets fixed by Composer. 
+ { + PlanOnly: true, + ExpectNonEmptyPlan: false, + Config: testAccComposerEnvironmentComposer2_empty(envName, network, subnetwork), + Check: testAccCheckClearComposerEnvironmentFirewalls(t, network), + }, + }, + }) +} + +func TestAccComposerEnvironmentComposer2_usesUnsupportedField_expectError(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + errorRegExp, _ := regexp.Compile(".*error in configuration, .* should only be used in Composer 3.*") + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer2_usesUnsupportedField(envName), + ExpectError: errorRegExp, + }, + }, + }) +} + +func TestAccComposerEnvironmentComposer3_usesUnsupportedField_expectError(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + errorRegExp, _ := regexp.Compile(".*error in configuration, .* should not be used in Composer 3.*") + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerEnvironmentComposer3_usesUnsupportedField(envName), + ExpectError: errorRegExp, + }, + }, + }) +} +{{- end }} + +func testAccComposerEnvironment_customBucket(bucketName, envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "test" { + name = "%s" + location = "us-central1" + force_destroy = true +} + +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork 
= google_compute_subnetwork.test.self_link + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + software_config { + image_version = "composer-2.4.2-airflow-2" + } + } + storage_config { + bucket = google_storage_bucket.test.name + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, bucketName, envName, network, subnetwork) +} + +func testAccComposerEnvironment_basic(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + machine_type = "n1-standard-1" + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + software_config { + image_version = "composer-1-airflow-2.3" + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironmentComposer1_private(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = 
google_compute_subnetwork.test.self_link + zone = "us-central1-a" + enable_ip_masq_agent = true + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + software_config { + image_version = "composer-1-airflow-2" + } + private_environment_config { + enable_private_endpoint = true + enable_privately_used_public_ips = true + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironmentComposer2_private(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + enable_ip_masq_agent = true + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.56.0.0/14" + } + } + software_config { + image_version = "composer-2-airflow-2" + } + private_environment_config { + connection_type = "VPC_PEERING" + enable_private_endpoint = true + enable_privately_used_public_ips = true + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} +`, name, network, subnetwork) +} + +func 
testAccComposerEnvironment_privateWithWebServerControl(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.56.0.0/14" + services_ipv4_cidr_block = "10.122.0.0/20" + } + } + private_environment_config { + enable_private_endpoint = false + web_server_ipv4_cidr_block = "172.30.240.0/24" + cloud_sql_ipv4_cidr_block = "10.32.0.0/12" + master_ipv4_cidr_block = "172.17.50.0/28" + } + software_config { + image_version = "composer-1-airflow-2" + } + web_server_network_access_control { + allowed_ip_range { + value = "192.168.0.1" + description = "my range1" + } + allowed_ip_range { + value = "0.0.0.0/0" + } + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_privateWithWebServerControlUpdated(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.56.0.0/14" + services_ipv4_cidr_block = "10.122.0.0/20" + } + } + private_environment_config { + 
enable_private_endpoint = false + web_server_ipv4_cidr_block = "172.30.240.0/24" + cloud_sql_ipv4_cidr_block = "10.32.0.0/12" + master_ipv4_cidr_block = "172.17.50.0/28" + } + software_config { + image_version = "composer-1-airflow-2" + } + web_server_network_access_control { + allowed_ip_range { + value = "192.168.0.1" + description = "my range1" + } + allowed_ip_range { + value = "0.0.0.0/0" + } + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_databaseCfg(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + database_config { + machine_type = "db-n1-standard-4" + } + software_config { + image_version = "composer-1-airflow-2" + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_databaseCfgUpdated(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = 
"us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + database_config { + machine_type = "db-n1-standard-8" + } + software_config { + image_version = "composer-1-airflow-2" + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_webServerCfg(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + image_version = "composer-1-airflow-2" + } + web_server_config { + machine_type = "composer-n1-webserver-4" + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_webServerCfgUpdated(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = 
google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + image_version = "composer-1-airflow-2" + } + web_server_config { + machine_type = "composer-n1-webserver-8" + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_encryptionCfg(pid, compVersion, airflowVersion, name, kmsKey, network, subnetwork string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_kms_crypto_key_iam_member" "iam" { + crypto_key_id = "%s" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gs-project-accounts.iam.gserviceaccount.com" +} +resource "google_composer_environment" "test" { + depends_on = [google_kms_crypto_key_iam_member.iam] + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } + + software_config { + image_version = "composer-%s-airflow-%s" + } + + encryption_config { + kms_key_name = "%s" + } + } +} +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, + pid, kmsKey, name, compVersion, airflowVersion, kmsKey, network, subnetwork) +} + + +func 
testAccComposerEnvironment_maintenanceWindow(envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + maintenance_window { + start_time = "2019-08-01T01:00:00Z" + end_time = "2019-08-01T07:00:00Z" + recurrence = "FREQ=WEEKLY;BYDAY=TU,WE" + } + } +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} + +`, envName, network, subnetwork) +} + +func testAccComposerEnvironment_maintenanceWindowUpdate(envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + maintenance_window { + start_time = "2019-08-01T01:00:00Z" + end_time = "2019-08-01T07:00:00Z" + recurrence = "FREQ=DAILY" + } + } +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} + +`, envName, network, subnetwork) +} + +func testAccComposerEnvironment_composerV2WithDisabledTriggerer(envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-east1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + + software_config { + image_version = "composer-2-airflow-2" + } + + workloads_config { + scheduler { + cpu = 1.25 + memory_gb = 2.5 + storage_gb = 5.4 + count = 2 + } + web_server { + cpu = 1.75 + memory_gb = 3.0 + 
storage_gb = 4.4 + } + worker { + cpu = 0.5 + memory_gb = 2.0 + storage_gb = 3.4 + min_count = 2 + max_count = 5 + } + } + environment_size = "ENVIRONMENT_SIZE_MEDIUM" + private_environment_config { + enable_private_endpoint = true + cloud_composer_network_ipv4_cidr_block = "10.3.192.0/24" + master_ipv4_cidr_block = "172.16.194.0/23" + cloud_sql_ipv4_cidr_block = "10.3.224.0/20" + } + } + +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-east1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} + +`, envName, network, subnetwork) +} + +func testAccComposerEnvironment_composerV2(envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-east1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + + software_config { + image_version = "composer-2-airflow-2" +{{- if ne $.TargetVersionName "ga" }} + cloud_data_lineage_integration { + enabled = true + } +{{- end }} + } + + workloads_config { + scheduler { + cpu = 1.25 + memory_gb = 2.5 + storage_gb = 5.4 + count = 2 + } + web_server { + cpu = 1.75 + memory_gb = 3.0 + storage_gb = 4.4 + } + worker { + cpu = 0.5 + memory_gb = 2.0 + storage_gb = 3.4 + min_count = 2 + max_count = 5 + } + triggerer { + cpu = 0.5 + memory_gb = 2.0 + count = 1 + } + } + database_config { + zone = "us-east1-c" + } + environment_size = "ENVIRONMENT_SIZE_MEDIUM" + data_retention_config { + task_logs_retention_config { + storage_mode = "CLOUD_LOGGING_ONLY" + } + } + private_environment_config { + enable_private_endpoint = true + cloud_composer_network_ipv4_cidr_block = "10.3.192.0/24" + master_ipv4_cidr_block = 
"172.16.194.0/23" + cloud_sql_ipv4_cidr_block = "10.3.224.0/20" + } + } + +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-east1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} + +`, envName, network, subnetwork) +} + +func testAccComposerEnvironment_composerV250(envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-east1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } + + software_config { + image_version = "composer-2.5.0-airflow-2.6.3" + } + } +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-east1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} + +`, envName, network, subnetwork) +} + +func testAccComposerEnvironment_composerV260(envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-east1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } + + software_config { + image_version = "composer-2.6.0-airflow-2.6.3" + } + } +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-east1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} + +`, envName, network, subnetwork) +} + +func 
testAccComposerEnvironment_composerV2HighResilience(envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-east1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } + + software_config { + image_version = "composer-2-airflow-2" + } + + workloads_config { + scheduler { + cpu = 1.25 + memory_gb = 2.5 + storage_gb = 5.4 + count = 2 + } + web_server { + cpu = 1.75 + memory_gb = 3.0 + storage_gb = 4.4 + } + worker { + cpu = 0.5 + memory_gb = 2.0 + storage_gb = 3.4 + min_count = 2 + max_count = 5 + } + } + environment_size = "ENVIRONMENT_SIZE_MEDIUM" + resilience_mode = "HIGH_RESILIENCE" + private_environment_config { + enable_private_endpoint = true + cloud_composer_network_ipv4_cidr_block = "10.3.192.0/24" + master_ipv4_cidr_block = "172.16.194.0/23" + cloud_sql_ipv4_cidr_block = "10.3.224.0/20" + } + } +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-east1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} + +`, envName, network, subnetwork) +} + +func testAccComposerEnvironment_composerV2PrivateServiceConnect(envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } + + software_config { + image_version = "composer-2-airflow-2" + } + private_environment_config { + cloud_composer_connection_subnetwork = google_compute_subnetwork.test.self_link + } + } + +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource 
"google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} + +`, envName, network, subnetwork) +} + +func testAccComposerEnvironment_MasterAuthNetworks(compVersion, airflowVersion, envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } + + software_config { + image_version = "composer-%s-airflow-%s" + } + + master_authorized_networks_config { + enabled = true + cidr_blocks { + display_name = "foo" + cidr_block = "8.8.8.8/32" + } + cidr_blocks { + cidr_block = "8.8.8.0/24" + } + } + } +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} + +`, envName, compVersion, airflowVersion, network, subnetwork) +} + + +func testAccComposerEnvironment_updateComposerV2StandardResilience(envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-east1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } + + software_config { + image_version = "composer-2-airflow-2" + } + + workloads_config { + scheduler { + cpu = 1.25 + memory_gb = 2.5 + storage_gb = 5.4 + count = 2 + } + web_server { + cpu = 1.75 + memory_gb = 3.0 + storage_gb = 4.4 + } + worker { + cpu = 0.5 + memory_gb = 2.0 + storage_gb = 3.4 + min_count = 2 + max_count = 5 + } + } + environment_size = "ENVIRONMENT_SIZE_MEDIUM" + resilience_mode = "STANDARD_RESILIENCE" 
+ private_environment_config { + enable_private_endpoint = true + cloud_composer_network_ipv4_cidr_block = "10.3.192.0/24" + master_ipv4_cidr_block = "172.16.194.0/23" + cloud_sql_ipv4_cidr_block = "10.3.224.0/20" + } + } +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-east1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} + +`, envName, network, subnetwork) +} + + +func testAccComposerEnvironment_MasterAuthNetworksUpdate(compVersion, airflowVersion, envName, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + } + + software_config { + image_version = "composer-%s-airflow-%s" + } + + master_authorized_networks_config { + enabled = true + cidr_blocks { + display_name = "foo_update" + cidr_block = "9.9.9.8/30" + } + } + } +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} + +`, envName, compVersion, airflowVersion, network, subnetwork) +} + + +func testAccComposerEnvironment_update(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_count = 4 + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + machine_type = "n1-standard-1" + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = 
"10.0.0.0/16" + } + } + + software_config { + image_version = "composer-1-airflow-2" + + airflow_config_overrides = { + core-load_example = "True" + } + + pypi_packages = { + numpy = "" + } + + env_variables = { + FOO = "bar" + } + } +{{- if ne $.TargetVersionName "ga" }} + web_server_config { + machine_type = "composer-n1-webserver-4" + } + + database_config { + machine_type = "db-n1-standard-4" + } +{{- end }} + } + + labels = { + foo = "bar" + anotherlabel = "boo" + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + + +func testAccComposerEnvironment_updateComposerV2(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-east1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + + software_config { + image_version = "composer-2-airflow-2" +{{- if ne $.TargetVersionName "ga" }} + cloud_data_lineage_integration { + enabled = false + } +{{- end }} + } + + workloads_config { + scheduler { + cpu = 2.25 + memory_gb = 3.5 + storage_gb = 6.4 + count = 3 + } + web_server { + cpu = 2.75 + memory_gb = 4.0 + storage_gb = 5.4 + } + worker { + cpu = 1.5 + memory_gb = 3.0 + storage_gb = 4.4 + min_count = 3 + max_count = 6 + } + triggerer { + cpu = 0.75 + memory_gb = 2 + count = 1 + } + } + environment_size = "ENVIRONMENT_SIZE_LARGE" + data_retention_config { + task_logs_retention_config { + storage_mode = "CLOUD_LOGGING_AND_CLOUD_STORAGE" + } + } + 
private_environment_config { + enable_private_endpoint = true + cloud_composer_network_ipv4_cidr_block = "10.3.192.0/24" + master_ipv4_cidr_block = "172.16.194.0/23" + cloud_sql_ipv4_cidr_block = "10.3.224.0/20" + } + } + +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-east1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} +`, name, network, subnetwork) +} + +func testAccComposer1Environment_nodeCfg(environment, network, subnetwork, serviceAccount string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + + service_account = google_service_account.test.name +{{- if ne $.TargetVersionName "ga" }} + max_pods_per_node = 33 +{{- end }} + ip_allocation_policy { + use_ip_aliases = true + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + tags = toset(["t1", "t2"]) + machine_type = "n2-highcpu-2" + disk_size_gb = 20 + oauth_scopes = toset(["https://www.googleapis.com/auth/cloud-platform","https://www.googleapis.com/auth/bigquery"]) + } + software_config { + image_version = "composer-1-airflow-2" + } + } + depends_on = [google_project_iam_member.composer-worker] +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} + +resource "google_service_account" "test" { + account_id = "%s" + display_name = "Test Service Account for Composer Environment" +} + +resource "google_project_iam_member" "composer-worker" 
{ + project = data.google_project.project.project_id + role = "roles/composer.worker" + member = "serviceAccount:${google_service_account.test.email}" +} +`, environment, network, subnetwork, serviceAccount) +} + +func testAccComposer2Environment_nodeCfg(environment, network, subnetwork, serviceAccount string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + + service_account = google_service_account.test.name + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + tags = toset(["t1", "t2"]) + } + software_config { + image_version = "composer-2-airflow-2" + } + } + depends_on = [google_project_iam_member.composer-worker] +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} + +resource "google_service_account" "test" { + account_id = "%s" + display_name = "Test Service Account for Composer Environment" +} + +resource "google_project_iam_member" "composer-worker" { + project = data.google_project.project.project_id + role = "roles/composer.worker" + member = "serviceAccount:${google_service_account.test.email}" +} +`, environment, network, subnetwork, serviceAccount) +} + +func testAccComposerEnvironment_airflow2RecoveryCfg(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + + 
software_config { + image_version = "composer-2-airflow-2" + } + + recovery_config { + scheduled_snapshots_config { + enabled = true + snapshot_location = "gs://example-bucket/environment_snapshots" + snapshot_creation_schedule = "0 4 * * *" + time_zone = "UTC+01" + } + } + } + +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironmentUpdate_airflow2RecoveryCfg(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + } + } + + software_config { + image_version = "composer-2-airflow-2" + } + + recovery_config { + scheduled_snapshots_config { + enabled = true + snapshot_location = "gs://example-bucket/environment_snapshots2" + snapshot_creation_schedule = "1 2 * * *" + time_zone = "UTC+02" + } + } + } + +} + +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link + private_ip_google_access = true +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_softwareCfg(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = 
google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + image_version = "composer-1-airflow-1" + python_version = "3" + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_updateOnlyFields(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + image_version = "composer-1-airflow-2" + pypi_packages = { + numpy = "" + } + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_airflow2SoftwareCfg(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + image_version = "composer-1-airflow-2" + scheduler_count = 2 + } + } +} + +// use a separate network to avoid 
conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironmentUpdate_airflow2SoftwareCfg(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.self_link + subnetwork = google_compute_subnetwork.test.self_link + zone = "us-central1-a" + } + software_config { + image_version = "composer-1-airflow-2" + scheduler_count = 3 + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironment_fixPyPiPackages(environment, network, subnetwork, serviceAccount string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + + software_config { + image_version = "composer-2-airflow-2" + + pypi_packages = { + "google-cloud-bigquery" = "==1" + } + } + + private_environment_config { + enable_private_endpoint = true + master_ipv4_cidr_block = "10.10.0.0/28" + } + + workloads_config { + scheduler { + cpu = 0.5 + memory_gb = 1.875 + storage_gb = 1 + count = 1 + } + web_server { + cpu = 0.5 + memory_gb = 1.875 + storage_gb = 1 + } + worker { + cpu = 0.5 + memory_gb = 1.875 + storage_gb = 1 + 
min_count = 1 + max_count = 3 + } + } + + environment_size = "ENVIRONMENT_SIZE_SMALL" + + node_config { + network = google_compute_network.test.id + subnetwork = google_compute_subnetwork.test.id + service_account = google_service_account.test.name + } + } + depends_on = [google_project_iam_member.composer-worker] +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} + +resource "google_service_account" "test" { + account_id = "%s" + display_name = "Test Service Account for Composer Environment" +} + +data "google_project" "project" {} + +resource "google_project_iam_member" "composer-worker" { + project = data.google_project.project.project_id + role = "roles/composer.worker" + member = "serviceAccount:${google_service_account.test.email}" +}`, environment, network, subnetwork, serviceAccount) +} + +func testAccComposerEnvironment_fixPyPiPackagesUpdate(environment, network, subnetwork, serviceAccount string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + + software_config { + image_version = "composer-2-airflow-2" + } + + private_environment_config { + enable_private_endpoint = true + master_ipv4_cidr_block = "10.10.0.0/28" + } + + workloads_config { + scheduler { + cpu = 0.5 + memory_gb = 1.875 + storage_gb = 1 + count = 1 + } + web_server { + cpu = 0.5 + memory_gb = 1.875 + storage_gb = 1 + } + worker { + cpu = 0.5 + memory_gb = 1.875 + storage_gb = 1 + min_count = 1 + max_count = 3 + } + } + + environment_size = "ENVIRONMENT_SIZE_SMALL" + + node_config { + network = google_compute_network.test.id + subnetwork = 
google_compute_subnetwork.test.id + service_account = google_service_account.test.name + } + } + depends_on = [google_project_iam_member.composer-worker] +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} + +resource "google_service_account" "test" { + account_id = "%s" + display_name = "Test Service Account for Composer Environment" +} + +data "google_project" "project" {} + +resource "google_project_iam_member" "composer-worker" { + project = data.google_project.project.project_id + role = "roles/composer.worker" + member = "serviceAccount:${google_service_account.test.email}" +} +`, environment, network, subnetwork, serviceAccount) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComposerEnvironmentComposer2_empty(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + software_config { + image_version = "composer-2-airflow-2" + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironmentComposer3_empty(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + software_config { + image_version = 
"composer-3-airflow-2" + } + node_config { + network = google_compute_network.test.id + subnetwork = google_compute_subnetwork.test.id + } + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironmentComposer2_usesUnsupportedField(name string) string { +return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + software_config { + image_version = "composer-2-airflow-2" + web_server_plugins_mode = "ENABLED" + } + } +} +`, name) +} + +func testAccComposerEnvironmentComposer3_usesUnsupportedField(name string) string { +return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + enable_ip_masq_agent = true + } + software_config { + image_version = "composer-3-airflow-2" + } + } +} +`, name) +} + +func testAccComposerEnvironmentComposer3_basic(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + composer_internal_ipv4_cidr_block = "100.64.128.0/20" + network = google_compute_network.test.id + subnetwork = google_compute_subnetwork.test.id + } + software_config { + image_version = "composer-3-airflow-2" + } + workloads_config { + dag_processor { + cpu = 1 + memory_gb = 2.5 + storage_gb = 2 + count = 1 + } + } + enable_private_environment = true + enable_private_builds_only = true + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default 
network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, network, subnetwork) +} + +func testAccComposerEnvironmentComposer3_update(name, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test_1.id + subnetwork = google_compute_subnetwork.test_1.id + composer_internal_ipv4_cidr_block = "100.64.128.0/20" + } + software_config { + web_server_plugins_mode = "DISABLED" + image_version = "composer-3-airflow-2" + } + workloads_config { + dag_processor { + cpu = 2 + memory_gb = 2 + storage_gb = 1 + count = 2 + } + } + enable_private_environment = false + enable_private_builds_only = false + } +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} + +resource "google_compute_network" "test_1" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test_1" { + name = "%s" + ip_cidr_range = "10.3.0.0/16" + region = "us-central1" + network = google_compute_network.test_1.self_link +} +`, name, network, subnetwork, network + "-update", subnetwork + "update") +} + +func testAccComposerEnvironmentComposer3_withNetworkAttachment(name, networkAttachment, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + 
node_config { + composer_network_attachment = google_compute_network_attachment.test.id + } + software_config { + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_compute_network_attachment" "test" { + name = "%s" + region = "us-central1" + subnetworks = [ google_compute_subnetwork.test-att.id ] + connection_preference = "ACCEPT_MANUAL" + // Composer 3 is modifying producer_accept_lists outside terraform, ignoring this change for now + lifecycle { + ignore_changes = [producer_accept_lists] + } +} + +resource "google_compute_network" "test-att" { + name = "%s-att" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test-att" { + name = "%s-att" + ip_cidr_range = "10.3.0.0/16" + region = "us-central1" + network = google_compute_network.test-att.self_link +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, networkAttachment, network, subnetwork, network, subnetwork) +} + +func testAccComposerEnvironmentComposer3_withNetworkAndSubnetwork(name, networkAttachment, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.id + subnetwork = google_compute_subnetwork.test.id + } + software_config { + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_compute_network_attachment" "test" { + name = "%s" + region = "us-central1" + subnetworks = [ google_compute_subnetwork.test-att.id ] + connection_preference = "ACCEPT_MANUAL" + // Composer 3 is modifying producer_accept_lists outside terraform, ignoring this 
change for now + lifecycle { + ignore_changes = [producer_accept_lists] + } +} + +resource "google_compute_network" "test-att" { + name = "%s-att" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test-att" { + name = "%s-att" + ip_cidr_range = "10.3.0.0/16" + region = "us-central1" + network = google_compute_network.test-att.self_link +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, networkAttachment, network, subnetwork, network, subnetwork) +} + +func testAccComposerEnvironmentComposer3_withNetworkSubnetworkAndAttachment_expectError(name, networkAttachment, network, subnetwork string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + region = "us-central1" + config { + node_config { + network = google_compute_network.test.id + subnetwork = google_compute_subnetwork.test.id + composer_network_attachment = google_compute_network_attachment.test.id + } + software_config { + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_compute_network_attachment" "test" { + name = "%s" + region = "us-central1" + subnetworks = [ google_compute_subnetwork.test.id ] + connection_preference = "ACCEPT_MANUAL" +} + +// use a separate network to avoid conflicts with other tests running in parallel +// that use the default network/subnet +resource "google_compute_network" "test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.test.self_link +} +`, name, networkAttachment, network, 
subnetwork) +} +{{- end }} + +// WARNING: This is not actually a check and is a terrible clean-up step because Composer Environments +// have a bug that hasn't been fixed. Composer will add firewalls to non-default networks for environments +// but will not remove them when the Environment is deleted. +// +// Destroy test step for config with a network will fail unless we clean up the firewalls before. +func testAccCheckClearComposerEnvironmentFirewalls(t *testing.T, networkName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + config.Project = envvar.GetTestProjectFromEnv() + network, err := config.NewComputeClient(config.UserAgent).Networks.Get(envvar.GetTestProjectFromEnv(), networkName).Do() + if err != nil { + return err + } + + foundFirewalls, err := config.NewComputeClient(config.UserAgent).Firewalls.List(config.Project).Do() + if err != nil { + return fmt.Errorf("Unable to list firewalls for network %q: %s", network.Name, err) + } + + var allErrors error + for _, firewall := range foundFirewalls.Items { + if !strings.HasPrefix(firewall.Name, testComposerNetworkPrefix) { + continue + } + log.Printf("[DEBUG] Deleting firewall %q for test-resource network %q", firewall.Name, network.Name) + op, err := config.NewComputeClient(config.UserAgent).Firewalls.Delete(config.Project, firewall.Name).Do() + if err != nil { + allErrors = multierror.Append(allErrors, + fmt.Errorf("Unable to delete firewalls for network %q: %s", network.Name, err)) + continue + } + + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, + "Sweeping test composer environment firewalls", config.UserAgent, 10) + if waitErr != nil { + allErrors = multierror.Append(allErrors, + fmt.Errorf("Error while waiting to delete firewall %q: %s", firewall.Name, waitErr)) + } + } + return allErrors + } +} diff --git 
a/mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_config_map_test.go.tmpl b/mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_config_map_test.go.tmpl new file mode 100644 index 000000000000..15261c54b26b --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_config_map_test.go.tmpl @@ -0,0 +1,170 @@ +package composer_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComposerUserWorkloadsConfigMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_basic(context), + }, + { + ResourceName: "google_composer_user_workloads_config_map.config_map", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_update(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_composer_user_workloads_config_map.config_map", "data.db_host", "dbhost:5432"), + resource.TestCheckNoResourceAttr("google_composer_user_workloads_config_map.config_map", "data.api_host"), + ), + }, + { + ResourceName: "google_composer_user_workloads_config_map.config_map", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_delete(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComposerUserWorkloadsConfigMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_basic(context), + }, + { + ResourceName: "google_composer_user_workloads_config_map.config_map", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_delete(context), + Check: resource.ComposeTestCheckFunc( + testAccComposerUserWorkloadsConfigMapDestroyed(t), + ), + }, + }, + }) +} + +func testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "environment" { + provider = google-beta + name = "tf-test-test-environment%{random_suffix}" + region = "us-central1" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_composer_user_workloads_config_map" "config_map" { + provider = google-beta + name = "tf-test-test-config-map%{random_suffix}" + region = "us-central1" + environment = google_composer_environment.environment.name + data = { + api_host: "apihost:443", + } +} +`, context) +} + +func testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "environment" { + provider = google-beta + name = "tf-test-test-environment%{random_suffix}" + region = "us-central1" + config { + software_config 
{ + image_version = "composer-3-airflow-2" + } + } +} + +resource "google_composer_user_workloads_config_map" "config_map" { + provider = google-beta + name = "tf-test-test-config-map%{random_suffix}" + region = "us-central1" + environment = google_composer_environment.environment.name + data = { + db_host: "dbhost:5432", + } +} +`, context) +} + +func testAccComposerUserWorkloadsConfigMap_composerUserWorkloadsConfigMapBasicExample_delete(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_composer_environment" "environment" { + provider = google-beta + name = "tf-test-test-environment%{random_suffix}" + region = "us-central1" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} +`, context) +} + +func testAccComposerUserWorkloadsConfigMapDestroyed(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_composer_user_workloads_config_map" { + continue + } + + idTokens := strings.Split(rs.Primary.ID, "/") + if len(idTokens) != 8 { + return fmt.Errorf("Invalid ID %q, expected format projects/{project}/regions/{region}/environments/{environment}/userWorkloadsConfigMaps/{name}", rs.Primary.ID) + } + _, err := config.NewComposerClient(config.UserAgent).Projects.Locations.Environments.UserWorkloadsConfigMaps.Get(rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("config map %s still exists", rs.Primary.ID) + } + } + + return nil + } +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_secret.go.tmpl b/mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_secret.go.tmpl new file mode 100644 index 000000000000..94ebd8182ebc --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_secret.go.tmpl @@ -0,0 +1,271 @@ +package composer 
+ +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "fmt" + "log" + "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/composer/v1" +{{- else }} + composer "google.golang.org/api/composer/v1beta1" +{{- end }} +) + +func ResourceComposerUserWorkloadsSecret() *schema.Resource { + return &schema.Resource{ + Create: resourceComposerUserWorkloadsSecretCreate, + Read: resourceComposerUserWorkloadsSecretRead, + Update: resourceComposerUserWorkloadsSecretUpdate, + Delete: resourceComposerUserWorkloadsSecretDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComposerUserWorkloadsSecretImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(time.Minute), + Update: schema.DefaultTimeout(time.Minute), + Delete: schema.DefaultTimeout(time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.DefaultProviderRegion, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the secret.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The location or Compute Engine region for the environment.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, + }, + "environment": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the environment.`, + }, + "data": { + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + Sensitive: true, + Description: `A map of the secret data.`, + }, + }, + } +} + +func resourceComposerUserWorkloadsSecretCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + secretName, err := resourceComposerUserWorkloadsSecretName(d, config) + if err != nil { + return err + } + + secret := &composer.UserWorkloadsSecret{ + Name: secretName.ResourceName(), + Data: tpgresource.ConvertStringMap(d.Get("data").(map[string]interface{})), + } + + log.Printf("[DEBUG] Creating new UserWorkloadsSecret %q", secretName.ParentName()) + resp, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.UserWorkloadsSecrets.Create(secretName.ParentName(), secret).Do() + if err != nil { + return fmt.Errorf("Error creating UserWorkloadsSecret: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}region{{"}}"}}/environments/{{"{{"}}environment{{"}}"}}/userWorkloadsSecrets/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + respJson, _ := resp.MarshalJSON() + log.Printf("[DEBUG] Finished creating UserWorkloadsSecret %q: %#v", d.Id(), string(respJson)) + + return resourceComposerUserWorkloadsSecretRead(d, meta) +} + +func resourceComposerUserWorkloadsSecretRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + secretName, err := 
resourceComposerUserWorkloadsSecretName(d, config) + if err != nil { + return err + } + + res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.UserWorkloadsSecrets.Get(secretName.ResourceName()).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("UserWorkloadsSecret %q", d.Id())) + } + + if err := d.Set("project", secretName.Project); err != nil { + return fmt.Errorf("Error setting UserWorkloadsSecret Project: %s", err) + } + if err := d.Set("region", secretName.Region); err != nil { + return fmt.Errorf("Error setting UserWorkloadsSecret Region: %s", err) + } + if err := d.Set("environment", secretName.Environment); err != nil { + return fmt.Errorf("Error setting UserWorkloadsSecret Environment: %s", err) + } + if err := d.Set("name", tpgresource.GetResourceNameFromSelfLink(res.Name)); err != nil { + return fmt.Errorf("Error setting UserWorkloadsSecret Name: %s", err) + } + return nil +} + +func resourceComposerUserWorkloadsSecretUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + secretName, err := resourceComposerUserWorkloadsSecretName(d, config) + if err != nil { + return err + } + + if d.HasChange("data") { + secret := &composer.UserWorkloadsSecret{ + Name: secretName.ResourceName(), + Data: tpgresource.ConvertStringMap(d.Get("data").(map[string]interface{})), + } + + secretJson, _ := secret.MarshalJSON() + log.Printf("[DEBUG] Updating UserWorkloadsSecret %q: %s", d.Id(), string(secretJson)) + + resp, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.UserWorkloadsSecrets.Update(secretName.ResourceName(), secret).Do() + if err != nil { + return err + } + + respJson, _ := resp.MarshalJSON() + log.Printf("[DEBUG] Finished updating UserWorkloadsSecret %q: %s", d.Id(), string(respJson)) + } + + return nil +} + +func 
resourceComposerUserWorkloadsSecretDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + secretName, err := resourceComposerUserWorkloadsSecretName(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Deleting UserWorkloadsSecret %q", d.Id()) + _, err = config.NewComposerClient(userAgent).Projects.Locations.Environments.UserWorkloadsSecrets.Delete(secretName.ResourceName()).Do() + if err != nil { + return err + } + log.Printf("[DEBUG] Finished deleting UserWorkloadsSecret %q", d.Id()) + + return nil +} + +func resourceComposerUserWorkloadsSecretImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/environments/(?P[^/]+)/userWorkloadsSecrets/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}region{{"}}"}}/environments/{{"{{"}}environment{{"}}"}}/userWorkloadsSecrets/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // retrieve "data" in advance, because Read function won't do it. 
+ userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + res, err := config.NewComposerClient(userAgent).Projects.Locations.Environments.UserWorkloadsSecrets.Get(id).Do() + if err != nil { + return nil, err + } + + if err := d.Set("data", res.Data); err != nil { + return nil, fmt.Errorf("Error setting UserWorkloadsSecret Data: %s", err) + } + + return []*schema.ResourceData{d}, nil +} + +func resourceComposerUserWorkloadsSecretName(d *schema.ResourceData, config *transport_tpg.Config) (*UserWorkloadsSecretName, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return nil, err + } + + return &UserWorkloadsSecretName{ + Project: project, + Region: region, + Environment: d.Get("environment").(string), + Secret: d.Get("name").(string), + }, nil +} + +type UserWorkloadsSecretName struct { + Project string + Region string + Environment string + Secret string +} + +func (n *UserWorkloadsSecretName) ResourceName() string { + return fmt.Sprintf("projects/%s/locations/%s/environments/%s/userWorkloadsSecrets/%s", n.Project, n.Region, n.Environment, n.Secret) +} + +func (n *UserWorkloadsSecretName) ParentName() string { + return fmt.Sprintf("projects/%s/locations/%s/environments/%s", n.Project, n.Region, n.Environment) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_secret_test.go.tmpl b/mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_secret_test.go.tmpl new file mode 100644 index 000000000000..d148981f8462 --- /dev/null +++ b/mmv1/third_party/terraform/services/composer/go/resource_composer_user_workloads_secret_test.go.tmpl @@ -0,0 +1,183 @@ +package composer_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "fmt" + "testing" + "strings" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/composer" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +const testComposerUserWorkloadsSecretPrefix = "tf-test-composer-secret" + +func TestAccComposerUserWorkloadsSecret_basic(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + secretName := fmt.Sprintf("%s-%d", testComposerUserWorkloadsSecretPrefix, acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerUserWorkloadsSecret_basic(envName, secretName, envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv()), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_composer_user_workloads_secret.test", "data.username"), + resource.TestCheckResourceAttrSet("google_composer_user_workloads_secret.test", "data.password"), + ), + }, + { + ResourceName: "google_composer_user_workloads_secret.test", + ImportState: true, + }, + }, + }) +} + +func TestAccComposerUserWorkloadsSecret_update(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + secretName := fmt.Sprintf("%s-%d", testComposerUserWorkloadsSecretPrefix, acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerUserWorkloadsSecret_basic(envName, 
secretName, envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv()), + }, + { + Config: testAccComposerUserWorkloadsSecret_update(envName, secretName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_composer_user_workloads_secret.test", "data.email"), + resource.TestCheckResourceAttrSet("google_composer_user_workloads_secret.test", "data.password"), + resource.TestCheckNoResourceAttr("google_composer_user_workloads_secret.test", "data.username"), + ), + }, + }, + }) +} + +func TestAccComposerUserWorkloadsSecret_delete(t *testing.T) { + t.Parallel() + + envName := fmt.Sprintf("%s-%d", testComposerEnvironmentPrefix, acctest.RandInt(t)) + secretName := fmt.Sprintf("%s-%d", testComposerUserWorkloadsSecretPrefix, acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComposerEnvironmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComposerUserWorkloadsSecret_basic(envName, secretName, envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv()), + }, + { + Config: testAccComposerUserWorkloadsSecret_delete(envName), + Check: resource.ComposeTestCheckFunc( + testAccComposerUserWorkloadsSecretDestroyed(t), + ), + }, + }, + }) +} + +func testAccComposerUserWorkloadsSecret_basic(envName, secretName, project, region string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} +resource "google_composer_user_workloads_secret" "test" { + environment = google_composer_environment.test.name + name = "%s" + project = "%s" + region = "%s" + data = { + username: base64encode("username"), + password: base64encode("password"), + } +} +`, envName, secretName, project, region) +} + +func testAccComposerUserWorkloadsSecret_update(envName, secretName 
string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} +resource "google_composer_user_workloads_secret" "test" { + environment = google_composer_environment.test.name + name = "%s" + data = { + email: base64encode("email"), + password: base64encode("password"), + } +} +`, envName, secretName) +} + +func testAccComposerUserWorkloadsSecret_delete(envName string) string { + return fmt.Sprintf(` +resource "google_composer_environment" "test" { + name = "%s" + config { + software_config { + image_version = "composer-3-airflow-2" + } + } +} +`, envName) +} + +func testAccComposerUserWorkloadsSecretDestroyed(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_composer_user_workloads_secret" { + continue + } + + idTokens := strings.Split(rs.Primary.ID, "/") + if len(idTokens) != 8 { + return fmt.Errorf("Invalid ID %q, expected format projects/{project}/regions/{region}/environments/{environment}/userWorkloadsSecrets/{name}", rs.Primary.ID) + } + secretName := &composer.UserWorkloadsSecretName{ + Project: idTokens[1], + Region: idTokens[3], + Environment: idTokens[5], + Secret: idTokens[7], + } + + _, err := config.NewComposerClient(config.UserAgent).Projects.Locations.Environments.UserWorkloadsSecrets.Get(secretName.ResourceName()).Do() + if err == nil { + return fmt.Errorf("secret %s still exists", secretName.ResourceName()) + } + } + + return nil + } +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/compute/go/compute_backend_service_helpers.go.tmpl b/mmv1/third_party/terraform/services/compute/go/compute_backend_service_helpers.go.tmpl new file mode 100644 index 000000000000..49e49eac8a8a --- /dev/null +++ 
package compute

import (
{{- if eq $.TargetVersionName "ga" }}
	"google.golang.org/api/compute/v1"
{{- else }}
	compute "google.golang.org/api/compute/v0.beta"
{{- end }}
)

// Incredibly hacky way of getting a reference to an SPR of the right type into
// the generated BackendService code. goimports will always import `compute`, so
// we need to provide the import manually to be able to switch libraries. Since
// this is a problem exactly once, just provide a function in a file where we
// *can* easily pick the imported copy, and return the correct struct.
func emptySecurityPolicyReference() *compute.SecurityPolicyReference {
	return &compute.SecurityPolicyReference{}
}

package compute

import (
	"fmt"
	"reflect"
	"strconv"

	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
	transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
	"google.golang.org/api/googleapi"

{{ if eq $.TargetVersionName `ga` }}
	"google.golang.org/api/compute/v1"
{{- else }}
	compute "google.golang.org/api/compute/v0.beta"
{{- end }}
)

// instanceSchedulingNodeAffinitiesElemSchema returns the element schema for
// the scheduling.node_affinities set: a key, an IN/NOT_IN operator, and a set
// of string values.
func instanceSchedulingNodeAffinitiesElemSchema() *schema.Resource {
	return &schema.Resource{
		Schema: map[string]*schema.Schema{
			"key": {
				Type:     schema.TypeString,
				Required: true,
			},
			"operator": {
				Type:     schema.TypeString,
				Required: true,
				ValidateFunc:
validation.StringInSlice([]string{"IN", "NOT_IN"}, false),
			},
			"values": {
				Type:     schema.TypeSet,
				Required: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
		},
	}
}

// expandAliasIpRanges converts alias_ip_range schema blocks to API values.
func expandAliasIpRanges(ranges []interface{}) []*compute.AliasIpRange {
	ipRanges := make([]*compute.AliasIpRange, 0, len(ranges))
	for _, raw := range ranges {
		data := raw.(map[string]interface{})
		ipRanges = append(ipRanges, &compute.AliasIpRange{
			IpCidrRange:         data["ip_cidr_range"].(string),
			SubnetworkRangeName: data["subnetwork_range_name"].(string),
		})
	}
	return ipRanges
}

// flattenAliasIpRange is the inverse of expandAliasIpRanges.
func flattenAliasIpRange(ranges []*compute.AliasIpRange) []map[string]interface{} {
	rangesSchema := make([]map[string]interface{}, 0, len(ranges))
	for _, ipRange := range ranges {
		rangesSchema = append(rangesSchema, map[string]interface{}{
			"ip_cidr_range":         ipRange.IpCidrRange,
			"subnetwork_range_name": ipRange.SubnetworkRangeName,
		})
	}
	return rangesSchema
}

// expandScheduling converts the single-element scheduling list from the
// schema into an API Scheduling struct, using ForceSendFields so that
// explicit zero values (e.g. automatic_restart = false) are still sent.
func expandScheduling(v interface{}) (*compute.Scheduling, error) {
	if v == nil {
		// We can't set default values for lists.
		return &compute.Scheduling{
			AutomaticRestart: googleapi.Bool(true),
		}, nil
	}

	ls := v.([]interface{})
	if len(ls) == 0 {
		// We can't set default values for lists
		return &compute.Scheduling{
			AutomaticRestart: googleapi.Bool(true),
		}, nil
	}

	if len(ls) > 1 || ls[0] == nil {
		return nil, fmt.Errorf("expected exactly one scheduling block")
	}

	original := ls[0].(map[string]interface{})
	scheduling := &compute.Scheduling{
		ForceSendFields: make([]string, 0, 4),
	}

	if v, ok := original["automatic_restart"]; ok {
		scheduling.AutomaticRestart = googleapi.Bool(v.(bool))
		scheduling.ForceSendFields = append(scheduling.ForceSendFields, "AutomaticRestart")
	}

	if v, ok := original["preemptible"]; ok {
		scheduling.Preemptible = v.(bool)
		scheduling.ForceSendFields = append(scheduling.ForceSendFields, "Preemptible")
	}

	if v, ok := original["on_host_maintenance"]; ok {
		scheduling.OnHostMaintenance = v.(string)
		scheduling.ForceSendFields = append(scheduling.ForceSendFields, "OnHostMaintenance")
	}

	if v, ok := original["node_affinities"]; ok && v != nil {
		naSet := v.(*schema.Set).List()
		// FIX: this previously used make(..., len(ls)), which allocated a
		// slice of length 1 (ls was validated above to have exactly one
		// element) and then appended after the pre-existing nil entry, so a
		// null node affinity was sent to the API. Allocate with zero length
		// and capacity for the actual affinity count instead.
		scheduling.NodeAffinities = make([]*compute.SchedulingNodeAffinity, 0, len(naSet))
		scheduling.ForceSendFields = append(scheduling.ForceSendFields, "NodeAffinities")
		for _, nodeAffRaw := range naSet {
			if nodeAffRaw == nil {
				continue
			}
			nodeAff := nodeAffRaw.(map[string]interface{})
			transformed := &compute.SchedulingNodeAffinity{
				Key:      nodeAff["key"].(string),
				Operator: nodeAff["operator"].(string),
				Values:   tpgresource.ConvertStringArr(nodeAff["values"].(*schema.Set).List()),
			}
			scheduling.NodeAffinities = append(scheduling.NodeAffinities, transformed)
		}
	}

	if v, ok := original["min_node_cpus"]; ok {
		scheduling.MinNodeCpus = int64(v.(int))
	}
	if v, ok := original["provisioning_model"]; ok {
		scheduling.ProvisioningModel = v.(string)
		scheduling.ForceSendFields = append(scheduling.ForceSendFields, "ProvisioningModel")
	}
	if v, ok := original["instance_termination_action"]; ok {
		scheduling.InstanceTerminationAction = v.(string)
		scheduling.ForceSendFields = append(scheduling.ForceSendFields, "InstanceTerminationAction")
	}
{{- if ne $.TargetVersionName "ga" }}
	if v, ok := original["max_run_duration"]; ok {
		transformedMaxRunDuration, err := expandComputeMaxRunDuration(v)
		if err != nil {
			return nil, err
		}
		scheduling.MaxRunDuration = transformedMaxRunDuration
		scheduling.ForceSendFields = append(scheduling.ForceSendFields, "MaxRunDuration")
	}
	if v, ok := original["maintenance_interval"]; ok {
		scheduling.MaintenanceInterval = v.(string)
	}

	if v, ok := original["on_instance_stop_action"]; ok {
		transformedOnInstanceStopAction, err := expandComputeOnInstanceStopAction(v)
		if err != nil {
			return nil, err
		}
		scheduling.OnInstanceStopAction = transformedOnInstanceStopAction
		scheduling.ForceSendFields = append(scheduling.ForceSendFields, "OnInstanceStopAction")
	}
{{- end }}
	if v, ok := original["local_ssd_recovery_timeout"]; ok {
		transformedLocalSsdRecoveryTimeout, err := expandComputeLocalSsdRecoveryTimeout(v)
		if err != nil {
			return nil, err
		}
		scheduling.LocalSsdRecoveryTimeout = transformedLocalSsdRecoveryTimeout
		scheduling.ForceSendFields = append(scheduling.ForceSendFields, "LocalSsdRecoveryTimeout")
	}
	return scheduling, nil
}

{{ if ne $.TargetVersionName `ga` -}}
// expandComputeMaxRunDuration converts the max_run_duration block into an
// API Duration.
func expandComputeMaxRunDuration(v interface{}) (*compute.Duration, error) {
	l := v.([]interface{})
	duration := compute.Duration{}
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})

	transformedNanos, err := expandComputeMaxRunDurationNanos(original["nanos"])
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		duration.Nanos = int64(transformedNanos.(int))
	}

	transformedSeconds,
err := expandComputeMaxRunDurationSeconds(original["seconds"])
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		duration.Seconds = int64(transformedSeconds.(int))
	}

	return &duration, nil
}

// Pass-through expander for the nanos component of max_run_duration.
func expandComputeMaxRunDurationNanos(v interface{}) (interface{}, error) {
	return v, nil
}

// Pass-through expander for the seconds component of max_run_duration.
func expandComputeMaxRunDurationSeconds(v interface{}) (interface{}, error) {
	return v, nil
}

// expandComputeOnInstanceStopAction converts the single-element
// on_instance_stop_action block into its API struct; returns nil for an
// empty list or a block without discard_local_ssd.
func expandComputeOnInstanceStopAction(v interface{}) (*compute.SchedulingOnInstanceStopAction, error){
	l := v.([]interface{})
	onInstanceStopAction := compute.SchedulingOnInstanceStopAction{}
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})

	if d, ok := original["discard_local_ssd"]; ok {
		onInstanceStopAction.DiscardLocalSsd = d.(bool)
	} else {
		return nil, nil
	}

	return &onInstanceStopAction, nil
}
{{- end }}

// expandComputeLocalSsdRecoveryTimeout converts the
// local_ssd_recovery_timeout block into an API Duration; empty values
// become nil (not sent).
func expandComputeLocalSsdRecoveryTimeout(v interface{}) (*compute.Duration, error) {
	l := v.([]interface{})
	duration := compute.Duration{}
	if len(l) == 0 || l[0] == nil {
		return nil, nil
	}
	raw := l[0]
	original := raw.(map[string]interface{})

	transformedNanos, err := expandComputeLocalSsdRecoveryTimeoutNanos(original["nanos"])
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedNanos); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		duration.Nanos = int64(transformedNanos.(int))
	}

	transformedSeconds, err := expandComputeLocalSsdRecoveryTimeoutSeconds(original["seconds"])
	if err != nil {
		return nil, err
	} else if val := reflect.ValueOf(transformedSeconds); val.IsValid() && !tpgresource.IsEmptyValue(val) {
		duration.Seconds = int64(transformedSeconds.(int))
	}
	return &duration, nil
}

// Pass-through expander for the nanos component of local_ssd_recovery_timeout.
func expandComputeLocalSsdRecoveryTimeoutNanos(v interface{}) (interface{}, error) {
	return v, nil
}

// Pass-through expander for the seconds component of
// local_ssd_recovery_timeout.
func expandComputeLocalSsdRecoveryTimeoutSeconds(v interface{}) (interface{}, error) {
	return v, nil
}

// flattenScheduling converts an API Scheduling struct back into the
// single-element list the schema expects.
func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} {
	schedulingMap := map[string]interface{}{
		"on_host_maintenance":         resp.OnHostMaintenance,
		"preemptible":                 resp.Preemptible,
		"min_node_cpus":               resp.MinNodeCpus,
		"provisioning_model":          resp.ProvisioningModel,
		"instance_termination_action": resp.InstanceTerminationAction,
	}

	if resp.AutomaticRestart != nil {
		schedulingMap["automatic_restart"] = *resp.AutomaticRestart
	}

{{ if ne $.TargetVersionName `ga` -}}
	if resp.MaxRunDuration != nil {
		schedulingMap["max_run_duration"] = flattenComputeMaxRunDuration(resp.MaxRunDuration)
	}
	if resp.MaintenanceInterval != "" {
		schedulingMap["maintenance_interval"] = resp.MaintenanceInterval
	}
	if resp.OnInstanceStopAction != nil {
		schedulingMap["on_instance_stop_action"] = flattenOnInstanceStopAction(resp.OnInstanceStopAction)
	}
{{- end }}

	if resp.LocalSsdRecoveryTimeout != nil {
		schedulingMap["local_ssd_recovery_timeout"] = flattenComputeLocalSsdRecoveryTimeout(resp.LocalSsdRecoveryTimeout)
	}

	// node_affinities is a set in the schema, so rebuild it with the same
	// hash function the schema uses.
	nodeAffinities := schema.NewSet(schema.HashResource(instanceSchedulingNodeAffinitiesElemSchema()), nil)
	for _, na := range resp.NodeAffinities {
		nodeAffinities.Add(map[string]interface{}{
			"key":      na.Key,
			"operator": na.Operator,
			"values":   schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(na.Values)),
		})
	}
	schedulingMap["node_affinities"] = nodeAffinities

	return []map[string]interface{}{schedulingMap}
}

{{ if ne $.TargetVersionName `ga` -}}
// flattenComputeMaxRunDuration is the inverse of expandComputeMaxRunDuration.
func flattenComputeMaxRunDuration(v *compute.Duration) []interface{} {
	if v == nil {
		return nil
	}
	transformed := make(map[string]interface{})
	transformed["nanos"] = v.Nanos
	transformed["seconds"] = v.Seconds
	return []interface{}{transformed}
}

// flattenOnInstanceStopAction is the inverse of expandComputeOnInstanceStopAction.
func flattenOnInstanceStopAction(v *compute.SchedulingOnInstanceStopAction) []interface{} {
	if v == nil {
		return nil
	}
	transformed := make(map[string]interface{})
	transformed["discard_local_ssd"] = v.DiscardLocalSsd
	return []interface{}{transformed}
}
{{- end }}

// flattenComputeLocalSsdRecoveryTimeout is the inverse of
// expandComputeLocalSsdRecoveryTimeout.
func flattenComputeLocalSsdRecoveryTimeout(v *compute.Duration) []interface{} {
	if v == nil {
		return nil
	}
	transformed := make(map[string]interface{})
	transformed["nanos"] = v.Nanos
	transformed["seconds"] = v.Seconds
	return []interface{}{transformed}
}

// flattenAccessConfigs converts API access configs to schema maps and also
// returns the first NAT IP found (used as the instance's external IP).
func flattenAccessConfigs(accessConfigs []*compute.AccessConfig) ([]map[string]interface{}, string) {
	flattened := make([]map[string]interface{}, len(accessConfigs))
	natIP := ""
	for i, ac := range accessConfigs {
		flattened[i] = map[string]interface{}{
			"nat_ip":       ac.NatIP,
			"network_tier": ac.NetworkTier,
		}
		if ac.SetPublicPtr {
			flattened[i]["public_ptr_domain_name"] = ac.PublicPtrDomainName
		}
		if natIP == "" {
			natIP = ac.NatIP
		}
		{{- if ne $.TargetVersionName "ga" }}
		if ac.SecurityPolicy != "" {
			flattened[i]["security_policy"] = ac.SecurityPolicy
		}
		{{- end }}
	}
	return flattened, natIP
}

// flattenIpv6AccessConfigs converts IPv6 access configs to schema maps;
// the prefix length is stored as a string in the schema.
func flattenIpv6AccessConfigs(ipv6AccessConfigs []*compute.AccessConfig) []map[string]interface{} {
	flattened := make([]map[string]interface{}, len(ipv6AccessConfigs))
	for i, ac := range ipv6AccessConfigs {
		flattened[i] = map[string]interface{}{
			"network_tier": ac.NetworkTier,
		}
		flattened[i]["public_ptr_domain_name"] = ac.PublicPtrDomainName
		flattened[i]["external_ipv6"] = ac.ExternalIpv6
		flattened[i]["external_ipv6_prefix_length"] = strconv.FormatInt(ac.ExternalIpv6PrefixLength, 10)
		flattened[i]["name"] = ac.Name
		{{- if ne $.TargetVersionName "ga" }}
		if ac.SecurityPolicy != "" {
			flattened[i]["security_policy"] = ac.SecurityPolicy
		}
		{{- end }}
	}
	return flattened
}

// flattenNetworkInterfaces converts API network interfaces to schema maps,
// also returning the region (from the subnetwork), the first internal IP,
// and the first external IP.
func flattenNetworkInterfaces(d *schema.ResourceData, config *transport_tpg.Config, networkInterfaces []*compute.NetworkInterface) ([]map[string]interface{}, string, string, string,
error) { + flattened := make([]map[string]interface{}, len(networkInterfaces)) + var region, internalIP, externalIP string + + for i, iface := range networkInterfaces { + var ac []map[string]interface{} + ac, externalIP = flattenAccessConfigs(iface.AccessConfigs) + + subnet, err := tpgresource.ParseSubnetworkFieldValue(iface.Subnetwork, d, config) + if err != nil { + return nil, "", "", "", err + } + region = subnet.Region + + flattened[i] = map[string]interface{}{ + "network_ip": iface.NetworkIP, + "network": tpgresource.ConvertSelfLinkToV1(iface.Network), + "subnetwork": tpgresource.ConvertSelfLinkToV1(iface.Subnetwork), + "subnetwork_project": subnet.Project, + "access_config": ac, + "alias_ip_range": flattenAliasIpRange(iface.AliasIpRanges), + "nic_type": iface.NicType, + "stack_type": iface.StackType, + "ipv6_access_config": flattenIpv6AccessConfigs(iface.Ipv6AccessConfigs), + "ipv6_address": iface.Ipv6Address, + "queue_count": iface.QueueCount, + } + // Instance template interfaces never have names, so they're absent + // in the instance template network_interface schema. We want to use the + // same flattening code for both resource types, so we avoid trying to + // set the name field when it's not set at the GCE end. + if iface.Name != "" { + flattened[i]["name"] = iface.Name + } + if internalIP == "" { + internalIP = iface.NetworkIP + } + + {{ if ne $.TargetVersionName `ga` -}} + if iface.NetworkAttachment != "" { + networkAttachment, err := tpgresource.GetRelativePath(iface.NetworkAttachment) + if err != nil { + return nil, "", "", "", err + } + flattened[i]["network_attachment"] = networkAttachment + } + {{- end }} + + {{ if ne $.TargetVersionName `ga` -}} + // the security_policy for a network_interface is found in one of its accessConfigs. 
+ if len(iface.AccessConfigs) > 0 && iface.AccessConfigs[0].SecurityPolicy != "" { + flattened[i]["security_policy"] = iface.AccessConfigs[0].SecurityPolicy + } else if len(iface.Ipv6AccessConfigs) > 0 && iface.Ipv6AccessConfigs[0].SecurityPolicy != "" { + flattened[i]["security_policy"] = iface.Ipv6AccessConfigs[0].SecurityPolicy + } + {{- end }} + } + return flattened, region, internalIP, externalIP, nil +} + +func expandAccessConfigs(configs []interface{}) []*compute.AccessConfig { + acs := make([]*compute.AccessConfig, len(configs)) + for i, raw := range configs { + acs[i] = &compute.AccessConfig{} + acs[i].Type = "ONE_TO_ONE_NAT" + if raw != nil { + data := raw.(map[string]interface{}) + acs[i].NatIP = data["nat_ip"].(string) + acs[i].NetworkTier = data["network_tier"].(string) + if ptr, ok := data["public_ptr_domain_name"]; ok && ptr != "" { + acs[i].SetPublicPtr = true + acs[i].PublicPtrDomainName = ptr.(string) + } + } + } + return acs +} + +func expandIpv6AccessConfigs(configs []interface{}) []*compute.AccessConfig { + iacs := make([]*compute.AccessConfig, len(configs)) + for i, raw := range configs { + iacs[i] = &compute.AccessConfig{} + if raw != nil { + data := raw.(map[string]interface{}) + iacs[i].NetworkTier = data["network_tier"].(string) + if ptr, ok := data["public_ptr_domain_name"]; ok && ptr != "" { + iacs[i].PublicPtrDomainName = ptr.(string) + } + if eip, ok := data["external_ipv6"]; ok && eip != "" { + iacs[i].ExternalIpv6 = eip.(string) + } + if eipl, ok := data["external_ipv6_prefix_length"]; ok && eipl != "" { + if strVal, ok := eipl.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + iacs[i].ExternalIpv6PrefixLength = intVal + } + } + } + if name, ok := data["name"]; ok && name != "" { + iacs[i].Name = name.(string) + } + iacs[i].Type = "DIRECT_IPV6" // Currently only type supported + } + } + return iacs +} + +func expandNetworkInterfaces(d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
([]*compute.NetworkInterface, error) { + configs := d.Get("network_interface").([]interface{}) + ifaces := make([]*compute.NetworkInterface, len(configs)) + for i, raw := range configs { + data := raw.(map[string]interface{}) + + + {{ if ne $.TargetVersionName `ga` -}} + var networkAttachment = "" + network := data["network"].(string) + subnetwork := data["subnetwork"].(string) + if networkAttachmentObj, ok := data["network_attachment"]; ok { + networkAttachment = networkAttachmentObj.(string) + } + // If networkAttachment is not specified in the resource, either network or subnetwork has to be specified. + if networkAttachment == "" && network == "" && subnetwork == "" { + return nil, fmt.Errorf("exactly one of network, subnetwork, or network_attachment must be provided") + } + + {{- else }} + + network := data["network"].(string) + subnetwork := data["subnetwork"].(string) + if network == "" && subnetwork == "" { + return nil, fmt.Errorf("exactly one of network or subnetwork must be provided") + } + + {{ end }} + + nf, err := tpgresource.ParseNetworkFieldValue(network, d, config) + if err != nil { + return nil, fmt.Errorf("cannot determine self_link for network %q: %s", network, err) + } + + subnetProjectField := fmt.Sprintf("network_interface.%d.subnetwork_project", i) + sf, err := tpgresource.ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config) + if err != nil { + return nil, fmt.Errorf("cannot determine self_link for subnetwork %q: %s", subnetwork, err) + } + + ifaces[i] = &compute.NetworkInterface{ + NetworkIP: data["network_ip"].(string), + Network: nf.RelativeLink(), + {{- if ne $.TargetVersionName "ga" }} + NetworkAttachment: networkAttachment, + {{- end }} + Subnetwork: sf.RelativeLink(), + AccessConfigs: expandAccessConfigs(data["access_config"].([]interface{})), + AliasIpRanges: expandAliasIpRanges(data["alias_ip_range"].([]interface{})), + NicType: data["nic_type"].(string), + StackType: data["stack_type"].(string), +
QueueCount: int64(data["queue_count"].(int)), + Ipv6AccessConfigs: expandIpv6AccessConfigs(data["ipv6_access_config"].([]interface{})), + Ipv6Address: data["ipv6_address"].(string), + } + } + return ifaces, nil +} + +func flattenServiceAccounts(serviceAccounts []*compute.ServiceAccount) []map[string]interface{} { + result := make([]map[string]interface{}, len(serviceAccounts)) + for i, serviceAccount := range serviceAccounts { + result[i] = map[string]interface{}{ + "email": serviceAccount.Email, + "scopes": schema.NewSet(tpgresource.StringScopeHashcode, tpgresource.ConvertStringArrToInterface(serviceAccount.Scopes)), + } + } + return result +} + +func expandServiceAccounts(configs []interface{}) []*compute.ServiceAccount { + accounts := make([]*compute.ServiceAccount, len(configs)) + for i, raw := range configs { + data := raw.(map[string]interface{}) + + accounts[i] = &compute.ServiceAccount{ + Email: data["email"].(string), + Scopes: tpgresource.CanonicalizeServiceScopes(tpgresource.ConvertStringSet(data["scopes"].(*schema.Set))), + } + + if accounts[i].Email == "" { + accounts[i].Email = "default" + } + } + return accounts +} + +func flattenGuestAccelerators(accelerators []*compute.AcceleratorConfig) []map[string]interface{} { + acceleratorsSchema := make([]map[string]interface{}, len(accelerators)) + for i, accelerator := range accelerators { + acceleratorsSchema[i] = map[string]interface{}{ + "count": accelerator.AcceleratorCount, + "type": accelerator.AcceleratorType, + } + } + return acceleratorsSchema +} + +func resourceInstanceTags(d tpgresource.TerraformResourceData) *compute.Tags { + // Calculate the tags + var tags *compute.Tags + if v := d.Get("tags"); v != nil { + vs := v.(*schema.Set) + tags = new(compute.Tags) + tags.Items = make([]string, vs.Len()) + for i, v := range vs.List() { + tags.Items[i] = v.(string) + } + + tags.Fingerprint = d.Get("tags_fingerprint").(string) + } + + return tags +} + +func expandShieldedVmConfigs(d 
tpgresource.TerraformResourceData) *compute.ShieldedInstanceConfig { + if _, ok := d.GetOk("shielded_instance_config"); !ok { + return nil + } + + prefix := "shielded_instance_config.0" + return &compute.ShieldedInstanceConfig{ + EnableSecureBoot: d.Get(prefix + ".enable_secure_boot").(bool), + EnableVtpm: d.Get(prefix + ".enable_vtpm").(bool), + EnableIntegrityMonitoring: d.Get(prefix + ".enable_integrity_monitoring").(bool), + ForceSendFields: []string{"EnableSecureBoot", "EnableVtpm", "EnableIntegrityMonitoring"}, + } +} + +func expandConfidentialInstanceConfig(d tpgresource.TerraformResourceData) *compute.ConfidentialInstanceConfig { + if _, ok := d.GetOk("confidential_instance_config"); !ok { + return nil + } + + prefix := "confidential_instance_config.0" + return &compute.ConfidentialInstanceConfig{ + EnableConfidentialCompute: d.Get(prefix + ".enable_confidential_compute").(bool), + {{- if ne $.TargetVersionName "ga" }} + ConfidentialInstanceType: d.Get(prefix + ".confidential_instance_type").(string), + {{- end }} + } +} + +func flattenConfidentialInstanceConfig(ConfidentialInstanceConfig *compute.ConfidentialInstanceConfig) []map[string]interface{} { + if ConfidentialInstanceConfig == nil { + return nil + } + + return []map[string]interface{}{{"{{"}} + "enable_confidential_compute": ConfidentialInstanceConfig.EnableConfidentialCompute, + {{- if ne $.TargetVersionName "ga" }} + "confidential_instance_type": ConfidentialInstanceConfig.ConfidentialInstanceType, + {{- end }} + {{"}}"}} +} + +func expandAdvancedMachineFeatures(d tpgresource.TerraformResourceData) *compute.AdvancedMachineFeatures { + if _, ok := d.GetOk("advanced_machine_features"); !ok { + return nil + } + + prefix := "advanced_machine_features.0" + return &compute.AdvancedMachineFeatures{ + EnableNestedVirtualization: d.Get(prefix + ".enable_nested_virtualization").(bool), + ThreadsPerCore: int64(d.Get(prefix + ".threads_per_core").(int)), + VisibleCoreCount: int64(d.Get(prefix + 
".visible_core_count").(int)), + } +} + +func flattenAdvancedMachineFeatures(AdvancedMachineFeatures *compute.AdvancedMachineFeatures) []map[string]interface{} { + if AdvancedMachineFeatures == nil { + return nil + } + return []map[string]interface{}{{"{{"}} + "enable_nested_virtualization": AdvancedMachineFeatures.EnableNestedVirtualization, + "threads_per_core": AdvancedMachineFeatures.ThreadsPerCore, + "visible_core_count": AdvancedMachineFeatures.VisibleCoreCount, + {{"}}"}} +} + +func flattenShieldedVmConfig(shieldedVmConfig *compute.ShieldedInstanceConfig) []map[string]bool { + if shieldedVmConfig == nil { + return nil + } + + return []map[string]bool{{"{{"}} + "enable_secure_boot": shieldedVmConfig.EnableSecureBoot, + "enable_vtpm": shieldedVmConfig.EnableVtpm, + "enable_integrity_monitoring": shieldedVmConfig.EnableIntegrityMonitoring, + {{"}}"}} +} + +func expandDisplayDevice(d tpgresource.TerraformResourceData) *compute.DisplayDevice { + if _, ok := d.GetOk("enable_display"); !ok { + return nil + } + return &compute.DisplayDevice{ + EnableDisplay: d.Get("enable_display").(bool), + ForceSendFields: []string{"EnableDisplay"}, + } +} + +func flattenEnableDisplay(displayDevice *compute.DisplayDevice) interface{} { + if displayDevice == nil { + return nil + } + + return displayDevice.EnableDisplay +} + +// Node affinity updates require a reboot +func schedulingHasChangeRequiringReboot(d *schema.ResourceData) bool { + o, n := d.GetChange("scheduling") + oScheduling := o.([]interface{})[0].(map[string]interface{}) + newScheduling := n.([]interface{})[0].(map[string]interface{}) + +{{ if ne $.TargetVersionName `ga` -}} + return hasNodeAffinitiesChanged(oScheduling, newScheduling) || hasMaxRunDurationChanged(oScheduling, newScheduling) +{{- else }} + return hasNodeAffinitiesChanged(oScheduling, newScheduling) +{{- end }} +} + +// Terraform doesn't correctly calculate changes on schema.Set, so we do it manually +// 
https://github.com/hashicorp/terraform-plugin-sdk/issues/98 +func schedulingHasChangeWithoutReboot(d *schema.ResourceData) bool { + if !d.HasChange("scheduling") { + // This doesn't work correctly, which is why this method exists + // But it is here for posterity + return false + } + o, n := d.GetChange("scheduling") + oScheduling := o.([]interface{})[0].(map[string]interface{}) + newScheduling := n.([]interface{})[0].(map[string]interface{}) + + if schedulingHasChangeRequiringReboot(d) { + return false + } + + if oScheduling["automatic_restart"] != newScheduling["automatic_restart"] { + return true + } + + if oScheduling["preemptible"] != newScheduling["preemptible"] { + return true + } + + if oScheduling["on_host_maintenance"] != newScheduling["on_host_maintenance"] { + return true + } + + if oScheduling["provisioning_model"] != newScheduling["provisioning_model"] { + return true + } + + if oScheduling["instance_termination_action"] != newScheduling["instance_termination_action"] { + return true + } + + return false +} + +{{ if ne $.TargetVersionName `ga` -}} +func hasMaxRunDurationChanged(oScheduling, nScheduling map[string]interface{}) bool { + oMrd := oScheduling["max_run_duration"].([]interface{}) + nMrd := nScheduling["max_run_duration"].([]interface{}) + + if (len(oMrd) == 0 || oMrd[0] == nil) && (len(nMrd) == 0 || nMrd[0] == nil) { + return false + } + if (len(oMrd) == 0 || oMrd[0] == nil) || (len(nMrd) == 0 || nMrd[0] == nil) { + return true + } + + oldMrd := oMrd[0].(map[string]interface{}) + newMrd := nMrd[0].(map[string]interface{}) + + if oldMrd["seconds"] != newMrd["seconds"] { + return true + } + if oldMrd["nanos"] != newMrd["nanos"] { + return true + } + + return false +} +{{- end }} + +func hasNodeAffinitiesChanged(oScheduling, newScheduling map[string]interface{}) bool { + oldNAs := oScheduling["node_affinities"].(*schema.Set).List() + newNAs := newScheduling["node_affinities"].(*schema.Set).List() + if len(oldNAs) != len(newNAs) { + return true 
+ } + for i := range oldNAs { + oldNodeAffinity := oldNAs[i].(map[string]interface{}) + newNodeAffinity := newNAs[i].(map[string]interface{}) + if oldNodeAffinity["key"] != newNodeAffinity["key"] { + return true + } + if oldNodeAffinity["operator"] != newNodeAffinity["operator"] { + return true + } + + // ConvertStringSet will sort the set into a slice, allowing DeepEqual + if !reflect.DeepEqual(tpgresource.ConvertStringSet(oldNodeAffinity["values"].(*schema.Set)), tpgresource.ConvertStringSet(newNodeAffinity["values"].(*schema.Set))) { + return true + } + } + + return false +} + +func expandReservationAffinity(d *schema.ResourceData) (*compute.ReservationAffinity, error) { + _, ok := d.GetOk("reservation_affinity") + if !ok { + return nil, nil + } + + prefix := "reservation_affinity.0" + reservationAffinityType := d.Get(prefix + ".type").(string) + + affinity := compute.ReservationAffinity{ + ConsumeReservationType: reservationAffinityType, + ForceSendFields: []string{"ConsumeReservationType"}, + } + + _, hasSpecificReservation := d.GetOk(prefix + ".specific_reservation") + if (reservationAffinityType == "SPECIFIC_RESERVATION") != hasSpecificReservation { + return nil, fmt.Errorf("specific_reservation must be set when reservation_affinity is SPECIFIC_RESERVATION, and not set otherwise") + } + + prefix = prefix + ".specific_reservation.0" + if hasSpecificReservation { + affinity.Key = d.Get(prefix + ".key").(string) + affinity.ForceSendFields = append(affinity.ForceSendFields, "Key", "Values") + + for _, v := range d.Get(prefix + ".values").([]interface{}) { + affinity.Values = append(affinity.Values, v.(string)) + } + } + + return &affinity, nil +} + +func flattenReservationAffinity(affinity *compute.ReservationAffinity) []map[string]interface{} { + if affinity == nil { + return nil + } + + flattened := map[string]interface{}{ + "type": affinity.ConsumeReservationType, + } + + if affinity.ConsumeReservationType == "SPECIFIC_RESERVATION" { + 
flattened["specific_reservation"] = []map[string]interface{}{{"{{"}} + "key": affinity.Key, + "values": affinity.Values, + {{"}}"}} + } + + return []map[string]interface{}{flattened} +} + +func expandNetworkPerformanceConfig(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (*compute.NetworkPerformanceConfig, error) { + configs, ok := d.GetOk("network_performance_config") + if !ok { + return nil, nil + } + + npcSlice := configs.([]interface{}) + if len(npcSlice) > 1 { + return nil, fmt.Errorf("cannot specify multiple network_performance_configs") + } + + if len(npcSlice) == 0 || npcSlice[0] == nil { + return nil, nil + } + npc := npcSlice[0].(map[string]interface{}) + return &compute.NetworkPerformanceConfig{ + TotalEgressBandwidthTier: npc["total_egress_bandwidth_tier"].(string), + }, nil +} + +func flattenNetworkPerformanceConfig(c *compute.NetworkPerformanceConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "total_egress_bandwidth_tier": c.TotalEgressBandwidthTier, + }, + } +} diff --git a/mmv1/third_party/terraform/services/compute/go/compute_instance_network_interface_helpers.go.tmpl b/mmv1/third_party/terraform/services/compute/go/compute_instance_network_interface_helpers.go.tmpl new file mode 100644 index 000000000000..1fa3d287ab1d --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/compute_instance_network_interface_helpers.go.tmpl @@ -0,0 +1,165 @@ +package compute + +import ( + "fmt" + + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +{{- if ne $.TargetVersionName "ga" }} + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +{{- end }} + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func 
computeInstanceDeleteAccessConfigs(d *schema.ResourceData, config *transport_tpg.Config, instNetworkInterface *compute.NetworkInterface, project, zone, userAgent, instanceName string) error { + // Delete any accessConfig that currently exists in instNetworkInterface + for _, ac := range instNetworkInterface.AccessConfigs { + op, err := config.NewComputeClient(userAgent).Instances.DeleteAccessConfig( + project, zone, instanceName, ac.Name, instNetworkInterface.Name).Do() + if err != nil { + return fmt.Errorf("Error deleting old access_config: %s", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "old access_config to delete", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + return nil +} + +func computeInstanceAddAccessConfigs(d *schema.ResourceData, config *transport_tpg.Config, instNetworkInterface *compute.NetworkInterface, accessConfigs []*compute.AccessConfig, project, zone, userAgent, instanceName string) error { + // Create new ones + for _, ac := range accessConfigs { + op, err := config.NewComputeClient(userAgent).Instances.AddAccessConfig(project, zone, instanceName, instNetworkInterface.Name, ac).Do() + if err != nil { + return fmt.Errorf("Error adding new access_config: %s", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "new access_config to add", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + return nil +} + +func computeInstanceCreateUpdateWhileStoppedCall(d *schema.ResourceData, config *transport_tpg.Config, networkInterfacePatchObj *compute.NetworkInterface, accessConfigs []*compute.AccessConfig, accessConfigsHaveChanged bool, index int, project, zone, userAgent, instanceName string) func(inst *compute.Instance) error { + + // Access configs' ip changes when the instance stops invalidating our fingerprint + // expect caller to re-validate instance before calling patch this is why we expect + // instance to be passed in + return 
func(instance *compute.Instance) error { + + instNetworkInterface := instance.NetworkInterfaces[index] + networkInterfacePatchObj.Fingerprint = instNetworkInterface.Fingerprint + + // Access config can run into some issues since we can't tell the difference between + // the users declared intent (config within their hcl file) and what we have inferred from the + // server (terraform state). Access configs contain an ip subproperty that can be incompatible + // with the subnetwork/network we are transitioning to. Due to this we only change access + // configs if we notice the configuration (user intent) changes. + if accessConfigsHaveChanged { + err := computeInstanceDeleteAccessConfigs(d, config, instNetworkInterface, project, zone, userAgent, instanceName) + if err != nil { + return err + } + } + + op, err := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instanceName, instNetworkInterface.Name, networkInterfacePatchObj).Do() + if err != nil { + return errwrap.Wrapf("Error updating network interface: {{"{{"}}err{{"}}"}}", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "network interface to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + + if accessConfigsHaveChanged { + err := computeInstanceAddAccessConfigs(d, config, instNetworkInterface, accessConfigs, project, zone, userAgent, instanceName) + if err != nil { + return err + } + } + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func computeInstanceAddSecurityPolicy(d *schema.ResourceData, config *transport_tpg.Config, securityPolicyWithNics map[string][]string, project, zone, userAgent, instanceName string) error { + for sp, nics := range securityPolicyWithNics { + req := &compute.InstancesSetSecurityPolicyRequest{ + NetworkInterfaces: nics, + SecurityPolicy: sp, + } + op, err := config.NewComputeClient(userAgent).Instances.SetSecurityPolicy(project, zone, instanceName, req).Do() + if err != nil { + 
return fmt.Errorf("Error adding security policy: %s", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "security_policy to add", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + return nil +} + +func computeInstanceMapSecurityPoliciesCreate(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string][]string, error) { + securityPolicies := make(map[string][]string) + configs := d.Get("network_interface").([]interface{}) + for i, raw := range configs { + data := raw.(map[string]interface{}) + secPolicy := data["security_policy"].(string) + err := validateSecurityPolicy(data) + if err != nil { + return securityPolicies, err + } + + if secPolicy != "" { + // Network interfaces use the nicN naming format, which is only known after the instance is created. + nicName := fmt.Sprintf("nic%d", i) + securityPolicies[secPolicy] = append(securityPolicies[secPolicy], nicName) + } + } + + return securityPolicies, nil +} + +func computeInstanceMapSecurityPoliciesUpdate(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string][]string, error) { + securityPolicies := make(map[string][]string) + configs := d.Get("network_interface").([]interface{}) + for i, raw := range configs { + data := raw.(map[string]interface{}) + secPolicy := data["security_policy"].(string) + err := validateSecurityPolicy(data) + if err != nil { + return securityPolicies, err + } + + // Network interfaces use the nicN naming format, which is only known after the instance is created.
+ nicName := fmt.Sprintf("nic%d", i) + // To cleanup the security policy from the interface we should send something like this on the api: {"":[nic0, nic1]} + securityPolicies[secPolicy] = append(securityPolicies[secPolicy], nicName) + } + + return securityPolicies, nil +} + +func validateSecurityPolicy(rawNetworkInterface map[string]interface{}) error { + acessConfigs := expandAccessConfigs(rawNetworkInterface["access_config"].([]interface{})) + ipv6AccessConfigs := expandIpv6AccessConfigs(rawNetworkInterface["ipv6_access_config"].([]interface{})) + secPolicy := rawNetworkInterface["security_policy"].(string) + + if secPolicy != "" && len(acessConfigs) == 0 && len(ipv6AccessConfigs) == 0 { + return fmt.Errorf("Error setting security policy to the instance since at least one access config must exist") + } + + return nil +} + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/compute_operation.go.tmpl b/mmv1/third_party/terraform/services/compute/go/compute_operation.go.tmpl new file mode 100644 index 000000000000..1c9b176575e5 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/compute_operation.go.tmpl @@ -0,0 +1,228 @@ +package compute + +import ( + "bytes" + "context" +{{- if ne $.TargetVersionName "ga" }} + "encoding/json" +{{- end }} + "errors" + "fmt" + "io" + "log" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +type ComputeOperationWaiter struct { + Service *compute.Service + Op *compute.Operation + Context context.Context + Project string +{{- if ne $.TargetVersionName "ga" }} + Parent string +{{- end }} +} + +func (w *ComputeOperationWaiter) State() string { + if w == nil || w.Op == nil { + return "" + } + + return 
w.Op.Status +} + +func (w *ComputeOperationWaiter) Error() error { + if w != nil && w.Op != nil && w.Op.Error != nil { + return ComputeOperationError(*w.Op.Error) + } + return nil +} + +func (w *ComputeOperationWaiter) IsRetryable(err error) bool { + if oe, ok := err.(ComputeOperationError); ok { + for _, e := range oe.Errors { + if e.Code == "RESOURCE_NOT_READY" { + return true + } + } + } + return false +} + +func (w *ComputeOperationWaiter) SetOp(op interface{}) error { + var ok bool + w.Op, ok = op.(*compute.Operation) + if !ok { + return fmt.Errorf("Unable to set operation. Bad type!") + } + return nil +} + +func (w *ComputeOperationWaiter) QueryOp() (interface{}, error) { + if w == nil || w.Op == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + if w.Context != nil { + select { + case <-w.Context.Done(): + log.Println("[WARN] request has been cancelled early") + return w.Op, errors.New("unable to finish polling, context has been cancelled") + default: + // default must be here to keep the previous case from blocking + } + } + if w.Op.Zone != "" { + zone := tpgresource.GetResourceNameFromSelfLink(w.Op.Zone) + return w.Service.ZoneOperations.Get(w.Project, zone, w.Op.Name).Do() + } else if w.Op.Region != "" { + region := tpgresource.GetResourceNameFromSelfLink(w.Op.Region) + return w.Service.RegionOperations.Get(w.Project, region, w.Op.Name).Do() +{{- if ne $.TargetVersionName "ga" }} + } else if w.Parent != "" { + return w.Service.GlobalOrganizationOperations.Get(w.Op.Name).ParentId(w.Parent).Do() +{{- end }} + } + return w.Service.GlobalOperations.Get(w.Project, w.Op.Name).Do() +} + +func (w *ComputeOperationWaiter) OpName() string { + if w == nil || w.Op == nil { + return " Compute Op" + } + + return w.Op.Name +} + +func (w *ComputeOperationWaiter) PendingStates() []string { + return []string{"PENDING", "RUNNING"} +} + +func (w *ComputeOperationWaiter) TargetStates() []string { + return []string{"DONE"} +} + +func 
ComputeOperationWaitTime(config *transport_tpg.Config, res interface{}, project, activity, userAgent string, timeout time.Duration) error { + op := &compute.Operation{} + err := tpgresource.Convert(res, op) + if err != nil { + return err + } + + w := &ComputeOperationWaiter{ + Service: config.NewComputeClient(userAgent), + Context: config.Context, + Op: op, + Project: project, + } + + if err := w.SetOp(op); err != nil { + return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} + +{{ if ne $.TargetVersionName `ga` -}} +func ComputeOrgOperationWaitTimeWithResponse(config *transport_tpg.Config, res interface{}, response *map[string]interface{}, parent, activity, userAgent string, timeout time.Duration) error { + op := &compute.Operation{} + err := tpgresource.Convert(res, op) + if err != nil { + return err + } + + w := &ComputeOperationWaiter{ + Service: config.NewComputeClient(userAgent), + Op: op, + Parent: parent, + } + + if err := w.SetOp(op); err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + e, err := json.Marshal(w.Op) + if err != nil { + return err + } + return json.Unmarshal(e, response) +} + +{{ end }} + +// ComputeOperationError wraps compute.OperationError and implements the +// error interface so it can be returned. 
+type ComputeOperationError compute.OperationError + +func (e ComputeOperationError) Error() string { + buf := bytes.NewBuffer(nil) + for _, err := range e.Errors { + writeOperationError(buf, err) + } + + return buf.String() +} + +const errMsgSep = "\n\n" + +func writeOperationError(w io.StringWriter, opError *compute.OperationErrorErrors) { + w.WriteString(opError.Message + "\n") + + var lm *compute.LocalizedMessage + var link *compute.HelpLink + + for _, ed := range opError.ErrorDetails { + if opError.Code == "QUOTA_EXCEEDED" && ed.QuotaInfo != nil { + w.WriteString("\tmetric name = " + ed.QuotaInfo.MetricName + "\n") + w.WriteString("\tlimit name = " + ed.QuotaInfo.LimitName + "\n") + if ed.QuotaInfo.Limit != 0 { + w.WriteString("\tlimit = " + fmt.Sprint(ed.QuotaInfo.Limit) + "\n") + } + if ed.QuotaInfo.FutureLimit != 0 { + w.WriteString("\tfuture limit = " + fmt.Sprint(ed.QuotaInfo.FutureLimit) + "\n") + w.WriteString("\trollout status = in progress\n") + } + if ed.QuotaInfo.Dimensions != nil { + w.WriteString("\tdimensions = " + fmt.Sprint(ed.QuotaInfo.Dimensions) + "\n") + } + break + } + if lm == nil && ed.LocalizedMessage != nil { + lm = ed.LocalizedMessage + } + + if link == nil && ed.Help != nil && len(ed.Help.Links) > 0 { + link = ed.Help.Links[0] + } + + if lm != nil && link != nil { + break + } + } + + if lm != nil && lm.Message != "" { + w.WriteString(errMsgSep) + w.WriteString(lm.Message + "\n") + } + + if link != nil { + w.WriteString(errMsgSep) + + if link.Description != "" { + w.WriteString(link.Description + "\n") + } + + if link.Url != "" { + w.WriteString(link.Url + "\n") + } + } +} diff --git a/mmv1/third_party/terraform/services/compute/go/compute_operation_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/compute_operation_test.go.tmpl new file mode 100644 index 000000000000..3be59325d9b0 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/compute_operation_test.go.tmpl @@ -0,0 +1,308 @@ +package compute + +import 
( + "fmt" + "strings" + "testing" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +const ( + topLevelMsg = "Top-level message." + localizedMsgTmpl = "LocalizedMessage%d message" + helpLinkDescriptionTmpl = "Help%dLink%d Description" + helpLinkUrlTmpl = "https://help%d.com/link%d" + quotaExceededMsg = "Quota DISKS_TOTAL_GB exceeded. Limit: 1100.0 in region us-central1." + quotaExceededCode = "QUOTA_EXCEEDED" + quotaMetricName = "compute.googleapis.com/disks_total_storage" + quotaLimitName = "DISKS-TOTAL-GB-per-project-region" +) + +var locales = []string{"en-US", "es-US", "es-ES", "es-MX", "de-DE"} + +func buildOperationError(numLocalizedMsg int, numHelpWithLinks []int) compute.OperationError { + opError := &compute.OperationErrorErrors{Message: topLevelMsg} + opErrorErrors := []*compute.OperationErrorErrors{opError} + + for n := 1; n <= numLocalizedMsg; n++ { + opError.ErrorDetails = append(opError.ErrorDetails, + &compute.OperationErrorErrorsErrorDetails{ + LocalizedMessage: &compute.LocalizedMessage{ + Locale: locales[n-1%len(locales)], + Message: formatLocalizedMsg(n), + }, + }) + } + + for i := 0; i < len(numHelpWithLinks); i++ { + errorDetail := &compute.OperationErrorErrorsErrorDetails{ + Help: &compute.Help{}, + } + + for nLinks := 1; nLinks <= numHelpWithLinks[i]; nLinks++ { + desc, url := formatLink(i+1, nLinks) + errorDetail.Help.Links = append(errorDetail.Help.Links, &compute.HelpLink{ + Description: desc, + Url: url, + }) + } + + opError.ErrorDetails = append(opError.ErrorDetails, errorDetail) + } + + return compute.OperationError{Errors: opErrorErrors} + +} + +func buildOperationErrorQuotaExceeded(withDetails bool, withDimensions bool, withFutureLimit bool) compute.OperationError { + opError := &compute.OperationErrorErrors{Message: quotaExceededMsg, Code: quotaExceededCode} + opErrorErrors := []*compute.OperationErrorErrors{opError} + if 
withDetails { + quotaInfo := &compute.QuotaExceededInfo{ + MetricName: quotaMetricName, + LimitName: quotaLimitName, + Limit: 1100, + } + if withFutureLimit { + quotaInfo.FutureLimit = 2200 + } + if withDimensions { + quotaInfo.Dimensions = map[string]string{"region": "us-central1"} + } + opError.ErrorDetails = append(opError.ErrorDetails, + &compute.OperationErrorErrorsErrorDetails{ + QuotaInfo: quotaInfo, + }) + } + + return compute.OperationError{Errors: opErrorErrors} +} + +func omitAlways(numLocalizedMsg int, numHelpWithLinks []int) []string { + var omits []string + + for n := 2; n <= numLocalizedMsg; n++ { + omits = append(omits, fmt.Sprintf("LocalizedMessage%d", n)) + } + + for i := 0; i < len(numHelpWithLinks); i++ { + for j := maxLinks(i); j < numHelpWithLinks[i]; j++ { + desc, url := formatLink(i+1, j+1) + omits = append(omits, desc, url) + } + } + + return omits + +} + +func maxLinks(helpIndex int) int { + if helpIndex == 0 { + return 1 + } + + return 0 +} + +func formatLocalizedMsg(localizedMsgNum int) string { + return fmt.Sprintf(localizedMsgTmpl, localizedMsgNum) +} + +func formatLink(helpNum, linkNum int) (string, string) { + return fmt.Sprintf(helpLinkDescriptionTmpl, helpNum, linkNum), fmt.Sprintf(helpLinkUrlTmpl, helpNum, linkNum) +} + +func TestComputeOperationError_Error(t *testing.T) { + testCases := []struct { + name string + input compute.OperationError + expectContains []string + expectOmits []string + }{ + { + name: "MessageOnly", + input: buildOperationError(0, []int{}), + expectContains: []string{ + "Top-level", + }, + expectOmits: append(omitAlways(0, []int{}), []string{ + "LocalizedMessage1", + "Help1Link1 Description", + "https://help1.com/link1", + }...), + }, + { + name: "WithLocalizedMessageAndNoHelp", + input: buildOperationError(1, []int{}), + expectContains: []string{ + "Top-level", + "LocalizedMessage1", + }, + expectOmits: append(omitAlways(1, []int{}), []string{ + "Help1Link1 Description", + "https://help1.com/link1", + 
}...),
+		},
+		{
+			name:  "WithLocalizedMessageAndHelp",
+			input: buildOperationError(1, []int{1}),
+			expectContains: []string{
+				"Top-level",
+				"LocalizedMessage1",
+				"Help1Link1 Description",
+				"https://help1.com/link1",
+			},
+			expectOmits: append(omitAlways(1, []int{1}), []string{}...),
+		},
+		{
+			name:  "WithNoLocalizedMessageAndHelp",
+			input: buildOperationError(0, []int{1}),
+			expectContains: []string{
+				"Top-level",
+				"Help1Link1 Description",
+				"https://help1.com/link1",
+			},
+			expectOmits: append(omitAlways(0, []int{1}), []string{
+				"LocalizedMessage1",
+			}...),
+		},
+		{
+			name:  "WithLocalizedMessageAndHelpWithTwoLinks",
+			input: buildOperationError(1, []int{2}),
+			expectContains: []string{
+				"Top-level",
+				"LocalizedMessage1",
+				"Help1Link1 Description",
+				"https://help1.com/link1",
+			},
+			expectOmits: append(omitAlways(1, []int{2}), []string{}...),
+		},
+		// The case below should never happen because the server should just send multiple links
+		// but the protobuf definition would allow it, so testing anyway.
+		{
+			name:  "WithLocalizedMessageAndTwoHelpsWithTwoLinks",
+			input: buildOperationError(1, []int{2, 2}),
+			expectContains: []string{
+				"Top-level",
+				"LocalizedMessage1",
+				"Help1Link1 Description",
+				"https://help1.com/link1",
+			},
+			expectOmits: append(omitAlways(1, []int{2, 2}), []string{}...),
+		},
+		// This should never happen because the server should never respond with the messages for
+		// two locales at once, but should rather take the locale as input to the API and serve
+		// the appropriate message for that locale. However, the protobuf definition would allow it,
+		// so we'll test for it. The second message in the list would be ignored.
+		{
+			name:  "WithTwoLocalizedMessageAndHelp",
+			input: buildOperationError(2, []int{1}),
+			expectContains: []string{
+				"Top-level",
+				"LocalizedMessage1",
+				"Help1Link1 Description",
+				"https://help1.com/link1",
+			},
+			expectOmits: append(omitAlways(2, []int{1}), []string{}...),
+		},
+		{
+			name:  "QuotaMessageOnly",
+			input: buildOperationErrorQuotaExceeded(false, false, false),
+			expectContains: []string{
+				"Quota DISKS_TOTAL_GB exceeded. Limit: 1100.0 in region us-central1.",
+			},
+			expectOmits: append(omitAlways(0, []int{}), []string{
+				"metric name = compute.googleapis.com/disks_total_storage",
+				"limit = 1100",
+			}...),
+		},
+		{
+			name:  "QuotaMessageWithDetailsNoDimensions",
+			input: buildOperationErrorQuotaExceeded(true, false, false),
+			expectContains: []string{
+				"Quota DISKS_TOTAL_GB exceeded. Limit: 1100.0 in region us-central1.",
+				"metric name = compute.googleapis.com/disks_total_storage",
+				"limit name = DISKS-TOTAL-GB-per-project-region",
+				"limit = 1100",
+			},
+			expectOmits: append(omitAlways(0, []int{}), []string{
+				"dimensions = map[region:us-central1]",
+			}...),
+		},
+		{
+			name:  "QuotaMessageWithDetailsWithDimensions",
+			input: buildOperationErrorQuotaExceeded(true, true, false),
+			expectContains: []string{
+				"Quota DISKS_TOTAL_GB exceeded. Limit: 1100.0 in region us-central1.",
+				"metric name = compute.googleapis.com/disks_total_storage",
+				"limit name = DISKS-TOTAL-GB-per-project-region",
+				"limit = 1100",
+				"dimensions = map[region:us-central1]",
+			},
+			expectOmits: append(omitAlways(0, []int{}), []string{
+				"LocalizedMessage1",
+				"Help1Link1 Description",
+				"https://help1.com/link1",
+			}...),
+		},
+		{
+			name:  "QuotaMessageWithDetailsWithFutureLimit",
+			input: buildOperationErrorQuotaExceeded(true, false, true),
+			expectContains: []string{
+				"Quota DISKS_TOTAL_GB exceeded. Limit: 1100.0 in region us-central1.",
+				"metric name = compute.googleapis.com/disks_total_storage",
+				"limit name = DISKS-TOTAL-GB-per-project-region",
+				"limit = 1100",
+				"future limit = 2200",
+				"rollout status = in progress",
+			},
+			expectOmits: append(omitAlways(0, []int{}), []string{
+				"LocalizedMessage1",
+				"Help1Link1 Description",
+				"https://help1.com/link1",
+			}...),
+		},
+		{
+			name:  "QuotaMessageWithDetailsWithDimensionsWithFutureLimit",
+			input: buildOperationErrorQuotaExceeded(true, true, true),
+			expectContains: []string{
+				"Quota DISKS_TOTAL_GB exceeded. Limit: 1100.0 in region us-central1.",
+				"metric name = compute.googleapis.com/disks_total_storage",
+				"limit name = DISKS-TOTAL-GB-per-project-region",
+				"limit = 1100",
+				"future limit = 2200",
+				"rollout status = in progress",
+				"dimensions = map[region:us-central1]",
+			},
+			expectOmits: append(omitAlways(0, []int{}), []string{
+				"LocalizedMessage1",
+				"Help1Link1 Description",
+				"https://help1.com/link1",
+			}...),
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			err := ComputeOperationError(tc.input)
+			str := err.Error()
+
+			for _, contains := range tc.expectContains {
+				if !strings.Contains(str, contains) {
+					t.Errorf("expected\n%s\nto contain %q, and it did not", str, contains)
+				}
+			}
+
+			for _, omits := range tc.expectOmits {
+				if strings.Contains(str, omits) {
+					// BUG FIX: the original message ended with "and did not",
+					// the opposite of what this branch actually detects.
+					t.Errorf("expected\n%s\nnot to contain %q, but it did", str, omits)
+				}
+			}
+		})
+	}
+}
diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_addresses.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_addresses.go.tmpl
new file mode 100644
index 000000000000..f17ecd35063a
--- /dev/null
+++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_addresses.go.tmpl
@@ -0,0 +1,207 @@
+package compute
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/hashicorp/terraform-provider-google/google/tpgresource"
+	
transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeAddresses() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceGoogleComputeAddressesRead, + + Schema: map[string]*schema.Schema{ + "addresses": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the IP address.`, + }, + "address": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address.`, + }, + "address_type": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address type.`, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, +{{- if ne $.TargetVersionName "ga" }} + "labels": { + Type: schema.TypeMap, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `Labels attached to this address`, + Computed: true, + }, +{{- end }} + }, + }, + }, + + "filter": { + Type: schema.TypeString, + Description: `Filter sets the optional parameter "filter": A filter expression that +filters resources listed in the response. The expression must specify +the field name, an operator, and the value that you want to use for +filtering. The value must be a string, a number, or a boolean. The +operator must be either "=", "!=", ">", "<", "<=", ">=" or ":". 
For +example, if you are filtering Compute Engine instances, you can +exclude instances named "example-instance" by specifying "name != +example-instance". The ":" operator can be used with string fields to +match substrings. For non-string fields it is equivalent to the "=" +operator. The ":*" comparison can be used to test whether a key has +been defined. For example, to find all objects with "owner" label +use: """ labels.owner:* """ You can also filter nested fields. For +example, you could specify "scheduling.automaticRestart = false" to +include instances only if they are not scheduled for automatic +restarts. You can use filtering on nested fields to filter based on +resource labels. To filter on multiple expressions, provide each +separate expression within parentheses. For example: """ +(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake") +""" By default, each expression is an "AND" expression. However, you +can include "AND" and "OR" expressions explicitly. For example: """ +(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") +AND (scheduling.automaticRestart = true) """`, + Optional: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Description: `Region that should be considered to search addresses. All regions are considered if missing.`, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The google project in which addresses are listed. 
Defaults to provider's configuration if missing.`, + }, + }, + } +} + +func dataSourceGoogleComputeAddressesRead(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return diag.FromErr(err) + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return diag.FromErr(err) + } + + allAddresses := make([]map[string]interface{}, 0) + + client := config.NewComputeClient(userAgent).Addresses + if region, has_region := d.GetOk("region"); has_region { + request := client.List(project, region.(string)) + if filter, has_filter := d.GetOk("filter"); has_filter { + request = request.Filter(filter.(string)) + } + err = request.Pages(context, func(addresses *compute.AddressList) error { + for _, address := range addresses.Items { + allAddresses = append(allAddresses, generateTfAddress(address)) + } + return nil + }) + } else { + request := client.AggregatedList(project) + if filter, has_filter := d.GetOk("filter"); has_filter { + request = request.Filter(filter.(string)) + } + err = request.Pages(context, func(addresses *compute.AddressAggregatedList) error { + for _, items := range addresses.Items { + for _, address := range items.Addresses { + allAddresses = append(allAddresses, generateTfAddress(address)) + } + } + return nil + }) + } + if err != nil { + return diag.FromErr(err) + } + + if err := d.Set("addresses", allAddresses); err != nil { + return diag.FromErr(fmt.Errorf("error setting addresses: %s", err)) + } + + if err := d.Set("project", project); err != nil { + return diag.FromErr(fmt.Errorf("error setting project: %s", err)) + } + d.SetId(computeId(project, d)) + return nil +} + +func generateTfAddress(address *compute.Address) map[string]interface{} { + return map[string]interface{}{ + "name": address.Name, + "address": address.Address, + "address_type": address.AddressType, 
+ "description": address.Description, + "region": regionFromUrl(address.Region), + "status": address.Status, + "self_link": address.SelfLink, +{{- if ne $.TargetVersionName "ga" }} + "labels": address.Labels, +{{- end }} + } +} + +func computeId(project string, d *schema.ResourceData) string { + region := "ALL" + filter := "ALL" + if p_region, has_region := d.GetOk("region"); has_region { + region = p_region.(string) + } + if p_filter, has_filter := d.GetOk("filter"); has_filter { + filter = p_filter.(string) + } + return fmt.Sprintf("%s-%s-%s", project, region, filter) +} + +func regionFromUrl(url string) string { + parts := strings.Split(url, "/") + if count := len(parts); count > 0 { + return parts[count-1] + } + return "" +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_addresses_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_addresses_test.go.tmpl new file mode 100644 index 000000000000..6f966110cd8b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_addresses_test.go.tmpl @@ -0,0 +1,189 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccDataSourceComputeAddresses(t *testing.T) { + t.Parallel() + + addressName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + region := "europe-west8" + region_bis := "asia-east1" + dsName := "regional_addresses" + dsFullName := fmt.Sprintf("data.google_compute_addresses.%s", dsName) + dsAllName := "all_addresses" + dsAllFullName := fmt.Sprintf("data.google_compute_addresses.%s", dsAllName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + 
Config: testAccDataSourceComputeAddressesConfig(addressName, region, region_bis), + Check: resource.ComposeTestCheckFunc( + testAccDataSourceComputeAddressesRegionSpecificCheck(t, addressName, dsFullName, region), + testAccDataSourceComputeAddressesAllRegionsCheck(t, addressName, dsAllFullName, region, region_bis), + ), + }, + }, + }) +} + +func testAccDataSourceComputeAddressesAllRegionsCheck(t *testing.T, address_name string, data_source_name string, expected_region string, expected_region_bis string) resource.TestCheckFunc { + return func(s *terraform.State) error { + expected_addresses := buildAddressesList(3, address_name, expected_region) + expected_addresses = append(expected_addresses, buildAddressesList(3, address_name, expected_region_bis)...) + + return testDataSourceAdressContains(s, data_source_name, expected_addresses) + } +} + +func testAccDataSourceComputeAddressesRegionSpecificCheck(t *testing.T, address_name string, data_source_name string, expected_region string) resource.TestCheckFunc { + return func(s *terraform.State) error { + expected_addresses := buildAddressesList(3, address_name, expected_region) + return testDataSourceAdressContains(s, data_source_name, expected_addresses) + } +} + +func testAccDataSourceComputeAddressesConfig(addressName, region, region_bis string) string { + return fmt.Sprintf(` +locals { + region = "%s" + region_bis = "%s" + address_name = "%s" +} + +resource "google_compute_address" "address" { + count = 3 + + region = local.region + name = "${local.address_name}-${local.region}-${count.index}" +{{- if ne $.TargetVersionName "ga" }} + labels = { + mykey = "myvalue" + } +{{- end }} +} + +resource "google_compute_address" "address_region_bis" { + count = 3 + + region = local.region_bis + name = "${local.address_name}-${local.region_bis}-${count.index}" +{{- if ne $.TargetVersionName "ga" }} + labels = { + mykey = "myvalue" + } +{{- end }} +} + +data "google_compute_addresses" "regional_addresses" { + filter = 
"name:${local.address_name}-*"
+  depends_on = [google_compute_address.address]
+  region = local.region
+}
+
+data "google_compute_addresses" "all_addresses" {
+  filter = "name:${local.address_name}-*"
+  depends_on = [google_compute_address.address, google_compute_address.address_region_bis]
+}
+`, region, region_bis, addressName)
+}
+
+// expectedAddress describes one address the data source is expected to list.
+type expectedAddress struct {
+	name   string
+	region string
+}
+
+// checkAddressMatch reports whether the address at the given index of the
+// data source's flattened attributes matches r. A name mismatch returns
+// (false, nil) so the caller can try the next candidate; a matching name
+// with inconsistent attributes returns an error.
+func (r expectedAddress) checkAddressMatch(index int, attrs map[string]string) (bool, error) {
+	map_name := fmt.Sprintf("addresses.%d.name", index)
+	address_name := attrs[map_name]
+
+	if address_name != r.name {
+		return false, nil
+	}
+
+	map_region := fmt.Sprintf("addresses.%d.region", index)
+	region, found := attrs[map_region]
+	if !found {
+		// BUG FIX: message read "doesn't exists".
+		return false, fmt.Errorf("%s doesn't exist", map_region)
+	}
+	if region != r.region {
+		return false, fmt.Errorf("Unexpected region: got %s expected %s", region, r.region)
+	}
+
{{ if ne $.TargetVersionName `ga` -}}
+	map_label := fmt.Sprintf("addresses.%d.labels.mykey", index)
+	label_value, found := attrs[map_label]
+	if !found {
+		return false, fmt.Errorf("label with key 'mykey' not found for %s", address_name)
+	}
+	if label_value != "myvalue" {
+		return false, fmt.Errorf("label value of 'mykey' not equal to 'myvalue' for %s, got %s", address_name, label_value)
+	}
{{- end }}
+
+	return true, nil
+}
+
+// testDataSourceAdressContains verifies that the named data source lists
+// exactly the given addresses, in any order.
+func testDataSourceAdressContains(state *terraform.State, data_source_name string, addresses []expectedAddress) error {
+	ds, ok := state.RootModule().Resources[data_source_name]
+	if !ok {
+		return fmt.Errorf("root module has no resource called %s", data_source_name)
+	}
+
+	ds_attr := ds.Primary.Attributes
+
+	addresses_length := len(addresses)
+
+	if ds_attr["addresses.#"] != fmt.Sprintf("%d", addresses_length) {
+		return fmt.Errorf("addresses.# is not equal to %d", addresses_length)
+	}
+
+	for address_index := 0; address_index < addresses_length; address_index++ {
+		has_match := false
+		for j := 0; j < 
len(addresses); j++ { + match, err := addresses[j].checkAddressMatch(address_index, ds_attr) + if err != nil { + return err + } else { + if match { + has_match = true + addresses = removeExpectedAddress(addresses, j) + break + } + } + } + if !has_match { + return fmt.Errorf("unexpected address at index %d", address_index) // TODO improve + } + } + + if len(addresses) != 0 { + return fmt.Errorf("%+v not found in data source", addresses) + } + return nil +} + +func buildAddressesList(numberofAddresses int, addressName string, region string) []expectedAddress { + var addresses []expectedAddress + for i := 0; i < numberofAddresses; i++ { + addresses = append(addresses, expectedAddress{ + name: fmt.Sprintf("%s-%s-%d", addressName, region, i), + region: region, + }) + } + return addresses +} + +func removeExpectedAddress(s []expectedAddress, i int) []expectedAddress { + s[i] = s[len(s)-1] + return s[:len(s)-1] +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_image.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_image.go.tmpl new file mode 100644 index 000000000000..a07ae1367a0a --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_image.go.tmpl @@ -0,0 +1,253 @@ +package compute + +import ( + "fmt" + "log" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeImage() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeImageRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + 
ExactlyOneOf: []string{"name", "family", "filter"}, + }, + "family": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ExactlyOneOf: []string{"name", "family", "filter"}, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ExactlyOneOf: []string{"name", "family", "filter"}, + }, + "archive_size_bytes": { + Type: schema.TypeInt, + Computed: true, + }, + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "disk_size_gb": { + Type: schema.TypeInt, + Computed: true, + }, + "image_id": { + Type: schema.TypeString, + Computed: true, + }, + "image_encryption_key_sha256": { + Type: schema.TypeString, + Computed: true, + }, + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + }, + "labels": { + Type: schema.TypeMap, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Computed: true, + }, + "licenses": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Computed: true, + }, + "source_disk": { + Type: schema.TypeString, + Computed: true, + }, + "source_disk_encryption_key_sha256": { + Type: schema.TypeString, + Computed: true, + }, + "source_disk_id": { + Type: schema.TypeString, + Computed: true, + }, + "source_image_id": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + }, + "most_recent": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + }, + } +} + +func dataSourceGoogleComputeImageRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return err + } + + var image *compute.Image + if v, ok := d.GetOk("name"); ok { + log.Printf("[DEBUG] Fetching image %s", v.(string)) + image, err = config.NewComputeClient(userAgent).Images.Get(project, v.(string)).Do() + log.Printf("[DEBUG] Fetched image %s", v.(string)) + } else if v, ok := d.GetOk("family"); ok { + log.Printf("[DEBUG] Fetching latest non-deprecated image from family %s", v.(string)) + image, err = config.NewComputeClient(userAgent).Images.GetFromFamily(project, v.(string)).Do() + log.Printf("[DEBUG] Fetched latest non-deprecated image from family %s", v.(string)) + } else if v, ok := d.GetOk("filter"); ok { + images, err := config.NewComputeClient(userAgent).Images.List(project).Filter(v.(string)).Do() + if err != nil { + return fmt.Errorf("error retrieving list of images: %s", err) + } + + if len(images.Items) == 1 { + for _, im := range images.Items { + image = im + } + } else if mr, ok := d.GetOk("most_recent"); len(images.Items) >= 1 && ok && mr.(bool) { + most_recent := time.UnixMicro(0) + for _, im := range images.Items { + parsedTS, err := time.Parse(time.RFC3339, im.CreationTimestamp) + if err != nil { + return fmt.Errorf("error parsing creation timestamp: %w", err) + } + + if parsedTS.After(most_recent) { + most_recent = parsedTS + image = im + } + } + } else { + return fmt.Errorf("your filter has returned more than one image or no image. 
Please refine your filter to return exactly one image") + } + } else { + return fmt.Errorf("one of name, family or filters must be set") + } + + if err != nil { + return fmt.Errorf("error retrieving image information: %s", err) + } + + var ieks256, sdeks256 string + + if image.SourceDiskEncryptionKey != nil { + sdeks256 = image.SourceDiskEncryptionKey.Sha256 + } + + if image.ImageEncryptionKey != nil { + ieks256 = image.ImageEncryptionKey.Sha256 + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("name", image.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("family", image.Family); err != nil { + return fmt.Errorf("Error setting family: %s", err) + } + if err := d.Set("archive_size_bytes", image.ArchiveSizeBytes); err != nil { + return fmt.Errorf("Error setting archive_size_bytes: %s", err) + } + if err := d.Set("creation_timestamp", image.CreationTimestamp); err != nil { + return fmt.Errorf("Error setting creation_timestamp: %s", err) + } + if err := d.Set("description", image.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("disk_size_gb", image.DiskSizeGb); err != nil { + return fmt.Errorf("Error setting disk_size_gb: %s", err) + } + if err := d.Set("image_id", strconv.FormatUint(image.Id, 10)); err != nil { + return fmt.Errorf("Error setting image_id: %s", err) + } + if err := d.Set("image_encryption_key_sha256", ieks256); err != nil { + return fmt.Errorf("Error setting image_encryption_key_sha256: %s", err) + } + if err := d.Set("label_fingerprint", image.LabelFingerprint); err != nil { + return fmt.Errorf("Error setting label_fingerprint: %s", err) + } + if err := d.Set("labels", image.Labels); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := d.Set("licenses", image.Licenses); err != nil { + return fmt.Errorf("Error setting licenses: %s", err) + 
} + if err := d.Set("self_link", image.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("source_disk", image.SourceDisk); err != nil { + return fmt.Errorf("Error setting source_disk: %s", err) + } + if err := d.Set("source_disk_encryption_key_sha256", sdeks256); err != nil { + return fmt.Errorf("Error setting source_disk_encryption_key_sha256: %s", err) + } + if err := d.Set("source_disk_id", image.SourceDiskId); err != nil { + return fmt.Errorf("Error setting source_disk_id: %s", err) + } + if err := d.Set("source_image_id", image.SourceImageId); err != nil { + return fmt.Errorf("Error setting source_image_id: %s", err) + } + if err := d.Set("status", image.Status); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/global/images/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance.go b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance.go new file mode 100644 index 000000000000..f38a1f8971f2 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance.go @@ -0,0 +1,204 @@ +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleComputeInstance() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeInstance().Schema) + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name", "self_link", "project", "zone") + + return 
&schema.Resource{ + Read: dataSourceGoogleComputeInstanceRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, zone, name, err := tpgresource.GetZonalResourcePropertiesFromSelfLinkOrSchema(d, config) + if err != nil { + return err + } + + id := fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, zone, name) + + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, name).Do() + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("Instance %s", name), id) + } + + md := flattenMetadataBeta(instance.Metadata) + if err = d.Set("metadata", md); err != nil { + return fmt.Errorf("error setting metadata: %s", err) + } + + if err := d.Set("can_ip_forward", instance.CanIpForward); err != nil { + return fmt.Errorf("Error setting can_ip_forward: %s", err) + } + if err := d.Set("machine_type", tpgresource.GetResourceNameFromSelfLink(instance.MachineType)); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + + // Set the networks + // Use the first external IP found for the default connection info. + networkInterfaces, _, internalIP, externalIP, err := flattenNetworkInterfaces(d, config, instance.NetworkInterfaces) + if err != nil { + return err + } + if err := d.Set("network_interface", networkInterfaces); err != nil { + return err + } + + // Fall back on internal ip if there is no external ip. This makes sense in the situation where + // terraform is being used on a cloud instance and can therefore access the instances it creates + // via their internal ips. 
+ sshIP := externalIP + if sshIP == "" { + sshIP = internalIP + } + + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": sshIP, + }) + + // Set the metadata fingerprint if there is one. + if instance.Metadata != nil { + if err := d.Set("metadata_fingerprint", instance.Metadata.Fingerprint); err != nil { + return fmt.Errorf("Error setting metadata_fingerprint: %s", err) + } + } + + // Set the tags fingerprint if there is one. + if instance.Tags != nil { + if err := d.Set("tags_fingerprint", instance.Tags.Fingerprint); err != nil { + return fmt.Errorf("Error setting tags_fingerprint: %s", err) + } + if err := d.Set("tags", tpgresource.ConvertStringArrToInterface(instance.Tags.Items)); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + } + + if err := d.Set("labels", instance.Labels); err != nil { + return err + } + + if err := d.Set("terraform_labels", instance.Labels); err != nil { + return err + } + + if instance.LabelFingerprint != "" { + if err := d.Set("label_fingerprint", instance.LabelFingerprint); err != nil { + return fmt.Errorf("Error setting label_fingerprint: %s", err) + } + } + + attachedDisks := []map[string]interface{}{} + scratchDisks := []map[string]interface{}{} + for _, disk := range instance.Disks { + if disk.Boot { + err = d.Set("boot_disk", flattenBootDisk(d, disk, config)) + if err != nil { + return err + } + } else if disk.Type == "SCRATCH" { + scratchDisks = append(scratchDisks, flattenScratchDisk(disk)) + } else { + di := map[string]interface{}{ + "source": tpgresource.ConvertSelfLinkToV1(disk.Source), + "device_name": disk.DeviceName, + "mode": disk.Mode, + } + if key := disk.DiskEncryptionKey; key != nil { + di["disk_encryption_key_sha256"] = key.Sha256 + di["kms_key_self_link"] = key.KmsKeyName + } + attachedDisks = append(attachedDisks, di) + } + } + // Remove nils from map in case there were disks in the config that were not present on read; + // i.e. 
a disk was detached out of band + ads := []map[string]interface{}{} + for _, d := range attachedDisks { + if d != nil { + ads = append(ads, d) + } + } + + err = d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts)) + if err != nil { + return err + } + + err = d.Set("scheduling", flattenScheduling(instance.Scheduling)) + if err != nil { + return err + } + + err = d.Set("guest_accelerator", flattenGuestAccelerators(instance.GuestAccelerators)) + if err != nil { + return err + } + + err = d.Set("scratch_disk", scratchDisks) + if err != nil { + return err + } + + err = d.Set("shielded_instance_config", flattenShieldedVmConfig(instance.ShieldedInstanceConfig)) + if err != nil { + return err + } + + err = d.Set("enable_display", flattenEnableDisplay(instance.DisplayDevice)) + if err != nil { + return err + } + + if err := d.Set("attached_disk", ads); err != nil { + return fmt.Errorf("Error setting attached_disk: %s", err) + } + if err := d.Set("cpu_platform", instance.CpuPlatform); err != nil { + return fmt.Errorf("Error setting cpu_platform: %s", err) + } + if err := d.Set("min_cpu_platform", instance.MinCpuPlatform); err != nil { + return fmt.Errorf("Error setting min_cpu_platform: %s", err) + } + if err := d.Set("deletion_protection", instance.DeletionProtection); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(instance.SelfLink)); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("instance_id", fmt.Sprintf("%d", instance.Id)); err != nil { + return fmt.Errorf("Error setting instance_id: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("zone", tpgresource.GetResourceNameFromSelfLink(instance.Zone)); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("current_status", instance.Status); 
err != nil { + return fmt.Errorf("Error setting current_status: %s", err) + } + if err := d.Set("name", instance.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, tpgresource.GetResourceNameFromSelfLink(instance.Zone), instance.Name)) + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_group_test.go b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_group_test.go new file mode 100644 index 000000000000..bd4c20e755fc --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_group_test.go @@ -0,0 +1,335 @@ +package compute_test + +import ( + "errors" + "fmt" + "sort" + "strconv" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccDataSourceGoogleComputeInstanceGroup_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCheckDataSourceGoogleComputeInstanceGroupConfig(acctest.RandString(t, 10), acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataSourceGoogleComputeInstanceGroup("data.google_compute_instance_group.test"), + ), + }, + }, + }) +} + +func TestAccDataSourceGoogleComputeInstanceGroup_withNamedPort(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: 
testAccCheckDataSourceGoogleComputeInstanceGroupConfigWithNamedPort(acctest.RandString(t, 10), acctest.RandString(t, 10)),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckDataSourceGoogleComputeInstanceGroup("data.google_compute_instance_group.test"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccDataSourceGoogleComputeInstanceGroup_fromIGM(t *testing.T) {
+	t.Parallel()
+
+	acctest.VcrTest(t, resource.TestCase{
+		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
+		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
+		Steps: []resource.TestStep{
+			{
+				Config: testAccCheckDataSourceGoogleComputeInstanceGroup_fromIGM(fmt.Sprintf("tf-test-igm-%d", acctest.RandInt(t)), fmt.Sprintf("tf-test-igm-%d", acctest.RandInt(t))),
+				Check: resource.ComposeTestCheckFunc(
+					resource.TestCheckResourceAttr("data.google_compute_instance_group.test", "instances.#", "10"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckDataSourceGoogleComputeInstanceGroup(dataSourceName string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		dsFullName := "data.google_compute_instance_group.test"
+		rsFullName := "google_compute_instance_group.test"
+		ds, ok := s.RootModule().Resources[dsFullName]
+		if !ok {
+			return fmt.Errorf("can't find data source called %s in state", dsFullName)
+		}
+
+		rs, ok := s.RootModule().Resources[rsFullName]
+		if !ok {
+			return fmt.Errorf("can't find resource called %s in state", rsFullName)
+		}
+
+		dsAttrs := ds.Primary.Attributes
+		rsAttrs := rs.Primary.Attributes
+
+		attrsToTest := []string{
+			"id",
+			"name",
+			"zone",
+			"project",
+			"description",
+			"network",
+			"size",
+		}
+
+		for _, attrToTest := range attrsToTest {
+			if dsAttrs[attrToTest] != rsAttrs[attrToTest] {
+				return fmt.Errorf("%s is %s; want %s", attrToTest, dsAttrs[attrToTest], rsAttrs[attrToTest])
+			}
+		}
+
+		if !tpgresource.CompareSelfLinkOrResourceName("", dsAttrs["self_link"], rsAttrs["self_link"], nil) && dsAttrs["self_link"] != rsAttrs["self_link"] {
+			return fmt.Errorf("self 
link does not match: %s vs %s", dsAttrs["self_link"], rsAttrs["self_link"])
+		}
+
+		dsNamedPortsCount, ok := dsAttrs["named_port.#"]
+		if !ok {
+			return errors.New("can't find 'named_port' attribute in data source")
+		}
+
+		dsNoOfNamedPorts, err := strconv.Atoi(dsNamedPortsCount)
+		if err != nil {
+			return errors.New("failed to read number of named ports in data source")
+		}
+
+		rsNamedPortsCount, ok := rsAttrs["named_port.#"]
+		if !ok {
+			return errors.New("can't find 'named_port' attribute in resource")
+		}
+
+		rsNoOfNamedPorts, err := strconv.Atoi(rsNamedPortsCount)
+		if err != nil {
+			return errors.New("failed to read number of named ports in resource")
+		}
+
+		if dsNoOfNamedPorts != rsNoOfNamedPorts {
+			return fmt.Errorf(
+				"expected %d number of named port, received %d, this is most likely a bug",
+				rsNoOfNamedPorts,
+				dsNoOfNamedPorts,
+			)
+		}
+
+		namedPortItemKeys := []string{"name", "value"}
+		for i := 0; i < dsNoOfNamedPorts; i++ {
+			for _, key := range namedPortItemKeys {
+				idx := fmt.Sprintf("named_port.%d.%s", i, key)
+				if dsAttrs[idx] != rsAttrs[idx] {
+					return fmt.Errorf("%s is %s; want %s", idx, dsAttrs[idx], rsAttrs[idx])
+				}
+			}
+		}
+
+		dsInstancesCount, ok := dsAttrs["instances.#"]
+		if !ok {
+			return errors.New("can't find 'instances' attribute in data source")
+		}
+
+		dsNoOfInstances, err := strconv.Atoi(dsInstancesCount)
+		if err != nil {
+			return errors.New("failed to read number of instances in data source")
+		}
+
+		rsInstancesCount, ok := rsAttrs["instances.#"]
+		if !ok {
+			return errors.New("can't find 'instances' attribute in resource")
+		}
+
+		rsNoOfInstances, err := strconv.Atoi(rsInstancesCount)
+		if err != nil {
+			return errors.New("failed to read number of instances in resource")
+		}
+
+		if dsNoOfInstances != rsNoOfInstances {
+			return fmt.Errorf(
+				"expected %d number of instances, received %d, this is most likely a bug",
+				rsNoOfInstances,
+				dsNoOfInstances,
+			)
+		}
+
+		// We don't know the exact keys of the elements, so go 
through the whole list looking for matching ones + dsInstancesValues := []string{} + for k, v := range dsAttrs { + if strings.HasPrefix(k, "instances") && !strings.HasSuffix(k, "#") { + dsInstancesValues = append(dsInstancesValues, v) + } + } + + rsInstancesValues := []string{} + for k, v := range rsAttrs { + if strings.HasPrefix(k, "instances") && !strings.HasSuffix(k, "#") { + rsInstancesValues = append(rsInstancesValues, v) + } + } + + sort.Strings(dsInstancesValues) + sort.Strings(rsInstancesValues) + + for k, dsAttr := range dsInstancesValues { + rsAttr := rsInstancesValues[k] + if !tpgresource.CompareSelfLinkOrResourceName("", dsAttr, rsAttr, nil) && dsAttr != rsAttr { + return fmt.Errorf("instance expected value %s did not match real value %s. expected list of instances %v, received %v", rsAttr, dsAttr, rsInstancesValues, dsInstancesValues) + } + } + + return nil + } +} + +func testAccCheckDataSourceGoogleComputeInstanceGroupConfig(instanceName, igName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "test" { + name = "tf-test-%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } +} + +resource "google_compute_instance_group" "test" { + name = "tf-test-%s" + zone = google_compute_instance.test.zone + + instances = [ + google_compute_instance.test.self_link, + ] +} + +data "google_compute_instance_group" "test" { + name = google_compute_instance_group.test.name + zone = google_compute_instance_group.test.zone +} +`, instanceName, igName) +} + +func testAccCheckDataSourceGoogleComputeInstanceGroupConfigWithNamedPort(instanceName, igName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + 
project = "debian-cloud" +} + +resource "google_compute_instance" "test" { + name = "tf-test-%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } +} + +resource "google_compute_instance_group" "test" { + name = "tf-test-%s" + zone = google_compute_instance.test.zone + + named_port { + name = "http" + port = "8080" + } + + named_port { + name = "https" + port = "8443" + } + + instances = [ + google_compute_instance.test.self_link, + ] +} + +data "google_compute_instance_group" "test" { + name = google_compute_instance_group.test.name + zone = google_compute_instance_group.test.zone +} +`, instanceName, igName) +} + +func testAccCheckDataSourceGoogleComputeInstanceGroup_fromIGM(igmName, secondIgmName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_group_manager" "igm" { + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + base_instance_name = "igm" + zone = "us-central1-a" + target_size = 10 + + wait_for_instances = true +} + +data "google_compute_instance_group" "test" { + self_link = google_compute_instance_group_manager.igm.instance_group +} +`, igmName, secondIgmName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_template.go.tmpl new file mode 100644 index 
000000000000..77c440b3f917 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_template.go.tmpl @@ -0,0 +1,119 @@ +package compute + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeInstanceTemplate() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeInstanceTemplate().Schema) + + dsSchema["filter"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + } + dsSchema["self_link_unique"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + } + dsSchema["most_recent"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name", "filter", "most_recent", "project", "self_link_unique") + + mutuallyExclusive:= []string{"name", "filter", "self_link_unique"} + for _, n:= range mutuallyExclusive { + dsSchema[n].ExactlyOneOf = mutuallyExclusive + } + + return &schema.Resource{ + Read: datasourceComputeInstanceTemplateRead, + Schema: dsSchema, + } +} + +func datasourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + if v, ok := d.GetOk("name"); ok { + return retrieveInstance(d, meta, project, v.(string)) + } + if v, ok := d.GetOk("filter"); ok { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + templates, err := 
config.NewComputeClient(userAgent).InstanceTemplates.List(project).Filter(v.(string)).Do()
+		if err != nil {
+			return fmt.Errorf("error retrieving list of instance templates: %s", err)
+		}
+
+		mostRecent := d.Get("most_recent").(bool)
+		if mostRecent {
+			sort.Sort(ByCreationTimestamp(templates.Items))
+		}
+
+		count := len(templates.Items)
+		if count == 1 || count > 1 && mostRecent {
+			return retrieveInstance(d, meta, project, templates.Items[0].Name)
+		}
+
+		return fmt.Errorf("your filter has returned %d instance template(s). Please refine your filter or set most_recent to return exactly one instance template", len(templates.Items))
+	}
+	if v, ok := d.GetOk("self_link_unique"); ok {
+		return retrieveInstanceFromUniqueId(d, meta, project, v.(string))
+	}
+
+	return fmt.Errorf("one of name, filter or self_link_unique must be set")
+}
+
+func retrieveInstance(d *schema.ResourceData, meta interface{}, project, name string) error {
+	d.SetId("projects/" + project + "/global/instanceTemplates/" + name)
+
+	if err := resourceComputeInstanceTemplateRead(d, meta); err != nil {
+		return err
+	}
+	return tpgresource.SetDataSourceLabels(d)
+}
+
+func retrieveInstanceFromUniqueId(d *schema.ResourceData, meta interface{}, project, self_link_unique string) error {
+	normalId, _ := parseUniqueId(self_link_unique)
+	d.SetId(normalId)
+	d.Set("self_link_unique", self_link_unique)
+
+	if err := resourceComputeInstanceTemplateRead(d, meta); err != nil {
+		return err
+	}
+	return tpgresource.SetDataSourceLabels(d)
+}
+
+// ByCreationTimestamp implements sort.Interface for []*InstanceTemplate based on
+// the CreationTimestamp field. 
+type ByCreationTimestamp []*compute.InstanceTemplate + +func (a ByCreationTimestamp) Len() int { return len(a) } +func (a ByCreationTimestamp) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByCreationTimestamp) Less(i, j int) bool { + return a[i].CreationTimestamp > a[j].CreationTimestamp +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_test.go.tmpl new file mode 100644 index 000000000000..65d3dd7834cf --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_instance_test.go.tmpl @@ -0,0 +1,233 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceComputeInstance_basic(t *testing.T) { + t.Parallel() + + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceComputeInstanceConfig(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccDataSourceComputeInstanceCheck("data.google_compute_instance.bar", "google_compute_instance.foo"), + resource.TestCheckResourceAttr("data.google_compute_instance.bar", "network_interface.#", "1"), + resource.TestCheckResourceAttr("data.google_compute_instance.bar", "boot_disk.0.initialize_params.0.size", "10"), + resource.TestCheckResourceAttr("data.google_compute_instance.bar", "boot_disk.0.initialize_params.0.type", "pd-standard"), + resource.TestCheckResourceAttr("data.google_compute_instance.bar", "scratch_disk.0.interface", 
"SCSI"), + resource.TestCheckResourceAttr("data.google_compute_instance.bar", "network_interface.0.access_config.0.network_tier", "PREMIUM"), + resource.TestCheckResourceAttr("data.google_compute_instance.bar", "enable_display", "true"), + ), + }, + }, + }) +} + +func testAccDataSourceComputeInstanceCheck(datasourceName string, resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[datasourceName] + if !ok { + return fmt.Errorf("root module has no resource called %s", datasourceName) + } + + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("can't find %s in state", resourceName) + } + + datasourceAttributes := ds.Primary.Attributes + resourceAttributes := rs.Primary.Attributes + + instanceAttrsToTest := []string{ + "name", + "machine_type", + "current_status", + "can_ip_forward", + "description", + "deletion_protection", + "labels", + "metadata", + "min_cpu_platform", + "project", + "tags", + "zone", + "cpu_platform", + "instance_id", + "label_fingerprint", + "metadata_fingerprint", + "self_link", + "tags_fingerprint", + } + + for _, attrToCheck := range instanceAttrsToTest { + if datasourceAttributes[attrToCheck] != resourceAttributes[attrToCheck] { + return fmt.Errorf( + "%s is %s; want %s", + attrToCheck, + datasourceAttributes[attrToCheck], + resourceAttributes[attrToCheck], + ) + } + } + + return nil + } +} + +func testAccDataSourceComputeInstanceConfig(instanceName string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "foo" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-8-jessie-v20160803" + } + } + + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + access_config { + // Ephemeral IP + } + } + + metadata = { + foo = "bar" + baz = "qux" 
+ startup-script = "echo Hello" + } + + labels = { + my_key = "my_value" + my_other_key = "my_other_value" + } + + enable_display = true +} + +data "google_compute_instance" "bar" { + name = google_compute_instance.foo.name + zone = "us-central1-a" +} + +data "google_compute_instance" "baz" { + self_link = google_compute_instance.foo.self_link +} +`, instanceName) +} +{{- if ne $.TargetVersionName "ga" }} +func TestAccDataSourceComputeInstance_networkAttachmentUsageExample(t *testing.T) { + t.Parallel() + + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceComputeInstance_networkAttachmentUsageConfig(instanceName), + Check: resource.TestCheckResourceAttrSet("data.google_compute_instance.bar", "network_interface.1.network_attachment"), + }, + }, + }) +} + +func testAccDataSourceComputeInstance_networkAttachmentUsageConfig(instanceName string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "foo" { + provider = google-beta + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = "debian-8-jessie-v20160803" + } + } + + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + + access_config { + // Ephemeral IP + } + } + + network_interface { + network_attachment = google_compute_network_attachment.net_attar_default.self_link + } + + + + labels = { + my_key = "my_value" + my_other_key = "my_other_value" + } + + enable_display = true +} + +data "google_compute_instance" "bar" { + provider = google-beta + name = google_compute_instance.foo.name + zone = "us-central1-a" +} + 
+data "google_compute_instance" "baz" { + provider = google-beta + self_link = google_compute_instance.foo.self_link +} +resource "google_compute_network" "net_att_default" { + provider = google-beta + name = "basic-network-att" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet_att_default" { + provider = google-beta + name = "basic-subnetwork-att" + region = "us-central1" + network = google_compute_network.net_att_default.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_compute_network_attachment" "net_attar_default" { + provider = google-beta + name = "basic-attachment" + region = "us-central1" + subnetworks = [google_compute_subnetwork.subnet_att_default.id] + connection_preference = "ACCEPT_AUTOMATIC" +} +`, instanceName) +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_machine_types.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_machine_types.go.tmpl new file mode 100644 index 000000000000..648abdd1bb96 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_machine_types.go.tmpl @@ -0,0 +1,251 @@ +package compute + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeMachineTypes() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceGoogleComputeMachineTypesRead, + + Schema: map[string]*schema.Schema{ + "filter": { + Type: schema.TypeString, + Optional: true, + }, + + "machine_types": { + Type: schema.TypeList, + 
Computed: true, + Description: `The list of machine types`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the machine type.`, + }, + "guest_cpus": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of virtual CPUs that are available to the instance.`, + }, + "memory_mb": { + Type: schema.TypeInt, + Computed: true, + Description: `The amount of physical memory available to the instance, defined in MB.`, + }, + {{- if ne $.TargetVersionName "ga" }} + "bundled_local_ssds": { + Type: schema.TypeSet, + Computed: true, + Description: `The configuration of bundled local SSD for the machine type.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_interface": { + Type: schema.TypeString, + Computed: true, + Description: `The default disk interface if the interface is not specified.`, + }, + "partition_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of partitions.`, + }, + }, + }, + }, + {{- end }} + "deprecated": { + Type: schema.TypeSet, + Computed: true, + Description: `The deprecation status associated with this machine type. Only applicable if the machine type is unavailable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "replacement": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the suggested replacement for a deprecated machine type.`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The deprecation state of this resource. 
This can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED.`, + }, + }, + }, + }, + "maximum_persistent_disks": { + Type: schema.TypeInt, + Computed: true, + Description: `The maximum persistent disks allowed.`, + }, + "maximum_persistent_disks_size_gb": { + Type: schema.TypeInt, + Computed: true, + Description: `The maximum total persistent disks size (GB) allowed.`, + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: `A textual description of the machine type.`, + }, + "is_shared_cpus": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether this machine type has a shared CPU.`, + }, + "accelerators": { + Type: schema.TypeList, + Computed: true, + Description: `A list of accelerator configurations assigned to this machine type.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "guest_accelerator_type": { + Type: schema.TypeString, + Computed: true, + Description: `The accelerator type resource name, not a full URL, e.g. nvidia-tesla-t4.`, + }, + "guest_accelerator_count": { + Type: schema.TypeInt, + Computed: true, + Description: `Number of accelerator cards exposed to the guest.`, + }, + }, + }, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The server-defined URL for the machine type.`, + }, + }, + }, + }, + "zone": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the zone for this request.`, + Optional: true, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Description: `Project ID for this request.`, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeMachineTypesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return diag.FromErr(err) + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return diag.FromErr(err) 
+ } + + filter := d.Get("filter").(string) + zone := d.Get("zone").(string) + + machineTypes := make([]map[string]interface{}, 0) + token := "" + + for paginate := true; paginate; { + resp, err := config.NewComputeClient(userAgent).MachineTypes.List(project, zone).Context(ctx).Filter(filter).PageToken(token).Do() + if err != nil { + return diag.FromErr(fmt.Errorf("Error retrieving machine types: %w", err)) + + } + pageMachineTypes := flattenDatasourceGoogleComputeMachineTypesList(ctx, resp.Items) + machineTypes = append(machineTypes, pageMachineTypes...) + + token = resp.NextPageToken + paginate = token != "" + } + + if err := d.Set("machine_types", machineTypes); err != nil { + return diag.FromErr(fmt.Errorf("Error setting machine_types: %w", err)) + } + + if err := d.Set("project", project); err != nil { + return diag.FromErr(fmt.Errorf("Error setting project: %w", err)) + } + if err := d.Set("zone", zone); err != nil { + return diag.FromErr(fmt.Errorf("Error setting zone: %w", err)) + } + + id := fmt.Sprintf("projects/%s/zones/%s/machineTypes/filters/%s", project, zone, filter) + d.SetId(id) + + return diag.Diagnostics{} +} + +func flattenDatasourceGoogleComputeMachineTypesList(ctx context.Context, v []*compute.MachineType) []map[string]interface{} { + if v == nil { + return make([]map[string]interface{}, 0) + } + + machineTypes := make([]map[string]interface{}, 0, len(v)) + for _, mt := range v { + accelerators := make([]map[string]interface{}, len(mt.Accelerators)) + for i, a := range mt.Accelerators { + accelerators[i] = map[string]interface{}{ + "guest_accelerator_type": a.GuestAcceleratorType, + "guest_accelerator_count": a.GuestAcceleratorCount, + } + } + {{- if ne $.TargetVersionName "ga" }} + var localSSDs []map[string]interface{} + if bls := mt.BundledLocalSsds; bls != nil { + localSSDs = []map[string]interface{}{ + { + "default_interface": bls.DefaultInterface, + "partition_count": bls.PartitionCount, + }, + } + } + {{- end }} + machineType := 
map[string]interface{}{ + "name": mt.Name, + "guest_cpus": mt.GuestCpus, + "memory_mb": mt.MemoryMb, + {{- if ne $.TargetVersionName "ga" }} + "bundled_local_ssds": localSSDs, + {{- end }} + "maximum_persistent_disks": mt.MaximumPersistentDisks, + "maximum_persistent_disks_size_gb": mt.MaximumPersistentDisksSizeGb, + "description": mt.Description, + "is_shared_cpus": mt.IsSharedCpu, + "accelerators": accelerators, + "self_link": mt.SelfLink, + } + if dep := mt.Deprecated; dep != nil { + d := map[string]interface{}{ + "replacement": dep.Replacement, + "state": dep.State, + } + machineType["deprecated"] = []map[string]interface{}{d} + } + machineTypes = append(machineTypes, machineType) + } + + return machineTypes +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_node_types.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_node_types.go.tmpl new file mode 100644 index 000000000000..3c187d11e13f --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_node_types.go.tmpl @@ -0,0 +1,87 @@ +package compute + +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeNodeTypes() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeNodeTypesRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + 
+func dataSourceGoogleComputeNodeTypesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return fmt.Errorf("Please specify zone to get appropriate node types for zone. Unable to get zone: %s", err) + } + + resp, err := config.NewComputeClient(userAgent).NodeTypes.List(project, zone).Do() + if err != nil { + return err + } + nodeTypes := flattenComputeNodeTypes(resp.Items) + log.Printf("[DEBUG] Received Google Compute Regions: %q", nodeTypes) + + if err := d.Set("names", nodeTypes); err != nil { + return fmt.Errorf("Error setting names: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("zone", zone); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s", project, zone)) + + return nil +} + +func flattenComputeNodeTypes(nodeTypes []*compute.NodeType) []string { + result := make([]string, len(nodeTypes)) + for i, nodeType := range nodeTypes { + result[i] = nodeType.Name + } + sort.Strings(result) + return result +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_group.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_group.go.tmpl new file mode 100644 index 000000000000..6ce51173ae8e --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_group.go.tmpl @@ -0,0 +1,169 @@ +package compute + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg 
"github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "google.golang.org/api/googleapi" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeRegionInstanceGroup() *schema.Resource { + return &schema.Resource{ + Read: dataSourceComputeRegionInstanceGroupRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "instances": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance": { + Type: schema.TypeString, + Required: true, + }, + + "status": { + Type: schema.TypeString, + Required: true, + }, + + "named_ports": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "port": { + Type: schema.TypeInt, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + }, + + "size": { + Type: schema.TypeInt, + Computed: true, + }, + }, + } +} + +func dataSourceComputeRegionInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, region, name, err := tpgresource.GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) + if err != nil { + return err + } + id := fmt.Sprintf("projects/%s/regions/%s/instanceGroups/%s", project, region, 
name) + instanceGroup, err := config.NewComputeClient(userAgent).RegionInstanceGroups.Get( + project, region, name).Do() + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("Region Instance Group %q", name), id) + } + + members, err := config.NewComputeClient(userAgent).RegionInstanceGroups.ListInstances( + project, region, name, &compute.RegionInstanceGroupsListInstancesRequest{ + InstanceState: "ALL", + }).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't have any instances, which is okay. + if err := d.Set("instances", nil); err != nil { + return fmt.Errorf("Error setting instances: %s", err) + } + } else { + return fmt.Errorf("Error reading RegionInstanceGroup Members: %s", err) + } + } else { + if err := d.Set("instances", flattenInstancesWithNamedPorts(members.Items)); err != nil { + return fmt.Errorf("Error setting instances: %s", err) + } + } + d.SetId(id) + if err := d.Set("self_link", instanceGroup.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + return nil +} + +func flattenInstancesWithNamedPorts(insts []*compute.InstanceWithNamedPorts) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(insts)) + log.Printf("There were %d instances.\n", len(insts)) + for _, inst := range insts { + instMap := make(map[string]interface{}) + instMap["instance"] = inst.Instance + instMap["named_ports"] = flattenNamedPorts(inst.NamedPorts) + instMap["status"] = inst.Status + result = append(result, instMap) + } + return result +} + +func flattenNamedPorts(namedPorts []*compute.NamedPort) 
[]map[string]interface{} { + result := make([]map[string]interface{}, 0, len(namedPorts)) + for _, namedPort := range namedPorts { + namedPortMap := make(map[string]interface{}) + namedPortMap["name"] = namedPort.Name + namedPortMap["port"] = namedPort.Port + result = append(result, namedPortMap) + } + return result +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_group_test.go b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_group_test.go new file mode 100644 index 000000000000..7296cd4e9eb9 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_group_test.go @@ -0,0 +1,76 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceRegionInstanceGroup(t *testing.T) { + // Randomness in instance template + acctest.SkipIfVcr(t) + t.Parallel() + name := "tf-test-" + acctest.RandString(t, 6) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceRegionInstanceGroup_basic(fmt.Sprintf("tf-test-rigm--%d", acctest.RandInt(t)), name), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.google_compute_region_instance_group.data_source", "name", name), + resource.TestCheckResourceAttr("data.google_compute_region_instance_group.data_source", "project", envvar.GetTestProjectFromEnv()), + resource.TestCheckResourceAttr("data.google_compute_region_instance_group.data_source", "instances.#", "1")), + }, + }, + }) +} + +func testAccDataSourceRegionInstanceGroup_basic(rigmName, instanceManagerName string) string { + return 
fmt.Sprintf(` +resource "google_compute_target_pool" "foo" { + name = "%s" +} + +data "google_compute_image" "debian" { + project = "debian-cloud" + name = "debian-11-bullseye-v20220719" +} + +resource "google_compute_instance_template" "foo" { + machine_type = "e2-medium" + disk { + source_image = data.google_compute_image.debian.self_link + } + network_interface { + access_config { + } + network = "default" + } +} + +resource "google_compute_region_instance_group_manager" "foo" { + name = "%s" + base_instance_name = "foo" + version { + instance_template = google_compute_instance_template.foo.self_link + name = "primary" + } + region = "us-central1" + target_pools = [google_compute_target_pool.foo.self_link] + target_size = 1 + + named_port { + name = "web" + port = 80 + } + wait_for_instances = true +} + +data "google_compute_region_instance_group" "data_source" { + self_link = google_compute_region_instance_group_manager.foo.instance_group +} +`, rigmName, instanceManagerName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_template.go.tmpl new file mode 100644 index 000000000000..dcec743c7eef --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_template.go.tmpl @@ -0,0 +1,130 @@ +package compute + +import ( + "encoding/json" + "fmt" + "sort" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeRegionInstanceTemplate() *schema.Resource { + // Generate datasource schema from resource + dsSchema := 
tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeRegionInstanceTemplate().Schema) + + dsSchema["filter"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + } + dsSchema["most_recent"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + + // Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name", "filter", "most_recent", "region", "project") + + dsSchema["name"].ExactlyOneOf = []string{"name", "filter"} + dsSchema["filter"].ExactlyOneOf = []string{"name", "filter"} + + return &schema.Resource{ + Read: datasourceComputeRegionInstanceTemplateRead, + Schema: dsSchema, + } +} + +func datasourceComputeRegionInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + if v, ok := d.GetOk("name"); ok { + return retrieveInstances(d, meta, project, region, v.(string)) + } + if v, ok := d.GetOk("filter"); ok { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + params := map[string]string{ + "filter": v.(string), + } + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceTemplates") + if err != nil { + return err + } + + url, err = transport_tpg.AddQueryParams(url, params) + if err != nil { + return err + } + + templates, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return fmt.Errorf("error retrieving list of region instance templates: %s", err) + } + + instanceTemplates := templates["items"] + + instanceTemplatesList, err := json.Marshal(instanceTemplates) + if err != nil { + 
fmt.Println(err) + return err + } + + var items []*compute.InstanceTemplate + + if err := json.Unmarshal(instanceTemplatesList, &items); err != nil { + fmt.Println(err) + return err + } + + mostRecent := d.Get("most_recent").(bool) + if mostRecent { + sort.Sort(ByCreationTimestamp(items)) + } + + count := len(items) + if count == 1 || count > 1 && mostRecent { + return retrieveInstances(d, meta, project, region, items[0].Name) + } + + return fmt.Errorf("your filter has returned %d region instance template(s). Please refine your filter or set most_recent to return exactly one region instance template", len(items)) + } + + return fmt.Errorf("one of name or filters must be set") +} + +func retrieveInstances(d *schema.ResourceData, meta interface{}, project, region, name string) error { + d.SetId("projects/" + project + "/regions/" + region + "/instanceTemplates/" + name) + + if err := resourceComputeRegionInstanceTemplateRead(d, meta); err != nil { + return err + } + return tpgresource.SetDataSourceLabels(d) +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_template_test.go.tmpl new file mode 100644 index 000000000000..2d757b86ed70 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_region_instance_template_test.go.tmpl @@ -0,0 +1,232 @@ + +package compute_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccRegionInstanceTemplateDatasource_name(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), 
+ Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceTemplate_name(envvar.GetTestProjectFromEnv(), acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( + "data.google_compute_region_instance_template.default", + "google_compute_region_instance_template.default", + map[string]struct{}{}, + ), + ), + }, + }, + }) +} + +func TestAccRegionInstanceTemplateDatasource_filter(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceTemplate_filter(envvar.GetTestProjectFromEnv(), acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( + "data.google_compute_region_instance_template.default", + "google_compute_region_instance_template.c", + map[string]struct{}{}, + ), + ), + }, + }, + }) +} + +func TestAccRegionInstanceTemplateDatasource_filter_mostRecent(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceTemplate_filter_mostRecent(envvar.GetTestProjectFromEnv(), acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( + "data.google_compute_region_instance_template.default", + "google_compute_region_instance_template.c", + map[string]struct{}{}, + ), + ), + }, + }, + }) +} + +func testAccRegionInstanceTemplate_name(project, suffix string) string { + return acctest.Nprintf(` +resource "google_compute_region_instance_template" "default" { + name = "tf-test-template-%{suffix}" + description = "Example template." 
+ region = "us-central1" + machine_type = "e2-small" + tags = ["foo", "bar"] + disk { + source_image = "cos-cloud/cos-stable" + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + labels = { + my-label = "my-label-value" + } +} +data "google_compute_region_instance_template" "default" { + project = "%{project}" + region = "us-central1" + name = google_compute_region_instance_template.default.name +} +`, map[string]interface{}{"project": project, "suffix": suffix}) +} + +func testAccRegionInstanceTemplate_filter(project, suffix string) string { + return acctest.Nprintf(` +resource "google_compute_region_instance_template" "a" { + name = "tf-test-template-a-%{suffix}" + description = "Example template." + region = "us-central1" + machine_type = "e2-small" + tags = ["foo", "bar", "a"] + disk { + source_image = "cos-cloud/cos-stable" + auto_delete = true + boot = true + } + network_interface { + network = "default" + } +} +resource "google_compute_region_instance_template" "b" { + name = "tf-test-template-b-%{suffix}" + description = "Example template." + region = "us-central1" + machine_type = "e2-small" + tags = ["foo", "bar", "b"] + disk { + source_image = "cos-cloud/cos-stable" + auto_delete = true + boot = true + } + network_interface { + network = "default" + } +} +resource "google_compute_region_instance_template" "c" { + name = "tf-test-template-c-%{suffix}" + description = "Example template." 
+ region = "us-central1" + machine_type = "e2-small" + tags = ["foo", "bar", "c"] + disk { + source_image = "cos-cloud/cos-stable" + auto_delete = true + boot = true + } + network_interface { + network = "default" + } +} +data "google_compute_region_instance_template" "default" { + project = "%{project}" + region = "us-central1" + filter = "name = tf-test-template-c-%{suffix}" + depends_on = [ + google_compute_region_instance_template.a, + google_compute_region_instance_template.b, + google_compute_region_instance_template.c, + ] +} +`, map[string]interface{}{"project": project, "suffix": suffix}) +} + +func testAccRegionInstanceTemplate_filter_mostRecent(project, suffix string) string { + return acctest.Nprintf(` +resource "google_compute_region_instance_template" "a" { + name = "tf-test-template-%{suffix}-a" + description = "tf-test-instance-template" + region = "us-central1" + machine_type = "e2-small" + tags = ["foo", "bar", "a"] + disk { + source_image = "cos-cloud/cos-stable" + auto_delete = true + boot = true + } + network_interface { + network = "default" + } +} +resource "google_compute_region_instance_template" "b" { + name = "tf-test-template-%{suffix}-b" + description = "tf-test-instance-template" + region = "us-central1" + machine_type = "e2-small" + tags = ["foo", "bar", "b"] + disk { + source_image = "cos-cloud/cos-stable" + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + depends_on = [ + google_compute_region_instance_template.a, + google_compute_region_instance_template.c, + ] +} +resource "google_compute_region_instance_template" "c" { + name = "tf-test-template-%{suffix}-c" + description = "tf-test-instance-template" + region = "us-central1" + machine_type = "e2-small" + tags = ["foo", "bar", "c"] + disk { + source_image = "cos-cloud/cos-stable" + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + depends_on = [ + google_compute_region_instance_template.a, + ] +} +data 
"google_compute_region_instance_template" "default" { + region = "us-central1" + filter = "(name != tf-test-template-%{suffix}-b) (description = tf-test-instance-template)" + most_recent = true + depends_on = [ + google_compute_region_instance_template.a, + google_compute_region_instance_template.b, + google_compute_region_instance_template.c, + ] +} +`, map[string]interface{}{"project": project, "suffix": suffix}) +} + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_regions.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_regions.go.tmpl new file mode 100644 index 000000000000..f3d3da79c4b5 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_regions.go.tmpl @@ -0,0 +1,87 @@ +package compute + +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeRegions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeRegionsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"UP", "DOWN"}, false), + }, + }, + } +} + +func dataSourceGoogleComputeRegionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + filter := "" + if s, ok := d.GetOk("status"); ok { + filter = fmt.Sprintf(" (status eq %s)", s) + } + + call := config.NewComputeClient(userAgent).Regions.List(project).Filter(filter) + + resp, err := call.Do() + if err != nil { + return err + } + + regions := flattenRegions(resp.Items) + log.Printf("[DEBUG] Received Google Compute Regions: %q", regions) + + if err := d.Set("names", regions); err != nil { + return fmt.Errorf("Error setting names: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s", project)) + + return nil +} + +func flattenRegions(regions []*compute.Region) []string { + result := make([]string, len(regions)) + for i, region := range regions { + result[i] = region.Name + } + sort.Strings(result) + return result +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_resource_policy.go b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_resource_policy.go new file mode 100644 index 000000000000..c86f5215b5a4 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_resource_policy.go @@ -0,0 +1,51 @@ +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleComputeResourcePolicy() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeResourcePolicy().Schema) + + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "region") + 
tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleComputeResourcePolicyRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeResourcePolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + name := d.Get("name").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + id := fmt.Sprintf("projects/%s/regions/%s/resourcePolicies/%s", project, region, name) + d.SetId(id) + + err = resourceComputeResourcePolicyRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_router_status.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_router_status.go.tmpl new file mode 100644 index 000000000000..3a240784d930 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_router_status.go.tmpl @@ -0,0 +1,135 @@ +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeRouterStatus() *schema.Resource { + routeElemSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeRoute().Schema) + + return &schema.Resource{ + Read: dataSourceComputeRouterStatusRead, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Description: "Name of the router to query.", + Required: true, + Computed: false, + }, + 
"project": { + Type: schema.TypeString, + Description: "Project ID of the target router.", + Optional: true, + Computed: false, + }, + "region": { + Type: schema.TypeString, + Description: "Region of the target router.", + Optional: true, + Computed: true, + }, + "network": { + Type: schema.TypeString, + Description: "URI of the network to which this router belongs.", + Computed: true, + }, + "best_routes": { + Type: schema.TypeList, + Description: "Best routes for this router's network.", + Elem: &schema.Resource{ + Schema: routeElemSchema, + }, + Computed: true, + }, + "best_routes_for_router": { + Type: schema.TypeList, + Description: "Best routes learned by this router.", + Elem: &schema.Resource{ + Schema: routeElemSchema, + }, + Computed: true, + }, + }, + } +} + +func dataSourceComputeRouterStatusRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + var name string + if n, ok := d.GetOk("name"); ok { + name = n.(string) + } + + resp, err := config.NewComputeClient(userAgent).Routers.GetRouterStatus(project, region, name).Do() + if err != nil { + return err + } + + status := resp.Result + + if err := d.Set("network", status.Network); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + + if err := d.Set("best_routes", flattenRoutes(status.BestRoutes)); err != nil { + return fmt.Errorf("Error setting best_routes: %s", err) + } + + if err := d.Set("best_routes_for_router", flattenRoutes(status.BestRoutesForRouter)); err != nil { + return fmt.Errorf("Error setting best_routes_for_router: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, 
"projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/routers/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return nil +} + +func flattenRoutes(routes []*compute.Route) []map[string]interface{} { + results := make([]map[string]interface{}, len(routes)) + + for i, route := range routes { + results[i] = map[string]interface{}{ + "dest_range": route.DestRange, + "name": route.Name, + "network": route.Network, + "description": route.Description, + "next_hop_gateway": route.NextHopGateway, + "next_hop_ilb": route.NextHopIlb, + "next_hop_ip": route.NextHopIp, + "next_hop_vpn_tunnel": route.NextHopVpnTunnel, + "priority": route.Priority, + "tags": route.Tags, + "next_hop_network": route.NextHopNetwork, + } + } + + return results +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_snapshot.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_snapshot.go.tmpl new file mode 100644 index 000000000000..ddba217cbe95 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_snapshot.go.tmpl @@ -0,0 +1,123 @@ +package compute + +import ( + "fmt" + "sort" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeSnapshot() *schema.Resource { + + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceComputeSnapshot().Schema) + + dsSchema["filter"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + } + dsSchema["most_recent"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + } + + // 
Set 'Optional' schema elements + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name", "filter", "most_recent", "project") + + dsSchema["name"].ExactlyOneOf = []string{"name", "filter"} + dsSchema["filter"].ExactlyOneOf = []string{"name", "filter"} + + return &schema.Resource{ + Read: dataSourceGoogleComputeSnapshotRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleComputeSnapshotRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + if v, ok := d.GetOk("name"); ok { + return retrieveSnapshot(d, meta, project, v.(string)) + } + + if v, ok := d.GetOk("filter"); ok { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + projectGetCall := config.NewResourceManagerClient(userAgent).Projects.Get(project) + + if config.UserProjectOverride { + billingProject := project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + projectGetCall.Header().Add("X-Goog-User-Project", billingProject) + } + + //handling the pagination locally + allSnapshots := make([]*compute.Snapshot,0) + token := "" + for paginate := true; paginate; { + snapshots, err := config.NewComputeClient(userAgent).Snapshots.List(project).Filter(v.(string)).PageToken(token).Do() + if err != nil { + return fmt.Errorf("error retrieving list of snapshots: %s", err) + + } + allSnapshots = append(allSnapshots, snapshots.Items...) 
+
+ token = snapshots.NextPageToken + paginate = token != "" + } + + mostRecent := d.Get("most_recent").(bool) + if mostRecent { + sort.Sort(ByCreationTimestampOfSnapshot(allSnapshots)) + } + + count := len(allSnapshots) + if count == 1 || count > 1 && mostRecent { + return retrieveSnapshot(d, meta, project, allSnapshots[0].Name) + } + + return fmt.Errorf("your filter has returned %d snapshot(s). Please refine your filter or set most_recent to return exactly one snapshot", len(allSnapshots)) + + } + + return fmt.Errorf("one of name or filter must be set") +} + +func retrieveSnapshot(d *schema.ResourceData, meta interface{}, project, name string) error { + d.SetId("projects/" + project + "/global/snapshots/" + name) + d.Set("name", name) + if err := resourceComputeSnapshotRead(d, meta); err != nil { + return err + } + return tpgresource.SetDataSourceLabels(d) +} + +// ByCreationTimestampOfSnapshot implements sort.Interface for []*compute.Snapshot based on +// the CreationTimestamp field. +type ByCreationTimestampOfSnapshot []*compute.Snapshot + +func (a ByCreationTimestampOfSnapshot) Len() int { return len(a) } +func (a ByCreationTimestampOfSnapshot) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByCreationTimestampOfSnapshot) Less(i, j int) bool { + return a[i].CreationTimestamp > a[j].CreationTimestamp +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_subnetwork.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_subnetwork.go.tmpl new file mode 100644 index 000000000000..4315751f0b08 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_subnetwork.go.tmpl @@ -0,0 +1,152 @@ +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +{{- if eq $.TargetVersionName "ga" }} + 
"google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeSubnetwork() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeSubnetworkRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + }, + "self_link": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "description": { + Type: schema.TypeString, + Computed: true, + }, + "ip_cidr_range": { + Type: schema.TypeString, + Computed: true, + }, + "internal_ipv6_prefix": { + Type: schema.TypeString, + Computed: true, + }, + "private_ip_google_access": { + Type: schema.TypeBool, + Computed: true, + }, + "secondary_ip_range": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "range_name": { + Type: schema.TypeString, + Computed: true, + }, + "ip_cidr_range": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "network": { + Type: schema.TypeString, + Computed: true, + }, + "gateway_address": { + Type: schema.TypeString, + Computed: true, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func dataSourceGoogleComputeSubnetworkRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, region, name, err := tpgresource.GetRegionalResourcePropertiesFromSelfLinkOrSchema(d, config) + if err != nil { + return err + } + id := fmt.Sprintf("projects/%s/regions/%s/subnetworks/%s", project, region, name) + + subnetwork, err := config.NewComputeClient(userAgent).Subnetworks.Get(project, region, name).Do() + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, 
fmt.Sprintf("Subnetwork Not Found : %s", name), id) + } + + if err := d.Set("ip_cidr_range", subnetwork.IpCidrRange); err != nil { + return fmt.Errorf("Error setting ip_cidr_range: %s", err) + } + if err := d.Set("internal_ipv6_prefix", subnetwork.InternalIpv6Prefix); err != nil { + return fmt.Errorf("Error setting internal_ipv6_prefix: %s", err) + } + if err := d.Set("private_ip_google_access", subnetwork.PrivateIpGoogleAccess); err != nil { + return fmt.Errorf("Error setting private_ip_google_access: %s", err) + } + if err := d.Set("self_link", subnetwork.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("description", subnetwork.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("gateway_address", subnetwork.GatewayAddress); err != nil { + return fmt.Errorf("Error setting gateway_address: %s", err) + } + if err := d.Set("network", subnetwork.Network); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("secondary_ip_range", flattenSecondaryRanges(subnetwork.SecondaryIpRanges)); err != nil { + return fmt.Errorf("Error setting secondary_ip_range: %s", err) + } + + d.SetId(id) + return nil +} + +func flattenSecondaryRanges(secondaryRanges []*compute.SubnetworkSecondaryRange) []map[string]interface{} { + secondaryRangesSchema := make([]map[string]interface{}, 0, len(secondaryRanges)) + for _, secondaryRange := range secondaryRanges { + data := map[string]interface{}{ + "range_name": secondaryRange.RangeName, + "ip_cidr_range": secondaryRange.IpCidrRange, + } + + secondaryRangesSchema = 
append(secondaryRangesSchema, data) + } + return secondaryRangesSchema +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_vpn_gateway.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_vpn_gateway.go.tmpl new file mode 100644 index 000000000000..f103cdfa4d5e --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_vpn_gateway.go.tmpl @@ -0,0 +1,99 @@ +package compute + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeVpnGateway() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleComputeVpnGatewayRead, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "description": { + Type: schema.TypeString, + Computed: true, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + }, + + "network": { + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceGoogleComputeVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + id := 
fmt.Sprintf("projects/%s/regions/%s/targetVpnGateways/%s", project, region, name) + + vpnGatewaysService := compute.NewTargetVpnGatewaysService(config.NewComputeClient(userAgent)) + + gateway, err := vpnGatewaysService.Get(project, region, name).Do() + if err != nil { + return transport_tpg.HandleDataSourceNotFoundError(err, d, fmt.Sprintf("VPN Gateway Not Found : %s", name), id) + } + if err := d.Set("network", tpgresource.ConvertSelfLinkToV1(gateway.Network)); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("region", gateway.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("self_link", gateway.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("description", gateway.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(id) + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_zones.go.tmpl b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_zones.go.tmpl new file mode 100644 index 000000000000..776a8a44fd7b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/data_source_google_compute_zones.go.tmpl @@ -0,0 +1,101 @@ +package compute + +import ( + "fmt" + "log" + "sort" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func DataSourceGoogleComputeZones() *schema.Resource { + return 
&schema.Resource{ + Read: dataSourceGoogleComputeZonesRead, + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "names": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"UP", "DOWN"}, false), + }, + }, + } +} + +func dataSourceGoogleComputeZonesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region := config.Region + if r, ok := d.GetOk("region"); ok { + region = r.(string) + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + filter := "" + if s, ok := d.GetOk("status"); ok { + filter += fmt.Sprintf(" (status eq %s)", s) + } + + zones := []string{} + err = config.NewComputeClient(userAgent).Zones.List(project).Filter(filter).Pages(config.Context, func(zl *compute.ZoneList) error { + for _, zone := range zl.Items { + // We have no way to guarantee a specific base path for the region, but the built-in API-level filtering + // only lets us query on exact matches, so we do our own filtering here. 
+ if strings.HasSuffix(zone.Region, "/"+region) { + zones = append(zones, zone.Name) + } + } + return nil + }) + + if err != nil { + return err + } + + sort.Strings(zones) + log.Printf("[DEBUG] Received Google Compute Zones: %q", zones) + + if err := d.Set("names", zones); err != nil { + return fmt.Errorf("Error setting names: %s", err) + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/regions/%s", project, region)) + + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/metadata.go.tmpl b/mmv1/third_party/terraform/services/compute/go/metadata.go.tmpl new file mode 100644 index 000000000000..813ebd486a4b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/metadata.go.tmpl @@ -0,0 +1,231 @@ +package compute + +import ( + {{- if ne $.TargetVersionName "ga" }} + "encoding/json" + {{- end }} + "errors" + {{- if ne $.TargetVersionName "ga" }} + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" + "reflect" + {{- end }} + "sort" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Since the google compute API uses optimistic locking, there is a chance +// we need to resubmit our updated metadata. To do this, you need to provide +// an update function that attempts to submit your metadata +func MetadataRetryWrapper(update func() error) error { + return transport_tpg.MetadataRetryWrapper(update) +} + +// Update the metadata (serverMD) according to the provided diff (oldMDMap v +// newMDMap). 
+func MetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) { + curMDMap := make(map[string]string) + // Load metadata on server into map + for _, kv := range serverMD.Items { + // If the server state has a key that we had in our old + // state, but not in our new state, we should delete it + _, okOld := oldMDMap[kv.Key] + _, okNew := newMDMap[kv.Key] + if okOld && !okNew { + continue + } else { + curMDMap[kv.Key] = *kv.Value + } + } + + // Insert new metadata into existing metadata (overwriting when needed) + for key, val := range newMDMap { + curMDMap[key] = val.(string) + } + + // Reformat old metadata into a list + serverMD.Items = nil + for key, val := range curMDMap { + v := val + serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ + Key: key, + Value: &v, + }) + } +} + +// Update the beta metadata (serverMD) according to the provided diff (oldMDMap v +// newMDMap). +func BetaMetadataUpdate(oldMDMap map[string]interface{}, newMDMap map[string]interface{}, serverMD *compute.Metadata) { + curMDMap := make(map[string]string) + // Load metadata on server into map + for _, kv := range serverMD.Items { + // If the server state has a key that we had in our old + // state, but not in our new state, we should delete it + _, okOld := oldMDMap[kv.Key] + _, okNew := newMDMap[kv.Key] + if okOld && !okNew { + continue + } else { + curMDMap[kv.Key] = *kv.Value + } + } + + // Insert new metadata into existing metadata (overwriting when needed) + for key, val := range newMDMap { + curMDMap[key] = val.(string) + } + + // Reformat old metadata into a list + serverMD.Items = nil + for key, val := range curMDMap { + v := val + serverMD.Items = append(serverMD.Items, &compute.MetadataItems{ + Key: key, + Value: &v, + }) + } +} + +func expandComputeMetadata(m map[string]interface{}) []*compute.MetadataItems { + metadata := make([]*compute.MetadataItems, len(m)) + var keys []string + for key := range m { + keys = 
append(keys, key) + } + sort.Strings(keys) + // Append new metadata to existing metadata + for _, key := range keys { + v := m[key].(string) + metadata = append(metadata, &compute.MetadataItems{ + Key: key, + Value: &v, + }) + } + + return metadata +} + +func flattenMetadataBeta(metadata *compute.Metadata) map[string]string { + metadataMap := make(map[string]string) + for _, item := range metadata.Items { + metadataMap[item.Key] = *item.Value + } + return metadataMap +} + +// This function differs from flattenMetadataBeta only in that it takes +// compute.metadata rather than compute.metadata as an argument. It should +// be removed in favour of flattenMetadataBeta if/when all resources using it get +// beta support. +func FlattenMetadata(metadata *compute.Metadata) map[string]interface{} { + metadataMap := make(map[string]interface{}) + for _, item := range metadata.Items { + metadataMap[item.Key] = *item.Value + } + return metadataMap +} + +func resourceInstanceMetadata(d tpgresource.TerraformResourceData) (*compute.Metadata, error) { + m := &compute.Metadata{} + mdMap := d.Get("metadata").(map[string]interface{}) + if v, ok := d.GetOk("metadata_startup_script"); ok && v.(string) != "" { + if w, ok := mdMap["startup-script"]; ok { + // metadata.startup-script could be from metadata_startup_script in the first place + if v != w { + return nil, errors.New("Cannot provide both metadata_startup_script and metadata.startup-script.") + } + } + mdMap["startup-script"] = v + } + if len(mdMap) > 0 { + m.Items = make([]*compute.MetadataItems, 0, len(mdMap)) + var keys []string + for k := range mdMap { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := mdMap[k].(string) + m.Items = append(m.Items, &compute.MetadataItems{ + Key: k, + Value: &v, + }) + } + + // Set the fingerprint. If the metadata has never been set before + // then this will just be blank. 
+ m.Fingerprint = d.Get("metadata_fingerprint").(string) + } + + return m, nil +} +{{- if ne $.TargetVersionName "ga" }} + +func resourceInstancePartnerMetadata(d tpgresource.TerraformResourceData) (map[string]compute.StructuredEntries, error) { + partnerMetadata := make(map[string]compute.StructuredEntries) + partnerMetadataMap := d.Get("partner_metadata").(map[string]interface{}) + if len(partnerMetadataMap) > 0 { + for key, value := range partnerMetadataMap { + var jsonMap map[string]interface{} + err := json.Unmarshal([]byte(value.(string)), &jsonMap) + if err != nil { + return nil, err + } + structuredEntries := jsonMap["entries"].(map[string]interface{}) + structuredEntriesJson, err := json.Marshal(&structuredEntries) + if err != nil { + return nil, err + } + partnerMetadata[key] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(structuredEntriesJson), + } + } + } + return partnerMetadata, nil +} + +func resourceInstancePatchPartnerMetadata(d tpgresource.TerraformResourceData, currentPartnerMetadata map[string]compute.StructuredEntries) map[string]compute.StructuredEntries { + partnerMetadata, _ := resourceInstancePartnerMetadata(d) + for key := range currentPartnerMetadata { + if _, ok := partnerMetadata[key]; !ok { + partnerMetadata[key] = compute.StructuredEntries{} + } + } + return partnerMetadata + +} +func flattenPartnerMetadata(partnerMetadata map[string]compute.StructuredEntries) (map[string]string, error) { + partnerMetadataMap := make(map[string]string) + for key, value := range partnerMetadata { + + jsonString, err := json.Marshal(&value) + if err != nil { + return nil, err + } + if value.Entries != nil { + partnerMetadataMap[key] = string(jsonString) + } + + } + return partnerMetadataMap, nil +} +func ComparePartnerMetadataDiff(_, old, new string, d *schema.ResourceData) bool { + var oldJson map[string]interface{} + var newJson map[string]interface{} + json.Unmarshal([]byte(old), &oldJson) + json.Unmarshal([]byte(new), &newJson) + if 
reflect.DeepEqual(oldJson, newJson) { + return true + } + return false +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_attached_disk.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_attached_disk.go.tmpl new file mode 100644 index 000000000000..95e2c84ebb84 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_attached_disk.go.tmpl @@ -0,0 +1,285 @@ +package compute + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeAttachedDisk() *schema.Resource { + return &schema.Resource{ + Create: resourceAttachedDiskCreate, + Read: resourceAttachedDiskRead, + Delete: resourceAttachedDiskDelete, + + Importer: &schema.ResourceImporter{ + State: resourceAttachedDiskImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(300 * time.Second), + Delete: schema.DefaultTimeout(300 * time.Second), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + computeAttachedDiskDefaultProviderZone, + ), + + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `name or self_link of the disk that will be attached.`, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "instance": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `name 
or self_link of the compute instance that the disk will be attached to. If the self_link is provided then zone and project are extracted from the self link. If only the name is used then zone and project must be defined as properties on the resource or provider.`, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "project": { + Type: schema.TypeString, + ForceNew: true, + Computed: true, + Optional: true, + Description: `The project that the referenced compute instance is a part of. If instance is referenced by its self_link the project defined in the link will take precedence.`, + }, + "zone": { + Type: schema.TypeString, + ForceNew: true, + Computed: true, + Optional: true, + Description: `The zone that the referenced compute instance is located within. If instance is referenced by its self_link the zone defined in the link will take precedence.`, + }, + "device_name": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Computed: true, + Description: `Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance. If not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine.`, + }, + "mode": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Default: "READ_WRITE", + Description: `The mode in which to attach this disk, either READ_WRITE or READ_ONLY. 
If not specified, the default is to attach the disk in READ_WRITE mode.`, + ValidateFunc: validation.StringInSlice([]string{"READ_ONLY", "READ_WRITE"}, false), + }, + }, + UseJSONNumber: true, + } +} + +func resourceAttachedDiskCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + if err != nil { + return err + } + + disk := d.Get("disk").(string) + diskName := tpgresource.GetResourceNameFromSelfLink(disk) + diskSrc := fmt.Sprintf("projects/%s/zones/%s/disks/%s", zv.Project, zv.Zone, diskName) + + // Check if the disk is a regional disk + if strings.Contains(disk, "regions") { + rv, err := tpgresource.ParseRegionDiskFieldValue(disk, d, config) + if err != nil { + return err + } + diskSrc = rv.RelativeLink() + } + + attachedDisk := compute.AttachedDisk{ + Source: diskSrc, + Mode: d.Get("mode").(string), + DeviceName: d.Get("device_name").(string), + } + + op, err := config.NewComputeClient(userAgent).Instances.AttachDisk(zv.Project, zv.Zone, zv.Name, &attachedDisk).Do() + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s/%s", zv.Project, zv.Zone, zv.Name, diskName)) + + waitErr := ComputeOperationWaitTime(config, op, zv.Project, + "disk to attach", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + d.SetId("") + return waitErr + } + + return resourceAttachedDiskRead(d, meta) +} + +func resourceAttachedDiskRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, 
false) + if err != nil { + return err + } + if err := d.Set("project", zv.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("zone", zv.Zone); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + + diskName := tpgresource.GetResourceNameFromSelfLink(d.Get("disk").(string)) + + instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("AttachedDisk %q", d.Id())) + } + + // Iterate through the instance's attached disks as this is the only way to + // confirm the disk is actually attached + ad := FindDiskByName(instance.Disks, diskName) + if ad == nil { + log.Printf("[WARN] Referenced disk wasn't found attached to this compute instance. Removing from state.") + d.SetId("") + return nil + } + + if err := d.Set("device_name", ad.DeviceName); err != nil { + return fmt.Errorf("Error setting device_name: %s", err) + } + if err := d.Set("mode", ad.Mode); err != nil { + return fmt.Errorf("Error setting mode: %s", err) + } + + // Force the referenced resources to a self-link in state because it's more specific then name. 
+ instancePath, err := tpgresource.GetRelativePath(instance.SelfLink) + if err != nil { + return err + } + if err := d.Set("instance", instancePath); err != nil { + return fmt.Errorf("Error setting instance: %s", err) + } + diskPath, err := tpgresource.GetRelativePath(ad.Source) + if err != nil { + return err + } + if err := d.Set("disk", diskPath); err != nil { + return fmt.Errorf("Error setting disk: %s", err) + } + + return nil +} + +func resourceAttachedDiskDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + zv, err := tpgresource.ParseZonalFieldValue("instances", d.Get("instance").(string), "project", "zone", d, config, false) + if err != nil { + return err + } + + diskName := tpgresource.GetResourceNameFromSelfLink(d.Get("disk").(string)) + + instance, err := config.NewComputeClient(userAgent).Instances.Get(zv.Project, zv.Zone, zv.Name).Do() + if err != nil { + return err + } + + // Confirm the disk is still attached before making the call to detach it. If the disk isn't listed as an attached + // disk on the compute instance then return as though the delete call succeed since this is the desired state. 
+ ad := FindDiskByName(instance.Disks, diskName) + if ad == nil { + return nil + } + + op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(zv.Project, zv.Zone, zv.Name, ad.DeviceName).Do() + if err != nil { + return err + } + + waitErr := ComputeOperationWaitTime(config, op, zv.Project, + fmt.Sprintf("Detaching disk from %s", zv.Name), userAgent, d.Timeout(schema.TimeoutDelete)) + if waitErr != nil { + return waitErr + } + + return nil +} + +func resourceAttachedDiskImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + err := tpgresource.ParseImportId( + []string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config) + if err != nil { + return nil, err + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/zones/{{"{{"}}zone{{"}}"}}/instances/{{"{{"}}instance{{"}}"}}/{{"{{"}}disk{{"}}"}}") + if err != nil { + return nil, err + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func FindDiskByName(disks []*compute.AttachedDisk, id string) *compute.AttachedDisk { + for _, disk := range disks { + if tpgresource.CompareSelfLinkOrResourceName("", disk.Source, id, nil) { + return disk + } + } + + return nil +} + +func computeAttachedDiskDefaultProviderZone(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + if diff.GetRawConfig().GetAttr("instance") == cty.UnknownVal(cty.String) { + return nil + } + config := meta.(*transport_tpg.Config) + zv, err := tpgresource.ParseZonalFieldValueDiff("instances", diff.Get("instance").(string), "project", "zone", diff, config, false) + if err != nil { + return err + } + if err := diff.SetNew("zone", zv.Zone); err != nil { + return fmt.Errorf("Failed to retrieve zone: %s", err) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_autoscaler_test.go.tmpl 
b/mmv1/third_party/terraform/services/compute/go/resource_compute_autoscaler_test.go.tmpl new file mode 100644 index 000000000000..a6c6ecd2c4d2 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_autoscaler_test.go.tmpl @@ -0,0 +1,408 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeAutoscaler_update(t *testing.T) { + t.Parallel() + + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeAutoscaler_basic(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeAutoscaler_update(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + + }, + }) +} + +func TestAccComputeAutoscaler_multicondition(t *testing.T) { + t.Parallel() + + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeAutoscaler_multicondition(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeAutoscaler_scaleDownControl(t *testing.T) { + t.Parallel() + + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeAutoscaler_scaleDownControl(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeAutoscaler_scalingSchedule(t *testing.T) { + t.Parallel() + + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeAutoscaler_scalingSchedule(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_autoscaler.foobar", + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeAutoscaler_scaleInControl(t *testing.T) { + t.Parallel() + + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeAutoscaler_scaleInControl(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeAutoscaler_scaleInControlFixed(t *testing.T) { + t.Parallel() + + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeAutoscaler_scaleInControlFixed(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeAutoscaler_scaffolding(itName, tpName, igmName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = 
"%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "foobar" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + target_pools = [google_compute_target_pool.foobar.self_link] + base_instance_name = "foobar" + zone = "us-central1-a" +} +`, itName, tpName, igmName) + +} + +func testAccComputeAutoscaler_basic(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + zone = "us-central1-a" + target = google_compute_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 5 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } + } +} +`, autoscalerName) +} + +func testAccComputeAutoscaler_update(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + zone = "us-central1-a" + target = google_compute_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 0 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } + } +} 
+`, autoscalerName) +} + +func testAccComputeAutoscaler_multicondition(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + zone = "us-central1-a" + target = google_compute_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } + load_balancing_utilization { + target = 0.5 + } + metric { + name = "compute.googleapis.com/instance/network/received_bytes_count" + target = 75 + type = "GAUGE" + } + metric { + name = "compute.googleapis.com/instance/network/sent_bytes_count" + target = 50 + type = "GAUGE" + } + } +} +`, autoscalerName) +} + +func testAccComputeAutoscaler_scaleDownControl(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + zone = "us-central1-a" + target = google_compute_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + predictive_method = "OPTIMIZE_AVAILABILITY" + } +{{- if ne $.TargetVersionName "ga" }} + scale_down_control { + max_scaled_down_replicas { + percent = 80 + } + time_window_sec = 300 + } +{{- end }} + } +} +`, autoscalerName) +} + +func testAccComputeAutoscaler_scaleInControl(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + zone = "us-central1-a" + target = 
google_compute_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } + scale_in_control { + max_scaled_in_replicas { + percent = 80 + } + time_window_sec = 300 + } + } +} +`, autoscalerName) +} + +func testAccComputeAutoscaler_scaleInControlFixed(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + zone = "us-central1-a" + target = google_compute_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } + scale_in_control { + max_scaled_in_replicas { + fixed = 8 + } + time_window_sec = 300 + } + } +} +`, autoscalerName) +} + +func testAccComputeAutoscaler_scalingSchedule(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + zone = "us-central1-a" + target = google_compute_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } +{{- if ne $.TargetVersionName "ga" }} + scale_down_control { + max_scaled_down_replicas { + percent = 80 + } + time_window_sec = 300 + } +{{- end }} + scaling_schedules { + name = "every-weekday-morning" + description = "Increase to 2 every weekday at 7AM for 6 hours." 
+ min_required_replicas = 0 + schedule = "0 7 * * MON-FRI" + time_zone = "America/New_York" + duration_sec = 21600 + } + scaling_schedules { + name = "every-weekday-afternoon" + description = "Increase to 2 every weekday at 7PM for 6 hours." + min_required_replicas = 2 + schedule = "0 19 * * MON-FRI" + time_zone = "America/New_York" + duration_sec = 21600 + } + } +} +`, autoscalerName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_backend_service_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_backend_service_test.go.tmpl new file mode 100644 index 000000000000..74617a1680e4 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_backend_service_test.go.tmpl @@ -0,0 +1,2011 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeBackendService_basic(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_basic(serviceName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_basicModified( + serviceName, checkName, extraCheckName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withBackend(t 
*testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName, 10), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName, 20), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withBackendAndMaxUtilization(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName, 10), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withBackendAndMaxUtilization( + serviceName, igName, itName, checkName, 10), + PlanOnly: true, + ExpectNonEmptyPlan: 
true, + }, + { + Config: testAccComputeBackendService_withBackendAndMaxUtilization( + serviceName, igName, itName, checkName, 10), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withBackendAndIAP(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withBackendAndIAP( + serviceName, igName, itName, checkName, 10), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"iap.0.oauth2_client_secret"}, + }, + { + Config: testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName, 10), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_updatePreservesOptionalParameters(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withSessionAffinity( + serviceName, checkName, "initial-description", 
"GENERATED_COOKIE"), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withSessionAffinity( + serviceName, checkName, "updated-description", "GENERATED_COOKIE"), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withConnectionDraining(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withConnectionDraining(serviceName, checkName, 10), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withConnectionDrainingAndUpdate(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withConnectionDraining(serviceName, checkName, 10), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_basic(serviceName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withHttpsHealthCheck(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withHttpsHealthCheck(serviceName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withCdnPolicy(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withCdnPolicy(serviceName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withCdnPolicy2(serviceName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withCdnPolicyBypassCacheOnRequestHeaders(serviceName, checkName, "Proxy-Authorization"), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withCdnPolicyBypassCacheOnRequestHeaders(serviceName, checkName, 
"Authorization"), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withCdnPolicyUseOriginHeaders(serviceName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withSecurityPolicy(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + polName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + edgePolName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withSecurityPolicy(serviceName, checkName, polName, edgePolName, "google_compute_security_policy.policy.self_link", "google_compute_security_policy.edgePolicy.self_link"), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withSecurityPolicy(serviceName, checkName, polName, edgePolName, "\"\"", "\"\""), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withCDNEnabled(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withCDNEnabled( + serviceName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withSessionAffinity(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withSessionAffinity( + serviceName, checkName, "description", "CLIENT_IP"), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withSessionAffinity( + serviceName, checkName, "description", "GENERATED_COOKIE"), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withAffinityCookieTtlSec(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withAffinityCookieTtlSec( + serviceName, checkName, "description", "GENERATED_COOKIE", 300), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withMaxConnections(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withMaxConnections( + serviceName, igName, itName, checkName, 10), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withMaxConnections( + serviceName, igName, itName, checkName, 20), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withMaxConnectionsPerInstance(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withMaxConnectionsPerInstance( + serviceName, igName, itName, checkName, 10), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + { + 
Config: testAccComputeBackendService_withMaxConnectionsPerInstance( + serviceName, igName, itName, checkName, 20), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withMaxRatePerEndpoint(t *testing.T) { + t.Parallel() + + randSuffix := acctest.RandString(t, 10) + service := fmt.Sprintf("tf-test-%s", randSuffix) + instance := fmt.Sprintf("tf-test-%s", randSuffix) + neg := fmt.Sprintf("tf-test-%s", randSuffix) + network := fmt.Sprintf("tf-test-%s", randSuffix) + check := fmt.Sprintf("tf-test-%s", randSuffix) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withMaxRatePerEndpoint( + service, instance, neg, network, check, 0.2), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withMaxRatePerEndpoint( + service, instance, neg, network, check, 0.4), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withMaxConnectionsPerEndpoint(t *testing.T) { + t.Parallel() + + randSuffix := acctest.RandString(t, 10) + service := fmt.Sprintf("tf-test-%s", randSuffix) + instance := fmt.Sprintf("tf-test-%s", randSuffix) + neg := fmt.Sprintf("tf-test-%s", randSuffix) + network := fmt.Sprintf("tf-test-%s", randSuffix) + check := fmt.Sprintf("tf-test-%s", randSuffix) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccComputeBackendService_withMaxConnectionsPerEndpoint( + service, instance, neg, network, check, 5), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withMaxConnectionsPerEndpoint( + service, instance, neg, network, check, 10), + }, + { + ResourceName: "google_compute_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withCustomHeaders(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withCustomHeaders(serviceName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_basic(serviceName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_internalLoadBalancing(t *testing.T) { + // Instance template uses UniqueId in some cases + acctest.SkipIfVcr(t) + t.Parallel() + + fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + proxy := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_internalLoadBalancing(fr, proxy, backend, hc, urlmap), + }, + { + ResourceName: "google_compute_backend_service.backend_service", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withLogConfig(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withLogConfig(serviceName, checkName, 0.7, true), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withLogConfig(serviceName, checkName, 0.4, true), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withLogConfig(serviceName, checkName, 0.4, false), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withLogConfig2(serviceName, checkName, true), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withLogConfig2(serviceName, checkName, false), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccComputeBackendService_withLogConfig(serviceName, checkName, 0.7, false), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_trafficDirectorUpdateBasic(t *testing.T) { + t.Parallel() + + backendName := fmt.Sprintf("foo-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("bar-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_trafficDirectorBasic(backendName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_trafficDirectorUpdateBasic(backendName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_withCompressionMode(t *testing.T) { + t.Parallel() + + backendName := fmt.Sprintf("foo-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("bar-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_withCompressionMode(backendName, checkName, "DISABLED"), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_withCompressionMode(backendName, checkName, "AUTOMATIC"), + }, + { + ResourceName: "google_compute_backend_service.foobar", + 
ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_basic(backendName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeBackendService_trafficDirectorUpdateLbPolicies(t *testing.T) { + t.Parallel() + + backendName := fmt.Sprintf("foo-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("bar-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_trafficDirectorLbPolicies(backendName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeBackendService_trafficDirectorUpdateLbPolicies(backendName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeBackendService_trafficDirectorUpdateFull(t *testing.T) { + t.Parallel() + + backendName := fmt.Sprintf("foo-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("bar-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_trafficDirectorFull(backendName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccComputeBackendService_trafficDirectorUpdateFull(backendName, checkName), + }, + { + ResourceName: "google_compute_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeBackendService_regionNegBackend(t *testing.T) { + t.Parallel() + + suffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeBackendService_regionNegBackend(suffix), + }, + { + ResourceName: "google_compute_backend_service.backend", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func testAccComputeBackendService_trafficDirectorBasic(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + locality_lb_policy = "RING_HASH" + circuit_breakers { + max_connections = 10 + } + consistent_hash { + http_cookie { + ttl { + seconds = 11 + nanos = 1234 + } + name = "mycookie" + } + } + outlier_detection { + consecutive_errors = 2 + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} + +func testAccComputeBackendService_trafficDirectorUpdateBasic(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + locality_lb_policy = "RANDOM" + circuit_breakers { + max_connections = 10 + } + outlier_detection { + consecutive_errors = 2 
+ } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} + +func testAccComputeBackendService_trafficDirectorLbPolicies(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + locality_lb_policies { + custom_policy { + name = "myorg.CustomPolicy" + data = "{\"foo\": \"bar\"}" + } + } + locality_lb_policies { + policy { + name = "ROUND_ROBIN" + } + } + circuit_breakers { + max_connections = 10 + } + outlier_detection { + consecutive_errors = 2 + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} + +func testAccComputeBackendService_trafficDirectorUpdateLbPolicies(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + locality_lb_policies { + custom_policy { + name = "myorg.AnotherCustomPolicy" + data = "{\"foo\": \"bar\"}" + } + } + locality_lb_policies { + custom_policy { + name = "myorg.CustomPolicy" + data = "{\"foo\": \"bar\"}" + } + } + locality_lb_policies { + policy { + name = "ROUND_ROBIN" + } + } + circuit_breakers { + max_connections = 10 + } + outlier_detection { + consecutive_errors = 2 + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeBackendService_trafficDirectorFull(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks 
= [google_compute_health_check.health_check.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + locality_lb_policy = "MAGLEV" + circuit_breakers { + max_connections = 10 + } + consistent_hash { + http_cookie { + ttl { + seconds = 11 + nanos = 1234 + } + name = "mycookie" + } + } + outlier_detection { + consecutive_errors = 2 + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeBackendService_trafficDirectorUpdateFull(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + locality_lb_policy = "MAGLEV" + circuit_breakers { + connect_timeout { + seconds = 3 + nanos = 4 + } + max_connections = 11 + max_requests_per_connection = 12 + max_pending_requests = 13 + max_requests = 14 + max_retries = 15 + } + consistent_hash { + http_cookie { + ttl { + seconds = 12 + } + name = "mycookie2" + path = "mycookie2/path" + } + minimum_ring_size = 16 + } + outlier_detection { + base_ejection_time { + seconds = 0 + nanos = 5 + } + consecutive_errors = 1 + consecutive_gateway_failure = 3 + enforcing_consecutive_errors = 4 + enforcing_consecutive_gateway_failure = 5 + enforcing_success_rate = 6 + interval { + seconds = 7 + } + max_ejection_percent = 99 + success_rate_minimum_hosts = 98 + success_rate_request_volume = 97 + success_rate_stdev_factor = 1800 + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} +{{- end }} + +func testAccComputeBackendService_basic(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = 
[google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) +} + +func testAccComputeBackendService_withCDNEnabled(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + enable_cdn = true +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) +} + +func testAccComputeBackendService_basicModified(serviceName, checkOne, checkTwo string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.one.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_http_health_check" "one" { + name = "%s" + request_path = "/one" + check_interval_sec = 30 + timeout_sec = 30 +} +`, serviceName, checkOne, checkTwo) +} + +func testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName string, timeout int64) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + port_name = "http" + protocol = "HTTP" + timeout_sec = %v + + backend { + group = google_compute_instance_group_manager.foobar.instance_group + } + + health_checks = [google_compute_http_health_check.default.self_link] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + 
base_instance_name = "tf-test-foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } +} + +resource "google_compute_http_health_check" "default" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, timeout, igName, itName, checkName) +} + +func testAccComputeBackendService_withBackendAndMaxUtilization( + serviceName, igName, itName, checkName string, timeout int64) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + port_name = "http" + protocol = "HTTP" + timeout_sec = %v + + backend { + group = google_compute_instance_group_manager.foobar.instance_group + max_utilization = 1.0 + } + + health_checks = [google_compute_http_health_check.default.self_link] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + base_instance_name = "tf-test-foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } +} + +resource "google_compute_http_health_check" "default" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, timeout, igName, itName, checkName) +} + +func testAccComputeBackendService_withBackendAndIAP( + serviceName, igName, itName, checkName string, 
timeout int64) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + port_name = "http" + protocol = "HTTP" + timeout_sec = %v + + backend { + group = google_compute_instance_group_manager.foobar.instance_group + } + + iap { + oauth2_client_id = "test" + oauth2_client_secret = "test" + } + + health_checks = [google_compute_http_health_check.default.self_link] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + base_instance_name = "tf-test-foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } +} + +resource "google_compute_http_health_check" "default" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, timeout, igName, itName, checkName) +} + +func testAccComputeBackendService_withSessionAffinity(serviceName, checkName, description, affinityName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + description = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + session_affinity = "%s" +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, description, affinityName, checkName) +} + +func testAccComputeBackendService_withAffinityCookieTtlSec(serviceName, checkName, description, affinityName string, affinityCookieTtlSec int64) string { + return fmt.Sprintf(` +resource 
"google_compute_backend_service" "foobar" { + name = "%s" + description = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + session_affinity = "%s" + affinity_cookie_ttl_sec = %v +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, description, affinityName, affinityCookieTtlSec, checkName) +} + +func testAccComputeBackendService_withConnectionDraining(serviceName, checkName string, drainingTimeout int64) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + connection_draining_timeout_sec = %v +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, drainingTimeout, checkName) +} + +func testAccComputeBackendService_withHttpsHealthCheck(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_https_health_check.zero.self_link] + protocol = "HTTPS" +} + +resource "google_compute_https_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) +} + +func testAccComputeBackendService_withCdnPolicy(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + + cdn_policy { + negative_caching = false + serve_while_stale = 0 + cache_key_policy { + include_protocol = true + include_host = true + include_query_string = true + query_string_whitelist = ["foo", "bar"] + } + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, 
serviceName, checkName) +} + +func testAccComputeBackendService_withCdnPolicy2(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + + cdn_policy { + cache_key_policy { + include_protocol = true + include_host = true + include_query_string = true + query_string_whitelist = ["foo", "bar"] + } + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) +} + +func testAccComputeBackendService_withCdnPolicyBypassCacheOnRequestHeaders(serviceName, checkName, headerName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + + cdn_policy { + negative_caching = false + serve_while_stale = 0 + cache_key_policy { + include_protocol = true + include_host = true + include_query_string = true + query_string_whitelist = ["foo", "bar"] + } + bypass_cache_on_request_headers { + header_name = "%s" + } + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, headerName, checkName) +} + +func testAccComputeBackendService_withCdnPolicyUseOriginHeaders(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + + cdn_policy { + cache_mode = "USE_ORIGIN_HEADERS" + cache_key_policy { + include_protocol = true + include_host = true + include_query_string = true + } + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) +} + +func 
testAccComputeBackendService_withSecurityPolicy(serviceName, checkName, polName, edgePolName, polLink string, edgePolLink string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + security_policy = %s + edge_security_policy = %s +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic security policy" +} + +resource "google_compute_security_policy" "edgePolicy" { + name = "%s" + description = "edge security policy" + type = "CLOUD_ARMOR_EDGE" +} +`, serviceName, polLink, edgePolLink, checkName, polName, edgePolName) +} + +func testAccComputeBackendService_withMaxConnections( + serviceName, igName, itName, checkName string, maxConnections int64) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + port_name = "http" + protocol = "TCP" + + backend { + group = google_compute_instance_group_manager.foobar.instance_group + max_connections = %v + } + + health_checks = [google_compute_health_check.default.self_link] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + base_instance_name = "tf-test-foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } +} + +resource "google_compute_health_check" "default" { + name 
= "%s" + tcp_health_check { + port = "110" + } +} +`, serviceName, maxConnections, igName, itName, checkName) +} + +func testAccComputeBackendService_withMaxConnectionsPerInstance( + serviceName, igName, itName, checkName string, maxConnectionsPerInstance int64) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + port_name = "http" + protocol = "TCP" + + backend { + group = google_compute_instance_group_manager.foobar.instance_group + max_connections_per_instance = %v + } + + health_checks = [google_compute_health_check.default.self_link] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + base_instance_name = "tf-test-foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } +} + +resource "google_compute_health_check" "default" { + name = "%s" + tcp_health_check { + port = "110" + } +} +`, serviceName, maxConnectionsPerInstance, igName, itName, checkName) +} + + +func testAccComputeBackendService_withMaxConnectionsPerEndpoint( + service, instance, neg, network, check string, maxConnections int64) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + port_name = "http" + protocol = "TCP" + + backend { + group = google_compute_network_endpoint_group.lb-neg.self_link + balancing_mode = "CONNECTION" + max_connections_per_endpoint = %v + } + + health_checks = [google_compute_health_check.default.self_link] +} + +data 
"google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "endpoint-instance" { + name = "%s" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.default.self_link + access_config { + network_tier = "PREMIUM" + } + } +} + +resource "google_compute_network_endpoint_group" "lb-neg" { + name = "%s" + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link + default_port = "90" + zone = "us-central1-a" +} + +resource "google_compute_network_endpoint" "lb-endpoint" { + network_endpoint_group = google_compute_network_endpoint_group.lb-neg.name + + instance = google_compute_instance.endpoint-instance.name + port = google_compute_network_endpoint_group.lb-neg.default_port + ip_address = google_compute_instance.endpoint-instance.network_interface[0].network_ip +} + +resource "google_compute_network" "default" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.self_link +} + +resource "google_compute_health_check" "default" { + name = "%s" + tcp_health_check { + port = "110" + } +} +`, service, maxConnections, instance, neg, network, network, check) +} + +func testAccComputeBackendService_withMaxRatePerEndpoint( + service, instance, neg, network, check string, maxRate float64) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + port_name = "https" + protocol = "HTTPS" + + backend { + group = google_compute_network_endpoint_group.lb-neg.self_link + balancing_mode = "RATE" + max_rate_per_endpoint = %v + } + + health_checks = 
[google_compute_health_check.default.self_link] +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "endpoint-instance" { + name = "%s" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.default.self_link + access_config { + network_tier = "PREMIUM" + } + } +} + +resource "google_compute_network_endpoint_group" "lb-neg" { + name = "%s" + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link + default_port = "90" + zone = "us-central1-a" +} + +resource "google_compute_network_endpoint" "lb-endpoint" { + network_endpoint_group = google_compute_network_endpoint_group.lb-neg.name + + instance = google_compute_instance.endpoint-instance.name + port = google_compute_network_endpoint_group.lb-neg.default_port + ip_address = google_compute_instance.endpoint-instance.network_interface[0].network_ip +} + +resource "google_compute_network" "default" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.self_link +} + +resource "google_compute_health_check" "default" { + name = "%s" + check_interval_sec = 3 + healthy_threshold = 3 + timeout_sec = 2 + unhealthy_threshold = 3 + https_health_check { + port = "443" + } +} +`, service, maxRate, instance, neg, network, network, check) +} + +func testAccComputeBackendService_withCustomHeaders(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + + custom_request_headers = ["Client-Region: {client_region}", "Client-Rtt: {client_rtt_msec}"] + 
custom_response_headers = ["X-Cache-Hit: {cdn_cache_status}", "X-Cache-Id: {cdn_cache_id}"] +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) +} + +{{/* This test is for import functionality. It can be removed and added to examples when this goes GA */}} +func testAccComputeBackendService_internalLoadBalancing(fr, proxy, backend, hc, urlmap string) string { + return fmt.Sprintf(` +resource "google_compute_global_forwarding_rule" "forwarding_rule" { + name = "%s" + target = google_compute_target_http_proxy.default.self_link + port_range = "80" + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + ip_address = "0.0.0.0" +} + +resource "google_compute_target_http_proxy" "default" { + name = "%s" + description = "a description" + url_map = google_compute_url_map.default.self_link + proxy_bind = true +} + +resource "google_compute_backend_service" "backend_service" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + + backend { + group = google_compute_instance_group_manager.foobar.instance_group + balancing_mode = "RATE" + capacity_scaler = 0.4 + max_rate_per_instance = 50 + } + + health_checks = [google_compute_health_check.default.self_link] +} + +resource "google_compute_health_check" "default" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} + +resource "google_compute_url_map" "default" { + name = "%s" + description = "a description" + default_service = google_compute_backend_service.backend_service.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_service = google_compute_backend_service.backend_service.self_link + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.backend_service.self_link + } + } +} + +data 
"google_compute_image" "debian_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "tf-test-igm-internal" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + base_instance_name = "tf-test-foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name_prefix = "tf-test-" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.debian_image.self_link + auto_delete = true + boot = true + } +} +`, fr, proxy, backend, hc, urlmap) +} + +func testAccComputeBackendService_withLogConfig(serviceName, checkName string, sampleRate float64, enabled bool) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + + log_config { + enable = %t + sample_rate = %v + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, enabled, sampleRate, checkName) +} + +func testAccComputeBackendService_withLogConfig2(serviceName, checkName string, enabled bool) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + + log_config { + enable = %t + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, enabled, checkName) +} + +func testAccComputeBackendService_withCompressionMode(serviceName, checkName, compressionMode string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] + enable_cdn 
= true + compression_mode = "%s" +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, compressionMode, checkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeBackendService_regionNegBackend(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "backend" { + name = "tf-test-backend%s" + enable_cdn = true + connection_draining_timeout_sec = 10 + + backend { + group = google_compute_region_network_endpoint_group.cloudrun_neg.id + } +} + +resource "google_compute_region_network_endpoint_group" "cloudrun_neg" { + name = "tf-test-neg%s" + network_endpoint_type = "SERVERLESS" + region = "us-central1" + cloud_run { + service = google_cloud_run_service.cloudrun_neg.name + } +} + +resource "google_cloud_run_service" "cloudrun_neg" { + name = "tf-test-cr%s" + location = "us-central1" + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + } + } + } + + traffic { + percent = 100 + latest_revision = true + } +} + +`, suffix, suffix, suffix) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_disk_async_replication.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_disk_async_replication.go.tmpl new file mode 100644 index 000000000000..1daf655331c6 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_disk_async_replication.go.tmpl @@ -0,0 +1,303 @@ +package compute + +import ( + "fmt" + "regexp" + "time" + "log" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + compute 
"google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeDiskAsyncReplication() *schema.Resource { + return &schema.Resource{ + Create: resourceDiskAsyncReplicationCreate, + Read: resourceDiskAsyncReplicationRead, + Delete: resourceDiskAsyncReplicationDelete, + + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(5 * time.Minute), + Delete: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "primary_disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Primary disk for asynchronous replication.`, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "secondary_disk": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Description: `Secondary disk for asynchronous replication.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Secondary disk for asynchronous replication.`, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `Output-only. 
Status of replication on the secondary disk.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func asyncReplicationGetComputeClient(d *schema.ResourceData, meta interface{}) (*compute.Service, error) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + clientCompute := config.NewComputeClient(userAgent) + return clientCompute, nil +} + +func asyncReplicationGetDiskFromConfig(disk string, d *schema.ResourceData, meta interface{}) (zv *tpgresource.ZonalFieldValue, rv *tpgresource.RegionalFieldValue, resourceId string, err error) { + config := meta.(*transport_tpg.Config) + + var zonalMatch bool + zonalMatch, err = regexp.MatchString(fmt.Sprintf(tpgresource.ZonalLinkBasePattern, "disks"), disk) + if err != nil { + return + } + zv, parseErr := tpgresource.ParseDiskFieldValue(disk, d, config) + if !zonalMatch || parseErr != nil { + rv, err = tpgresource.ParseRegionDiskFieldValue(disk, d, config) + if err != nil { + return + } + var regionalMatch bool + regionalMatch, err = regexp.MatchString(fmt.Sprintf(tpgresource.RegionalLinkBasePattern, "disks"), disk) + if !regionalMatch || err != nil { + err = fmt.Errorf("regional disk expected: %s", disk) + return + } + resourceId = fmt.Sprintf(tpgresource.RegionalLinkTemplate, rv.Project, rv.Region, "disks", rv.Name) + } else { + resourceId = fmt.Sprintf(tpgresource.ZonalLinkTemplate, zv.Project, zv.Zone, "disks", zv.Name) + } + return +} + +func asyncReplicationGetDiskStatus(client *compute.Service, zv *tpgresource.ZonalFieldValue, rv *tpgresource.RegionalFieldValue) (diskStatus *compute.Disk, err error) { + if rv == nil { // Zonal disk + diskStatus, err = client.Disks.Get(zv.Project, zv.Zone, zv.Name).Do() + log.Printf("[DEBUG] Get disk zones/%s/%s: %v", zv.Zone, zv.Name, diskStatus) + } else { + diskStatus, err = client.RegionDisks.Get(rv.Project, rv.Region, rv.Name).Do() + log.Printf("[DEBUG] Get disk 
regions/%s/%s: %v", rv.Region, rv.Name, diskStatus) + } + return +} + +func resourceDiskAsyncReplicationCreate(d *schema.ResourceData, meta interface{}) error { + clientCompute, err := asyncReplicationGetComputeClient(d, meta) + if err != nil { + return err + } + + zv, rv, resourceId, err := asyncReplicationGetDiskFromConfig(d.Get("primary_disk").(string), d, meta) + if err != nil { + return err + } + + secondaryDiskList := d.Get("secondary_disk").([]interface{}) + secondaryDiskMap := secondaryDiskList[0].(map[string]interface{}) + secondaryDisk := secondaryDiskMap["disk"].(string) + if rv == nil { // Zonal disk + replicationRequest := compute.DisksStartAsyncReplicationRequest{ + AsyncSecondaryDisk: secondaryDisk, + } + _, err = clientCompute.Disks.StartAsyncReplication(zv.Project, zv.Zone, zv.Name, &replicationRequest).Do() + if err != nil { + return err + } + } else { + replicationRequest := compute.RegionDisksStartAsyncReplicationRequest{ + AsyncSecondaryDisk: secondaryDisk, + } + _, err = clientCompute.RegionDisks.StartAsyncReplication(rv.Project, rv.Region, rv.Name, &replicationRequest).Do() + if err != nil { + return err + } + } + err = retry.Retry(time.Minute*time.Duration(5), func() *retry.RetryError { + diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) + if err != nil { + return retry.NonRetryableError(err) + } + if diskStatus.ResourceStatus == nil { + return retry.NonRetryableError(fmt.Errorf("no resource status for disk: %s", resourceId)) + } + if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[secondaryDisk]; ok { + if secondaryState.State != "ACTIVE" { + time.Sleep(5 * time.Second) + return retry.RetryableError(fmt.Errorf("secondary disk %s state (%s) is not: ACTIVE", secondaryDisk, secondaryState)) + } + return nil + } + time.Sleep(5 * time.Second) + return retry.RetryableError(fmt.Errorf("secondary disk %s state not available", secondaryDisk)) + }) + if err != nil { + return err + } + d.SetId(resourceId) + 
return resourceDiskAsyncReplicationRead(d, meta) +} + +func resourceDiskAsyncReplicationRead(d *schema.ResourceData, meta interface{}) error { + clientCompute, err := asyncReplicationGetComputeClient(d, meta) + if err != nil { + return err + } + + primaryDisk := d.Get("primary_disk").(string) + if primaryDisk == "" { + primaryDisk = d.Id() + d.Set("primary_disk", primaryDisk) + } + + zv, rv, resourceId, err := asyncReplicationGetDiskFromConfig(primaryDisk, d, meta) + if err != nil { + return err + } + + diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) + if err != nil { + return err + } + + secondaryDisks := make([]map[string]string, 0) + existingSecondaryDisks := make(map[string]bool, 0) + for _, disk := range diskStatus.AsyncSecondaryDisks { + secondaryDisk := make(map[string]string) + + _, _, resourceName, err := asyncReplicationGetDiskFromConfig(disk.AsyncReplicationDisk.Disk, d, meta) + if err != nil { + return err + } + + if diskStatus.ResourceStatus == nil { + return fmt.Errorf("no resource status for disk: %s", resourceId) + } + + secondaryDisk["disk"] = resourceName + existingSecondaryDisks[resourceName] = true + if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[resourceName]; ok { + // Note this might be other than ACTIVE or STOPPED, but we wait for proper state + // on replication start/stop so it shouldnt affect Terraform + log.Printf("[DEBUG] Secondary disk %s is in state: %s", resourceName, secondaryState.State) + secondaryDisk["state"] = secondaryState.State + } + secondaryDisks = append(secondaryDisks, secondaryDisk) + } + + log.Printf("[DEBUG] Secondary disks: %v", secondaryDisks) + if err = d.Set("secondary_disk", secondaryDisks); err != nil { + return fmt.Errorf("Error setting secondary_disk: %s", err) + } + d.SetId(resourceId) + return nil +} + +func resourceDiskAsyncReplicationDelete(d *schema.ResourceData, meta interface{}) error { + clientCompute, err := asyncReplicationGetComputeClient(d, meta) + if 
err != nil { + return err + } + + zv, rv, _, err := asyncReplicationGetDiskFromConfig(d.Get("primary_disk").(string), d, meta) + if err != nil { + return err + } + + var replicationStopped bool = false + secondaryDiskList := d.Get("secondary_disk").([]interface{}) + secondaryDiskMap := secondaryDiskList[0].(map[string]interface{}) + secondaryDisk := secondaryDiskMap["disk"].(string) + _, _, resourceName, err := asyncReplicationGetDiskFromConfig(secondaryDisk, d, meta) + if err != nil { + return err + } + + diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) + if err != nil { + return err + } + + if diskStatus.ResourceStatus == nil { + // Nothing to do, replication not running + return nil + } + + if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[resourceName]; ok { + if secondaryState.State != "STOPPED" { + replicationStopped = true + if rv == nil { // Zonal disk + _, err = clientCompute.Disks.StopAsyncReplication(zv.Project, zv.Zone, zv.Name).Do() + if err != nil { + return err + } + } else { + _, err = clientCompute.RegionDisks.StopAsyncReplication(rv.Project, rv.Region, rv.Name).Do() + if err != nil { + return err + } + } + err = retry.Retry(time.Minute*time.Duration(5), func() *retry.RetryError { + diskStatus, err := asyncReplicationGetDiskStatus(clientCompute, zv, rv) + if err != nil { + return retry.NonRetryableError(err) + } + if secondaryState, ok := diskStatus.ResourceStatus.AsyncSecondaryDisks[resourceName]; ok { + if secondaryState.State != "STOPPED" { + time.Sleep(5 * time.Second) + return retry.RetryableError(fmt.Errorf("secondary disk %s state (%s) is not STOPPED", secondaryDisk, secondaryState)) + } + return nil + } + return retry.NonRetryableError(fmt.Errorf("secondary disk %s state not available", secondaryDisk)) + }) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("could not find secondary disk: %s", secondaryDisk) + } + + if replicationStopped { + // Allow the replication to 
quiescence + time.Sleep(5000 * time.Millisecond) + } + return nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_disk_async_replication_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_disk_async_replication_test.go.tmpl new file mode 100644 index 000000000000..57550d48e989 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_disk_async_replication_test.go.tmpl @@ -0,0 +1,150 @@ +package compute_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func TestAccComputeDiskAsyncReplication(t *testing.T) { + t.Parallel() + + region := envvar.GetTestRegionFromEnv() + if !tpgresource.StringInSlice([]string{"europe-west2", "europe-west1", "us-central1", "us-east1", "us-west1", "us-east4", "asia-east1", "australia-southeast1"}, region) { + return + } + secondaryRegion := region + switch region { + case "europe-west2": + secondaryRegion = "europe-west1" + case "europe-west1": + secondaryRegion = "europe-west2" + case "us-central1": + secondaryRegion = "us-east1" + case "us-east1", "us-west1", "us-east4": + secondaryRegion = "us-central1" + case "asia-east1": + secondaryRegion = "asia-southeast1" + case "asia-southeast1": + secondaryRegion = "asia-east1" + case "australia-southeast1": + secondaryRegion = "australia-southeast2" + case "australia-southeast2": + secondaryRegion = "australia-southeast1" + } + + primaryDisk := fmt.Sprintf("tf-test-disk-primary-%s", acctest.RandString(t, 10)) + secondaryDisk := fmt.Sprintf("tf-test-disk-secondary-%s", acctest.RandString(t, 10)) + primaryRegionalDisk := fmt.Sprintf("tf-test-disk-rprimary-%s", acctest.RandString(t, 10)) + secondaryRegionalDisk := 
fmt.Sprintf("tf-test-disk-rsecondary-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeDiskAsyncReplication_basicZonal(region, secondaryRegion, primaryDisk, secondaryDisk), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk_async_replication.replication", "secondary_disk.0.state", "ACTIVE"), + ), + }, + { + ResourceName: "google_compute_disk_async_replication.replication", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeDiskAsyncReplication_basicRegional(region, secondaryRegion, primaryRegionalDisk, secondaryRegionalDisk), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk_async_replication.replication", "secondary_disk.0.state", "ACTIVE"), + ), + }, + { + ResourceName: "google_compute_disk_async_replication.replication", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeDiskAsyncReplication_basicZonal(region, secondaryRegion, primaryDisk, secondaryDisk string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "primary" { + zone = "%s-a" + name = "%s" + type = "pd-ssd" + + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "secondary" { + name = "%s" + type = "pd-ssd" + zone = "%s-b" + + async_primary_disk { + disk = google_compute_disk.primary.id + } + + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk_async_replication" "replication" { + primary_disk = google_compute_disk.primary.id + + secondary_disk { + disk = google_compute_disk.secondary.id + } +} +`, region, primaryDisk, secondaryDisk, secondaryRegion) +} + +func testAccComputeDiskAsyncReplication_basicRegional(region, secondaryRegion, primaryDisk, secondaryDisk string) string { + return fmt.Sprintf(` 
+resource "google_compute_region_disk" "primary" { + region = "%s" + name = "%s" + type = "pd-ssd" + + physical_block_size_bytes = 4096 + + replica_zones = [ + "%s-a", + "%s-b" + ] +} + +resource "google_compute_region_disk" "secondary" { + region = "%s" + name = "%s" + type = "pd-ssd" + + async_primary_disk { + disk = google_compute_region_disk.primary.id + } + + physical_block_size_bytes = 4096 + + replica_zones = [ + "%s-b", + "%s-c" + ] +} + +resource "google_compute_disk_async_replication" "replication" { + primary_disk = google_compute_region_disk.primary.id + + secondary_disk { + disk = google_compute_region_disk.secondary.id + } +} +`, region, primaryDisk, region, region, secondaryRegion, secondaryDisk, secondaryRegion, secondaryRegion) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_disk_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_disk_test.go.tmpl new file mode 100644 index 000000000000..07ece3dde962 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_disk_test.go.tmpl @@ -0,0 +1,1656 @@ +package compute_test + +import ( + "fmt" + "net/http" + "os" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestDiskImageDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + // Full & partial links + "matching self_link with different api version": { + 
Old: "https://www.googleapis.com/compute/beta/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + ExpectDiffSuppress: true, + }, + "matching image partial self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "projects/debian-cloud/global/images/debian-8-jessie-v20171213", + ExpectDiffSuppress: true, + }, + "matching image partial no project self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "global/images/debian-8-jessie-v20171213", + ExpectDiffSuppress: true, + }, + "different image self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-7-jessie-v20171213", + ExpectDiffSuppress: false, + }, + "different image partial self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "projects/debian-cloud/global/images/debian-7-jessie-v20171213", + ExpectDiffSuppress: false, + }, + "different image partial no project self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "global/images/debian-7-jessie-v20171213", + ExpectDiffSuppress: false, + }, + // Image name + "matching image name": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "debian-8-jessie-v20171213", + ExpectDiffSuppress: true, + }, + "different image name": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "debian-7-jessie-v20171213", + ExpectDiffSuppress: false, + }, + // Image short hand + 
"matching image short hand": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "debian-cloud/debian-8-jessie-v20171213", + ExpectDiffSuppress: true, + }, + "matching image short hand but different project": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "different-cloud/debian-8-jessie-v20171213", + ExpectDiffSuppress: false, + }, + "different image short hand": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "debian-cloud/debian-7-jessie-v20171213", + ExpectDiffSuppress: false, + }, + // Image Family + "matching image family": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "family/debian-8", + ExpectDiffSuppress: true, + }, + "matching image family self link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/family/debian-8", + ExpectDiffSuppress: true, + }, + "matching unconventional image family self link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20180122", + New: "https://www.googleapis.com/compute/v1/projects/projects/ubuntu-os-cloud/global/images/family/ubuntu-1404-lts", + ExpectDiffSuppress: true, + }, + "matching image family partial self link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "projects/debian-cloud/global/images/family/debian-8", + ExpectDiffSuppress: true, + }, + "matching unconventional image family partial self link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20180122", + New: 
"projects/ubuntu-os-cloud/global/images/family/ubuntu-1404-lts", + ExpectDiffSuppress: true, + }, + "matching image family partial no project self link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "global/images/family/debian-8", + ExpectDiffSuppress: true, + }, + "matching image family short hand": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "debian-cloud/debian-8", + ExpectDiffSuppress: true, + }, + "matching image family short hand with project short name": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "debian/debian-8", + ExpectDiffSuppress: true, + }, + "matching unconventional image family short hand": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1404-trusty-v20180122", + New: "ubuntu-os-cloud/ubuntu-1404-lts", + ExpectDiffSuppress: true, + }, + "matching unconventional image family - minimal": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-1804-bionic-v20180705", + New: "ubuntu-minimal-1804-lts", + ExpectDiffSuppress: true, + }, + "matching unconventional image family - cos": { + Old: "https://www.googleapis.com/compute/v1/projects/cos-cloud/global/images/cos-85-13310-1209-17", + New: "cos-85-lts", + ExpectDiffSuppress: true, + }, + "different image family": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "family/debian-7", + ExpectDiffSuppress: false, + }, + "different image family self link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/family/debian-7", + ExpectDiffSuppress: false, + }, + "different image family 
partial self link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "projects/debian-cloud/global/images/family/debian-7", + ExpectDiffSuppress: false, + }, + "different image family partial no project self link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "global/images/family/debian-7", + ExpectDiffSuppress: false, + }, + "matching image family but different project in self link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "https://www.googleapis.com/compute/v1/projects/other-cloud/global/images/family/debian-8", + ExpectDiffSuppress: false, + }, + "different image family but different project in partial self link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "projects/other-cloud/global/images/family/debian-8", + ExpectDiffSuppress: false, + }, + "different image family short hand": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "debian-cloud/debian-7", + ExpectDiffSuppress: false, + }, + "matching image family shorthand but different project": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-8-jessie-v20171213", + New: "different-cloud/debian-8", + ExpectDiffSuppress: false, + }, + // arm images + "matching image opensuse arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-leap-15-4-v20220713-arm64", + New: "opensuse-leap-arm64", + ExpectDiffSuppress: true, + }, + "matching image sles arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-15-sp4-v20220713-arm64", + New: "sles-15-arm64", + ExpectDiffSuppress: true, + }, + "matching image ubuntu 
arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1804-bionic-arm64-v20220712", + New: "ubuntu-1804-lts-arm64", + ExpectDiffSuppress: true, + }, + "matching image ubuntu-minimal arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2004-focal-arm64-v20220713", + New: "ubuntu-minimal-2004-lts-arm64", + ExpectDiffSuppress: true, + }, + "matching image debian arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-11-bullseye-arm64-v20220719", + New: "debian-11-arm64", + ExpectDiffSuppress: true, + }, + "different architecture image opensuse arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-leap-15-4-v20220713-arm64", + New: "opensuse-leap", + ExpectDiffSuppress: false, + }, + "different architecture image sles arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-15-sp4-v20220713-arm64", + New: "sles-15", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1804-bionic-arm64-v20220712", + New: "ubuntu-1804-lts", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu-minimal arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2004-focal-arm64-v20220713", + New: "ubuntu-minimal-2004-lts", + ExpectDiffSuppress: false, + }, + "different architecture image debian arm64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-11-bullseye-arm64-v20220719", + New: "debian-11", + ExpectDiffSuppress: false, + }, + "different architecture image opensuse arm64 family": { + Old: 
"https://www.googleapis.com/compute/v1/projects/opensuse-cloud/global/images/opensuse-leap-15-2-v20200702", + New: "opensuse-leap-arm64", + ExpectDiffSuppress: false, + }, + "different architecture image sles arm64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/suse-cloud/global/images/sles-15-sp4-v20220722-x86-64", + New: "sles-15-arm64", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu arm64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-1804-bionic-v20220712", + New: "ubuntu-1804-lts-arm64", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu-minimal arm64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2004-focal-v20220713", + New: "ubuntu-minimal-2004-lts-arm64", + ExpectDiffSuppress: false, + }, + "different architecture image debian arm64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/debian-11-bullseye-v20220719", + New: "debian-11-arm64", + ExpectDiffSuppress: false, + }, + // amd images + "matching image ubuntu amd64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2210-kinetic-amd64-v20221022", + New: "ubuntu-2210-amd64", + ExpectDiffSuppress: true, + }, + "matching image ubuntu-minimal amd64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2210-kinetic-amd64-v20221022", + New: "ubuntu-minimal-2210-amd64", + ExpectDiffSuppress: true, + }, + "matching image ubuntu amd64 canonical lts self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2404-noble-amd64-v20240423", + New: "ubuntu-2404-lts-amd64", + ExpectDiffSuppress: true, + }, + "matching image ubuntu minimal amd64 canonical lts self_link": { + Old: 
"https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2404-noble-amd64-v20240423", + New: "ubuntu-minimal-2404-lts-amd64", + ExpectDiffSuppress: true, + }, + "different architecture image ubuntu amd64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2210-kinetic-amd64-v20221022", + New: "ubuntu-2210", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu-minimal amd64 self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2210-kinetic-amd64-v20221022", + New: "ubuntu-minimal-2210", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu amd64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2210-kinetic-v20221022", + New: "ubuntu-2210-amd64", + ExpectDiffSuppress: false, + }, + "different architecture image ubuntu-minimal amd64 family": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2210-kinetic-v20221022", + New: "ubuntu-minimal-2210-amd64", + ExpectDiffSuppress: false, + }, + "different image ubuntu amd64 canonical lts self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2404-noble-amd64-v20240423", + New: "ubuntu-2404-lts", + ExpectDiffSuppress: false, + }, + "different image ubuntu minimal amd64 canonical lts self_link": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2404-noble-amd64-v20240423", + New: "ubuntu-minimal-2404-lts", + ExpectDiffSuppress: false, + }, + "different image ubuntu amd64 canonical lts family": { + Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-2404-noble-v20240423", + New: "ubuntu-2404-lts-amd64", + ExpectDiffSuppress: false, + }, + "different image ubuntu minimal amd64 canonical lts family": { + 
Old: "https://www.googleapis.com/compute/v1/projects/ubuntu-os-cloud/global/images/ubuntu-minimal-2404-noble-v20240423", + New: "ubuntu-minimal-2404-lts-amd64", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + tc := tc + t.Run(tn, func(t *testing.T) { + t.Parallel() + if tpgcompute.DiskImageDiffSuppress("image", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("%q => %q expect DiffSuppress to return %t", tc.Old, tc.New, tc.ExpectDiffSuppress) + } + }) + } +} + +// Test that all the naming pattern for public images are supported. +func TestAccComputeDisk_imageDiffSuppressPublicVendorsFamilyNames(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + + config := getInitializedConfig(t) + + for _, publicImageProject := range tpgcompute.ImageMap { + token := "" + // Hard limit on number of pages to prevent infinite loops + // caused by the API always returning a pagination token + page := 0 + maxPages := 10 + for paginate := true; paginate && page < maxPages; { + resp, err := config.NewComputeClient(config.UserAgent).Images.List(publicImageProject).Filter("deprecated.replacement ne .*images.*").PageToken(token).Do() + if err != nil { + t.Fatalf("Can't list public images for project %q", publicImageProject) + } + + for _, image := range resp.Items { + if !tpgcompute.DiskImageDiffSuppress("image", image.SelfLink, "family/"+image.Family, nil) { + t.Errorf("should suppress diff for image %q and family %q", image.SelfLink, image.Family) + } + } + token := resp.NextPageToken + paginate = token != "" + page++ + } + } +} + +func TestAccComputeDisk_update(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + diskType := "pd-ssd" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: 
// TestAccComputeDisk_update applies a basic disk config, then an updated
// one, verifying import after each apply. Label attributes are excluded
// from import verification.
func TestAccComputeDisk_update(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	diskType := "pd-ssd"

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_basic(diskName, diskType),
			},
			{
				ResourceName:            "google_compute_disk.foobar",
				ImportState:             true,
				ImportStateVerify:       true,
				ImportStateVerifyIgnore: []string{"labels", "terraform_labels"},
			},
			{
				Config: testAccComputeDisk_updated(diskName, diskType),
			},
			{
				ResourceName:            "google_compute_disk.foobar",
				ImportState:             true,
				ImportStateVerify:       true,
				ImportStateVerifyIgnore: []string{"labels", "terraform_labels"},
			},
		},
	})
}

// TestAccComputeDisk_fromTypeUrl creates a disk whose type is given as a
// full project-scoped URL rather than a bare type name.
func TestAccComputeDisk_fromTypeUrl(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	diskType := fmt.Sprintf("projects/%s/zones/us-central1-a/diskTypes/pd-ssd", envvar.GetTestProjectFromEnv())

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_basic(diskName, diskType),
			},
			{
				ResourceName:            "google_compute_disk.foobar",
				ImportState:             true,
				ImportStateVerify:       true,
				ImportStateVerifyIgnore: []string{"labels", "terraform_labels"},
			},
		},
	})
}

// TestAccComputeDisk_pdHyperDiskProvisionedIopsLifeCycle walks a hyperdisk
// through three applies: baseline IOPS, changed IOPS with the same
// lifecycle flag, then the same IOPS with the flag flipped. Each context
// reuses the first random suffix so all steps target one disk.
func TestAccComputeDisk_pdHyperDiskProvisionedIopsLifeCycle(t *testing.T) {
	t.Parallel()

	context_1 := map[string]interface{}{
		"random_suffix":    acctest.RandString(t, 10),
		"provisioned_iops": 10000,
		"lifecycle_bool":   true,
	}
	context_2 := map[string]interface{}{
		"random_suffix":    context_1["random_suffix"],
		"provisioned_iops": 11000,
		"lifecycle_bool":   true,
	}
	context_3 := map[string]interface{}{
		"random_suffix":    context_1["random_suffix"],
		"provisioned_iops": 11000,
		"lifecycle_bool":   false,
	}

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckComputeDiskDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_pdHyperDiskProvisionedIopsLifeCycle(context_1),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				Config: testAccComputeDisk_pdHyperDiskProvisionedIopsLifeCycle(context_2),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				Config: testAccComputeDisk_pdHyperDiskProvisionedIopsLifeCycle(context_3),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// TestAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle mirrors the
// IOPS lifecycle test above for the provisioned-throughput attribute
// (180 -> 20 -> 20 with the lifecycle flag flipped on the last step).
func TestAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(t *testing.T) {
	t.Parallel()

	context_1 := map[string]interface{}{
		"random_suffix":          acctest.RandString(t, 10),
		"provisioned_throughput": 180,
		"lifecycle_bool":         true,
	}
	context_2 := map[string]interface{}{
		"random_suffix":          context_1["random_suffix"],
		"provisioned_throughput": 20,
		"lifecycle_bool":         true,
	}
	context_3 := map[string]interface{}{
		"random_suffix":          context_1["random_suffix"],
		"provisioned_throughput": 20,
		"lifecycle_bool":         false,
	}

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckComputeDiskDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(context_1),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				Config: testAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(context_2),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				Config: testAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(context_3),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
// TestAccComputeDisk_fromSnapshot creates a disk, snapshots it, and builds
// a second disk from that snapshot — once referencing the snapshot by
// self_link and once by name.
func TestAccComputeDisk_fromSnapshot(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	firstDiskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	projectName := envvar.GetTestProjectFromEnv()

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckComputeDiskDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_fromSnapshot(projectName, firstDiskName, snapshotName, diskName, "self_link"),
			},
			{
				ResourceName:      "google_compute_disk.seconddisk",
				ImportState:       true,
				ImportStateVerify: true,
			},
			{
				Config: testAccComputeDisk_fromSnapshot(projectName, firstDiskName, snapshotName, diskName, "name"),
			},
			{
				ResourceName:      "google_compute_disk.seconddisk",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// TestAccComputeDisk_encryption creates a disk with a customer-supplied
// encryption key and asserts the API reports a matching key digest.
func TestAccComputeDisk_encryption(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	var disk compute.Disk

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckComputeDiskDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_encryption(diskName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeDiskExists(
						t, "google_compute_disk.foobar", envvar.GetTestProjectFromEnv(), &disk),
					testAccCheckEncryptionKey(
						t, "google_compute_disk.foobar", &disk),
				),
			},
		},
	})
}

// TestAccComputeDisk_encryptionKMS creates a disk encrypted with a
// bootstrapped Cloud KMS key and verifies both the key check and import
// via an explicit {project}/{zone}/{name} import ID.
func TestAccComputeDisk_encryptionKMS(t *testing.T) {
	t.Parallel()

	kms := acctest.BootstrapKMSKey(t)
	pid := envvar.GetTestProjectFromEnv()
	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	importID := fmt.Sprintf("%s/%s/%s", pid, "us-central1-a", diskName)
	var disk compute.Disk

	// Granting the compute service agent KMS access invalidates this run;
	// the retried run will then find the role already in place.
	if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") {
		t.Fatal("Stopping the test because a role was added to the policy.")
	}

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckComputeDiskDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_encryptionKMS(diskName, kms.CryptoKey.Name),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeDiskExists(
						t, "google_compute_disk.foobar", pid, &disk),
					testAccCheckEncryptionKey(
						t, "google_compute_disk.foobar", &disk),
				),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportStateId:     importID,
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// TestAccComputeDisk_pdHyperDiskEnableConfidentialCompute creates a
// confidential-compute hyperdisk encrypted with a regional KMS key.
func TestAccComputeDisk_pdHyperDiskEnableConfidentialCompute(t *testing.T) {
	t.Parallel()

	context := map[string]interface{}{
		"random_suffix": acctest.RandString(t, 10),
		"kms": acctest.BootstrapKMSKeyWithPurposeInLocationAndName(
			t,
			"ENCRYPT_DECRYPT",
			"us-central1",
			"tf-bootstrap-hyperdisk-key1").CryptoKey.Name, // regional KMS key
		"disk_size":            64,
		"confidential_compute": true,
	}

	var disk compute.Disk

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckComputeDiskDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_pdHyperDiskEnableConfidentialCompute(context),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeDiskExists(
						t, "google_compute_disk.foobar", envvar.GetTestProjectFromEnv(), &disk),
					testAccCheckEncryptionKey(
						t, "google_compute_disk.foobar", &disk),
				),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
// TestAccComputeDisk_deleteDetach attaches a disk to an instance and
// verifies the disk resource behaves correctly when the attachment is
// visible in state.
func TestAccComputeDisk_deleteDetach(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckComputeDiskDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_deleteDetach(instanceName, diskName),
			},
			{
				ResourceName:      "google_compute_disk.foo",
				ImportState:       true,
				ImportStateVerify: true,
			},
			// this needs to be a second step so we refresh and see the instance
			// listed as attached to the disk; the instance is created after the
			// disk. and the disk's properties aren't refreshed unless there's
			// another step
			{
				Config: testAccComputeDisk_deleteDetach(instanceName, diskName),
			},
			{
				ResourceName:      "google_compute_disk.foo",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// TestAccComputeDisk_deleteDetachIGM is the managed-instance-group variant
// of the delete/detach test; renaming the disk mid-test forces instance
// recreation through the IGM.
func TestAccComputeDisk_deleteDetachIGM(t *testing.T) {
	// Randomness in instance template
	acctest.SkipIfVcr(t)
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	diskName2 := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	mgrName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		CheckDestroy:             testAccCheckComputeDiskDestroyProducer(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_deleteDetachIGM(diskName, mgrName),
			},
			{
				ResourceName:      "google_compute_disk.foo",
				ImportState:       true,
				ImportStateVerify: true,
			},
			// this needs to be a second step so we refresh and see the instance
			// listed as attached to the disk; the instance is created after the
			// disk. and the disk's properties aren't refreshed unless there's
			// another step
			{
				Config: testAccComputeDisk_deleteDetachIGM(diskName, mgrName),
			},
			{
				ResourceName:      "google_compute_disk.foo",
				ImportState:       true,
				ImportStateVerify: true,
			},
			// Change the disk name to recreate the instances
			{
				Config: testAccComputeDisk_deleteDetachIGM(diskName2, mgrName),
			},
			{
				ResourceName:      "google_compute_disk.foo",
				ImportState:       true,
				ImportStateVerify: true,
			},
			// Add the extra step like before
			{
				Config: testAccComputeDisk_deleteDetachIGM(diskName2, mgrName),
			},
			{
				ResourceName:      "google_compute_disk.foo",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

// TestAccComputeDisk_pdExtremeImplicitProvisionedIops creates a pd-extreme
// disk without specifying provisioned IOPS, relying on the API default.
func TestAccComputeDisk_pdExtremeImplicitProvisionedIops(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_pdExtremeImplicitProvisionedIops(diskName),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}

{{ if ne $.TargetVersionName `ga` -}}
// TestAccComputeDisk_resourcePolicies attaches a resource policy to a disk
// (beta-only feature, hence the template gate).
func TestAccComputeDisk_resourcePolicies(t *testing.T) {
	t.Parallel()

	diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10))
	policyName := fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10))

	acctest.VcrTest(t, resource.TestCase{
		PreCheck:                 func() { acctest.AccTestPreCheck(t) },
		ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t),
		Steps: []resource.TestStep{
			{
				Config: testAccComputeDisk_resourcePolicies(diskName, policyName),
			},
			{
				ResourceName:      "google_compute_disk.foobar",
				ImportState:       true,
				ImportStateVerify: true,
			},
		},
	})
}
{{- end }}
t.Parallel() + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_multiWriter(instanceName, diskName, true), + }, + { + ResourceName: "google_compute_disk.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func testAccCheckComputeDiskExists(t *testing.T, n, p string, disk *compute.Disk) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewComputeClient(config.UserAgent).Disks.Get( + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.Attributes["name"] { + return fmt.Errorf("Disk not found") + } + + *disk = *found + + return nil + } +} + +func testAccCheckEncryptionKey(t *testing.T, n string, disk *compute.Disk) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + attr := rs.Primary.Attributes["disk_encryption_key.0.sha256"] + if disk.DiskEncryptionKey == nil { + return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v\nGCP State: ", n, attr) + } else if attr != disk.DiskEncryptionKey.Sha256 { + return fmt.Errorf("Disk %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, disk.DiskEncryptionKey.Sha256) + } + return nil + } +} + +func TestAccComputeDisk_cloneDisk(t *testing.T) { + t.Parallel() + pid := envvar.GetTestProjectFromEnv() + diskName := 
fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + var disk compute.Disk + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeDiskDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_diskClone(diskName, "self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeDiskExists( + t, "google_compute_disk.disk-clone", pid, &disk), + ), + }, + { + ResourceName: "google_compute_disk.disk-clone", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeDisk_featuresUpdated(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_features(diskName), + }, + { + ResourceName: "google_compute_disk.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccComputeDisk_featuresUpdated(diskName), + }, + { + ResourceName: "google_compute_disk.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + + +func testAccComputeDisk_basic(diskName string, diskType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "%s" + zone = "us-central1-a" + labels = { + my-label = "my-label-value" + } +} +`, diskName, diskType) +} + +func 
testAccComputeDisk_updated(diskName string, diskType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 100 + type = "%s" + zone = "us-central1-a" + labels = { + my-label = "my-updated-label-value" + a-new-label = "a-new-label-value" + } +} +`, diskName, diskType) +} + +func testAccComputeDisk_fromSnapshot(projectName, firstDiskName, snapshotName, diskName, ref_selector string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s-d1" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + project = "%s" +} + +resource "google_compute_snapshot" "snapdisk" { + name = "%s" + source_disk = google_compute_disk.foobar.name + zone = "us-central1-a" + project = "%s" +} + +resource "google_compute_disk" "seconddisk" { + name = "%s-d2" + snapshot = google_compute_snapshot.snapdisk.%s + type = "pd-ssd" + zone = "us-central1-a" + project = "%s" +} +`, firstDiskName, projectName, snapshotName, projectName, diskName, ref_selector, projectName) +} + +func testAccComputeDisk_encryption(diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } +} +`, diskName) +} + +func testAccComputeDisk_encryptionKMS(diskName, kmsKey string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + 
+resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + + disk_encryption_key { + kms_key_self_link = "%s" + } +} +`, diskName, kmsKey) +} + +func testAccComputeDisk_deleteDetach(instanceName, diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foo" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "bar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + attached_disk { + source = google_compute_disk.foo.self_link + } + + network_interface { + network = "default" + } +} +`, diskName, instanceName) +} + +func testAccComputeDisk_deleteDetachIGM(diskName, mgrName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foo" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "template" { + machine_type = "g1-small" + + disk { + boot = true + source = google_compute_disk.foo.name + auto_delete = false + } + + network_interface { + network = "default" + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_instance_group_manager" "manager" { + name = "%s" + base_instance_name = "tf-test-disk-igm" + version { + instance_template = google_compute_instance_template.template.self_link + name = "primary" + } + update_policy { + minimal_action = "RESTART" + type = "PROACTIVE" + max_unavailable_fixed = 1 + } + zone = "us-central1-a" + 
target_size = 1 + + // block on instances being ready so that when they get deleted, we don't try + // to continue interacting with them in other resources + wait_for_instances = true +} +`, diskName, mgrName) +} + +func testAccComputeDisk_pdHyperDiskEnableConfidentialCompute(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_disk" "foobar" { + name = "tf-test-ecc-%{random_suffix}" + size = %{disk_size} + type = "hyperdisk-balanced" + zone = "us-central1-a" + enable_confidential_compute = %{confidential_compute} + + disk_encryption_key { + kms_key_self_link = "%{kms}" + } + + } +`, context) +} + + +func testAccComputeDisk_pdHyperDiskProvisionedIopsLifeCycle(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_disk" "foobar" { + name = "tf-test-hyperdisk-%{random_suffix}" + type = "hyperdisk-extreme" + provisioned_iops = %{provisioned_iops} + size = 64 + lifecycle { + prevent_destroy = %{lifecycle_bool} + } + } +`, context) +} + +func testAccComputeDisk_pdHyperDiskProvisionedThroughputLifeCycle(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_disk" "foobar" { + name = "tf-test-hyperdisk-%{random_suffix}" + type = "hyperdisk-throughput" + zone = "us-east4-c" + provisioned_throughput = %{provisioned_throughput} + size = 2048 + lifecycle { + prevent_destroy = %{lifecycle_bool} + } + } +`, context) +} + + +func testAccComputeDisk_pdExtremeImplicitProvisionedIops(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + type = "pd-extreme" + size = 1 +} +`, diskName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeDisk_resourcePolicies(diskName, policyName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_resource_policy" "foo" { + name = "%s" + region = 
"us-central1" + snapshot_schedule_policy { + schedule { + daily_schedule { + days_in_cycle = 1 + start_time = "04:00" + } + } + } +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + resource_policies = [google_compute_resource_policy.foo.self_link] +} +`, policyName, diskName) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeDisk_multiWriter(instance string, diskName string, enableMultiwriter bool) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + multi_writer = %t +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n2-standard-2" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + attached_disk { + source = google_compute_disk.foobar.name + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, diskName, enableMultiwriter, instance) +} +{{- end }} + +func testAccComputeDisk_diskClone(diskName, refSelector string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + labels = { + my-label = "my-label-value" + } + } + + resource "google_compute_disk" "disk-clone" { + name = "%s" + source_disk = google_compute_disk.foobar.%s + type = "pd-ssd" + zone = "us-central1-a" + labels = { + my-label = "my-label-value" + } + } +`, diskName, diskName+"-clone", refSelector) +} + +func 
TestAccComputeDisk_encryptionWithRSAEncryptedKey(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var disk compute.Disk + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeDiskDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_encryptionWithRSAEncryptedKey(diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeDiskExists( + t, "google_compute_disk.foobar-1", envvar.GetTestProjectFromEnv(), &disk), + testAccCheckEncryptionKey( + t, "google_compute_disk.foobar-1", &disk), + ), + }, + }, + }) +} + +func testAccComputeDisk_encryptionWithRSAEncryptedKey(diskName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar-1" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + rsa_encrypted_key = "fB6BS8tJGhGVDZDjGt1pwUo2wyNbkzNxgH1avfOtiwB9X6oPG94gWgenygitnsYJyKjdOJ7DyXLmxwQOSmnCYCUBWdKCSssyLV5907HL2mb5TfqmgHk5JcArI/t6QADZWiuGtR+XVXqiLa5B9usxFT2BTmbHvSKfkpJ7McCNc/3U0PQR8euFRZ9i75o/w+pLHFMJ05IX3JB0zHbXMV173PjObiV3ItSJm2j3mp5XKabRGSA5rmfMnHIAMz6stGhcuom6+bMri2u/axmPsdxmC6MeWkCkCmPjaKsVz1+uQUNCJkAnzesluhoD+R6VjFDm4WI7yYabu4MOOAOTaQXdEg==" + } +} +`, diskName) +} + +func testAccComputeDisk_features(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + labels = { + my-label = "my-label-value" + } + + guest_os_features { + type = "SECURE_BOOT" + } +} +`, diskName) +} + +func testAccComputeDisk_featuresUpdated(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + 
size = 50 + type = "pd-ssd" + zone = "us-central1-a" + labels = { + my-label = "my-label-value" + } + + guest_os_features { + type = "SECURE_BOOT" + } + + guest_os_features { + type = "MULTI_IP_SUBNET" + } +} +`, diskName) +} + +func TestAccComputeDisk_attributionLabelOnCreation(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_attributionLabel(diskName, "true", "CREATION_ONLY"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.%", "1"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.user-label", "foo"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.goog-terraform-provisioned", "true"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.user-label", "foo"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "effective_labels.%", "2"), + ), + }, + { + Config: testAccComputeDisk_attributionLabelUpdated(diskName, "true", "CREATION_ONLY"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.%", "1"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.user-label", "bar"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.goog-terraform-provisioned", "true"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.user-label", "bar"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "effective_labels.%", "2"), + 
), + }, + }, + }) +} + +func TestAccComputeDisk_attributionLabelOnCreationSkip(t *testing.T) { + // VCR tests cache provider configuration between steps, this test changes provider configuration and fails under VCR. + acctest.SkipIfVcr(t) + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_attributionLabel(diskName, "false", "CREATION_ONLY"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.%", "1"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.user-label", "foo"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.%", "1"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.user-label", "foo"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "effective_labels.%", "1"), + ), + }, + { + Config: testAccComputeDisk_attributionLabelUpdated(diskName, "true", "CREATION_ONLY"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.%", "1"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.user-label", "bar"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.%", "1"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.user-label", "bar"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "effective_labels.%", "1"), + ), + }, + }, + }) +} + +func TestAccComputeDisk_attributionLabelProactive(t *testing.T) { + // VCR tests cache provider configuration between steps, this test changes provider configuration and fails under VCR. 
+ acctest.SkipIfVcr(t) + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_attributionLabel(diskName, "false", "PROACTIVE"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.%", "1"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.user-label", "foo"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.%", "1"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.user-label", "foo"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "effective_labels.%", "1"), + ), + }, + { + Config: testAccComputeDisk_attributionLabelUpdated(diskName, "true", "PROACTIVE"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.%", "1"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "labels.user-label", "bar"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.goog-terraform-provisioned", "true"), + resource.TestCheckResourceAttr("google_compute_disk.foobar", "terraform_labels.user-label", "bar"), + + resource.TestCheckResourceAttr("google_compute_disk.foobar", "effective_labels.%", "2"), + ), + }, + }, + }) +} + +func testAccComputeDisk_attributionLabel(diskName, add, strategy string) string { + return fmt.Sprintf(` +provider "google" { + add_terraform_attribution_label = %s + terraform_attribution_label_addition_strategy = %q +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = 
"%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + labels = { + user-label = "foo" + } +} +`, add, strategy, diskName) +} + +func testAccComputeDisk_attributionLabelUpdated(diskName, add, strategy string) string { + return fmt.Sprintf(` +provider "google" { + add_terraform_attribution_label = %s + terraform_attribution_label_addition_strategy = %q +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + labels = { + user-label = "bar" + } +} +`, add, strategy, diskName) +} + +func TestAccComputeDisk_storagePoolSpecified(t *testing.T) { + t.Parallel() + + storagePoolName := fmt.Sprintf("tf-test-storage-pool-%s", acctest.RandString(t, 10)) + storagePoolUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePools/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), storagePoolName) + diskName := fmt.Sprintf("tf-test-disk-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + PreConfig: setupTestingStoragePool(t, storagePoolName), + Config: testAccComputeDisk_storagePoolSpecified(diskName, storagePoolUrl), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_disk.foobar", "storage_pool", storagePoolName), + ), + }, + { + ResourceName: "google_compute_disk.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) + + cleanupTestingStoragePool(t, storagePoolName) +} + +func setupTestingStoragePool(t *testing.T, storagePoolName string) func() { + return func() { + config := acctest.GoogleProviderConfig(t) + headers := make(http.Header) + project := 
envvar.GetTestProjectFromEnv() + zone := envvar.GetTestZoneFromEnv() + url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools", config.ComputeBasePath, project, zone) + storagePoolTypeUrl := fmt.Sprintf("/projects/%s/zones/%s/storagePoolTypes/hyperdisk-throughput", project, zone) + defaultTimeout := 20 * time.Minute + obj := make(map[string]interface{}) + obj["name"] = storagePoolName + obj["poolProvisionedCapacityGb"] = 10240 + obj["poolProvisionedThroughput"] = 180 + obj["storagePoolType"] = storagePoolTypeUrl + obj["capacityProvisioningType"] = "ADVANCED" + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: config.UserAgent, + Body: obj, + Timeout: defaultTimeout, + Headers: headers, + }) + if err != nil { + t.Errorf("Error creating StoragePool: %s", err) + } + + err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Creating StoragePool", config.UserAgent, defaultTimeout) + if err != nil { + t.Errorf("Error waiting to create StoragePool: %s", err) + } + } +} + +func cleanupTestingStoragePool(t *testing.T, storagePoolName string) { + config := acctest.GoogleProviderConfig(t) + headers := make(http.Header) + project := envvar.GetTestProjectFromEnv() + zone := envvar.GetTestZoneFromEnv() + url := fmt.Sprintf("%sprojects/%s/zones/%s/storagePools/%s", config.ComputeBasePath, project, zone, storagePoolName) + defaultTimeout := 20 * time.Minute + var obj map[string]interface{} + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: url, + UserAgent: config.UserAgent, + Body: obj, + Timeout: defaultTimeout, + Headers: headers, + }) + if err != nil { + t.Errorf("Error deleting StoragePool: %s", err) + } + + err = tpgcompute.ComputeOperationWaitTime(config, res, project, "Deleting StoragePool", config.UserAgent, defaultTimeout) + if err != nil { + t.Errorf("Error 
waiting to delete StoragePool: %s", err) + } +} + +func testAccComputeDisk_storagePoolSpecified(diskName, storagePoolUrl string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "foobar" { + name = "%s" + type = "hyperdisk-throughput" + size = 2048 + provisioned_throughput = 140 + storage_pool = "%s" +} +`, diskName, storagePoolUrl) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_policy_rule_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_policy_rule_test.go new file mode 100644 index 000000000000..51954650ed5d --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_policy_rule_test.go @@ -0,0 +1,695 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccComputeFirewallPolicyRule_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_name": fmt.Sprintf("organizations/%s", envvar.GetTestOrgFromEnv(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewallPolicyRule_start(context), + }, + { + ResourceName: "google_compute_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: testAccComputeFirewallPolicyRule_update(context), + }, + { + ResourceName: "google_compute_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: 
[]string{"firewall_policy", "target_resources"}, + }, + { + Config: testAccComputeFirewallPolicyRule_removeConfigs(context), + }, + { + ResourceName: "google_compute_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy", "target_resources"}, + }, + { + Config: testAccComputeFirewallPolicyRule_start(context), + }, + { + ResourceName: "google_compute_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + }, + }) +} + +func TestAccComputeFirewallPolicyRule_multipleRules(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_name": fmt.Sprintf("organizations/%s", envvar.GetTestOrgFromEnv(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewallPolicyRule_multiple(context), + }, + { + ResourceName: "google_compute_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + ResourceName: "google_compute_firewall_policy_rule.fw_policy_rule2", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: testAccComputeFirewallPolicyRule_multipleAdd(context), + }, + { + ResourceName: "google_compute_firewall_policy_rule.fw_policy_rule3", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: 
testAccComputeFirewallPolicyRule_multipleRemove(context), + }, + }, + }) +} + +func TestAccComputeFirewallPolicyRule_securityProfileGroup_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_name": fmt.Sprintf("organizations/%s", envvar.GetTestOrgFromEnv(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewallPolicyRule_securityProfileGroup_basic(context), + }, + { + ResourceName: "google_compute_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: testAccComputeFirewallPolicyRule_securityProfileGroup_update(context), + }, + { + ResourceName: "google_compute_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy", "target_resources"}, + }, + }, + }) +} + +func testAccComputeFirewallPolicyRule_securityProfileGroup_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + display_name = "tf-test-folder-%{random_suffix}" + parent = "%{org_name}" +} + +resource "google_network_security_security_profile" "security_profile" { + name = "tf-test-my-sp%{random_suffix}" + type = "THREAT_PREVENTION" + parent = "%{org_name}" + location = "global" +} + +resource "google_network_security_security_profile_group" "security_profile_group" { + name = "tf-test-my-spg%{random_suffix}" + parent = "%{org_name}" + location = "global" + description = "My security profile group." 
+ threat_prevention_profile = google_network_security_security_profile.security_profile.id +} + +resource "google_compute_firewall_policy" "fw_policy" { + parent = google_folder.folder.name + short_name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "apply_security_profile_group" + security_profile_group = "//networksecurity.googleapis.com/${google_network_security_security_profile_group.security_profile_group.id}" + direction = "INGRESS" + disabled = false + + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + src_ip_ranges = ["11.100.0.1/32"] + } +} +`, context) +} + +func testAccComputeFirewallPolicyRule_securityProfileGroup_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + display_name = "tf-test-folder-%{random_suffix}" + parent = "%{org_name}" +} + +resource "google_network_security_security_profile" "security_profile" { + name = "tf-test-my-sp%{random_suffix}" + type = "THREAT_PREVENTION" + parent = "%{org_name}" + location = "global" +} + +resource "google_network_security_security_profile_group" "security_profile_group" { + name = "tf-test-my-spg%{random_suffix}" + parent = "%{org_name}" + location = "global" + description = "My security profile group." + threat_prevention_profile = google_network_security_security_profile.security_profile.id +} + +resource "google_network_security_security_profile_group" "security_profile_group_updated" { + name = "tf-test-my-spg-updated%{random_suffix}" + parent = "%{org_name}" + location = "global" + description = "My security profile group." 
+ threat_prevention_profile = google_network_security_security_profile.security_profile.id +} + +resource "google_compute_firewall_policy" "fw_policy" { + parent = google_folder.folder.name + short_name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "apply_security_profile_group" + security_profile_group = "//networksecurity.googleapis.com/${google_network_security_security_profile_group.security_profile_group_updated.id}" + direction = "INGRESS" + disabled = false + + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + src_ip_ranges = ["11.100.0.1/32"] + } +} +`, context) +} + +func testAccComputeFirewallPolicyRule_start(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "service_account" { + account_id = "tf-test-sa-%{random_suffix}" +} + +resource "google_service_account" "service_account2" { + account_id = "tf-test-sa2-%{random_suffix}" +} + +resource "google_compute_network" "network1" { + name = "tf-test-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network2" { + name = "tf-test-2-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_folder" "folder" { + display_name = "tf-test-folder-%{random_suffix}" + parent = "%{org_name}" +} + +resource "google_compute_firewall_policy" "fw_policy" { + parent = google_folder.folder.name + short_name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global 
networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = [] + dest_region_codes = [] + dest_threat_intelligences = [] + dest_address_groups = [google_network_security_address_group.address_group.id] + } +} +`, context) +} + +func testAccComputeFirewallPolicyRule_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "service_account" { + account_id = "tf-test-sa-%{random_suffix}" +} + +resource "google_service_account" "service_account2" { + account_id = "tf-test-sa2-%{random_suffix}" +} + +resource "google_compute_network" "network1" { + name = "tf-test-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network2" { + name = "tf-test-2-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_folder" "folder" { + display_name = "tf-test-folder-%{random_suffix}" + parent = "%{org_name}" +} + +resource "google_compute_firewall_policy" "fw_policy" { + parent = google_folder.folder.name + short_name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule1" { + 
firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + target_service_accounts = [google_service_account.service_account.email] + target_resources = [ + google_compute_network.network1.self_link, + google_compute_network.network2.self_link + ] + + match { + layer4_configs { + ip_protocol = "tcp" + ports = [8080] + } + layer4_configs { + ip_protocol = "udp" + ports = [22] + } + dest_ip_ranges = ["11.100.0.1/32", "10.0.0.0/24"] + dest_fqdns = ["google.com"] + dest_region_codes = ["US"] + dest_threat_intelligences = ["iplist-known-malicious-ips"] + src_address_groups = [] + dest_address_groups = [google_network_security_address_group.address_group.id] + } +} +`, context) +} + +func testAccComputeFirewallPolicyRule_removeConfigs(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "service_account" { + account_id = "tf-test-sa-%{random_suffix}" +} + +resource "google_service_account" "service_account2" { + account_id = "tf-test-sa2-%{random_suffix}" +} + +resource "google_compute_network" "network1" { + name = "tf-test-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network2" { + name = "tf-test-2-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_folder" "folder" { + display_name = "tf-test-folder-%{random_suffix}" + parent = "%{org_name}" +} + +resource "google_compute_firewall_policy" "fw_policy" { + parent = google_folder.folder.id + short_name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + 
items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Test description" + priority = 9000 + enable_logging = false + action = "deny" + direction = "INGRESS" + disabled = true + target_resources = [google_compute_network.network1.self_link] + target_service_accounts = [ + google_service_account.service_account.email, + google_service_account.service_account2.email + ] + + match { + layer4_configs { + ip_protocol = "udp" + ports = [22] + } + src_ip_ranges = ["11.100.0.1/32", "10.0.0.0/24"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + } +} +`, context) +} + +func testAccComputeFirewallPolicyRule_multiple(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + display_name = "tf-test-folder-%{random_suffix}" + parent = "%{org_name}" +} + +resource "google_compute_firewall_policy" "fw_policy" { + parent = google_folder.folder.name + short_name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = ["google.com"] + 
dest_region_codes = ["US"] + dest_threat_intelligences = ["iplist-known-malicious-ips"] + dest_address_groups = [google_network_security_address_group.address_group.id] + } +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule2" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9001 + enable_logging = false + action = "deny" + direction = "INGRESS" + disabled = false + + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + layer4_configs { + ip_protocol = "all" + } + src_ip_ranges = ["11.100.0.1/32"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + src_address_groups = [google_network_security_address_group.address_group.id] + } +} +`, context) +} + +func testAccComputeFirewallPolicyRule_multipleAdd(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + display_name = "tf-test-folder-%{random_suffix}" + parent = "%{org_name}" +} + +resource "google_compute_firewall_policy" "fw_policy" { + parent = google_folder.folder.id + short_name = "tf-test-policy-%{random_suffix}" + description = "Description Update" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + + match { + layer4_configs { + ip_protocol = "tcp" + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = ["google.com"] 
+ dest_region_codes = ["US"] + dest_threat_intelligences = ["iplist-known-malicious-ips"] + dest_address_groups = [google_network_security_address_group.address_group.id] + } +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule2" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9001 + enable_logging = false + action = "deny" + direction = "INGRESS" + disabled = false + + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + layer4_configs { + ip_protocol = "all" + } + src_ip_ranges = ["11.100.0.1/32"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + src_address_groups = [google_network_security_address_group.address_group.id] + } +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule3" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 40 + enable_logging = true + action = "allow" + direction = "INGRESS" + disabled = true + + match { + layer4_configs { + ip_protocol = "udp" + ports = [8000] + } + src_ip_ranges = ["11.100.0.1/32", "10.0.0.0/24"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + src_address_groups = [google_network_security_address_group.address_group.id] + } +} +`, context) +} + +func testAccComputeFirewallPolicyRule_multipleRemove(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "folder" { + display_name = "tf-test-folder-%{random_suffix}" + parent = "%{org_name}" +} + +resource "google_compute_firewall_policy" "fw_policy" { + parent = google_folder.folder.name + short_name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource 
"google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = ["google.com"] + dest_region_codes = ["US"] + dest_threat_intelligences = ["iplist-known-malicious-ips"] + } +} + +resource "google_compute_firewall_policy_rule" "fw_policy_rule3" { + firewall_policy = google_compute_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 40 + enable_logging = true + action = "allow" + direction = "INGRESS" + disabled = true + + match { + layer4_configs { + ip_protocol = "udp" + ports = [8000] + } + src_ip_ranges = ["11.100.0.1/32", "10.0.0.0/24"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_policy_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_policy_test.go new file mode 100644 index 000000000000..a4f69426b577 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_policy_test.go @@ -0,0 +1,80 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeFirewallPolicy_update(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + policyName := fmt.Sprintf("tf-test-firewall-policy-%s", acctest.RandString(t, 10)) + folderName := fmt.Sprintf("tf-test-folder-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewallPolicy_basic(org, policyName, folderName), + }, + { + ResourceName: "google_compute_firewall_policy.default", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewallPolicy_update(org, policyName, folderName), + }, + { + ResourceName: "google_compute_firewall_policy.default", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewallPolicy_update(org, policyName, folderName), + }, + { + ResourceName: "google_compute_firewall_policy.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeFirewallPolicy_basic(org, policyName, folderName string) string { + return fmt.Sprintf(` +resource "google_folder" "folder" { + display_name = "%s" + parent = "%s" +} + +resource "google_compute_firewall_policy" "default" { + parent = google_folder.folder.name + short_name = "%s" + description = "Resource created for Terraform acceptance testing" +} +`, folderName, "organizations/"+org, policyName) +} + +func testAccComputeFirewallPolicy_update(org, policyName, folderName string) string { + return fmt.Sprintf(` +resource "google_folder" "folder" { + display_name = "%s" + parent = "%s" +} + +resource "google_compute_firewall_policy" "default" { + parent = google_folder.folder.id + short_name = "%s" + description = "An updated description" +} +`, folderName, 
"organizations/"+org, policyName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_test.go new file mode 100644 index 000000000000..bb0644a6f901 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_test.go @@ -0,0 +1,593 @@ +package compute_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeFirewall_update(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_basic(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewall_update(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewall_basic(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeFirewall_localRanges(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_localRanges(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewall_localRangesUpdate(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewall_localRanges(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeFirewall_priority(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_priority(networkName, firewallName, 1001), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeFirewall_noSource(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_noSource(networkName, firewallName), + ExpectError: regexp.MustCompile("one 
of source_tags, source_ranges, or source_service_accounts must be defined"), + }, + }, + }) +} + +func TestAccComputeFirewall_denied(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_denied(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeFirewall_egress(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_egress(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeFirewall_serviceAccounts(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + sourceSa := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + targetSa := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_serviceAccounts(sourceSa, targetSa, networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeFirewall_disabled(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_disabled(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewall_basic(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeFirewall_enableLogging(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_enableLogging(networkName, firewallName, ""), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewall_enableLogging(networkName, firewallName, 
"INCLUDE_ALL_METADATA"), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewall_enableLogging(networkName, firewallName, "EXCLUDE_ALL_METADATA"), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeFirewall_enableLogging(networkName, firewallName, ""), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeFirewall_moduleOutput(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + firewallName := fmt.Sprintf("tf-test-firewall-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeFirewallDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeFirewall_moduleOutput(networkName, firewallName), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeFirewall_basic(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + source_tags = ["foo"] + + allow { + protocol = "icmp" + } +} +`, network, firewall) +} + +func testAccComputeFirewall_localRanges(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource 
created for Terraform acceptance testing" + network = google_compute_network.foobar.name + source_tags = ["foo"] + + source_ranges = ["10.0.0.0/8"] + destination_ranges = ["192.168.1.0/24"] + + allow { + protocol = "icmp" + } +} +`, network, firewall) +} + + +func testAccComputeFirewall_localRangesUpdate(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + source_tags = ["foo"] + + source_ranges = ["192.168.1.0/24"] + destination_ranges = ["10.0.0.0/8"] + + allow { + protocol = "icmp" + } +} +`, network, firewall) +} + +func testAccComputeFirewall_update(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.self_link + source_tags = ["foo"] + target_tags = ["bar"] + + allow { + protocol = "tcp" + ports = ["80-255"] + } +} +`, network, firewall) +} + +func testAccComputeFirewall_priority(network, firewall string, priority int) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + source_tags = ["foo"] + + allow { + protocol = "icmp" + } + priority = %d +} +`, network, firewall, priority) +} + +func testAccComputeFirewall_noSource(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks 
= false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + + allow { + protocol = "tcp" + ports = [22] + } +} +`, network, firewall) +} + +func testAccComputeFirewall_denied(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + source_tags = ["foo"] + + deny { + protocol = "tcp" + ports = [22] + } +} +`, network, firewall) +} + +func testAccComputeFirewall_egress(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + direction = "EGRESS" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + + allow { + protocol = "tcp" + ports = [22] + } +} +`, network, firewall) +} + +func testAccComputeFirewall_serviceAccounts(sourceSa, targetSa, network, firewall string) string { + return fmt.Sprintf(` +resource "google_service_account" "source" { + account_id = "%s" +} + +resource "google_service_account" "target" { + account_id = "%s" +} + +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + + allow { + protocol = "icmp" + } + + source_service_accounts = [google_service_account.source.email] + target_service_accounts = [google_service_account.target.email] +} +`, sourceSa, targetSa, network, firewall) +} + +func 
testAccComputeFirewall_disabled(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + source_tags = ["foo"] + + allow { + protocol = "icmp" + } + + disabled = true +} +`, network, firewall) +} + +func testAccComputeFirewall_enableLogging(network, firewall, logging string) string { + enableLoggingCfg := "" + if logging != "" { + enableLoggingCfg = fmt.Sprintf(`log_config { + metadata = "%s" + } + `, logging) + } + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + source_tags = ["foo"] + + allow { + protocol = "icmp" + } + + %s +} +`, network, firewall, enableLoggingCfg) +} + +func testAccComputeFirewall_moduleOutput(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.foobar.name +} + +resource "google_compute_address" "foobar" { + name = "%s-address" + subnetwork = google_compute_subnetwork.foobar.id + address_type = "INTERNAL" + region = "us-central1" + } + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + network = google_compute_network.foobar.name + direction = "INGRESS" + + source_ranges = ["${google_compute_address.foobar.address}/32"] + target_tags = ["foo"] + + allow { + protocol = "tcp" + } +} +`, 
network, network, network, firewall) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_forwarding_rule_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_forwarding_rule_test.go.tmpl new file mode 100644 index 000000000000..de0f67775ddc --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_forwarding_rule_test.go.tmpl @@ -0,0 +1,913 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeForwardingRule_update(t *testing.T) { + t.Parallel() + + poolName := fmt.Sprintf("tf-%s", acctest.RandString(t, 10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeForwardingRule_basic(poolName, ruleName), + }, + { + ResourceName: "google_compute_forwarding_rule.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccComputeForwardingRule_update(poolName, ruleName), + }, + { + ResourceName: "google_compute_forwarding_rule.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeForwardingRule_ip(t *testing.T) { + t.Parallel() + + addrName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + poolName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + ruleName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + addressRefFieldRaw := "address" + addressRefFieldID := "id" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeForwardingRule_ip(addrName, poolName, ruleName, addressRefFieldID), + }, + { + ResourceName: "google_compute_forwarding_rule.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ip_address"}, // ignore ip_address because we've specified it by ID + }, + { + Config: testAccComputeForwardingRule_ip(addrName, poolName, ruleName, addressRefFieldRaw), + }, + { + ResourceName: "google_compute_forwarding_rule.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeForwardingRule_internalTcpUdpLbWithMigBackendExampleUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeForwardingRule_internalTcpUdpLbWithMigBackendExample(context), + }, + { + ResourceName: "google_compute_forwarding_rule.google_compute_forwarding_rule", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"backend_service", "network", "subnetwork", "region"}, + }, + { + Config: testAccComputeForwardingRule_internalTcpUdpLbWithMigBackendExampleUpdate(context), + }, + { + ResourceName: "google_compute_forwarding_rule.google_compute_forwarding_rule", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"backend_service", "network", "subnetwork", "region"}, + }, + }, + }) +} +{{- end }} + +func TestAccComputeForwardingRule_networkTier(t *testing.T) { + 
t.Parallel() + + poolName := fmt.Sprintf("tf-%s", acctest.RandString(t, 10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeForwardingRule_networkTier(poolName, ruleName), + }, + + { + ResourceName: "google_compute_forwarding_rule.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeForwardingRule_serviceDirectoryRegistrations(t *testing.T) { + t.Parallel() + + poolName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + ruleName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + svcDirNamespace := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeForwardingRule_serviceDirectoryRegistrations(poolName, ruleName, svcDirNamespace, serviceName), + }, + + { + ResourceName: "google_compute_forwarding_rule.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + 
{ + Config: testAccComputeForwardingRule_forwardingRuleVpcPscExample(context), + }, + { + ResourceName: "google_compute_forwarding_rule.default", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(context, true), + }, + { + ResourceName: "google_compute_forwarding_rule.default", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(context, false), + }, + { + ResourceName: "google_compute_forwarding_rule.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeForwardingRule_forwardingRulePscRecreate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeForwardingRule_forwardingRulePscRecreate(context), + }, + { + ResourceName: "google_compute_forwarding_rule.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"recreate_closed_psc"}, + }, + { + Config: testAccComputeForwardingRule_forwardingRulePscRecreate(context), + }, + { + ResourceName: "google_compute_forwarding_rule.default", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"recreate_closed_psc"}, + }, + { + Config: testAccComputeForwardingRule_forwardingRulePscRecreate(context), + }, + { + ResourceName: "google_compute_forwarding_rule.default", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"recreate_closed_psc"}, + }, + }, + }) +} + +func 
TestAccComputeForwardingRule_forwardingRuleRegionalSteeringExampleUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeForwardingRule_forwardingRuleRegionalSteeringExample(context), + }, + { + ResourceName: "google_compute_forwarding_rule.steering", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"backend_service", "network", "subnetwork", "region"}, + }, + { + Config: testAccComputeForwardingRule_forwardingRuleRegionalSteeringExampleUpdate(context), + }, + { + ResourceName: "google_compute_forwarding_rule.steering", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"backend_service", "network", "subnetwork", "region"}, + }, + }, + }) +} + +func TestAccComputeForwardingRule_forwardingRuleIpAddressIpv6(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeForwardingRule_forwardingRuleIpAddressIpv6(context), + }, + { + ResourceName: "google_compute_forwarding_rule.external", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"backend_service", "network", "subnetwork", "region"}, + }, + }, + }) +} + +func testAccComputeForwardingRule_basic(poolName, ruleName string) string { + return fmt.Sprintf(` +resource "google_compute_target_pool" "foo-tp" { + description = 
"Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "foo-%s" +} + +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80-81" + target = google_compute_target_pool.foo-tp.self_link +{{- if ne $.TargetVersionName "ga" }} + labels = { + "foo" = "bar" + } +{{- end }} +} +`, poolName, ruleName) +} + +func testAccComputeForwardingRule_update(poolName, ruleName string) string { + return fmt.Sprintf(` +resource "google_compute_target_pool" "foo-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "foo-%s" +} + +resource "google_compute_target_pool" "bar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "bar-%s" +} + +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80-81" + target = google_compute_target_pool.bar-tp.self_link +{{- if ne $.TargetVersionName "ga" }} + labels = { + "baz" = "qux" + } +{{- end }} +} +`, poolName, poolName, ruleName) +} + +func testAccComputeForwardingRule_ip(addrName, poolName, ruleName, addressRefFieldValue string) string { + return fmt.Sprintf(` +resource "google_compute_address" "foo" { + name = "%s" +} + +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" +} + +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_address = google_compute_address.foo.%s + ip_protocol = "TCP" + name = "%s" + port_range = "80-81" + target = 
google_compute_target_pool.foobar-tp.self_link +} +`, addrName, poolName, addressRefFieldValue, ruleName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeForwardingRule_internalTcpUdpLbWithMigBackendExampleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +# Internal TCP/UDP load balancer with a managed instance group backend + +# VPC +resource "google_compute_network" "ilb_network" { + name = "tf-test-l4-ilb-network%{random_suffix}" + provider = google-beta + auto_create_subnetworks = false +} + +# backed subnet +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l4-ilb-subnet%{random_suffix}" + provider = google-beta + ip_cidr_range = "10.0.1.0/24" + region = "europe-west1" + network = google_compute_network.ilb_network.id +} + +# forwarding rule +resource "google_compute_forwarding_rule" "google_compute_forwarding_rule" { + name = "tf-test-l4-ilb-forwarding-rule%{random_suffix}" + backend_service = google_compute_region_backend_service.default.id + provider = google-beta + region = "europe-west1" + ip_protocol = "TCP" + load_balancing_scheme = "INTERNAL" + all_ports = true + allow_global_access = false + network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id +} + +# backend service +resource "google_compute_region_backend_service" "default" { + name = "tf-test-l4-ilb-backend-subnet%{random_suffix}" + provider = google-beta + region = "europe-west1" + protocol = "TCP" + load_balancing_scheme = "INTERNAL" + health_checks = [google_compute_region_health_check.default.id] + backend { + group = google_compute_region_instance_group_manager.mig.instance_group + balancing_mode = "CONNECTION" + } +} + +# instance template +resource "google_compute_instance_template" "instance_template" { + name = "tf-test-l4-ilb-mig-template%{random_suffix}" + provider = google-beta + machine_type = "e2-small" + tags = ["allow-ssh","allow-health-check"] + + network_interface { + 
network = google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + access_config { + # add external ip to fetch packages + } + } + disk { + source_image = "debian-cloud/debian-10" + auto_delete = true + boot = true + } + + # install nginx and serve a simple web page + metadata = { + startup-script = <<-EOF1 + #! /bin/bash + set -euo pipefail + + export DEBIAN_FRONTEND=noninteractive + apt-get update + apt-get install -y nginx-light jq + + NAME=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/hostname") + IP=$(curl -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip") + METADATA=$(curl -f -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/attributes/?recursive=True" | jq 'del(.["startup-script"])') + + cat < /var/www/html/index.html +
+      Name: $NAME
+      IP: $IP
+      Metadata: $METADATA
+      
+ EOF + EOF1 + } + lifecycle { + create_before_destroy = true + } +} + +# health check +resource "google_compute_region_health_check" "default" { + name = "tf-test-l4-ilb-hc%{random_suffix}" + provider = google-beta + region = "europe-west1" + http_health_check { + port = "80" + } +} + +# MIG +resource "google_compute_region_instance_group_manager" "mig" { + name = "tf-test-l4-ilb-mig1%{random_suffix}" + provider = google-beta + region = "europe-west1" + version { + instance_template = google_compute_instance_template.instance_template.id + name = "primary" + } + base_instance_name = "vm" + target_size = 2 +} + +# allow all access from health check ranges +resource "google_compute_firewall" "fw_hc" { + name = "tf-test-l4-ilb-fw-allow-hc%{random_suffix}" + provider = google-beta + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["130.211.0.0/22", "35.191.0.0/16", "35.235.240.0/20"] + allow { + protocol = "tcp" + } + target_tags = ["allow-health-check"] +} + +# allow communication within the subnet +resource "google_compute_firewall" "fw_ilb_to_backends" { + name = "tf-test-l4-ilb-fw-allow-ilb-to-backends%{random_suffix}" + provider = google-beta + direction = "INGRESS" + network = google_compute_network.ilb_network.id + source_ranges = ["10.0.1.0/24"] + allow { + protocol = "tcp" + } + allow { + protocol = "udp" + } + allow { + protocol = "icmp" + } +} + +# allow SSH +resource "google_compute_firewall" "fw_ilb_ssh" { + name = "tf-test-l4-ilb-fw-ssh%{random_suffix}" + provider = google-beta + direction = "INGRESS" + network = google_compute_network.ilb_network.id + allow { + protocol = "tcp" + ports = ["22"] + } + target_tags = ["allow-ssh"] + source_ranges = ["0.0.0.0/0"] +} + +# test instance +resource "google_compute_instance" "vm_test" { + name = "tf-test-l4-ilb-test-vm%{random_suffix}" + provider = google-beta + zone = "europe-west1-b" + machine_type = "e2-small" + network_interface { + network = 
google_compute_network.ilb_network.id + subnetwork = google_compute_subnetwork.ilb_subnet.id + } + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } +} +`, context) +} +{{- end }} + +func testAccComputeForwardingRule_networkTier(poolName, ruleName string) string { + return fmt.Sprintf(` +resource "google_compute_target_pool" "foobar-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "%s" +} + +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80-81" + target = google_compute_target_pool.foobar-tp.self_link + + network_tier = "STANDARD" +} +`, poolName, ruleName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeForwardingRule_serviceDirectoryRegistrations(poolName, ruleName, svcDirNamespace, serviceName string) string { + return fmt.Sprintf(` +resource "google_compute_target_pool" "foo-tp" { + description = "Resource created for Terraform acceptance testing" + instances = ["us-central1-a/foo", "us-central1-b/bar"] + name = "foo-%s" +} + +resource "google_compute_forwarding_rule" "foobar" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "UDP" + name = "%s" + port_range = "80-81" + target = google_compute_target_pool.foo-tp.self_link + + service_directory_registrations { + namespace = google_service_directory_namespace.examplens.namespace_id + service = google_service_directory_service.examplesvc.service_id + } +} + +resource "google_service_directory_namespace" "examplens" { + namespace_id = "%s" + location = "us-central1" +} + +resource "google_service_directory_service" "examplesvc" { + service_id = "%s" + namespace = google_service_directory_namespace.examplens.id + + metadata = { + stage = "prod" + region = "us-central1" + } +} +`, poolName, ruleName, svcDirNamespace, 
serviceName) +} +{{- end }} + +func testAccComputeForwardingRule_forwardingRuleVpcPscExampleUpdate(context map[string]interface{}, preventDestroy bool) string { + context["lifecycle_block"] = "" + if preventDestroy { + context["lifecycle_block"] = ` + lifecycle { + prevent_destroy = true + }` + } + + return acctest.Nprintf(` +// Forwarding rule for VPC private service connect +resource "google_compute_forwarding_rule" "default" { + name = "tf-test-psc-endpoint%{random_suffix}" + region = "us-central1" + load_balancing_scheme = "" + target = google_compute_service_attachment.producer_service_attachment.id + network = google_compute_network.consumer_net.name + ip_address = google_compute_address.consumer_address.id + allow_psc_global_access = false + %{lifecycle_block} +} + +// Consumer service endpoint + +resource "google_compute_network" "consumer_net" { + name = "tf-test-consumer-net%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "consumer_subnet" { + name = "tf-test-consumer-net%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.consumer_net.id +} + +resource "google_compute_address" "consumer_address" { + name = "tf-test-website-ip%{random_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.consumer_subnet.id + address_type = "INTERNAL" +} + + +// Producer service attachment + +resource "google_compute_network" "producer_net" { + name = "tf-test-producer-net%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "producer_subnet" { + name = "tf-test-producer-net%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.producer_net.id +} + +resource "google_compute_subnetwork" "psc_producer_subnet" { + name = "tf-test-producer-psc-net%{random_suffix}" + ip_cidr_range = "10.1.0.0/16" + region = "us-central1" + + purpose = "PRIVATE_SERVICE_CONNECT" 
+ network = google_compute_network.producer_net.id +} + +resource "google_compute_service_attachment" "producer_service_attachment" { + name = "tf-test-producer-service%{random_suffix}" + region = "us-central1" + description = "A service attachment configured with Terraform" + + enable_proxy_protocol = true + connection_preference = "ACCEPT_AUTOMATIC" + nat_subnets = [google_compute_subnetwork.psc_producer_subnet.name] + target_service = google_compute_forwarding_rule.producer_target_service.id +} + +resource "google_compute_forwarding_rule" "producer_target_service" { + name = "tf-test-producer-forwarding-rule%{random_suffix}" + region = "us-central1" + + load_balancing_scheme = "INTERNAL" + backend_service = google_compute_region_backend_service.producer_service_backend.id + all_ports = true + network = google_compute_network.producer_net.name + subnetwork = google_compute_subnetwork.producer_subnet.name +} + +resource "google_compute_region_backend_service" "producer_service_backend" { + name = "tf-test-producer-service-backend%{random_suffix}" + region = "us-central1" + + health_checks = [google_compute_health_check.producer_service_health_check.id] +} + +resource "google_compute_health_check" "producer_service_health_check" { + name = "tf-test-producer-service-health-check%{random_suffix}" + + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "80" + } +} +`, context) +} + +func testAccComputeForwardingRule_forwardingRulePscRecreate(context map[string]interface{}) string { + + return acctest.Nprintf(` +// Forwarding rule for VPC private service connect +resource "google_compute_forwarding_rule" "default" { + name = "tf-test-psc-endpoint%{random_suffix}" + region = "us-central1" + load_balancing_scheme = "" + target = google_compute_service_attachment.producer_service_attachment.id + network = google_compute_network.consumer_net.name + ip_address = google_compute_address.consumer_address.id + allow_psc_global_access = true + 
recreate_closed_psc = true +} + +// Consumer service endpoint + +resource "google_compute_network" "consumer_net" { + name = "tf-test-consumer-net%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "consumer_subnet" { + name = "tf-test-consumer-net%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.consumer_net.id +} + +resource "google_compute_address" "consumer_address" { + name = "tf-test-website-ip%{random_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.consumer_subnet.id + address_type = "INTERNAL" +} + + +// Producer service attachment + +resource "google_compute_network" "producer_net" { + name = "tf-test-producer-net%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "producer_subnet" { + name = "tf-test-producer-net%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.producer_net.id +} + +resource "google_compute_subnetwork" "psc_producer_subnet" { + name = "tf-test-producer-psc-net%{random_suffix}" + ip_cidr_range = "10.1.0.0/16" + region = "us-central1" + + purpose = "PRIVATE_SERVICE_CONNECT" + network = google_compute_network.producer_net.id +} + +resource "google_compute_service_attachment" "producer_service_attachment" { + name = "tf-test-producer-service%{random_suffix}" + region = "us-central1" + description = "A service attachment configured with Terraform" + + enable_proxy_protocol = true + connection_preference = "ACCEPT_AUTOMATIC" + nat_subnets = [google_compute_subnetwork.psc_producer_subnet.name] + target_service = google_compute_forwarding_rule.producer_target_service.id +} + +resource "google_compute_forwarding_rule" "producer_target_service" { + name = "tf-test-producer-forwarding-rule%{random_suffix}" + region = "us-central1" + + load_balancing_scheme = "INTERNAL" + backend_service = 
google_compute_region_backend_service.producer_service_backend.id + all_ports = true + network = google_compute_network.producer_net.name + subnetwork = google_compute_subnetwork.producer_subnet.name +} + +resource "google_compute_region_backend_service" "producer_service_backend" { + name = "tf-test-producer-service-backend%{random_suffix}" + region = "us-central1" + + health_checks = [google_compute_health_check.producer_service_health_check.id] +} + +resource "google_compute_health_check" "producer_service_health_check" { + name = "tf-test-producer-service-health-check%{random_suffix}" + + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "80" + } +} +`, context) +} + +func testAccComputeForwardingRule_forwardingRuleRegionalSteeringExampleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "steering" { + name = "tf-test-steering-rule%{random_suffix}" + region = "us-central1" + ip_address = google_compute_address.basic.self_link + backend_service = google_compute_region_backend_service.external.self_link + load_balancing_scheme = "EXTERNAL" + source_ip_ranges = ["35.121.88.0/24", "36.187.239.137"] + depends_on = [google_compute_forwarding_rule.external] +} + +resource "google_compute_address" "basic" { + name = "tf-test-website-ip%{random_suffix}" + region = "us-central1" +} + +resource "google_compute_region_backend_service" "external" { + name = "tf-test-service-backend%{random_suffix}" + region = "us-central1" + load_balancing_scheme = "EXTERNAL" +} + +resource "google_compute_forwarding_rule" "external" { + name = "tf-test-external-forwarding-rule%{random_suffix}" + region = "us-central1" + ip_address = google_compute_address.basic.self_link + backend_service = google_compute_region_backend_service.external.self_link + load_balancing_scheme = "EXTERNAL" +} +`, context) +} + +func testAccComputeForwardingRule_forwardingRuleIpAddressIpv6(context map[string]interface{}) string 
{ + return acctest.Nprintf(` +resource "google_compute_address" "basic" { + name = "tf-test-address%{random_suffix}" + region = "us-central1" + + address_type = "EXTERNAL" + ipv6_endpoint_type = "NETLB" + ip_version = "IPV6" + subnetwork = google_compute_subnetwork.subnetwork-ipv6.id +} + +resource "google_compute_region_backend_service" "external" { + name = "tf-test-backend%{random_suffix}" + region = "us-central1" + load_balancing_scheme = "EXTERNAL" +} + +resource "google_compute_forwarding_rule" "external" { + name = "tf-test-forwarding-rule%{random_suffix}" + region = "us-central1" + ip_address = google_compute_address.basic.self_link + backend_service = google_compute_region_backend_service.external.self_link + load_balancing_scheme = "EXTERNAL" +} + +resource "google_compute_subnetwork" "subnetwork-ipv6" { + name = "tf-test-subnetwork%{random_suffix}" + ip_cidr_range = "10.0.0.0/22" + region = "us-central1" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.custom-test.id +} + +resource "google_compute_network" "custom-test" { + name = "tf-test-network%{random_suffix}" + auto_create_subnetworks = false +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_global_address_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_global_address_test.go new file mode 100644 index 000000000000..fb846d6525dc --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_global_address_test.go @@ -0,0 +1,76 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeGlobalAddress_ipv6(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeGlobalAddressDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeGlobalAddress_ipv6(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_global_address.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeGlobalAddress_internal(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeGlobalAddressDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeGlobalAddress_internal(acctest.RandString(t, 10), acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_global_address.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeGlobalAddress_ipv6(addressName string) string { + return fmt.Sprintf(` +resource "google_compute_global_address" "foobar" { + name = "tf-test-address-%s" + description = "Created for Terraform acceptance testing" + ip_version = "IPV6" +} +`, addressName) +} + +func testAccComputeGlobalAddress_internal(networkName, addressName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "tf-test-address-%s" +} + +resource "google_compute_global_address" "foobar" { + name = "tf-test-address-%s" + address_type = "INTERNAL" + purpose = "VPC_PEERING" + prefix_length = 24 + address = "172.20.181.0" + network = google_compute_network.foobar.self_link +} +`, networkName, addressName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_global_forwarding_rule_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_global_forwarding_rule_test.go.tmpl new file mode 100644 index 000000000000..8e0191a8b4e8 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_global_forwarding_rule_test.go.tmpl @@ -0,0 
+1,614 @@ +package compute_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeGlobalForwardingRule_updateTarget(t *testing.T) { + t.Parallel() + + fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + proxy := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + proxyUpdated := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeGlobalForwardingRule_httpProxy(fr, "proxy", proxy, proxyUpdated, backend, hc, urlmap), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr( + "google_compute_global_forwarding_rule.forwarding_rule", "target", regexp.MustCompile(proxy + "$")), + ), + }, + { + ResourceName: "google_compute_global_forwarding_rule.forwarding_rule", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"port_range", "target"}, + }, + { + Config: testAccComputeGlobalForwardingRule_httpProxy(fr, "proxy2", proxy, proxyUpdated, backend, hc, urlmap), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr( + "google_compute_global_forwarding_rule.forwarding_rule", "target", regexp.MustCompile(proxyUpdated + "$")), + ), + }, + { + ResourceName: "google_compute_global_forwarding_rule.forwarding_rule", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"port_range", "target"}, + }, 
+ }, + }) +} + +func TestAccComputeGlobalForwardingRule_ipv6(t *testing.T) { + t.Parallel() + + fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + proxy := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeGlobalForwardingRule_ipv6(fr, proxy, backend, hc, urlmap), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_compute_global_forwarding_rule.forwarding_rule", "ip_version", "IPV6"), + ), + }, + { + ResourceName: "google_compute_global_forwarding_rule.forwarding_rule", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"port_range", "target"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeGlobalForwardingRule_labels(t *testing.T) { + t.Parallel() + + fr := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + proxy := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + backend := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + hc := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + urlmap := fmt.Sprintf("forwardrule-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeGlobalForwardingRule_labels(fr, proxy, backend, 
hc, urlmap), + }, + { + ResourceName: "google_compute_global_forwarding_rule.forwarding_rule", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"port_range", "target", "labels", "terraform_labels"}, + }, + { + Config: testAccComputeGlobalForwardingRule_labelsUpdated(fr, proxy, backend, hc, urlmap), + }, + { + ResourceName: "google_compute_global_forwarding_rule.forwarding_rule", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"port_range", "target", "labels", "terraform_labels"}, + }, + }, + }) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeGlobalForwardingRule_internalLoadBalancing(t *testing.T) { + t.Parallel() + + fr := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + proxy := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + backend := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + hc := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + urlmap := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + it := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeGlobalForwardingRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeGlobalForwardingRule_internalLoadBalancing(fr, proxy, backend, hc, urlmap, igm, it), + }, + { + ResourceName: "google_compute_global_forwarding_rule.forwarding_rule", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"port_range", "target"}, + }, + { + Config: testAccComputeGlobalForwardingRule_internalLoadBalancingUpdate(fr, proxy, backend, hc, urlmap, igm, it), + }, + { + ResourceName: "google_compute_global_forwarding_rule.forwarding_rule", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"port_range", "target"}, + }, + }, + }) +} +{{- end }} + +func testAccComputeGlobalForwardingRule_httpProxy(fr, targetProxy, proxy, proxy2, backend, hc, urlmap string) string { + return fmt.Sprintf(` +resource "google_compute_global_forwarding_rule" "forwarding_rule" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "TCP" + name = "%s" + port_range = "80" + target = google_compute_target_http_proxy.%s.self_link +} + +resource "google_compute_target_http_proxy" "proxy" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = google_compute_url_map.url_map.self_link +} + +resource "google_compute_target_http_proxy" "proxy2" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = google_compute_url_map.url_map.self_link +} + +resource "google_compute_backend_service" "backend" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "url_map" { + name = "%s" + default_service = google_compute_backend_service.backend.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_backend_service.backend.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_backend_service.backend.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.backend.self_link + } +} +`, fr, targetProxy, proxy, proxy2, backend, hc, urlmap) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeGlobalForwardingRule_labels(fr, proxy, backend, hc, urlmap string) string { + return fmt.Sprintf(` +resource "google_compute_global_forwarding_rule" "forwarding_rule" { + name = "%s" + 
target = google_compute_target_http_proxy.proxy.self_link + port_range = "80" + + labels = { + my-label = "a-value" + a-different-label = "my-second-label-value" + } +} + +resource "google_compute_target_http_proxy" "proxy" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = google_compute_url_map.urlmap.self_link +} + +resource "google_compute_backend_service" "backend" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "urlmap" { + name = "%s" + default_service = google_compute_backend_service.backend.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_backend_service.backend.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_backend_service.backend.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.backend.self_link + } +} +`, fr, proxy, backend, hc, urlmap) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeGlobalForwardingRule_labelsUpdated(fr, proxy, backend, hc, urlmap string) string { + return fmt.Sprintf(` +resource "google_compute_global_forwarding_rule" "forwarding_rule" { + name = "%s" + target = google_compute_target_http_proxy.proxy.self_link + port_range = "80" + + labels = { + my-label = "a-new-value" + a-different-label = "my-third-label-value" + } +} + +resource "google_compute_target_http_proxy" "proxy" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = google_compute_url_map.urlmap.self_link +} + +resource "google_compute_backend_service" "backend" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource 
"google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "urlmap" { + name = "%s" + default_service = google_compute_backend_service.backend.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_backend_service.backend.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_backend_service.backend.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.backend.self_link + } +} +`, fr, proxy, backend, hc, urlmap) +} +{{- end }} + +func testAccComputeGlobalForwardingRule_ipv6(fr, proxy, backend, hc, urlmap string) string { + return fmt.Sprintf(` +resource "google_compute_global_forwarding_rule" "forwarding_rule" { + description = "Resource created for Terraform acceptance testing" + ip_protocol = "TCP" + name = "%s" + port_range = "80" + target = google_compute_target_http_proxy.proxy.self_link + ip_version = "IPV6" +} + +resource "google_compute_target_http_proxy" "proxy" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = google_compute_url_map.urlmap.self_link +} + +resource "google_compute_backend_service" "backend" { + name = "%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "urlmap" { + name = "%s" + default_service = google_compute_backend_service.backend.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_backend_service.backend.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_backend_service.backend.self_link + } + } + 
test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.backend.self_link + } +} +`, fr, proxy, backend, hc, urlmap) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeGlobalForwardingRule_internalLoadBalancing(fr, proxy, backend, hc, urlmap, igm, it string) string { + return fmt.Sprintf(` +resource "google_compute_global_forwarding_rule" "forwarding_rule" { + name = "%s" + target = google_compute_target_http_proxy.default.self_link + port_range = "8080" + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + ip_address = "0.0.0.0" + metadata_filters { + filter_match_criteria = "MATCH_ANY" + filter_labels { + name = "PLANET" + value = "NEPTUNE" + } + } +} + +resource "google_compute_target_http_proxy" "default" { + name = "%s" + description = "a description" + url_map = google_compute_url_map.default.self_link +} + +resource "google_compute_backend_service" "default" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + + backend { + group = google_compute_instance_group_manager.igm.instance_group + balancing_mode = "RATE" + capacity_scaler = 0.4 + max_rate_per_instance = 50 + } + + health_checks = [google_compute_health_check.default.self_link] +} + +resource "google_compute_health_check" "default" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} + +resource "google_compute_url_map" "default" { + name = "%s" + description = "a description" + default_service = google_compute_backend_service.default.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_service = google_compute_backend_service.default.self_link + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.default.self_link + } + } +} + +data "google_compute_image" "debian_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource 
"google_compute_instance_group_manager" "igm" { + name = "%s" + version { + instance_template = google_compute_instance_template.instance_template.self_link + name = "primary" + } + base_instance_name = "tf-test-internal-igm" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "instance_template" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.debian_image.self_link + auto_delete = true + boot = true + } +} +`, fr, proxy, backend, hc, urlmap, igm, it) +} + +func testAccComputeGlobalForwardingRule_internalLoadBalancingUpdate(fr, proxy, backend, hc, urlmap, igm, it string) string { + return fmt.Sprintf(` +resource "google_compute_global_forwarding_rule" "forwarding_rule" { + name = "%s" + target = google_compute_target_http_proxy.default.self_link + port_range = "8080" + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + ip_address = "0.0.0.0" + metadata_filters { + filter_match_criteria = "MATCH_ANY" + filter_labels { + name = "PLANET" + value = "NEPTUNE" + } + filter_labels { + name = "PLANET" + value = "JUPITER" + } + } + metadata_filters { + filter_match_criteria = "MATCH_ALL" + filter_labels { + name = "STAR" + value = "PROXIMA CENTAURI" + } + filter_labels { + name = "SPECIES" + value = "ALIEN" + } + } +} + +resource "google_compute_target_http_proxy" "default" { + name = "%s" + description = "a description" + url_map = google_compute_url_map.default.self_link +} + +resource "google_compute_backend_service" "default" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "INTERNAL_SELF_MANAGED" + + backend { + group = google_compute_instance_group_manager.igm.instance_group + balancing_mode = "RATE" + capacity_scaler = 0.4 + max_rate_per_instance = 50 + } + + health_checks = [google_compute_health_check.default.self_link] +} + +resource "google_compute_health_check" "default" 
{ + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} + +resource "google_compute_url_map" "default" { + name = "%s" + description = "a description" + default_service = google_compute_backend_service.default.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_service = google_compute_backend_service.default.self_link + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.default.self_link + } + } +} + +data "google_compute_image" "debian_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_group_manager" "igm" { + name = "%s" + version { + instance_template = google_compute_instance_template.instance_template.self_link + name = "primary" + } + base_instance_name = "tf-test-internal-igm" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "instance_template" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.debian_image.self_link + auto_delete = true + boot = true + } +} +`, fr, proxy, backend, hc, urlmap, igm, it) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_global_network_endpoint_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_global_network_endpoint_test.go new file mode 100644 index 000000000000..1190fb6f9351 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_global_network_endpoint_test.go @@ -0,0 +1,88 @@ +package compute_test +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeGlobalNetworkEndpoint_networkEndpointsBasic(t 
*testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "default_port": 90, + "modified_port": 100, + } + negId := fmt.Sprintf("projects/%s/global/networkEndpointGroups/neg-%s", + envvar.GetTestProjectFromEnv(), context["random_suffix"]) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one endpoint + Config: testAccComputeGlobalNetworkEndpoint_networkEndpointsBasic(context), + }, + { + ResourceName: "google_compute_global_network_endpoint.default", + ImportState: true, + ImportStateVerify: true, + }, + { + // Force-recreate old endpoint + Config: testAccComputeGlobalNetworkEndpoint_networkEndpointsModified(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkEndpointWithPortsDestroyed(t, negId, "90"), + ), + }, + { + ResourceName: "google_compute_global_network_endpoint.default", + ImportState: true, + ImportStateVerify: true, + }, + { + // delete all endpoints + Config: testAccComputeGlobalNetworkEndpoint_noNetworkEndpoints(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkEndpointWithPortsDestroyed(t, negId, "100"), + ), + }, + }, + }) +} + +func testAccComputeGlobalNetworkEndpoint_networkEndpointsBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_global_network_endpoint" "default" { + global_network_endpoint_group = google_compute_global_network_endpoint_group.neg.id + + ip_address = "8.8.8.8" + port = google_compute_global_network_endpoint_group.neg.default_port +} +`, context) + testAccComputeGlobalNetworkEndpoint_noNetworkEndpoints(context) +} + +func testAccComputeGlobalNetworkEndpoint_networkEndpointsModified(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_global_network_endpoint" "default" { + 
global_network_endpoint_group = google_compute_global_network_endpoint_group.neg.name + + ip_address = "8.8.8.8" + port = "%{modified_port}" +} +`, context) + testAccComputeGlobalNetworkEndpoint_noNetworkEndpoints(context) +} + +func testAccComputeGlobalNetworkEndpoint_noNetworkEndpoints(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_global_network_endpoint_group" "neg" { + name = "neg-%{random_suffix}" + default_port = "%{default_port}" + network_endpoint_type = "INTERNET_IP_PORT" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_health_check_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_health_check_test.go.tmpl new file mode 100644 index 000000000000..e0b18dab24a6 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_health_check_test.go.tmpl @@ -0,0 +1,384 @@ +package compute_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeHealthCheck_tcp_update(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeHealthCheck_tcp(hckName), + }, + { + ResourceName: "google_compute_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeHealthCheck_tcp_update(hckName), + }, + { + ResourceName: "google_compute_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeHealthCheck_ssl_port_spec(t *testing.T) { + t.Parallel() + + hckName := 
fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeHealthCheck_ssl_fixed_port(hckName), + }, + { + ResourceName: "google_compute_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeHealthCheck_http_port_spec(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeHealthCheck_http_port_spec(hckName), + ExpectError: regexp.MustCompile("Error in http_health_check: Must specify port_name when using USE_NAMED_PORT as port_specification."), + }, + { + Config: testAccComputeHealthCheck_http_named_port(hckName), + }, + }, + }) +} + +func TestAccComputeHealthCheck_https_serving_port(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeHealthCheck_https_serving_port(hckName), + }, + { + ResourceName: "google_compute_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeHealthCheck_typeTransition(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeHealthCheck_https(hckName), + }, + { + Config: testAccComputeHealthCheck_http(hckName), + }, + { + Config: testAccComputeHealthCheck_ssl(hckName), + }, + { + Config: testAccComputeHealthCheck_tcp(hckName), + }, + { + Config: testAccComputeHealthCheck_http2(hckName), + }, + { + Config: testAccComputeHealthCheck_https(hckName), + }, + }, + }) +} + +func TestAccComputeHealthCheck_tcpAndSsl_shouldFail(t *testing.T) { + // No HTTP interactions, is a unit test + acctest.SkipIfVcr(t) + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName), + ExpectError: regexp.MustCompile("only one of\n`grpc_health_check,http2_health_check,http_health_check,https_health_check,ssl_health_check,tcp_health_check`\ncan be specified, but `ssl_health_check,tcp_health_check` were specified"), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeHealthCheck_logConfigDisabled(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeHealthCheck_logConfigDisabled(hckName), + }, + { + ResourceName: "google_compute_health_check.foobar", + ImportState: true, + ImportStateVerify: 
true, + }, + }, + }) +} + +{{ end }} + +func testAccComputeHealthCheck_tcp(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + tcp_health_check { + port = 443 + } +} +`, hckName) +} + +func testAccComputeHealthCheck_tcp_update(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + healthy_threshold = 10 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 10 + tcp_health_check { + port = "8080" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_ssl(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + ssl_health_check { + port = "443" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_ssl_fixed_port(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + ssl_health_check { + port = "443" + port_specification = "USE_FIXED_PORT" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_http(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http_health_check { + port = "80" + } +} +`, hckName) +} + +func 
testAccComputeHealthCheck_http_port_spec(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http_health_check { + port_specification = "USE_NAMED_PORT" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_http_named_port(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http_health_check { + port_name = "http" + port_specification = "USE_NAMED_PORT" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_https(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + https_health_check { + port = "443" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_https_serving_port(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + https_health_check { + port_specification = "USE_SERVING_PORT" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_http2(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + 
http2_health_check { + port = "443" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_tcpAndSsl_shouldFail(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + + tcp_health_check { + port = 443 + } + ssl_health_check { + port = 443 + } +} +`, hckName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeHealthCheck_logConfigDisabled(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http2_health_check { + port = "443" + } + log_config { + enable = false + } +} +`, hckName) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_http_health_check_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_http_health_check_test.go.tmpl new file mode 100644 index 000000000000..e7a73fcf10b8 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_http_health_check_test.go.tmpl @@ -0,0 +1,128 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestAccComputeHttpHealthCheck_update(t *testing.T) { + t.Parallel() + + var healthCheck compute.HttpHealthCheck + + hhckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeHttpHealthCheck_update1(hhckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + t, "google_compute_http_health_check.foobar", &healthCheck), + testAccCheckComputeHttpHealthCheckRequestPath( + "/not_default", &healthCheck), + testAccCheckComputeHttpHealthCheckThresholds( + 2, 2, &healthCheck), + ), + }, + { + Config: testAccComputeHttpHealthCheck_update2(hhckName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeHttpHealthCheckExists( + t, "google_compute_http_health_check.foobar", &healthCheck), + testAccCheckComputeHttpHealthCheckRequestPath( + "/", &healthCheck), + testAccCheckComputeHttpHealthCheckThresholds( + 10, 10, &healthCheck), + ), + }, + }, + }) +} + +func testAccCheckComputeHttpHealthCheckExists(t *testing.T, n string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No name is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewComputeClient(config.UserAgent).HttpHealthChecks.Get( + config.Project, rs.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.Attributes["name"] { + return fmt.Errorf("HttpHealthCheck not found") + } + + *healthCheck = *found + + return nil + } +} + +func testAccCheckComputeHttpHealthCheckRequestPath(path string, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.RequestPath != path { + return fmt.Errorf("RequestPath doesn't match: expected %s, got %s", path, 
healthCheck.RequestPath) + } + + return nil + } +} + +func testAccCheckComputeHttpHealthCheckThresholds(healthy, unhealthy int64, healthCheck *compute.HttpHealthCheck) resource.TestCheckFunc { + return func(s *terraform.State) error { + if healthCheck.HealthyThreshold != healthy { + return fmt.Errorf("HealthyThreshold doesn't match: expected %d, got %d", healthy, healthCheck.HealthyThreshold) + } + + if healthCheck.UnhealthyThreshold != unhealthy { + return fmt.Errorf("UnhealthyThreshold doesn't match: expected %d, got %d", unhealthy, healthCheck.UnhealthyThreshold) + } + + return nil + } +} + +func testAccComputeHttpHealthCheck_update1(hhckName string) string { + return fmt.Sprintf(` +resource "google_compute_http_health_check" "foobar" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + request_path = "/not_default" +} +`, hhckName) +} + +func testAccComputeHttpHealthCheck_update2(hhckName string) string { + return fmt.Sprintf(` +resource "google_compute_http_health_check" "foobar" { + name = "%s" + description = "Resource updated for Terraform acceptance testing" + healthy_threshold = 10 + unhealthy_threshold = 10 +} +`, hhckName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_image_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_image_test.go.tmpl new file mode 100644 index 000000000000..3df9f17165f0 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_image_test.go.tmpl @@ -0,0 +1,524 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + +{{ if eq $.TargetVersionName `ga` }} + 
"google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestAccComputeImage_withLicense(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeImage_license("image-test-" + acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_image.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeImage_update(t *testing.T) { + t.Parallel() + + var image compute.Image + + name := "image-test-" + acctest.RandString(t, 10) + // Only labels supports an update + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeImage_basic(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + t, "google_compute_image.foobar", &image), + testAccCheckComputeImageContainsLabel(&image, "my-label", "my-label-value"), + ), + }, + { + Config: testAccComputeImage_update(name), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + t, "google_compute_image.foobar", &image), + testAccCheckComputeImageDoesNotContainLabel(&image, "my-label"), + testAccCheckComputeImageContainsLabel(&image, "empty-label", "oh-look-theres-a-label-now"), + testAccCheckComputeImageContainsLabel(&image, "new-field", "only-shows-up-when-updated"), + ), + }, + { + ResourceName: "google_compute_image.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"raw_disk", "labels", "terraform_labels"}, 
+ }, + }, + }) +} + +func TestAccComputeImage_basedondisk(t *testing.T) { + t.Parallel() + + var image compute.Image + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeImage_basedondisk(acctest.RandString(t, 10), acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + t, "google_compute_image.foobar", &image), + testAccCheckComputeImageHasSourceType(&image), + ), + }, + { + ResourceName: "google_compute_image.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeImage_sourceImage(t *testing.T) { + t.Parallel() + + var image compute.Image + imageName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeImage_sourceImage(imageName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + t, "google_compute_image.foobar", &image), + testAccCheckComputeImageHasSourceType(&image), + ), + }, + { + ResourceName: "google_compute_image.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeImage_sourceSnapshot(t *testing.T) { + t.Parallel() + + var image compute.Image + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + snapshotName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + imageName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + 
CheckDestroy: testAccCheckComputeImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeImage_sourceSnapshot(diskName, snapshotName, imageName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + t, "google_compute_image.foobar", &image), + testAccCheckComputeImageHasSourceType(&image), + ), + }, + { + ResourceName: "google_compute_image.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckComputeImageExists(t *testing.T, n string, image *compute.Image) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No name is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewComputeClient(config.UserAgent).Images.Get( + config.Project, rs.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.Attributes["name"] { + return fmt.Errorf("Image not found") + } + + *image = *found + + return nil + } +} + +func TestAccComputeImage_resolveImage(t *testing.T) { + t.Parallel() + + var image compute.Image + rand := acctest.RandString(t, 10) + name := fmt.Sprintf("test-image-%s", rand) + fam := fmt.Sprintf("test-image-family-%s", rand) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeImage_resolving(name, fam), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + t, "google_compute_image.foobar", &image), + testAccCheckComputeImageResolution(t, "google_compute_image.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeImage_imageEncryptionKey(t *testing.T) { + t.Parallel() + + kmsKey := 
acctest.BootstrapKMSKeyInLocation(t, "us-central1") + kmsKeyName := tpgresource.GetResourceNameFromSelfLink(kmsKey.CryptoKey.Name) + kmsRingName := tpgresource.GetResourceNameFromSelfLink(kmsKey.KeyRing.Name) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeImage_imageEncryptionKey(kmsRingName, kmsKeyName, acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_image.image", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckComputeImageResolution(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + project := config.Project + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No image name is set") + } + if rs.Primary.Attributes["family"] == "" { + return fmt.Errorf("No image family is set") + } + if rs.Primary.Attributes["self_link"] == "" { + return fmt.Errorf("No self_link is set") + } + + name := rs.Primary.Attributes["name"] + family := rs.Primary.Attributes["family"] + link := rs.Primary.Attributes["self_link"] + + latestDebian, err := config.NewComputeClient(config.UserAgent).Images.GetFromFamily("debian-cloud", "debian-11").Do() + if err != nil { + return fmt.Errorf("Error retrieving latest debian: %s", err) + } + + images := map[string]string{ + "family/" + latestDebian.Family: "projects/debian-cloud/global/images/family/" + latestDebian.Family, + "projects/debian-cloud/global/images/" + latestDebian.Name: "projects/debian-cloud/global/images/" + latestDebian.Name, + latestDebian.Family: 
"projects/debian-cloud/global/images/family/" + latestDebian.Family, + latestDebian.Name: "projects/debian-cloud/global/images/" + latestDebian.Name, + latestDebian.SelfLink: latestDebian.SelfLink, + + "global/images/" + name: "global/images/" + name, + "global/images/family/" + family: "global/images/family/" + family, + name: "global/images/" + name, + family: "global/images/family/" + family, + "family/" + family: "global/images/family/" + family, + project + "/" + name: "projects/" + project + "/global/images/" + name, + project + "/" + family: "projects/" + project + "/global/images/family/" + family, + link: link, + } + + for input, expectation := range images { + result, err := tpgcompute.ResolveImage(config, project, input, config.UserAgent) + if err != nil { + return fmt.Errorf("Error resolving input %s to image: %+v\n", input, err) + } + if result != expectation { + return fmt.Errorf("Expected input '%s' to resolve to '%s', it resolved to '%s' instead.\n", input, expectation, result) + } + } + return nil + } +} + +func testAccCheckComputeImageContainsLabel(image *compute.Image, key string, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + v, ok := image.Labels[key] + if !ok { + return fmt.Errorf("Expected label with key '%s' not found", key) + } + if v != value { + return fmt.Errorf("Incorrect label value for key '%s': expected '%s' but found '%s'", key, value, v) + } + return nil + } +} + +func testAccCheckComputeImageDoesNotContainLabel(image *compute.Image, key string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if v, ok := image.Labels[key]; ok { + return fmt.Errorf("Expected no label for key '%s' but found one with value '%s'", key, v) + } + + return nil + } +} + +func testAccCheckComputeImageHasSourceType(image *compute.Image) resource.TestCheckFunc { + return func(s *terraform.State) error { + if image.SourceType == "" { + return fmt.Errorf("No source disk") + } + return nil + } +} + +func 
testAccComputeImage_resolving(name, family string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + image = data.google_compute_image.my_image.self_link +} + +resource "google_compute_image" "foobar" { + name = "%s" + family = "%s" + source_disk = google_compute_disk.foobar.self_link +} +`, name, name, family) +} + +func testAccComputeImage_basic(name string) string { + return fmt.Sprintf(` +resource "google_compute_image" "foobar" { + name = "%s" + description = "description-test" + family = "family-test" + raw_disk { + source = "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" + } + labels = { + my-label = "my-label-value" + } +} +`, name) +} + +func testAccComputeImage_license(name string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "disk-test-%s" + zone = "us-central1-a" + image = data.google_compute_image.my_image.self_link +} + +resource "google_compute_image" "foobar" { + name = "%s" + description = "description-test" + source_disk = google_compute_disk.foobar.self_link + + labels = { + my-label = "my-label-value" + } + licenses = [ + "https://www.googleapis.com/compute/v1/projects/debian-cloud/global/licenses/debian-11-bullseye", + ] +} +`, name, name) +} + +func testAccComputeImage_update(name string) string { + return fmt.Sprintf(` +resource "google_compute_image" "foobar" { + name = "%s" + description = "description-test" + family = "family-test" + raw_disk { + source = "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" + } + labels = { + empty-label = "oh-look-theres-a-label-now" + new-field = 
"only-shows-up-when-updated" + } +} +`, name) +} + +func testAccComputeImage_basedondisk(diskName, imageName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "disk-test-%s" + zone = "us-central1-a" + image = data.google_compute_image.my_image.self_link +} + +resource "google_compute_image" "foobar" { + name = "image-test-%s" + source_disk = google_compute_disk.foobar.self_link +} +`, diskName, imageName) +} + +func testAccComputeImage_sourceImage(imageName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_image" "foobar" { + name = "%s" + source_image = data.google_compute_image.my_image.self_link +} +`, imageName) +} + +func testAccComputeImage_sourceSnapshot(diskName, snapshotName, imageName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "foobar" { + name = "%s" + source_disk = google_compute_disk.foobar.name + zone = "us-central1-a" +} + +resource "google_compute_image" "foobar" { + name = "%s" + source_snapshot = google_compute_snapshot.foobar.self_link +} +`, diskName, snapshotName, imageName) +} + +func testAccComputeImage_imageEncryptionKey(kmsRingName, kmsKeyName, suffix string) string { + return fmt.Sprintf(` +data "google_kms_key_ring" "ring" { + name = "%s" + location = "us-central1" +} + +data "google_kms_crypto_key" "key" { + name = "%s" + key_ring = data.google_kms_key_ring.ring.id +} + +resource "google_service_account" "test" { + account_id = "tf-test-sa-%s" + display_name = "KMS Ops Account" +} + +resource 
"google_kms_crypto_key_iam_member" "crypto_key" { + crypto_key_id = data.google_kms_crypto_key.key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:${google_service_account.test.email}" +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_image" "image" { + name = "tf-test-image-%s" + source_image = data.google_compute_image.debian.self_link + image_encryption_key { + kms_key_self_link = data.google_kms_crypto_key.key.id + kms_key_service_account = google_service_account.test.email + } + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key + ] +} +`, kmsRingName, kmsKeyName, suffix, suffix) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl new file mode 100644 index 000000000000..7c177d611d58 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl @@ -0,0 +1,3104 @@ +package compute + +import ( + "context" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/mitchellh/hashstructure" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +var ( + bootDiskKeys = []string{ + "boot_disk.0.auto_delete", + "boot_disk.0.device_name", + "boot_disk.0.disk_encryption_key_raw", + 
"boot_disk.0.kms_key_self_link", + "boot_disk.0.initialize_params", + "boot_disk.0.mode", + "boot_disk.0.source", + } + + initializeParamsKeys = []string{ + "boot_disk.0.initialize_params.0.size", + "boot_disk.0.initialize_params.0.type", + "boot_disk.0.initialize_params.0.image", + "boot_disk.0.initialize_params.0.labels", + "boot_disk.0.initialize_params.0.resource_manager_tags", + "boot_disk.0.initialize_params.0.provisioned_iops", + "boot_disk.0.initialize_params.0.provisioned_throughput", + "boot_disk.0.initialize_params.0.enable_confidential_compute", + } + + schedulingKeys = []string{ + "scheduling.0.on_host_maintenance", + "scheduling.0.automatic_restart", + "scheduling.0.preemptible", + "scheduling.0.node_affinities", + "scheduling.0.min_node_cpus", + "scheduling.0.provisioning_model", + "scheduling.0.instance_termination_action", +{{- if ne $.TargetVersionName "ga" }} + "scheduling.0.max_run_duration", + "scheduling.0.maintenance_interval", + "scheduling.0.on_instance_stop_action", +{{- end }} + "scheduling.0.local_ssd_recovery_timeout", + } + + shieldedInstanceConfigKeys = []string{ + "shielded_instance_config.0.enable_secure_boot", + "shielded_instance_config.0.enable_vtpm", + "shielded_instance_config.0.enable_integrity_monitoring", + } +) + +// network_interface.[d].network_ip can only change when subnet/network +// is also changing. Validate that if network_ip is changing this scenario +// holds up to par. 
+func forceNewIfNetworkIPNotUpdatable(ctx context.Context, d *schema.ResourceDiff, meta interface{}) error { + // separate func to allow unit testing + return forceNewIfNetworkIPNotUpdatableFunc(d) +} + +func forceNewIfNetworkIPNotUpdatableFunc(d tpgresource.TerraformResourceDiff) error { + oldCount, newCount := d.GetChange("network_interface.#") + if oldCount.(int) != newCount.(int) { + return nil + } + + for i := 0; i < newCount.(int); i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + networkKey := prefix + ".network" + subnetworkKey := prefix + ".subnetwork" + subnetworkProjectKey := prefix + ".subnetwork_project" + networkIPKey := prefix + ".network_ip" + if d.HasChange(networkIPKey) { + if !d.HasChange(networkKey) && !d.HasChange(subnetworkKey) && !d.HasChange(subnetworkProjectKey) { + if err := d.ForceNew(networkIPKey); err != nil { + return err + } + } + } + } + + return nil +} + +// User may specify AUTOMATIC using any case; the API will accept it and return an empty string. +func ComputeInstanceMinCpuPlatformEmptyOrAutomaticDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + old = strings.ToLower(old) + new = strings.ToLower(new) + defaultVal := "automatic" + return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) +} + +func ResourceComputeInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceCreate, + Read: resourceComputeInstanceRead, + Update: resourceComputeInstanceUpdate, + Delete: resourceComputeInstanceDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeInstanceImportState, + }, + + SchemaVersion: 6, + MigrateState: ResourceComputeInstanceMigrateState, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + // A compute instance is more or less a superset of a compute instance + // template. 
Please attempt to maintain consistency with the + // resource_compute_instance_template schema when updating this one. + Schema: map[string]*schema.Schema{ + "boot_disk": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Description: `The boot disk for the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Default: true, + ForceNew: true, + Description: `Whether the disk will be auto-deleted when the instance is deleted.`, + }, + + "device_name": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Computed: true, + ForceNew: true, + Description: `Name with which attached disk will be accessible under /dev/disk/by-id/`, + }, + + "disk_encryption_key_raw": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: bootDiskKeys, + ForceNew: true, + ConflictsWith: []string{"boot_disk.0.kms_key_self_link"}, + Sensitive: true, + Description: `A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, + }, + + "disk_encryption_key_sha256": { + Type: schema.TypeString, + Computed: true, + Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.`, + }, + + "kms_key_self_link": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: bootDiskKeys, + ForceNew: true, + ConflictsWith: []string{"boot_disk.0.disk_encryption_key_raw"}, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Computed: true, + Description: `The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. 
Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, + }, + + "initialize_params": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `Parameters with which a disk was created alongside the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": { + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), + Description: `The size of the image in gigabytes.`, + }, + + "type": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + Computed: true, + ForceNew: true, + Description: `The Google Compute Engine disk type. Such as pd-standard, pd-ssd or pd-balanced.`, + }, + + "image": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + Computed: true, + ForceNew: true, + DiffSuppressFunc: DiskImageDiffSuppress, + Description: `The image from which this disk was initialised.`, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + Computed: true, + ForceNew: true, + Description: `A set of key/value label pairs assigned to the disk.`, + }, + + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + AtLeastOneOf: initializeParamsKeys, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, + }, + + "provisioned_iops": { + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + Computed: true, + ForceNew: true, + Description: `Indicates how many IOPS to provision for the disk. 
This sets the number of I/O operations per second that the disk can handle.`, + }, + + "provisioned_throughput": { + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + Computed: true, + ForceNew: true, + Description: `Indicates how much throughput to provision for the disk. This sets the number of throughput mb per second that the disk can handle.`, + }, + + "enable_confidential_compute": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: initializeParamsKeys, + ForceNew: true, + Description: `A flag to enable confidential compute mode on boot disk`, + }, + }, + }, + }, + + "mode": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: bootDiskKeys, + ForceNew: true, + Default: "READ_WRITE", + ValidateFunc: validation.StringInSlice([]string{"READ_WRITE", "READ_ONLY"}, false), + Description: `Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE".`, + }, + + "source": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: bootDiskKeys, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"boot_disk.initialize_params"}, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the disk attached to this instance.`, + }, + }, + }, + }, + + "machine_type": { + Type: schema.TypeString, + Required: true, + Description: `The machine type to create.`, + DiffSuppressFunc: tpgresource.CompareResourceNames, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the instance. 
One of name or self_link must be provided.`, + }, + + "network_interface": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `The networks attached to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the network attached to this interface.`, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the subnetwork attached to this interface.`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "network_attachment": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}.`, + }, + {{- end }} + + "subnetwork_project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The project in which the subnetwork belongs.`, + }, + + "network_ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The private IP address assigned to the instance.`, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the interface`, + }, + "nic_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"GVNIC", "VIRTIO_NET"}, false), + Description: `The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET`, + }, + "access_config": { + Type: schema.TypeList, + Optional: true, + Description: `Access configurations, i.e. 
IPs via which this instance can be accessed via the Internet.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The IP address that is be 1:1 mapped to the instance's network ip.`, + }, + + "network_tier": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The networking tier used for configuring this instance. One of PREMIUM or STANDARD.`, + }, + + "public_ptr_domain_name": { + Type: schema.TypeString, + Optional: true, + Description: `The DNS domain name for the public PTR record.`, + }, + {{- if ne $.TargetVersionName "ga" }} + "security_policy": { + Type: schema.TypeString, + Computed: true, + Description: `A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.`, + }, + {{- end }} + }, + }, + }, + + "alias_ip_range": { + Type: schema.TypeList, + Optional: true, + Description: `An array of alias IP ranges for this network interface.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_cidr_range": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, + Description: `The IP CIDR range represented by this alias IP range.`, + }, + "subnetwork_range_name": { + Type: schema.TypeString, + Optional: true, + Description: `The subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range.`, + }, + }, + }, + }, + + "stack_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"IPV4_ONLY", "IPV4_IPV6", ""}, false), + Description: `The stack type for this network interface to identify whether the IPv6 feature is enabled or not. 
If not specified, IPV4_ONLY will be used.`, + }, + + "ipv6_access_type": { + Type: schema.TypeString, + Computed: true, + Description: `One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork.`, + }, + + "ipv6_access_config": { + Type: schema.TypeList, + Optional: true, + Description: `An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_tier": { + Type: schema.TypeString, + Required: true, + Description: `The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM tier is valid for IPv6`, + }, + "public_ptr_domain_name": { + Type: schema.TypeString, + Optional: true, + Description: `The domain name to be used when creating DNSv6 records for the external IPv6 ranges.`, + }, + "external_ipv6": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. To use a static external IP address, it must be unused and in the same region as the instance's zone. If not specified, Google Cloud will automatically assign an external IPv6 address from the instance's subnetwork.`, + }, + "external_ipv6_prefix_length": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The prefix length of the external IPv6 range.`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The name of this access configuration. 
In ipv6AccessConfigs, the recommended name is External IPv6.`, + }, + {{- if ne $.TargetVersionName "ga" }} + "security_policy": { + Type: schema.TypeString, + Computed: true, + Description: `A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.`, + }, + {{- end }} + }, + }, + }, + + "internal_ipv6_prefix_length": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: `The prefix length of the primary internal IPv6 range.`, + }, + + "ipv6_address": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork.`, + }, + + "queue_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified.`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "security_policy": { + Type: schema.TypeString, + Optional: true, + Description: `A full or partial URL to a security policy to add to this instance. If this field is set to an empty string it will remove the associated security policy.`, + }, + {{- end }} + }, + }, + }, + "network_performance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Configures network performance settings for the instance. 
If not specified, the instance will be created with its default network performance configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_egress_bandwidth_tier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"TIER_1", "DEFAULT"}, false), + Description: `The egress bandwidth tier to enable. Possible values:TIER_1, DEFAULT`, + }, + }, + }, + }, + "allow_stopping_for_update": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, allows Terraform to stop the instance to update its properties. If you try to update a property that requires stopping the instance without setting this field, the update will fail.`, + }, + + "attached_disk": { + Type: schema.TypeList, + Optional: true, + Description: `List of disks attached to the instance`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the disk attached to this instance.`, + }, + + "device_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Name with which the attached disk is accessible under /dev/disk/by-id/`, + }, + + "mode": { + Type: schema.TypeString, + Optional: true, + Default: "READ_WRITE", + ValidateFunc: validation.StringInSlice([]string{"READ_WRITE", "READ_ONLY"}, false), + Description: `Read/write mode for the disk. One of "READ_ONLY" or "READ_WRITE".`, + }, + + "disk_encryption_key_raw": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: `A 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to encrypt this disk. 
Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, + }, + + "kms_key_self_link": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Computed: true, + Description: `The self_link of the encryption key that is stored in Google Cloud KMS to encrypt this disk. Only one of kms_key_self_link and disk_encryption_key_raw may be set.`, + }, + + "disk_encryption_key_sha256": { + Type: schema.TypeString, + Computed: true, + Description: `The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource.`, + }, + }, + }, + }, + + "can_ip_forward": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether sending and receiving of packets with non-matching source or destination IPs is allowed.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: `A brief description of the resource.`, + }, + + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether deletion protection is enabled on this instance.`, + }, + + "enable_display": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether the instance has virtual displays enabled.`, + }, + + "guest_accelerator": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + ConfigMode: schema.SchemaConfigModeAttr, + Description: `List of the type and count of accelerator cards attached to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The number of the guest accelerator cards exposed to this instance.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The accelerator type resource exposed to this instance. E.g. 
nvidia-tesla-k80.`, + }, + }, + }, + }, + + "params": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Stores additional params passed with the request, but not persisted as part of resource payload.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, + }, + }, + }, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A set of key/value label pairs assigned to the instance. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "metadata": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Metadata key/value pairs made available within the instance.`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "partner_metadata": { + Type: schema.TypeMap, + Optional: true, + DiffSuppressFunc: ComparePartnerMetadataDiff, + DiffSuppressOnRefresh: true, + Elem: 
&schema.Schema{Type: schema.TypeString}, + Description: `Partner Metadata Map made available within the instance.`, + }, + {{- end }} + + "metadata_startup_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Metadata startup scripts made available within the instance.`, + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The minimum CPU platform specified for the VM instance.`, + DiffSuppressFunc: ComputeInstanceMinCpuPlatformEmptyOrAutomaticDiffSuppress, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If self_link is provided, this value is ignored. If neither self_link nor project are provided, the provider project is used.`, + }, + + "scheduling": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `The scheduling strategy being used by the instance.`, + Elem: &schema.Resource{ + // !!! IMPORTANT !!! + // We have a custom diff function for the scheduling block due to issues with Terraform's + // diff on schema.Set. If changes are made to this block, they must be reflected in that + // method. See schedulingHasChangeWithoutReboot in compute_instance_helpers.go + Schema: map[string]*schema.Schema{ + "on_host_maintenance": { + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: schedulingKeys, + Description: `Describes maintenance behavior for the instance. 
One of MIGRATE or TERMINATE,`, + }, + + "automatic_restart": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingKeys, + Default: true, + Description: `Specifies if the instance should be restarted if it was terminated by Compute Engine (not a user).`, + }, + + "preemptible": { + Type: schema.TypeBool, + Optional: true, + Default: false, + AtLeastOneOf: schedulingKeys, + ForceNew: true, + Description: `Whether the instance is preemptible.`, + }, + + "node_affinities": { + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: schedulingKeys, + Elem: instanceSchedulingNodeAffinitiesElemSchema(), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), + Description: `Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems.`, + }, + + "min_node_cpus": { + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: schedulingKeys, + }, + + "provisioning_model": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + AtLeastOneOf: schedulingKeys, + Description: `Whether the instance is spot. If this is set as SPOT.`, + }, + + "instance_termination_action": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: schedulingKeys, + Description: `Specifies the action GCE should take when SPOT VM is preempted.`, + }, +{{- if ne $.TargetVersionName "ga" }} + "max_run_duration" : { + Type: schema.TypeList, + Optional: true, + Description: `The timeout for new network connections to hosts.`, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "seconds": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Span of time at a resolution of a second. +Must be from 0 to 315,576,000,000 inclusive.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Span of time that's a fraction of a second at nanosecond +resolution. 
Durations less than one second are represented +with a 0 seconds field and a positive nanos field. Must +be from 0 to 999,999,999 inclusive.`, + }, + }, + }, + }, + "on_instance_stop_action": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Description: `Defines the behaviour for instances with the instance_termination_action.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "discard_local_ssd": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, the contents of any attached Local SSD disks will be discarded.`, + Default: false, + ForceNew: true, + }, + }, + }, + }, + "maintenance_interval": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: schedulingKeys, + Description: `Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC`, + }, +{{- end }} + "local_ssd_recovery_timeout" : { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the maximum amount of time a Local Ssd Vm should wait while + recovery of the Local Ssd state is attempted. Its value should be in + between 0 and 168 hours with hour granularity and the default value being 1 + hour.`, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "seconds": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Span of time at a resolution of a second. +Must be from 0 to 315,576,000,000 inclusive.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Span of time that's a fraction of a second at nanosecond +resolution. Durations less than one second are represented +with a 0 seconds field and a positive nanos field. 
Must +be from 0 to 999,999,999 inclusive.`, + }, + }, + }, + }, + }, + }, + }, + + "scratch_disk": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The scratch disks attached to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Name with which the attached disk is accessible under /dev/disk/by-id/`, + }, + "interface": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"SCSI", "NVME"}, false), + Description: `The disk interface used for attaching this disk. One of SCSI or NVME.`, + }, + "size": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(375), + Default: 375, + Description: `The size of the disk in gigabytes. One of 375 or 3000.`, + }, + }, + }, + }, + + "service_account": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + DiffSuppressFunc: serviceAccountDiffSuppress, + Description: `The service account to attach to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The service account e-mail address.`, + }, + + "scopes": { + Type: schema.TypeSet, + Required: true, + Description: `A list of service scopes.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return tpgresource.CanonicalizeServiceScope(v.(string)) + }, + }, + Set: tpgresource.StringScopeHashcode, + }, + }, + }, + }, + + "shielded_instance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + // Since this block is used by the API based on which + // image being used, the field needs to be marked as Computed. 
+ Computed: true, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), + Description: `The shielded vm config being used by the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceConfigKeys, + Default: false, + Description: `Whether secure boot is enabled for the instance.`, + }, + + "enable_vtpm": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceConfigKeys, + Default: true, + Description: `Whether the instance uses vTPM.`, + }, + + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceConfigKeys, + Default: true, + Description: `Whether integrity monitoring is enabled for the instance.`, + }, + }, + }, + }, + "advanced_machine_features": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: `Controls for advanced machine-related behavior features.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_nested_virtualization": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization","advanced_machine_features.0.threads_per_core"}, + Description: `Whether to enable nested virtualization or not.`, + }, + "threads_per_core": { + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization","advanced_machine_features.0.threads_per_core"}, + Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. 
If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, + }, + "visible_core_count": { + Type: schema.TypeInt, + Optional: true, + AtLeastOneOf: []string{"advanced_machine_features.0.enable_nested_virtualization","advanced_machine_features.0.threads_per_core","advanced_machine_features.0.visible_core_count"}, + Description: `The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. If unset, the number of cores is inferred from the instance\'s nominal CPU count and the underlying platform\'s SMT width.`, + }, + }, + }, + }, + "confidential_instance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The Confidential VM config being used by the instance. on_host_maintenance has to be set to TERMINATE or this will fail to create.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + {{- if eq $.TargetVersionName "ga" }} + "enable_confidential_compute": { + Type: schema.TypeBool, + Required: true, + Description: `Defines whether the instance should have confidential compute enabled.`, + }, + {{- else }} + "enable_confidential_compute": { + Type: schema.TypeBool, + Optional: true, + Description: `Defines whether the instance should have confidential compute enabled. Field will be deprecated in a future release`, + AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, + }, + "confidential_instance_type": { + Type: schema.TypeString, + Optional: true, + Description: ` + Specifies which confidential computing technology to use. + This could be one of the following values: SEV, SEV_SNP. 
+ If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, + AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, + }, + {{- end }} + }, + }, + }, + "desired_status": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"RUNNING", "TERMINATED"}, false), + Description: `Desired status of the instance. Either "RUNNING" or "TERMINATED".`, + }, + "current_status": { + Type: schema.TypeString, + Computed: true, + Description: ` + Current status of the instance. + This could be one of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. + For more information about the status of the instance, see [Instance life cycle](https://cloud.google.com/compute/docs/instances/instance-life-cycle).`, + }, + "tags": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The list of tags attached to the instance.`, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The zone of the instance. If self_link is provided, this value is ignored. 
If neither self_link nor zone are provided, the provider zone is used.`, + }, + + "cpu_platform": { + Type: schema.TypeString, + Computed: true, + Description: `The CPU platform used by this instance.`, + }, + + "instance_id": { + Type: schema.TypeString, + Computed: true, + Description: `The server-assigned unique identifier of this instance.`, + }, + + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The unique fingerprint of the labels.`, + }, + + "metadata_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The unique fingerprint of the metadata.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + + "tags_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The unique fingerprint of the tags.`, + }, + + "hostname": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A custom hostname for the instance. Must be a fully qualified DNS name and RFC-1035-valid. Valid format is a series of labels 1-63 characters long matching the regular expression [a-z]([-a-z0-9]*[a-z0-9]), concatenated with periods. The entire hostname must not exceed 253 characters. Changing this forces a new resource to be created.`, + }, + + "resource_policies": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Optional: true, + MaxItems: 1, + Description: `A list of self_links of resource policies to attach to the instance. 
Currently a max of 1 resource policy is supported.`, + }, + + "reservation_affinity": { + Type: schema.TypeList, + MaxItems: 1, + Computed: true, + Optional: true, + ForceNew: true, + Description: `Specifies the reservations that this instance can consume from.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"ANY_RESERVATION", "SPECIFIC_RESERVATION", "NO_RESERVATION"}, false), + Description: `The type of reservation from which this instance can consume resources.`, + }, + + "specific_reservation": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Specifies the label selector for the reservation to use.`, + + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Corresponds to the label key of a reservation resource. 
To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value.`, + }, + "values": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + ForceNew: true, + Description: `Corresponds to the label values of a reservation resource.`, + }, + }, + }, + }, + }, + }, + }, + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.DefaultProviderZone, + customdiff.If( + func(_ context.Context, d *schema.ResourceDiff, meta interface{}) bool { + return d.HasChange("guest_accelerator") + }, + suppressEmptyGuestAcceleratorDiff, + ), + desiredStatusDiff, + forceNewIfNetworkIPNotUpdatable, + tpgresource.SetLabelsDiff, + ), + UseJSONNumber: true, + } +} + +func getInstance(config *transport_tpg.Config, d *schema.ResourceData) (*compute.Instance, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return nil, err + } + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + {{- if eq $.TargetVersionName "ga" }} + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() + {{- else }} + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).View("FULL").Do() + {{- end }} + if err != nil { + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) + } + return instance, nil +} + +func getDisk(diskUri string, d *schema.ResourceData, config *transport_tpg.Config) (*compute.Disk, error) { + source, err := tpgresource.ParseDiskFieldValue(diskUri, d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return 
nil, err + } + + disk, err := config.NewComputeClient(userAgent).Disks.Get(source.Project, source.Zone, source.Name).Do() + if err != nil { + return nil, err + } + + return disk, err +} + +func expandComputeInstance(project string, d *schema.ResourceData, config *transport_tpg.Config) (*compute.Instance, error) { + // Get the machine type + var machineTypeUrl string + if mt, ok := d.GetOk("machine_type"); ok { + machineType, err := tpgresource.ParseMachineTypesFieldValue(mt.(string), d, config) + if err != nil { + return nil, fmt.Errorf( + "Error loading machine type: %s", + err) + } + machineTypeUrl = machineType.RelativeLink() + } + + // Build up the list of disks + + disks := []*compute.AttachedDisk{} + if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { + bootDisk, err := expandBootDisk(d, config, project) + if err != nil { + return nil, err + } + disks = append(disks, bootDisk) + } + + if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { + scratchDisks, err := expandScratchDisks(d, config, project) + if err != nil { + return nil, err + } + disks = append(disks, scratchDisks...) 
+ } + + attachedDisksCount := d.Get("attached_disk.#").(int) + + for i := 0; i < attachedDisksCount; i++ { + diskConfig := d.Get(fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) + disk, err := expandAttachedDisk(diskConfig, d, config) + if err != nil { + return nil, err + } + + disks = append(disks, disk) + } + + scheduling, err := expandScheduling(d.Get("scheduling")) + if err != nil { + return nil, fmt.Errorf("Error creating scheduling: %s", err) + } + + params, err := expandParams(d) + if err != nil { + return nil, fmt.Errorf("Error creating params: %s", err) + } + + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return nil, fmt.Errorf("Error creating metadata: %s", err) + } + + {{ if ne $.TargetVersionName `ga` -}} + PartnerMetadata, err := resourceInstancePartnerMetadata(d) + if err != nil { + return nil, fmt.Errorf("Error creating partner metadata: %s", err) + } + {{- end }} + + networkInterfaces, err := expandNetworkInterfaces(d, config) + if err != nil { + return nil, fmt.Errorf("Error creating network interfaces: %s", err) + } + networkPerformanceConfig, err := expandNetworkPerformanceConfig(d, config) + if err != nil { + return nil, fmt.Errorf("Error creating network performance config: %s", err) + } + accels, err := expandInstanceGuestAccelerators(d, config) + if err != nil { + return nil, fmt.Errorf("Error creating guest accelerators: %s", err) + } + + reservationAffinity, err := expandReservationAffinity(d) + if err != nil { + return nil, fmt.Errorf("Error creating reservation affinity: %s", err) + } + + // Create the instance information + return &compute.Instance{ + CanIpForward: d.Get("can_ip_forward").(bool), + Description: d.Get("description").(string), + Disks: disks, + MachineType: machineTypeUrl, + Metadata: metadata, + {{- if ne $.TargetVersionName "ga" }} + PartnerMetadata: PartnerMetadata, + {{- end }} + Name: d.Get("name").(string), + NetworkInterfaces: networkInterfaces, + NetworkPerformanceConfig: 
networkPerformanceConfig, + Tags: resourceInstanceTags(d), + Params: params, + Labels: tpgresource.ExpandEffectiveLabels(d), + ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), + GuestAccelerators: accels, + MinCpuPlatform: d.Get("min_cpu_platform").(string), + Scheduling: scheduling, + DeletionProtection: d.Get("deletion_protection").(bool), + Hostname: d.Get("hostname").(string), + ForceSendFields: []string{"CanIpForward", "DeletionProtection"}, + ConfidentialInstanceConfig: expandConfidentialInstanceConfig(d), + AdvancedMachineFeatures: expandAdvancedMachineFeatures(d), + ShieldedInstanceConfig: expandShieldedVmConfigs(d), + DisplayDevice: expandDisplayDevice(d), + ResourcePolicies: tpgresource.ConvertStringArr(d.Get("resource_policies").([]interface{})), + ReservationAffinity: reservationAffinity, + }, nil +} + +var computeInstanceStatus = []string{ + "PROVISIONING", + "REPAIRING", + "RUNNING", + "STAGING", + "STOPPED", + "STOPPING", + "SUSPENDED", + "SUSPENDING", + "TERMINATED", +} + +// return all possible Compute instances status except the one passed as parameter +func getAllStatusBut(status string) []string { + for i, s := range computeInstanceStatus { + if status == s { + return append(computeInstanceStatus[:i], computeInstanceStatus[i+1:]...) 
+ } + } + return computeInstanceStatus +} + +func waitUntilInstanceHasDesiredStatus(config *transport_tpg.Config, d *schema.ResourceData) error { + desiredStatus := d.Get("desired_status").(string) + + if desiredStatus != "" { + stateRefreshFunc := func() (interface{}, string, error) { + instance, err := getInstance(config, d) + if err != nil || instance == nil { + log.Printf("Error on InstanceStateRefresh: %s", err) + return nil, "", err + } + return instance.Id, instance.Status, nil + } + stateChangeConf := retry.StateChangeConf{ + Delay: 5 * time.Second, + Pending: getAllStatusBut(desiredStatus), + Refresh: stateRefreshFunc, + Target: []string{desiredStatus}, + Timeout: d.Timeout(schema.TimeoutUpdate), + MinTimeout: 2 * time.Second, + } + _, err := stateChangeConf.WaitForState() + + if err != nil { + return fmt.Errorf( + "Error waiting for instance to reach desired status %s: %s", desiredStatus, err) + } + } + + return nil +} + +func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + // Get the zone + z, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Loading zone: %s", z) + zone, err := config.NewComputeClient(userAgent).Zones.Get( + project, z).Do() + if err != nil { + return fmt.Errorf("Error loading zone '%s': %s", z, err) + } + + instance, err := expandComputeInstance(project, d, config) + if err != nil { + return err + } + + {{ if ne $.TargetVersionName `ga` -}} + securityPolicies, err := computeInstanceMapSecurityPoliciesCreate(d, config) + if err != nil { + return err + } + {{- end }} + + log.Printf("[INFO] Requesting instance creation") + op, err := config.NewComputeClient(userAgent).Instances.Insert(project, zone.Name, instance).Do() + 
if err != nil { + return fmt.Errorf("Error creating instance: %s", err) + } + + // Store the ID now + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, z, instance.Name)) + + // Wait for the operation to complete + waitErr := ComputeOperationWaitTime(config, op, project, "instance to create", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + {{ if ne $.TargetVersionName `ga` -}} + err = computeInstanceAddSecurityPolicy(d, config, securityPolicies, project, z, userAgent, instance.Name) + if err != nil { + return fmt.Errorf("Error creating instance while setting the security policies: %s", err) + } + {{- end }} + + err = waitUntilInstanceHasDesiredStatus(config, d) + if err != nil { + return fmt.Errorf("Error waiting for status: %s", err) + } + + return resourceComputeInstanceRead(d, meta) +} + +func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + instance, err := getInstance(config, d) + if err != nil || instance == nil { + return err + } + + md := flattenMetadataBeta(instance.Metadata) + + // If the existing state contains "metadata_startup_script" instead of "metadata.startup-script", + // we should move the remote metadata.startup-script to metadata_startup_script to avoid + // specifying it in two places. 
+ if _, ok := d.GetOk("metadata_startup_script"); ok { + if err := d.Set("metadata_startup_script", md["startup-script"]); err != nil { + return fmt.Errorf("Error setting metadata_startup_script: %s", err) + } + + delete(md, "startup-script") + } + + if err = d.Set("metadata", md); err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + + if err := d.Set("metadata_fingerprint", instance.Metadata.Fingerprint); err != nil { + return fmt.Errorf("Error setting metadata_fingerprint: %s", err) + } + + {{ if ne $.TargetVersionName `ga` -}} + if instance.PartnerMetadata != nil { + partnerMetadata, err := flattenPartnerMetadata(instance.PartnerMetadata) + if err != nil { + return fmt.Errorf("Error parsing partner metadata: %s", err) + } + if err = d.Set("partner_metadata", partnerMetadata); err != nil { + return fmt.Errorf("Error setting partner metadata: %s", err) + } + } + {{- end }} + + if err := d.Set("can_ip_forward", instance.CanIpForward); err != nil { + return fmt.Errorf("Error setting can_ip_forward: %s", err) + } + if err := d.Set("machine_type", tpgresource.GetResourceNameFromSelfLink(instance.MachineType)); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + if err := d.Set("network_performance_config", flattenNetworkPerformanceConfig(instance.NetworkPerformanceConfig)); err != nil { + return err + } + // Set the networks + // Use the first external IP found for the default connection info. + networkInterfaces, _, internalIP, externalIP, err := flattenNetworkInterfaces(d, config, instance.NetworkInterfaces) + if err != nil { + return err + } + if err := d.Set("network_interface", networkInterfaces); err != nil { + return err + } + + // Fall back on internal ip if there is no external ip. This makes sense in the situation where + // terraform is being used on a cloud instance and can therefore access the instances it creates + // via their internal ips. 
+ sshIP := externalIP + if sshIP == "" { + sshIP = internalIP + } + + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": sshIP, + }) + + // Set the tags fingerprint if there is one. + if instance.Tags != nil { + if err := d.Set("tags_fingerprint", instance.Tags.Fingerprint); err != nil { + return fmt.Errorf("Error setting tags_fingerprint: %s", err) + } + if err := d.Set("tags", tpgresource.ConvertStringArrToInterface(instance.Tags.Items)); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + } + + if err := tpgresource.SetLabels(instance.Labels, d, "labels"); err != nil { + return err + } + + if err := tpgresource.SetLabels(instance.Labels, d, "terraform_labels"); err != nil { + return err + } + + if err := d.Set("effective_labels", instance.Labels); err != nil { + return err + } + + if instance.LabelFingerprint != "" { + if err := d.Set("label_fingerprint", instance.LabelFingerprint); err != nil { + return fmt.Errorf("Error setting label_fingerprint: %s", err) + } + } + + attachedDiskSources := make(map[string]int) + for i, v := range d.Get("attached_disk").([]interface{}) { + if v == nil { + // There was previously a bug in this code that, when triggered, + // would cause some nil values to end up in the list of attached disks. + // Check for this case to make sure we don't try to parse the nil disk. 
+ continue + } + disk := v.(map[string]interface{}) + s := disk["source"].(string) + var sourceLink string + if strings.Contains(s, "regions/") { + source, err := tpgresource.ParseRegionDiskFieldValue(disk["source"].(string), d, config) + if err != nil { + return err + } + sourceLink = source.RelativeLink() + } else { + source, err := tpgresource.ParseDiskFieldValue(disk["source"].(string), d, config) + if err != nil { + return err + } + sourceLink = source.RelativeLink() + } + attachedDiskSources[sourceLink] = i + } + + attachedDisks := make([]map[string]interface{}, d.Get("attached_disk.#").(int)) + scratchDisks := []map[string]interface{}{} + for _, disk := range instance.Disks { + if disk.Boot { + if err := d.Set("boot_disk", flattenBootDisk(d, disk, config)); err != nil { + return fmt.Errorf("Error setting boot_disk: %s", err) + } + } else if disk.Type == "SCRATCH" { + scratchDisks = append(scratchDisks, flattenScratchDisk(disk)) + } else { + var sourceLink string + if strings.Contains(disk.Source, "regions/") { + source, err := tpgresource.ParseRegionDiskFieldValue(disk.Source, d, config) + if err != nil { + return err + } + sourceLink = source.RelativeLink() + } else { + source, err := tpgresource.ParseDiskFieldValue(disk.Source, d, config) + if err != nil { + return err + } + sourceLink = source.RelativeLink() + } + adIndex, inConfig := attachedDiskSources[sourceLink] + di := map[string]interface{}{ + "source": tpgresource.ConvertSelfLinkToV1(disk.Source), + "device_name": disk.DeviceName, + "mode": disk.Mode, + } + if key := disk.DiskEncryptionKey; key != nil { + if inConfig { + rawKey := d.Get(fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", adIndex)) + if rawKey != "" { + di["disk_encryption_key_raw"] = rawKey + } + } + if key.KmsKeyName != "" { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + di["kms_key_self_link"] = 
strings.Split(disk.DiskEncryptionKey.KmsKeyName, "/cryptoKeyVersions")[0] + } + if key.Sha256 != "" { + di["disk_encryption_key_sha256"] = key.Sha256 + } + } + // We want the disks to remain in the order we set in the config, so if a disk + // is present in the config, make sure it's at the correct index. Otherwise, append it. + if inConfig { + attachedDisks[adIndex] = di + } else { + attachedDisks = append(attachedDisks, di) + } + } + } + + if err := d.Set("resource_policies", instance.ResourcePolicies); err != nil { + return fmt.Errorf("Error setting resource_policies: %s", err) + } + + // Remove nils from map in case there were disks in the config that were not present on read; + // i.e. a disk was detached out of band + ads := []map[string]interface{}{} + for _, d := range attachedDisks { + if d != nil { + ads = append(ads, d) + } + } + + zone := tpgresource.GetResourceNameFromSelfLink(instance.Zone) + + if err := d.Set("service_account", flattenServiceAccounts(instance.ServiceAccounts)); err != nil { + return fmt.Errorf("Error setting service_account: %s", err) + } + if err := d.Set("attached_disk", ads); err != nil { + return fmt.Errorf("Error setting attached_disk: %s", err) + } + if err := d.Set("scratch_disk", scratchDisks); err != nil { + return fmt.Errorf("Error setting scratch_disk: %s", err) + } + if err := d.Set("scheduling", flattenScheduling(instance.Scheduling)); err != nil { + return fmt.Errorf("Error setting scheduling: %s", err) + } + if err := d.Set("guest_accelerator", flattenGuestAccelerators(instance.GuestAccelerators)); err != nil { + return fmt.Errorf("Error setting guest_accelerator: %s", err) + } + if err := d.Set("shielded_instance_config", flattenShieldedVmConfig(instance.ShieldedInstanceConfig)); err != nil { + return fmt.Errorf("Error setting shielded_instance_config: %s", err) + } + if err := d.Set("enable_display", flattenEnableDisplay(instance.DisplayDevice)); err != nil { + return fmt.Errorf("Error setting enable_display: %s", 
err) + } + if err := d.Set("cpu_platform", instance.CpuPlatform); err != nil { + return fmt.Errorf("Error setting cpu_platform: %s", err) + } + if err := d.Set("min_cpu_platform", instance.MinCpuPlatform); err != nil { + return fmt.Errorf("Error setting min_cpu_platform: %s", err) + } + if err := d.Set("deletion_protection", instance.DeletionProtection); err != nil { + return fmt.Errorf("Error setting deletion_protection: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(instance.SelfLink)); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("instance_id", fmt.Sprintf("%d", instance.Id)); err != nil { + return fmt.Errorf("Error setting instance_id: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("zone", zone); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("name", instance.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("description", instance.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("hostname", instance.Hostname); err != nil { + return fmt.Errorf("Error setting hostname: %s", err) + } + if err := d.Set("current_status", instance.Status); err != nil { + return fmt.Errorf("Error setting current_status: %s", err) + } + if err := d.Set("confidential_instance_config", flattenConfidentialInstanceConfig(instance.ConfidentialInstanceConfig)); err != nil { + return fmt.Errorf("Error setting confidential_instance_config: %s", err) + } + if err := d.Set("advanced_machine_features", flattenAdvancedMachineFeatures(instance.AdvancedMachineFeatures)); err != nil { + return fmt.Errorf("Error setting advanced_machine_features: %s", err) + } + if d.Get("desired_status") != "" { + if err := d.Set("desired_status", instance.Status); err != nil { + return fmt.Errorf("Error 
setting desired_status: %s", err) + } + } + if err := d.Set("reservation_affinity", flattenReservationAffinity(instance.ReservationAffinity)); err != nil { + return fmt.Errorf("Error setting reservation_affinity: %s", err) + } + + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, zone, instance.Name)) + + return nil +} + +func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + + // Use beta api directly in order to read network_interface.fingerprint without having to put it in the schema. + // Change back to getInstance(config, d) once updating alias ips is GA. + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance %s", d.Get("name").(string))) + } + + // Enable partial mode for the resource since it is possible + d.Partial(true) + + if d.HasChange("description") { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() + if err != nil { + return fmt.Errorf("Error retrieving instance: %s", err) + } + + instance.Description = d.Get("description").(string) + + op, err := config.NewComputeClient(userAgent).Instances.Update(project, zone, instance.Name, instance).Do() + if err != nil { + return fmt.Errorf("Error updating instance: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "description, updating", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + + return nil + }, + 
}) + + if err != nil { + return err + } + } + + if d.HasChange("metadata") { + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return fmt.Errorf("Error parsing metadata: %s", err) + } + + metadataV1 := &compute.Metadata{} + if err := tpgresource.Convert(metadata, metadataV1); err != nil { + return err + } + + // We're retrying for an error 412 where the metadata fingerprint is out of date + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + // retrieve up-to-date metadata from the API in case several updates hit simultaneously. instances + // sometimes but not always share metadata fingerprints. + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() + if err != nil { + return fmt.Errorf("Error retrieving metadata: %s", err) + } + + metadataV1.Fingerprint = instance.Metadata.Fingerprint + + op, err := config.NewComputeClient(userAgent).Instances.SetMetadata(project, zone, instance.Name, metadataV1).Do() + if err != nil { + return fmt.Errorf("Error updating metadata: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "metadata to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + + return nil + }, + }) + + if err != nil { + return err + } + } + +{{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("partner_metadata") { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).View("FULL").Do() + if err != nil { + return fmt.Errorf("Error retrieving partner_metadata: %s", err) + } + instance.Fingerprint = instance.Fingerprint + instance.PartnerMetadata = resourceInstancePatchPartnerMetadata(d, instance.PartnerMetadata) + instance.NullFields = []string{"partnerMetadata"} + + op, err := config.NewComputeClient(userAgent).Instances.Update(project, zone, instance.Name, instance).Do() + if 
err != nil { + return fmt.Errorf("Error updating partner_metadata: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "partner metadata to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + return nil + }, + }) + + if err != nil { + return err + } + } + +{{ end }} + if d.HasChange("tags") { + tags := resourceInstanceTags(d) + tagsV1 := &compute.Tags{} + if err := tpgresource.Convert(tags, tagsV1); err != nil { + return err + } + op, err := config.NewComputeClient(userAgent).Instances.SetTags( + project, zone, d.Get("name").(string), tagsV1).Do() + if err != nil { + return fmt.Errorf("Error updating tags: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "tags to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if d.HasChange("effective_labels") { + labels := tpgresource.ExpandEffectiveLabels(d) + labelFingerprint := d.Get("label_fingerprint").(string) + req := compute.InstancesSetLabelsRequest{Labels: labels, LabelFingerprint: labelFingerprint} + + op, err := config.NewComputeClient(userAgent).Instances.SetLabels(project, zone, instance.Name, &req).Do() + if err != nil { + return fmt.Errorf("Error updating labels: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "labels to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if d.HasChange("params.0.resource_manager_tags") { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() + if err != nil { + return fmt.Errorf("Error retrieving instance: %s", err) + } + + params, err := expandParams(d) + if err != nil { + return fmt.Errorf("Error updating params: %s", err) + } + + instance.Params = params + + op, err := config.NewComputeClient(userAgent).Instances.Update(project, zone, 
instance.Name, instance).Do() + if err != nil { + return fmt.Errorf("Error updating instance: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "resource_manager_tags, updating", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + + return nil + }, + }) + + if err != nil { + return err + } + } + + if d.HasChange("resource_policies") { + if len(instance.ResourcePolicies) > 0 { + req := compute.InstancesRemoveResourcePoliciesRequest{ResourcePolicies: instance.ResourcePolicies} + + op, err := config.NewComputeClient(userAgent).Instances.RemoveResourcePolicies(project, zone, instance.Name, &req).Do() + if err != nil { + return fmt.Errorf("Error removing existing resource policies: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "resource policies to remove", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + resourcePolicies := tpgresource.ConvertStringArr(d.Get("resource_policies").([]interface{})) + if len(resourcePolicies) > 0 { + req := compute.InstancesAddResourcePoliciesRequest{ResourcePolicies: resourcePolicies} + + op, err := config.NewComputeClient(userAgent).Instances.AddResourcePolicies(project, zone, instance.Name, &req).Do() + if err != nil { + return fmt.Errorf("Error adding resource policies: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "resource policies to add", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + } + + bootRequiredSchedulingChange := schedulingHasChangeRequiringReboot(d) + bootNotRequiredSchedulingChange := schedulingHasChangeWithoutReboot(d) + if bootNotRequiredSchedulingChange { + scheduling, err := expandScheduling(d.Get("scheduling")) + if err != nil { + return fmt.Errorf("Error creating request data to update scheduling: %s", err) + } + + op, err := config.NewComputeClient(userAgent).Instances.SetScheduling( + project, zone, instance.Name, 
scheduling).Do() + if err != nil { + return fmt.Errorf("Error updating scheduling policy: %s", err) + } + + opErr := ComputeOperationWaitTime( + config, op, project, "scheduling policy update", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + networkInterfaces, err := expandNetworkInterfaces(d, config) + if err != nil { + return fmt.Errorf("Error getting network interface from config: %s", err) + } + + // Sanity check + if len(networkInterfaces) != len(instance.NetworkInterfaces) { + return fmt.Errorf("Instance had unexpected number of network interfaces: %d", len(instance.NetworkInterfaces)) + } + + {{ if ne $.TargetVersionName `ga` -}} + updateSecurityPolicy := false + for i := 0; i < len(instance.NetworkInterfaces); i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + // check if sec policy has been changed + // check if access config has been changed because it may be deleted and needs to be re-created. + if d.HasChange(prefix+".security_policy") || d.HasChange(prefix+".access_config") || d.HasChange(prefix+".ipv6_access_config") { + if instance.Status != "RUNNING" { + return fmt.Errorf("Error to update security policy because the current instance status must be \"RUNNING\". The security policy or some access config may have changed which requires the security policy to be re-applied") + } + updateSecurityPolicy = true + } + } + + securityPolicies := make(map[string][]string) + if updateSecurityPolicy { + // map the security policies to call SetSecurityPolicy because the next section of the code removes and re-creates the access_config which ends up removing the security_policy. 
+ securityPolicies, err = computeInstanceMapSecurityPoliciesUpdate(d, config) + if err != nil { + return err + } + } + {{- end }} + + var updatesToNIWhileStopped []func(inst *compute.Instance) error + for i := 0; i < len(networkInterfaces); i++ { + prefix := fmt.Sprintf("network_interface.%d", i) + networkInterface := networkInterfaces[i] + instNetworkInterface := instance.NetworkInterfaces[i] + + networkName := d.Get(prefix + ".name").(string) + subnetwork := networkInterface.Subnetwork + updateDuringStop := d.HasChange(prefix+".subnetwork") || d.HasChange(prefix+".network") || d.HasChange(prefix+".subnetwork_project") + + // Sanity check + if networkName != instNetworkInterface.Name { + return fmt.Errorf("Instance networkInterface had unexpected name: %s", instNetworkInterface.Name) + } + + // On creation the network is inferred if only subnetwork is given. + // Unforunately for us there is no way to determine if the user is + // explicitly assigning network or we are reusing the one that was inferred + // from state. So here we check if subnetwork changed and network did not. + // In this scenario we assume network was inferred and attempt to figure out + // the new corresponding network. 
+ + if d.HasChange(prefix + ".subnetwork") { + if !d.HasChange(prefix + ".network") { + subnetProjectField := prefix + ".subnetwork_project" + sf, err := tpgresource.ParseSubnetworkFieldValueWithProjectField(subnetwork, subnetProjectField, d, config) + if err != nil { + return fmt.Errorf("Cannot determine self_link for subnetwork %q: %s", subnetwork, err) + } + resp, err := config.NewComputeClient(userAgent).Subnetworks.Get(sf.Project, sf.Region, sf.Name).Do() + if err != nil { + return errwrap.Wrapf("Error getting subnetwork value: {{"{{"}}err{{"}}"}}", err) + } + nf, err := tpgresource.ParseNetworkFieldValue(resp.Network, d, config) + if err != nil { + return fmt.Errorf("Cannot determine self_link for network %q: %s", resp.Network, err) + } + networkInterface.Network = nf.RelativeLink() + } + } + + if !updateDuringStop && d.HasChange(prefix+".access_config") { + // TODO: This code deletes then recreates accessConfigs. This is bad because it may + // leave the machine inaccessible from either ip if the creation part fails (network + // timeout etc). However right now there is a GCE limit of 1 accessConfig so it is + // the only way to do it. In future this should be revised to only change what is + // necessary, and also add before removing. 
+ + // Delete current access configs + err := computeInstanceDeleteAccessConfigs(d, config, instNetworkInterface, project, zone, userAgent, instance.Name) + if err != nil { + return err + } + + // Create new ones + err = computeInstanceAddAccessConfigs(d, config, instNetworkInterface, networkInterface.AccessConfigs, project, zone, userAgent, instance.Name) + if err != nil { + return err + } + + // re-read fingerprint + instance, err = config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() + if err != nil { + return err + } + instNetworkInterface = instance.NetworkInterfaces[i] + } + + if !updateDuringStop && d.HasChange(prefix+".alias_ip_range") { + // Alias IP ranges cannot be updated; they must be removed and then added + // unless you are changing subnetwork/network + if len(instNetworkInterface.AliasIpRanges) > 0 { + ni := &compute.NetworkInterface{ + Fingerprint: instNetworkInterface.Fingerprint, + ForceSendFields: []string{"AliasIpRanges"}, + } + op, err := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, ni).Do() + if err != nil { + return errwrap.Wrapf("Error removing alias_ip_range: {{"{{"}}err{{"}}"}}", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "updating alias ip ranges", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + // re-read fingerprint + instance, err = config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() + if err != nil { + return err + } + instNetworkInterface = instance.NetworkInterfaces[i] + } + + networkInterfacePatchObj := &compute.NetworkInterface{ + AliasIpRanges: networkInterface.AliasIpRanges, + Fingerprint: instNetworkInterface.Fingerprint, + } + updateCall := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, networkInterfacePatchObj).Do + op, err := updateCall() + if err != nil { + return 
errwrap.Wrapf("Error updating network interface: {{"{{"}}err{{"}}"}}", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "network interface to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if !updateDuringStop && d.HasChange(prefix+".stack_type") { + + networkInterfacePatchObj := &compute.NetworkInterface{ + StackType: d.Get(prefix+".stack_type").(string), + Fingerprint: instNetworkInterface.Fingerprint, + } + updateCall := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, networkInterfacePatchObj).Do + op, err := updateCall() + if err != nil { + return errwrap.Wrapf("Error updating network interface: {{"{{"}}err{{"}}"}}", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "network interface to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if !updateDuringStop && d.HasChange(prefix+".ipv6_address") { + + networkInterfacePatchObj := &compute.NetworkInterface{ + Ipv6Address: d.Get(prefix+".ipv6_address").(string), + Fingerprint: instNetworkInterface.Fingerprint, + } + updateCall := config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, networkInterfacePatchObj).Do + op, err := updateCall() + if err != nil { + return errwrap.Wrapf("Error updating network interface: {{"{{"}}err{{"}}"}}", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "network interface to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if !updateDuringStop && d.HasChange(prefix+".internal_ipv6_prefix_length") { + + networkInterfacePatchObj := &compute.NetworkInterface{ + InternalIpv6PrefixLength: d.Get(prefix+".internal_ipv6_prefix_length").(int64), + Fingerprint: instNetworkInterface.Fingerprint, + } + updateCall := 
config.NewComputeClient(userAgent).Instances.UpdateNetworkInterface(project, zone, instance.Name, networkName, networkInterfacePatchObj).Do + op, err := updateCall() + if err != nil { + return errwrap.Wrapf("Error updating network interface: {{"{{"}}err{{"}}"}}", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "network interface to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if updateDuringStop { + // Lets be explicit about what we are changing in the patch call + networkInterfacePatchObj := &compute.NetworkInterface{ + Network: networkInterface.Network, + Subnetwork: networkInterface.Subnetwork, + AliasIpRanges: networkInterface.AliasIpRanges, + } + + // network_ip can be inferred if not declared. Let's only patch if it's being changed by user + // otherwise this could fail if the network ip is not compatible with the new Subnetwork/Network. + if d.HasChange(prefix + ".network_ip") { + networkInterfacePatchObj.NetworkIP = networkInterface.NetworkIP + } + + if d.HasChange(prefix+".internal_ipv6_prefix_length") { + networkInterfacePatchObj.Ipv6Address = networkInterface.Ipv6Address + } + + if d.HasChange(prefix+".ipv6_address") { + networkInterfacePatchObj.Ipv6Address = networkInterface.Ipv6Address + } + + // Access config can run into some issues since we can't tell the difference between + // the users declared intent (config within their hcl file) and what we have inferred from the + // server (terraform state). Access configs contain an ip subproperty that can be incompatible + // with the subnetwork/network we are transitioning to. Due to this we only change access + // configs if we notice the configuration (user intent) changes. 
+ accessConfigsHaveChanged := d.HasChange(prefix + ".access_config") + + updateCall := computeInstanceCreateUpdateWhileStoppedCall(d, config, networkInterfacePatchObj, networkInterface.AccessConfigs, accessConfigsHaveChanged, i, project, zone, userAgent, instance.Name) + updatesToNIWhileStopped = append(updatesToNIWhileStopped, updateCall) + } + } + + if d.HasChange("attached_disk") { + o, n := d.GetChange("attached_disk") + + // Keep track of disks currently in the instance. Because the google_compute_disk resource + // can detach disks, it's possible that there are fewer disks currently attached than there + // were at the time we ran terraform plan. + currDisks := map[string]struct{}{} + for _, disk := range instance.Disks { + if !disk.Boot && disk.Type != "SCRATCH" { + currDisks[disk.DeviceName] = struct{}{} + } + } + + // Keep track of disks currently in state. + // Since changing any field within the disk needs to detach+reattach it, + // keep track of the hash of the full disk. + oDisks := map[uint64]string{} + for _, disk := range o.([]interface{}) { + diskConfig := disk.(map[string]interface{}) + computeDisk, err := expandAttachedDisk(diskConfig, d, config) + if err != nil { + return err + } + hash, err := hashstructure.Hash(*computeDisk, nil) + if err != nil { + return err + } + if _, ok := currDisks[computeDisk.DeviceName]; ok { + oDisks[hash] = computeDisk.DeviceName + } + } + + // Keep track of new config's disks. + // Since changing any field within the disk needs to detach+reattach it, + // keep track of the hash of the full disk. + // If a disk with a certain hash is only in the new config, it should be attached. 
+ nDisks := map[uint64]struct{}{} + var attach []*compute.AttachedDisk + for _, disk := range n.([]interface{}) { + diskConfig := disk.(map[string]interface{}) + computeDisk, err := expandAttachedDisk(diskConfig, d, config) + if err != nil { + return err + } + hash, err := hashstructure.Hash(*computeDisk, nil) + if err != nil { + return err + } + nDisks[hash] = struct{}{} + + if _, ok := oDisks[hash]; !ok { + computeDiskV1 := &compute.AttachedDisk{} + err = tpgresource.Convert(computeDisk, computeDiskV1) + if err != nil { + return err + } + attach = append(attach, computeDiskV1) + } + } + + // If a source is only in the old config, it should be detached. + // Detach the old disks. + for hash, deviceName := range oDisks { + if _, ok := nDisks[hash]; !ok { + op, err := config.NewComputeClient(userAgent).Instances.DetachDisk(project, zone, instance.Name, deviceName).Do() + if err != nil { + return errwrap.Wrapf("Error detaching disk: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "detaching disk", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + log.Printf("[DEBUG] Successfully detached disk %s", deviceName) + } + } + + // Attach the new disks + for _, disk := range attach { + op, err := config.NewComputeClient(userAgent).Instances.AttachDisk(project, zone, instance.Name, disk).Do() + if err != nil { + return errwrap.Wrapf("Error attaching disk : {{"{{"}}err{{"}}"}}", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "attaching disk", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + log.Printf("[DEBUG] Successfully attached disk %s", disk.Source) + } + } + + // d.HasChange("service_account") is oversensitive: see https://github.com/hashicorp/terraform/issues/17411 + // Until that's fixed, manually check whether there is a change. 
+ o, n := d.GetChange("service_account") + oList := o.([]interface{}) + nList := n.([]interface{}) + scopesChange := false + if len(oList) != len(nList) { + scopesChange = true + } else if len(oList) == 1 { + // service_account has MaxItems: 1 + // scopes is a required field and so will always be set + oScopes := oList[0].(map[string]interface{})["scopes"].(*schema.Set) + nScopes := nList[0].(map[string]interface{})["scopes"].(*schema.Set) + scopesChange = !oScopes.Equal(nScopes) + } + + if d.HasChange("deletion_protection") { + nDeletionProtection := d.Get("deletion_protection").(bool) + + op, err := config.NewComputeClient(userAgent).Instances.SetDeletionProtection(project, zone, d.Get("name").(string)).DeletionProtection(nDeletionProtection).Do() + if err != nil { + return fmt.Errorf("Error updating deletion protection flag: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "deletion protection to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if d.HasChange("can_ip_forward") { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() + if err != nil { + return fmt.Errorf("Error retrieving instance: %s", err) + } + + instance.CanIpForward = d.Get("can_ip_forward").(bool) + + op, err := config.NewComputeClient(userAgent).Instances.Update(project, zone, instance.Name, instance).Do() + if err != nil { + return fmt.Errorf("Error updating instance: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "can_ip_forward, updating", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + + return nil + }, + }) + + if err != nil { + return err + } + } + + + needToStopInstanceBeforeUpdating := scopesChange || d.HasChange("service_account.0.email") || d.HasChange("machine_type") || d.HasChange("min_cpu_platform") || 
d.HasChange("enable_display") || d.HasChange("shielded_instance_config") || len(updatesToNIWhileStopped) > 0 || bootRequiredSchedulingChange || d.HasChange("advanced_machine_features") + + if d.HasChange("desired_status") && !needToStopInstanceBeforeUpdating { + desiredStatus := d.Get("desired_status").(string) + + if desiredStatus != "" { + var op *compute.Operation + + if desiredStatus == "RUNNING" { + op, err = startInstanceOperation(d, config) + if err != nil { + return errwrap.Wrapf("Error starting instance: {{"{{"}}err{{"}}"}}", err) + } + } else if desiredStatus == "TERMINATED" { + op, err = config.NewComputeClient(userAgent).Instances.Stop(project, zone, instance.Name).Do() + if err != nil { + return err + } + } + opErr := ComputeOperationWaitTime( + config, op, project, "updating status", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + } + + // Attributes which can only be changed if the instance is stopped + if needToStopInstanceBeforeUpdating { + statusBeforeUpdate := instance.Status + desiredStatus := d.Get("desired_status").(string) + + if statusBeforeUpdate == "RUNNING" && desiredStatus != "TERMINATED" && !d.Get("allow_stopping_for_update").(bool) { +{{- if ne $.TargetVersionName "ga" }} + return fmt.Errorf("Changing the machine_type, min_cpu_platform, service_account, enable_display, shielded_instance_config, scheduling.node_affinities, scheduling.max_run_duration " + +{{- else }} + return fmt.Errorf("Changing the machine_type, min_cpu_platform, service_account, enable_display, shielded_instance_config, scheduling.node_affinities " + +{{- end }} + "or network_interface.[#d].(network/subnetwork/subnetwork_project) or advanced_machine_features on a started instance requires stopping it. " + + "To acknowledge this, please set allow_stopping_for_update = true in your config. 
" + + "You can also stop it by setting desired_status = \"TERMINATED\", but the instance will not be restarted after the update.") + } + + if statusBeforeUpdate != "TERMINATED" { + op, err := config.NewComputeClient(userAgent).Instances.Stop(project, zone, instance.Name).Do() + if err != nil { + return errwrap.Wrapf("Error stopping instance: {{"{{"}}err{{"}}"}}", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "stopping instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if d.HasChange("min_cpu_platform") { + minCpuPlatform := d.Get("min_cpu_platform") + req := &compute.InstancesSetMinCpuPlatformRequest{ + MinCpuPlatform: minCpuPlatform.(string), + } + op, err := config.NewComputeClient(userAgent).Instances.SetMinCpuPlatform(project, zone, instance.Name, req).Do() + if err != nil { + return err + } + opErr := ComputeOperationWaitTime(config, op, project, "updating min cpu platform", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if d.HasChange("machine_type") { + mt, err := tpgresource.ParseMachineTypesFieldValue(d.Get("machine_type").(string), d, config) + if err != nil { + return err + } + req := &compute.InstancesSetMachineTypeRequest{ + MachineType: mt.RelativeLink(), + } + op, err := config.NewComputeClient(userAgent).Instances.SetMachineType(project, zone, instance.Name, req).Do() + if err != nil { + return err + } + opErr := ComputeOperationWaitTime(config, op, project, "updating machinetype", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if d.HasChange("service_account.0.email") || scopesChange { + sa := d.Get("service_account").([]interface{}) + req := &compute.InstancesSetServiceAccountRequest{ForceSendFields: []string{"email"}} + if len(sa) > 0 && sa[0] != nil { + saMap := sa[0].(map[string]interface{}) + req.Email = saMap["email"].(string) + req.Scopes = 
tpgresource.CanonicalizeServiceScopes(tpgresource.ConvertStringSet(saMap["scopes"].(*schema.Set))) + } + op, err := config.NewComputeClient(userAgent).Instances.SetServiceAccount(project, zone, instance.Name, req).Do() + if err != nil { + return err + } + opErr := ComputeOperationWaitTime(config, op, project, "updating service account", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if d.HasChange("enable_display") { + req := &compute.DisplayDevice{ + EnableDisplay: d.Get("enable_display").(bool), + ForceSendFields: []string{"EnableDisplay"}, + } + op, err := config.NewComputeClient(userAgent).Instances.UpdateDisplayDevice(project, zone, instance.Name, req).Do() + if err != nil { + return fmt.Errorf("Error updating display device: %s", err) + } + opErr := ComputeOperationWaitTime(config, op, project, "updating display device", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if d.HasChange("shielded_instance_config") { + shieldedVmConfig := expandShieldedVmConfigs(d) + + op, err := config.NewComputeClient(userAgent).Instances.UpdateShieldedInstanceConfig(project, zone, instance.Name, shieldedVmConfig).Do() + if err != nil { + return fmt.Errorf("Error updating shielded vm config: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, + "shielded vm config update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if bootRequiredSchedulingChange { + scheduling, err := expandScheduling(d.Get("scheduling")) + if err != nil { + return fmt.Errorf("Error creating request data to update scheduling: %s", err) + } + + op, err := config.NewComputeClient(userAgent).Instances.SetScheduling( + project, zone, instance.Name, scheduling).Do() + if err != nil { + return fmt.Errorf("Error updating scheduling policy: %s", err) + } + + opErr := ComputeOperationWaitTime( + config, op, project, "scheduling policy update", userAgent, + 
d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + + if d.HasChange("advanced_machine_features") { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + // retrieve up-to-date instance from the API in case several updates hit simultaneously. instances + // sometimes but not always share metadata fingerprints. + instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() + if err != nil { + return fmt.Errorf("Error retrieving instance: %s", err) + } + + instance.AdvancedMachineFeatures = expandAdvancedMachineFeatures(d) + + op, err := config.NewComputeClient(userAgent).Instances.Update(project, zone, instance.Name, instance).Do() + if err != nil { + return fmt.Errorf("Error updating instance: %s", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, "advanced_machine_features to update", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + + return nil + }, + }) + + if err != nil { + return err + } + } + + // If the instance stops it can invalidate the fingerprint for network interface. 
+ // refresh the instance to get a new fingerprint + if len(updatesToNIWhileStopped) > 0 { + instance, err = config.NewComputeClient(userAgent).Instances.Get(project, zone, instance.Name).Do() + if err != nil { + return err + } + } + for _, patch := range updatesToNIWhileStopped { + err := patch(instance) + if err != nil { + return err + } + } + + if (statusBeforeUpdate == "RUNNING" && desiredStatus != "TERMINATED") || + (statusBeforeUpdate == "TERMINATED" && desiredStatus == "RUNNING") { + op, err := startInstanceOperation(d, config) + if err != nil { + return errwrap.Wrapf("Error starting instance: {{"{{"}}err{{"}}"}}", err) + } + + opErr := ComputeOperationWaitTime(config, op, project, + "starting instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if opErr != nil { + return opErr + } + } + } + + {{ if ne $.TargetVersionName `ga` -}} + // The access config must be updated only if the machine is still RUNNING and after each access_config for each interface has been re-created. + err = computeInstanceAddSecurityPolicy(d, config, securityPolicies, project, zone, userAgent, instance.Name) + if err != nil { + return fmt.Errorf("Error updating instance while setting the security policies: %s", err) + } + {{- end }} + + // We made it, disable partial mode + d.Partial(false) + + return resourceComputeInstanceRead(d, meta) +} + +func startInstanceOperation(d *schema.ResourceData, config *transport_tpg.Config) (*compute.Operation, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + // Use beta api directly in order to read network_interface.fingerprint without having to put it in the schema. + // Change back to getInstance(config, d) once updating alias ips is GA. 
+ instance, err := config.NewComputeClient(userAgent).Instances.Get(project, zone, d.Get("name").(string)).Do() + if err != nil { + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance %s", instance.Name)) + } + + // Retrieve instance from config to pull encryption keys if necessary + instanceFromConfig, err := expandComputeInstance(project, d, config) + if err != nil { + return nil, err + } + + var encrypted []*compute.CustomerEncryptionKeyProtectedDisk + for _, disk := range instanceFromConfig.Disks { + if disk.DiskEncryptionKey != nil { + key := compute.CustomerEncryptionKey{RawKey: disk.DiskEncryptionKey.RawKey, KmsKeyName: disk.DiskEncryptionKey.KmsKeyName} + eDisk := compute.CustomerEncryptionKeyProtectedDisk{Source: disk.Source, DiskEncryptionKey: &key} + encrypted = append(encrypted, &eDisk) + } + } + + var op *compute.Operation + + if len(encrypted) > 0 { + request := compute.InstancesStartWithEncryptionKeyRequest{Disks: encrypted} + op, err = config.NewComputeClient(userAgent).Instances.StartWithEncryptionKey(project, zone, instance.Name, &request).Do() + } else { + op, err = config.NewComputeClient(userAgent).Instances.Start(project, zone, instance.Name).Do() + } + + return op, err +} + +func expandAttachedDisk(diskConfig map[string]interface{}, d *schema.ResourceData, meta interface{}) (*compute.AttachedDisk, error) { + config := meta.(*transport_tpg.Config) + + s := diskConfig["source"].(string) + var sourceLink string + if strings.Contains(s, "regions/") { + source, err := tpgresource.ParseRegionDiskFieldValue(s, d, config) + if err != nil { + return nil, err + } + sourceLink = source.RelativeLink() + } else { + source, err := tpgresource.ParseDiskFieldValue(s, d, config) + if err != nil { + return nil, err + } + sourceLink = source.RelativeLink() + } + + disk := &compute.AttachedDisk{ + Source: sourceLink, + } + + if v, ok := diskConfig["mode"]; ok { + disk.Mode = v.(string) + } + + if v, ok := diskConfig["device_name"]; ok { + 
disk.DeviceName = v.(string) + } + + keyValue, keyOk := diskConfig["disk_encryption_key_raw"] + if keyOk { + if keyValue != "" { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ + RawKey: keyValue.(string), + } + } + } + + kmsValue, kmsOk := diskConfig["kms_key_self_link"] + if kmsOk { + if keyOk && keyValue != "" && kmsValue != "" { + return nil, errors.New("Only one of kms_key_self_link and disk_encryption_key_raw can be set") + } + if kmsValue != "" { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ + KmsKeyName: kmsValue.(string), + } + } + } + return disk, nil +} + +// See comment on expandInstanceTemplateGuestAccelerators regarding why this +// code is duplicated. +func expandInstanceGuestAccelerators(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]*compute.AcceleratorConfig, error) { + configs, ok := d.GetOk("guest_accelerator") + if !ok { + return nil, nil + } + accels := configs.([]interface{}) + guestAccelerators := make([]*compute.AcceleratorConfig, 0, len(accels)) + for _, raw := range accels { + data := raw.(map[string]interface{}) + if data["count"].(int) == 0 { + continue + } + at, err := tpgresource.ParseAcceleratorFieldValue(data["type"].(string), d, config) + if err != nil { + return nil, fmt.Errorf("cannot parse accelerator type: %v", err) + } + guestAccelerators = append(guestAccelerators, &compute.AcceleratorConfig{ + AcceleratorCount: int64(data["count"].(int)), + AcceleratorType: at.RelativeLink(), + }) + } + + return guestAccelerators, nil +} + +// suppressEmptyGuestAcceleratorDiff is used to work around perpetual diff +// issues when a count of `0` guest accelerators is desired. This may occur when +// guest_accelerator support is controlled via a module variable. E.g.: +// +// guest_accelerators { +// count = "${var.enable_gpu ? var.gpu_count : 0}" +// ... 
+// } + +// After reconciling the desired and actual state, we would otherwise see a +// perpetual diff resembling: +// +// [] != [{"count":0, "type": "nvidia-tesla-k80"}] +func suppressEmptyGuestAcceleratorDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + oldi, newi := d.GetChange("guest_accelerator") + + old, ok := oldi.([]interface{}) + if !ok { + return fmt.Errorf("Expected old guest accelerator diff to be a slice") + } + + new, ok := newi.([]interface{}) + if !ok { + return fmt.Errorf("Expected new guest accelerator diff to be a slice") + } + + if len(old) != 0 && len(new) != 1 { + return nil + } + + firstAccel, ok := new[0].(map[string]interface{}) + if !ok { + return fmt.Errorf("Unable to type assert guest accelerator") + } + + if firstAccel["count"].(int) == 0 { + if err := d.Clear("guest_accelerator"); err != nil { + return err + } + } + + return nil +} + +// return an error if the desired_status field is set to a value other than RUNNING on Create. +func desiredStatusDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + // when creating an instance, name is not set + oldName, _ := diff.GetChange("name") + + if oldName == nil || oldName == "" { + _, newDesiredStatus := diff.GetChange("desired_status") + + if newDesiredStatus == nil || newDesiredStatus == "" { + return nil + } else if newDesiredStatus != "RUNNING" { + return fmt.Errorf("When creating an instance, desired_status can only accept RUNNING value") + } + return nil + } + + return nil +} + +func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + log.Printf("[INFO] Requesting instance deletion: %s", 
d.Get("name").(string)) + + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("Cannot delete instance %s: instance Deletion Protection is enabled. Set deletion_protection to false for this resource and run \"terraform apply\" before attempting to delete it.", d.Get("name").(string)) + } else { + op, err := config.NewComputeClient(userAgent).Instances.Delete(project, zone, d.Get("name").(string)).Do() + if err != nil { + return fmt.Errorf("Error deleting instance: %s", err) + } + + // Wait for the operation to complete + opErr := ComputeOperationWaitTime(config, op, project, "instance to delete", userAgent, d.Timeout(schema.TimeoutDelete)) + if opErr != nil { + // Refresh operation to check status + op, _ = config.NewComputeClient(userAgent).ZoneOperations.Get(project, zone, strconv.FormatUint(op.Id, 10)).Do() + // Do not return an error if the operation actually completed + if op == nil || op.Status != "DONE" { + return opErr + } + } + + d.SetId("") + return nil + } +} + +func resourceComputeInstanceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/zones/{{"{{"}}zone{{"}}"}}/instances/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func expandParams(d *schema.ResourceData) (*compute.InstanceParams, error) { + params := &compute.InstanceParams{ + } + + if _, ok := d.GetOk("params.0.resource_manager_tags"); ok { + params.ResourceManagerTags = tpgresource.ExpandStringMap(d, "params.0.resource_manager_tags") + } + + return params, nil 
+} + +func expandBootDisk(d *schema.ResourceData, config *transport_tpg.Config, project string) (*compute.AttachedDisk, error) { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + disk := &compute.AttachedDisk{ + AutoDelete: d.Get("boot_disk.0.auto_delete").(bool), + Boot: true, + } + + if v, ok := d.GetOk("boot_disk.0.device_name"); ok { + disk.DeviceName = v.(string) + } + + if v, ok := d.GetOk("boot_disk.0.disk_encryption_key_raw"); ok { + if v != "" { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ + RawKey: v.(string), + } + } + } + + if v, ok := d.GetOk("boot_disk.0.kms_key_self_link"); ok { + if v != "" { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{ + KmsKeyName: v.(string), + } + } + } + + if v, ok := d.GetOk("boot_disk.0.source"); ok { + var err error + var source interface { + RelativeLink() string + } + if strings.Contains(v.(string), "regions/") { + source, err = tpgresource.ParseRegionDiskFieldValue(v.(string), d, config) + } else { + source, err = tpgresource.ParseDiskFieldValue(v.(string), d, config) + } + if err != nil { + return nil, err + } + disk.Source = source.RelativeLink() + } + + if _, ok := d.GetOk("boot_disk.0.initialize_params"); ok { + disk.InitializeParams = &compute.AttachedDiskInitializeParams{} + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.size"); ok { + disk.InitializeParams.DiskSizeGb = int64(v.(int)) + } + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.provisioned_iops"); ok { + disk.InitializeParams.ProvisionedIops = int64(v.(int)) + } + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.provisioned_throughput"); ok { + disk.InitializeParams.ProvisionedThroughput = int64(v.(int)) + } + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.enable_confidential_compute"); ok { + disk.InitializeParams.EnableConfidentialCompute = v.(bool) + } + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.type"); ok { + 
diskTypeName := v.(string) + diskType, err := readDiskType(config, d, diskTypeName) + if err != nil { + return nil, fmt.Errorf("Error loading disk type '%s': %s", diskTypeName, err) + } + disk.InitializeParams.DiskType = diskType.RelativeLink() + } + + if v, ok := d.GetOk("boot_disk.0.initialize_params.0.image"); ok { + imageName := v.(string) + imageUrl, err := ResolveImage(config, project, imageName, userAgent) + if err != nil { + return nil, fmt.Errorf("Error resolving image name '%s': %s", imageName, err) + } + + disk.InitializeParams.SourceImage = imageUrl + } + + if _, ok := d.GetOk("boot_disk.0.initialize_params.0.labels"); ok { + disk.InitializeParams.Labels = tpgresource.ExpandStringMap(d, "boot_disk.0.initialize_params.0.labels") + } + + if _, ok := d.GetOk("boot_disk.0.initialize_params.0.resource_manager_tags"); ok { + disk.InitializeParams.ResourceManagerTags = tpgresource.ExpandStringMap(d, "boot_disk.0.initialize_params.0.resource_manager_tags") + } + } + + if v, ok := d.GetOk("boot_disk.0.mode"); ok { + disk.Mode = v.(string) + } + + return disk, nil +} + +func flattenBootDisk(d *schema.ResourceData, disk *compute.AttachedDisk, config *transport_tpg.Config) []map[string]interface{} { + result := map[string]interface{}{ + "auto_delete": disk.AutoDelete, + "device_name": disk.DeviceName, + "mode": disk.Mode, + "source": tpgresource.ConvertSelfLinkToV1(disk.Source), + // disk_encryption_key_raw is not returned from the API, so copy it from what the user + // originally specified to avoid diffs. + "disk_encryption_key_raw": d.Get("boot_disk.0.disk_encryption_key_raw"), + } + + diskDetails, err := getDisk(disk.Source, d, config) + if err != nil { + log.Printf("[WARN] Cannot retrieve boot disk details: %s", err) + + if _, ok := d.GetOk("boot_disk.0.initialize_params.#"); ok { + // If we can't read the disk details due to permission for instance, + // copy the initialize_params from what the user originally specified to avoid diffs. 
+ m := d.Get("boot_disk.0.initialize_params") + result["initialize_params"] = m + } + } else { + result["initialize_params"] = []map[string]interface{}{{"{{"}} + "type": tpgresource.GetResourceNameFromSelfLink(diskDetails.Type), + // If the config specifies a family name that doesn't match the image name, then + // the diff won't be properly suppressed. See DiffSuppressFunc for this field. + "image": diskDetails.SourceImage, + "size": diskDetails.SizeGb, + "labels": diskDetails.Labels, + "resource_manager_tags": d.Get("boot_disk.0.initialize_params.0.resource_manager_tags"), + "provisioned_iops": diskDetails.ProvisionedIops, + "provisioned_throughput": diskDetails.ProvisionedThroughput, + "enable_confidential_compute": diskDetails.EnableConfidentialCompute, + {{"}}"}} + } + + if disk.DiskEncryptionKey != nil { + if disk.DiskEncryptionKey.Sha256 != "" { + result["disk_encryption_key_sha256"] = disk.DiskEncryptionKey.Sha256 + } + if disk.DiskEncryptionKey.KmsKeyName != "" { + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + result["kms_key_self_link"] = strings.Split(disk.DiskEncryptionKey.KmsKeyName, "/cryptoKeyVersions")[0] + } + } + + return []map[string]interface{}{result} +} + +func expandScratchDisks(d *schema.ResourceData, config *transport_tpg.Config, project string) ([]*compute.AttachedDisk, error) { + diskType, err := readDiskType(config, d, "local-ssd") + if err != nil { + return nil, fmt.Errorf("Error loading disk type 'local-ssd': %s", err) + } + + n := d.Get("scratch_disk.#").(int) + scratchDisks := make([]*compute.AttachedDisk, 0, n) + for i := 0; i < n; i++ { + scratchDisks = append(scratchDisks, &compute.AttachedDisk{ + AutoDelete: true, + Type: "SCRATCH", + DeviceName: d.Get(fmt.Sprintf("scratch_disk.%d.device_name", i)).(string), + Interface: d.Get(fmt.Sprintf("scratch_disk.%d.interface", i)).(string), + DiskSizeGb: 
int64(d.Get(fmt.Sprintf("scratch_disk.%d.size", i)).(int)), + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskType: diskType.RelativeLink(), + }, + }) + } + + return scratchDisks, nil +} + +func flattenScratchDisk(disk *compute.AttachedDisk) map[string]interface{} { + result := map[string]interface{}{ + "device_name": disk.DeviceName, + "interface": disk.Interface, + "size": disk.DiskSizeGb, + } + return result +} + +func hash256(raw string) (string, error) { + decoded, err := base64.StdEncoding.DecodeString(raw) + if err != nil { + return "", err + } + h := sha256.Sum256(decoded) + return base64.StdEncoding.EncodeToString(h[:]), nil +} + +func serviceAccountDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if k != "service_account.#" { + return false + } + + o, n := d.GetChange("service_account") + var l []interface{} + if old == "0" && new == "1" { + l = n.([]interface{}) + } else if new == "0" && old == "1" { + l = o.([]interface{}) + } else { + // we don't have one set and one unset, so don't suppress the diff + return false + } + + // suppress changes between { } and {scopes:[]} + if l[0] != nil { + contents := l[0].(map[string]interface{}) + if scopes, ok := contents["scopes"]; ok { + a := scopes.(*schema.Set).List() + if a != nil && len(a) > 0 { + return false + } + } + } + return true +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl new file mode 100644 index 000000000000..2ab1d486b783 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image.go.tmpl @@ -0,0 +1,284 @@ +package compute +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + 
transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeInstanceFromMachineImage() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceFromMachineImageCreate, + Read: resourceComputeInstanceRead, + Update: resourceComputeInstanceUpdate, + Delete: resourceComputeInstanceDelete, + + // Import doesn't really make sense, because you could just import + // as a google_compute_instance. + + Timeouts: ResourceComputeInstance().Timeouts, + + Schema: computeInstanceFromMachineImageSchema(), + CustomizeDiff: ResourceComputeInstance().CustomizeDiff, + UseJSONNumber: true, + } +} + +func computeInstanceFromMachineImageSchema() map[string]*schema.Schema { + s := ResourceComputeInstance().Schema + + for _, field := range []string{"boot_disk", "machine_type", "network_interface"} { + // The user can set these fields as an override, but doesn't need to - + // the machine image values will be used if they're unset. + s[field].Required = false + s[field].Optional = true + } + + // schema.SchemaConfigModeAttr allows these fields to be removed in Terraform 0.12. + // Passing field_name = [] in this mode differentiates between an intentionally empty + // block vs an ignored computed block. + nic := s["network_interface"].Elem.(*schema.Resource) + nic.Schema["alias_ip_range"].ConfigMode = schema.SchemaConfigModeAttr + nic.Schema["access_config"].ConfigMode = schema.SchemaConfigModeAttr + + for _, field := range []string{"attached_disk", "guest_accelerator", "service_account", "scratch_disk"} { + s[field].ConfigMode = schema.SchemaConfigModeAttr + } + + recurseOnSchema(s, func(field *schema.Schema) { + // We don't want to accidentally use default values to override the instance + // machine image, so remove defaults. 
+ field.Default = nil + + // Make non-required fields computed since they'll be set by the template. + // Leave deprecated and removed fields alone because we don't set them. + if !field.Required && !(field.Deprecated != "") { + field.Computed = true + } + }) + + s["source_machine_image"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name or self link of a machine image to create the instance from on.`, + } + + // Modifying the schema to disable disk overrides, due to an API bug (b/170964971) + // TODO: (camthornton) Remove this when disk override functionality in the API is restored + for _, field := range []string{"boot_disk", "attached_disk", "scratch_disk"} { + s[field].Required = false + s[field].Optional = false + s[field].Computed = true + s[field].MaxItems = 0 + } + bootDiskSchema := s["boot_disk"].Elem.(*schema.Resource).Schema + for _, field := range bootDiskSchema { + field.AtLeastOneOf = []string{} + field.ConflictsWith = []string{} + } + initializeParamsSchema := bootDiskSchema["initialize_params"].Elem.(*schema.Resource).Schema + for _, field := range initializeParamsSchema { + field.AtLeastOneOf = []string{} + } + // End disk schema modifications + + return s +} + +func resourceComputeInstanceFromMachineImageCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + // Get the zone + z, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Loading zone: %s", z) + zone, err := config.NewComputeClient(userAgent).Zones.Get(project, z).Do() + if err != nil { + return fmt.Errorf("Error loading zone '%s': %s", z, err) + } + + instance, err := expandComputeInstance(project, d, config) + if err != nil { + return err + } + + sa 
:= d.Get("service_account").([]interface{}) + if len(sa) == 0 { + // ServiceAccount is required when the image is from a different project + accounts := make([]*compute.ServiceAccount, 1) + accounts[0] = &compute.ServiceAccount{ + Email: "default", + Scopes: nil, + } + instance.ServiceAccounts = accounts + } + + src := d.Get("source_machine_image").(string) + instance.SourceMachineImage = src + + tpl, err := tpgresource.ParseMachineImageFieldValue(src, d, config) + if err != nil { + return err + } + + // obtain the project where the image resides (could be different from the default) + tmp := strings.Split(src, "projects/") + mi_project := strings.Split(tmp[len(tmp)-1], "/")[0] + + mi, err := config.NewComputeClient(userAgent).MachineImages.Get(mi_project, tpl.Name).Do() + if err != nil { + return err + } + + instance.Disks, err = adjustInstanceFromMachineImageDisks(d, config, mi, zone, project) + if err != nil { + return err + } + + // when we make the original call to expandComputeInstance expandScheduling is called, which sets default values. + // However, we want the values to be read from the machine image instead. + if _, hasSchedule := d.GetOk("scheduling"); !hasSchedule { + instance.Scheduling = mi.SourceInstanceProperties.Scheduling + } + + // Force send all top-level fields that have been set in case they're overridden to zero values. + // Initialize ForceSendFields to empty so we don't get things that the instance resource + // always force-sends. + instance.ForceSendFields = []string{} + for f, s := range computeInstanceFromMachineImageSchema() { + // It seems that GetOkExists always returns true for sets. + // TODO: confirm this and file issue against Terraform core. + // In the meantime, don't force send sets. + if s.Type == schema.TypeSet { + continue + } + + if _, exists := d.GetOkExists(f); exists { + // Assume for now that all fields are exact snake_case versions of the API fields. 
+ // This won't necessarily always be true, but it serves as a good approximation and + // can be adjusted later as we discover issues. + instance.ForceSendFields = append(instance.ForceSendFields, tpgresource.SnakeToPascalCase(f)) + } + } + + log.Printf("[INFO] Requesting instance creation") + op, err := config.NewComputeClient(userAgent).Instances.Insert(project, zone.Name, instance).Do() + if err != nil { + return fmt.Errorf("Error creating instance: %s", err) + } + + // Store the ID now + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, z, instance.Name)) + + // Wait for the operation to complete + waitErr := ComputeOperationWaitTime(config, op, project, + "instance to create", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + return resourceComputeInstanceRead(d, meta) +} + +// Instances have disks spread across multiple schema properties. This function +// ensures that overriding one of these properties does not override the others. 
+func adjustInstanceFromMachineImageDisks(d *schema.ResourceData, config *transport_tpg.Config, mi *compute.MachineImage, zone *compute.Zone, project string) ([]*compute.AttachedDisk, error) { + disks := []*compute.AttachedDisk{} + if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { + bootDisk, err := expandBootDisk(d, config, project) + if err != nil { + return nil, err + } + disks = append(disks, bootDisk) + } else { + // boot disk was not overridden, so use the one from the machine image + for _, disk := range mi.SourceInstanceProperties.Disks { + if disk.Boot { + newdisk := &compute.AttachedDisk{ + AutoDelete: disk.AutoDelete, + Type: disk.Type, + DeviceName: disk.DeviceName, + } + disks = append(disks, newdisk) + break + } + } + } + + if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { + scratchDisks, err := expandScratchDisks(d, config, project) + if err != nil { + return nil, err + } + disks = append(disks, scratchDisks...) + } else { + // scratch disks were not overridden, so use the ones from the machine image + for _, disk := range mi.SourceInstanceProperties.Disks { + if disk.Type == "SCRATCH" { + newdisk := &compute.AttachedDisk{ + AutoDelete: disk.AutoDelete, + Type: disk.Type, + DeviceName: disk.DeviceName, + } + disks = append(disks, newdisk) + } + } + } + + attachedDisksCount := d.Get("attached_disk.#").(int) + if attachedDisksCount > 0 { + for i := 0; i < attachedDisksCount; i++ { + diskConfig := d.Get(fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) + disk, err := expandAttachedDisk(diskConfig, d, config) + if err != nil { + return nil, err + } + + disks = append(disks, disk) + } + } else { + // attached disks were not overridden, so use the ones from the machine image + for _, disk := range mi.SourceInstanceProperties.Disks { + if !disk.Boot && disk.Type != "SCRATCH" { + newdisk := &compute.AttachedDisk{ + AutoDelete: disk.AutoDelete, + Type: disk.Type, + DeviceName: disk.DeviceName, + } + disks = append(disks, 
newdisk) + } + } + } + + return disks, nil +} + +{{- else }} +// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl new file mode 100644 index 000000000000..3e43a631427d --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl @@ -0,0 +1,684 @@ +package compute_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +func TestAccComputeInstanceFromMachineImage_basic(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + generatedInstanceName := fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_machine_image.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromMachineImage_basic(instanceName, generatedInstanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, 
&instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "machine_type", "n1-standard-1"), + resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "0"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.automatic_restart", "false"), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstanceFromMachineImage_maxRunDuration(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + generatedInstanceName := fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_machine_image.foobar" + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceFromMachineImage_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromMachineImage_maxRunDuration(instanceName, generatedInstanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "machine_type", "n1-standard-1"), + resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "0"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.automatic_restart", "false"), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + }, + }) +} +{{- end }} + +func TestAccComputeInstanceFromMachineImage_localSsdRecoveryTimeout(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := 
fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + generatedInstanceName := fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_machine_image.foobar" + var expectedLocalSsdRecoveryTimeout = compute.Duration{} + expectedLocalSsdRecoveryTimeout.Nanos = 0 + expectedLocalSsdRecoveryTimeout.Seconds = 3600 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromMachineImage_localSsdRecoveryTimeout(instanceName, generatedInstanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + testAccCheckComputeInstanceLocalSsdRecoveryTimeout(&instance, expectedLocalSsdRecoveryTimeout), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromMachineImageWithOverride_localSsdRecoveryTimeout(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + generatedInstanceName := fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_machine_image.foobar" + var expectedLocalSsdRecoveryTimeout = compute.Duration{} + expectedLocalSsdRecoveryTimeout.Nanos = 0 + expectedLocalSsdRecoveryTimeout.Seconds = 7200 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromMachineImageWithOverride_localSsdRecoveryTimeout(instanceName, generatedInstanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, 
&instance), + testAccCheckComputeInstanceLocalSsdRecoveryTimeout(&instance, expectedLocalSsdRecoveryTimeout), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstanceFromMachineImageWithOverride_partnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + generatedInstanceName := fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_machine_image.foobar" + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromMachineImageWithOverride_partnerMetadata(instanceName, generatedInstanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + }, + }) +} +{{- end }} + +func TestAccComputeInstanceFromMachineImage_overrideMetadataDotStartupScript(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + generatedInstanceName := fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_machine_image.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: 
testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromMachineImage_overrideMetadataDotStartupScript(instanceName, generatedInstanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + resource.TestCheckResourceAttr(resourceName, "metadata.startup-script", ""), + ), + }, + }, + }) + +} + +func TestAccComputeInstanceFromMachineImage_diffProject(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + generatedInstanceName := fmt.Sprintf("tf-test-generated-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_machine_image.foobar" + org := envvar.GetTestOrgFromEnv(t) + billingId := envvar.GetTestBillingAccountFromEnv(t) + projectID := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromMachineImageDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromMachineImage_diffProject(projectID, org, billingId, instanceName, generatedInstanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "machine_type", "n1-standard-1"), + resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "0"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.automatic_restart", "false"), + ), + }, + }, + }) +} + +func testAccCheckComputeInstanceFromMachineImageDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range 
s.RootModule().Resources { + if rs.Type != "google_compute_instance_from_machine_image" { + continue + } + + _, err := config.NewComputeClient(config.UserAgent).Instances.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Instance still exists") + } + } + + return nil + } +} + +func testAccComputeInstanceFromMachineImage_basic(instance, newInstance string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm" { + provider = google-beta + + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } + + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + } + + can_ip_forward = true +} + +resource "google_compute_machine_image" "foobar" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } +} +`, instance, instance, newInstance) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceFromMachineImage_maxRunDuration(instance, newInstance string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm" { + provider = google-beta + + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } + + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + } + +} + +resource "google_compute_machine_image" "foobar" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm.self_link +} + 
+resource "google_compute_instance_from_machine_image" "foobar" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar.self_link + + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + nanos = 123 + seconds = 60 + } + on_instance_stop_action { + discard_local_ssd = true + } + } +} +`, instance, instance, newInstance) +} +{{- end }} + + +func testAccComputeInstanceFromMachineImage_localSsdRecoveryTimeout(instance, newInstance string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm" { + provider = google-beta + + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } + + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + local_ssd_recovery_timeout { + nanos = 0 + seconds = 3600 + } + } +} + +resource "google_compute_machine_image" "foobar" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar.self_link + + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } +} +`, instance, instance, newInstance) +} + +func testAccComputeInstanceFromMachineImageWithOverride_localSsdRecoveryTimeout(instance, newInstance string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm" { + provider = google-beta + + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } + + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { 
+ automatic_restart = true + local_ssd_recovery_timeout { + nanos = 0 + seconds = 3600 + } + } +} + +resource "google_compute_machine_image" "foobar" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar.self_link + + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + local_ssd_recovery_timeout { + nanos = 0 + seconds = 7200 + } + } +} +`, instance, instance, newInstance) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceFromMachineImageWithOverride_partnerMetadata(instance, newInstance string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm" { + provider = google-beta + + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } + + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key = "value" + } + }) + } +} + +resource "google_compute_machine_image" "foobar" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar.self_link + + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } +} +`, instance, instance, newInstance) +} +{{- end }} + +func 
testAccComputeInstanceFromMachineImage_overrideMetadataDotStartupScript(instanceName, generatedInstanceName string) string { + return fmt.Sprintf(` +resource "google_compute_instance" "vm" { + provider = google-beta + + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } + + name = "%s" + machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + metadata = { + startup-script = "#!/bin/bash\necho Hello" + } + +} + +resource "google_compute_machine_image" "foobar" { + provider = google-beta + name = "%s" + source_instance = google_compute_instance.vm.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar.self_link + + // Overrides + metadata = { + startup-script = "" + } +} +`, instanceName, instanceName, generatedInstanceName) +} + +func testAccComputeInstanceFromMachineImage_diffProject(projectID, org, billingId, instance, newInstance string) string { + return fmt.Sprintf(` +resource "google_project" "project" { + provider = google-beta + project_id = "%s" + name = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "service" { + provider = google-beta + project = google_project.project.project_id + service = "compute.googleapis.com" + timeouts { + create = "30m" + update = "40m" + } + disable_dependent_services = true +} + +resource "google_project_service" "monitoring" { + provider = google-beta + project = google_project.project.project_id + service = "monitoring.googleapis.com" + timeouts { + create = "30m" + update = "40m" + } + disable_dependent_services = true + + depends_on = [google_project_service.service] +} + +resource "google_compute_instance" "vm" { + provider = google-beta + project = google_project.project.project_id + boot_disk { + initialize_params { + image = "debian-cloud/debian-10" + } + } + + name = "%s" + 
machine_type = "n1-standard-1" + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + } + + can_ip_forward = true + + depends_on = [google_project_service.monitoring] +} + +resource "google_compute_machine_image" "foobar" { + provider = google-beta + project = google_project.project.project_id + name = "%s" + source_instance = google_compute_instance.vm.self_link +} + +resource "google_compute_instance_from_machine_image" "foobar" { + provider = google-beta + name = "%s" + zone = "us-central1-a" + + source_machine_image = google_compute_machine_image.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } +} +`, projectID, projectID, org, billingId, instance, instance, newInstance) +} + +{{- else }} +// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl new file mode 100644 index 000000000000..fae3529c0ea6 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template.go.tmpl @@ -0,0 +1,326 @@ +package compute + +import ( + "encoding/json" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeInstanceFromTemplate() *schema.Resource { + return &schema.Resource{ + Create: 
resourceComputeInstanceFromTemplateCreate, + Read: resourceComputeInstanceRead, + Update: resourceComputeInstanceUpdate, + Delete: resourceComputeInstanceDelete, + + // Import doesn't really make sense, because you could just import + // as a google_compute_instance. + + Timeouts: ResourceComputeInstance().Timeouts, + + Schema: computeInstanceFromTemplateSchema(), + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ResourceComputeInstance().CustomizeDiff, + ), + UseJSONNumber: true, + } +} + +func computeInstanceFromTemplateSchema() map[string]*schema.Schema { + s := ResourceComputeInstance().Schema + + for _, field := range []string{"boot_disk", "machine_type", "network_interface"} { + // The user can set these fields as an override, but doesn't need to - + // the template values will be used if they're unset. + s[field].Required = false + s[field].Optional = true + } + + // schema.SchemaConfigModeAttr allows these fields to be removed in Terraform 0.12. + // Passing field_name = [] in this mode differentiates between an intentionally empty + // block vs an ignored computed block. + nic := s["network_interface"].Elem.(*schema.Resource) + nic.Schema["alias_ip_range"].ConfigMode = schema.SchemaConfigModeAttr + nic.Schema["access_config"].ConfigMode = schema.SchemaConfigModeAttr + + for _, field := range []string{"attached_disk", "guest_accelerator", "service_account", "scratch_disk"} { + s[field].ConfigMode = schema.SchemaConfigModeAttr + } + + // Remove deprecated/removed fields that are never d.Set. We can't + // programmatically remove all of them, because some of them still have d.Set + // calls. + for _, field := range []string{"disk", "network"} { + delete(s, field) + } + + recurseOnSchema(s, func(field *schema.Schema) { + // We don't want to accidentally use default values to override the instance + // template, so remove defaults. + field.Default = nil + + // Make non-required fields computed since they'll be set by the template. 
+ // Leave deprecated and removed fields alone because we don't set them. + if !field.Required && !(field.Deprecated != "") { + field.Computed = true + } + }) + + s["source_instance_template"] = &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name or self link of an instance template to create the instance based on.`, + } + + return s +} + +func recurseOnSchema(s map[string]*schema.Schema, f func(*schema.Schema)) { + for _, field := range s { + f(field) + if e := field.Elem; e != nil { + if r, ok := e.(*schema.Resource); ok { + recurseOnSchema(r.Schema, f) + } + } + } +} + +func resourceComputeInstanceFromTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + // Get the zone + z, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + log.Printf("[DEBUG] Loading zone: %s", z) + zone, err := config.NewComputeClient(userAgent).Zones.Get(project, z).Do() + if err != nil { + return fmt.Errorf("Error loading zone '%s': %s", z, err) + } + + instance, err := expandComputeInstance(project, d, config) + if err != nil { + return err + } + + sourceInstanceTemplate:= ConvertToUniqueIdWhenPresent(d.Get("source_instance_template").(string)) + tpl, err := tpgresource.ParseInstanceTemplateFieldValue(sourceInstanceTemplate, d, config) + if err != nil { + return err + } + + it := compute.InstanceTemplate{} + var relativeUrl string + + if strings.Contains(sourceInstanceTemplate, "global/instanceTemplates") { + instanceTemplate, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, tpl.Name).Do() + if err != nil { + return err + } + + it = *instanceTemplate + relativeUrl = tpl.RelativeLink() + + instance.Disks, err = 
adjustInstanceFromTemplateDisks(d, config, &it, zone, project, false) + if err != nil { + return err + } + } else { + relativeUrl, err = tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceTemplates/"+tpl.Name) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceTemplates/" + tpl.Name) + if err != nil { + return err + } + + instanceTemplate, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return err + } + + instancePropertiesObj, err := json.Marshal(instanceTemplate) + if err != nil { + fmt.Println(err) + return err + } + + if err := json.Unmarshal(instancePropertiesObj, &it); err != nil { + fmt.Println(err) + return err + } + + instance.Disks, err = adjustInstanceFromTemplateDisks(d, config, &it, zone, project, true) + if err != nil { + return err + } + } + + // when we make the original call to expandComputeInstance expandScheduling is called, which sets default values. + // However, we want the values to be read from the template instead. + if _, hasSchedule := d.GetOk("scheduling"); !hasSchedule { + instance.Scheduling = it.Properties.Scheduling + } + + // Force send all top-level fields that have been set in case they're overridden to zero values. + // Initialize ForceSendFields to empty so we don't get things that the instance resource + // always force-sends. + instance.ForceSendFields = []string{} + for f, s := range computeInstanceFromTemplateSchema() { + // It seems that GetOkExists always returns true for sets. + // TODO: confirm this and file issue against Terraform core. + // In the meantime, don't force send sets. 
+ if s.Type == schema.TypeSet { + continue + } + + if _, exists := d.GetOkExists(f); exists { + // Assume for now that all fields are exact snake_case versions of the API fields. + // This won't necessarily always be true, but it serves as a good approximation and + // can be adjusted later as we discover issues. + instance.ForceSendFields = append(instance.ForceSendFields, tpgresource.SnakeToPascalCase(f)) + } + } + + log.Printf("[INFO] Requesting instance creation") + op, err := config.NewComputeClient(userAgent).Instances.Insert(project, zone.Name, instance).SourceInstanceTemplate(relativeUrl).Do() + if err != nil { + return fmt.Errorf("Error creating instance: %s", err) + } + + // Store the ID now + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instances/%s", project, z, instance.Name)) + + // Wait for the operation to complete + waitErr := ComputeOperationWaitTime(config, op, project, + "instance to create", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + // The resource didn't actually create + d.SetId("") + return waitErr + } + + return resourceComputeInstanceRead(d, meta) +} + +// Instances have disks spread across multiple schema properties. This function +// ensures that overriding one of these properties does not override the others. 
+func adjustInstanceFromTemplateDisks(d *schema.ResourceData, config *transport_tpg.Config, it *compute.InstanceTemplate, zone *compute.Zone, project string, isFromRegionalTemplate bool) ([]*compute.AttachedDisk, error) { + disks := []*compute.AttachedDisk{} + if _, hasBootDisk := d.GetOk("boot_disk"); hasBootDisk { + bootDisk, err := expandBootDisk(d, config, project) + if err != nil { + return nil, err + } + disks = append(disks, bootDisk) + } else { + // boot disk was not overridden, so use the one from the instance template + for _, disk := range it.Properties.Disks { + if disk.Boot { + if disk.Source != "" && !isFromRegionalTemplate { + // Instances need a URL for the disk, but instance templates only have the name + disk.Source = fmt.Sprintf("projects/%s/zones/%s/disks/%s", project, zone.Name, disk.Source) + } + if disk.InitializeParams != nil { + if dt := disk.InitializeParams.DiskType; dt != "" { + // Instances need a URL for the disk type, but instance templates + // only have the name (since they're global). + disk.InitializeParams.DiskType = fmt.Sprintf("zones/%s/diskTypes/%s", zone.Name, dt) + } + } + disks = append(disks, disk) + break + } + } + } + + if _, hasScratchDisk := d.GetOk("scratch_disk"); hasScratchDisk { + scratchDisks, err := expandScratchDisks(d, config, project) + if err != nil { + return nil, err + } + disks = append(disks, scratchDisks...) + } else { + // scratch disks were not overridden, so use the ones from the instance template + for _, disk := range it.Properties.Disks { + if disk.Type == "SCRATCH" { + if disk.InitializeParams != nil { + if dt := disk.InitializeParams.DiskType; dt != "" { + // Instances need a URL for the disk type, but instance templates + // only have the name (since they're global). 
+ disk.InitializeParams.DiskType = fmt.Sprintf("zones/%s/diskTypes/%s", zone.Name, dt) + } + } + disks = append(disks, disk) + } + } + } + + attachedDisksCount := d.Get("attached_disk.#").(int) + if attachedDisksCount > 0 { + for i := 0; i < attachedDisksCount; i++ { + diskConfig := d.Get(fmt.Sprintf("attached_disk.%d", i)).(map[string]interface{}) + disk, err := expandAttachedDisk(diskConfig, d, config) + if err != nil { + return nil, err + } + + disks = append(disks, disk) + } + } else { + // attached disks were not overridden, so use the ones from the instance template + for _, disk := range it.Properties.Disks { + if !disk.Boot && disk.Type != "SCRATCH" { + if s := disk.Source; s != "" && !isFromRegionalTemplate { + // Instances need a URL for the disk source, but instance templates + // only have the name (since they're global). + disk.Source = fmt.Sprintf("zones/%s/disks/%s", zone.Name, s) + } + if disk.InitializeParams != nil { + if dt := disk.InitializeParams.DiskType; dt != "" { + // Instances need a URL for the disk type, but instance templates + // only have the name (since they're global). 
+ disk.InitializeParams.DiskType = fmt.Sprintf("zones/%s/diskTypes/%s", zone.Name, dt) + } + } + disks = append(disks, disk) + } + } + } + + return disks, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl new file mode 100644 index 000000000000..2f04be954c0b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_template_test.go.tmpl @@ -0,0 +1,1551 @@ +package compute_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +func TestAccComputeInstanceFromTemplate_basic(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_basic(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "machine_type", "n1-standard-1"), + 
resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.automatic_restart", "false"), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromTemplate_self_link_unique(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_self_link_unique(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "machine_type", "n1-standard-1"), + resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.automatic_restart", "false"), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstanceFromTemplate_maxRunDuration_onInstanceStopAction(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceFromTemplate_maxRunDuration_onInstanceStopAction + expectedMaxRunDuration.Nanos = 456 + expectedMaxRunDuration.Seconds = 60 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_maxRunDuration_onInstanceStopAction(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + }, + }) +} +{{- end }} + +func TestAccComputeInstanceFromTemplate_localSsdRecoveryTimeout(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + + var expectedLocalSsdRecoveryTimeout = compute.Duration{} + expectedLocalSsdRecoveryTimeout.Nanos = 0 + expectedLocalSsdRecoveryTimeout.Seconds = 3600 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_localSsdRecoveryTimeout(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + testAccCheckComputeInstanceLocalSsdRecoveryTimeout(&instance, expectedLocalSsdRecoveryTimeout), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromTemplateWithOverride_localSsdRecoveryTimeout(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName 
:= "google_compute_instance_from_template.foobar" + + var expectedLocalSsdRecoveryTimeout = compute.Duration{} + expectedLocalSsdRecoveryTimeout.Nanos = 0 + expectedLocalSsdRecoveryTimeout.Seconds = 7200 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplateWithOverride_localSsdRecoveryTimeout(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + testAccCheckComputeInstanceLocalSsdRecoveryTimeout(&instance, expectedLocalSsdRecoveryTimeout), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstanceFromTemplate_partnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_partnerMetadata(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that 
fields were set based on the template + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromTemplateWithOverride_partnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplateWithOverride_partnerMetadata(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + }, + }) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} + +func TestAccComputeInstanceFromRegionTemplate_basic(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromRegionTemplate_basic(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "machine_type", "n1-standard-1"), + resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "1"), + resource.TestCheckResourceAttr(resourceName, "scheduling.0.automatic_restart", "false"), + ), + }, + }, + }) +} + +{{ end }} + +func TestAccComputeInstanceFromTemplate_overrideBootDisk(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateDisk := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + overrideDisk := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.inst" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_overrideBootDisk(templateDisk, overrideDisk, templateName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "boot_disk.#", "1"), + resource.TestMatchResourceAttr(resourceName, "boot_disk.0.source", regexp.MustCompile(overrideDisk)), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromTemplate_overrideAttachedDisk(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := 
fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateDisk := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + overrideDisk := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.inst" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_overrideAttachedDisk(templateDisk, overrideDisk, templateName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "1"), + resource.TestMatchResourceAttr(resourceName, "attached_disk.0.source", regexp.MustCompile(overrideDisk)), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromTemplate_overrideScratchDisk(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateDisk := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + overrideDisk := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.inst" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_overrideScratchDisk(templateDisk, overrideDisk, templateName, instanceName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were set based on the template + resource.TestCheckResourceAttr(resourceName, "scratch_disk.#", "2"), + resource.TestCheckResourceAttr(resourceName, "scratch_disk.0.interface", "NVME"), + resource.TestCheckResourceAttr(resourceName, "scratch_disk.1.interface", "NVME"), + resource.TestCheckResourceAttr(resourceName, "scratch_disk.1.device_name", "override-local-ssd"), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromTemplate_overrideScheduling(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateDisk := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.inst" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_overrideScheduling(templateDisk, templateName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromTemplate_012_removableFields(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.inst" + + // First config is a basic instance from template, second tests the empty list syntax + config1 := testAccComputeInstanceFromTemplate_012_removableFieldsTpl(templateName) + + testAccComputeInstanceFromTemplate_012_removableFields1(instanceName) + config2 := 
testAccComputeInstanceFromTemplate_012_removableFieldsTpl(templateName) + + testAccComputeInstanceFromTemplate_012_removableFields2(instanceName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: config1, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + resource.TestCheckResourceAttr(resourceName, "service_account.#", "1"), + resource.TestCheckResourceAttr(resourceName, "service_account.0.scopes.#", "3"), + ), + }, + { + Config: config2, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + + // Check that fields were able to be removed + resource.TestCheckResourceAttr(resourceName, "scratch_disk.#", "0"), + resource.TestCheckResourceAttr(resourceName, "attached_disk.#", "0"), + resource.TestCheckResourceAttr(resourceName, "network_interface.0.alias_ip_range.#", "0"), + ), + }, + }, + }) +} + +func TestAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(t *testing.T) { + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + templateName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_instance_from_template.inst" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceFromTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(instanceName, templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, resourceName, &instance), + resource.TestCheckResourceAttr(resourceName, 
"metadata.startup-script", ""), + ), + }, + }, + }) + +} + +func testAccCheckComputeInstanceFromTemplateDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_from_template" { + continue + } + + _, err := config.NewComputeClient(config.UserAgent).Instances.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Instance still exists") + } + } + + return nil + } +} + +func testAccComputeInstanceFromTemplate_basic(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + 
can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } +} +`, template, template, instance) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceFromTemplate_maxRunDuration_onInstanceStopAction(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + nanos = 123 + seconds = 60 + } + on_instance_stop_action { + discard_local_ssd = true + } + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + 
nanos = 456 + seconds = 60 + } + on_instance_stop_action { + discard_local_ssd = true + } + } +} +`, template, template, instance) +} +{{- end }} + +func testAccComputeInstanceFromTemplate_localSsdRecoveryTimeout(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + local_ssd_recovery_timeout { + nanos = 0 + seconds = 3600 + } + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } +} +`, template, template, instance) +} + +func testAccComputeInstanceFromTemplateWithOverride_localSsdRecoveryTimeout(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} 
+ +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + local_ssd_recovery_timeout { + nanos = 0 + seconds = 3600 + } + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + local_ssd_recovery_timeout { + nanos = 0 + seconds = 7200 + } + } +} +`, template, template, instance) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceFromTemplate_partnerMetadata(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // 
can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } +} +`, template, template, instance) +} + +func testAccComputeInstanceFromTemplateWithOverride_partnerMetadata(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 
375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + } + }) + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } +} +`, template, template, instance) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} + +func testAccComputeInstanceFromRegionTemplate_basic(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + region = "us-central1" + replica_zones = ["us-central1-a", "us-central1-f"] +} + +resource "google_compute_region_instance_template" "foobar" { + name = "%s" + region = "us-central1" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + disk { + source = google_compute_region_disk.foobar.self_link + auto_delete = false + boot = false + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = 
"NVME" + disk_size_gb = 375 + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_region_instance_template.foobar.id + + // Overrides + can_ip_forward = false + labels = { + my_key = "my_value" + } + scheduling { + automatic_restart = false + } +} +`, template, template, instance) +} + +{{ end }} + +func testAccComputeInstanceFromTemplate_self_link_unique(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + disk { + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + device_name = "test-local-ssd" + disk_type = "local-ssd" + type = "SCRATCH" + interface = "NVME" + disk_size_gb = 375 + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = false + disk_type = "pd-ssd" + type = "PERSISTENT" + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = true + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "foobar" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link_unique + + // Overrides + can_ip_forward = false + labels = 
{ + my_key = "my_value" + } + scheduling { + automatic_restart = false + } +} +`, template, template, instance) +} + +func testAccComputeInstanceFromTemplate_overrideBootDisk(templateDisk, overrideDisk, template, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "template_disk" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_disk" "override_disk" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 20 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "template" { + name = "%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + } + + disk { + source = google_compute_disk.template_disk.name + auto_delete = false + boot = false + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "inst" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.template.self_link + + // Overrides + boot_disk { + source = google_compute_disk.override_disk.self_link + } +} +`, templateDisk, overrideDisk, template, instance) +} + +func testAccComputeInstanceFromTemplate_overrideAttachedDisk(templateDisk, overrideDisk, template, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "template_disk" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_disk" "override_disk" { + name = "%s" + image = 
data.google_compute_image.my_image.self_link + size = 20 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "template" { + name = "%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + } + + disk { + source = google_compute_disk.template_disk.name + auto_delete = false + boot = false + } + + disk { + source_image = "debian-cloud/debian-11" + auto_delete = true + boot = false + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_from_template" "inst" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.template.self_link + + // Overrides + attached_disk { + source = google_compute_disk.override_disk.name + } +} +`, templateDisk, overrideDisk, template, instance) +} + +func testAccComputeInstanceFromTemplate_overrideScratchDisk(templateDisk, overrideDisk, template, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "template_disk" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_disk" "override_disk" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 20 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "template" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + } + + disk { + type = "SCRATCH" + disk_type = "local-ssd" + disk_size_gb = 375 + interface = "SCSI" + auto_delete = true + boot = false + } + + disk { + device_name = "test-local-ssd" + type = "SCRATCH" + 
disk_type = "local-ssd" + disk_size_gb = 375 + interface = "SCSI" + auto_delete = true + boot = false + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_from_template" "inst" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.template.self_link + + // Overrides + scratch_disk { + interface = "NVME" + } + + scratch_disk { + device_name = "override-local-ssd" + interface = "NVME" + } +} +`, templateDisk, overrideDisk, template, instance) +} + +func testAccComputeInstanceFromTemplate_overrideScheduling(templateDisk, template, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = true + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + scheduling { + automatic_restart = false + preemptible = true + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "inst" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link +} +`, templateDisk, template, instance) +} + +func testAccComputeInstanceFromTemplate_012_removableFieldsTpl(template string) string { + + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 20 + boot = true + } + + 
network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + can_ip_forward = true +} +`, template) +} + +func testAccComputeInstanceFromTemplate_012_removableFields1(instance string) string { + return fmt.Sprintf(` +resource "google_compute_instance_from_template" "inst" { + name = "%s" + zone = "us-central1-a" + + allow_stopping_for_update = true + + source_instance_template = google_compute_instance_template.foobar.self_link +} +`, instance) +} + +func testAccComputeInstanceFromTemplate_012_removableFields2(instance string) string { + return fmt.Sprintf(` +resource "google_compute_instance_from_template" "inst" { + name = "%s" + zone = "us-central1-a" + + allow_stopping_for_update = true + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + network_interface { + alias_ip_range = [] + } + + service_account = [] + + scratch_disk = [] + + attached_disk = [] + + timeouts { + create = "10m" + update = "10m" + } +} +`, instance) +} + +func testAccComputeInstanceFromTemplate_overrideMetadataDotStartupScript(instance, template string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + metadata = { + startup-script = "#!/bin/bash\necho Hello" + } + + can_ip_forward = true +} + +resource "google_compute_instance_from_template" "inst" { + name = "%s" + zone = "us-central1-a" + + source_instance_template = google_compute_instance_template.foobar.self_link + + // Overrides + metadata = { + startup-script = "" + } +} +`, template, instance) +} diff --git 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group.go.tmpl new file mode 100644 index 000000000000..b5782de796cb --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group.go.tmpl @@ -0,0 +1,471 @@ +package compute + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/googleapi" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeInstanceGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceGroupCreate, + Read: resourceComputeInstanceGroupRead, + Update: resourceComputeInstanceGroupUpdate, + Delete: resourceComputeInstanceGroupDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeInstanceGroupImportState, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(6 * time.Minute), + Update: schema.DefaultTimeout(6 * time.Minute), + Delete: schema.DefaultTimeout(6 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.DefaultProviderZone, + ), + + SchemaVersion: 2, + MigrateState: resourceComputeInstanceGroupMigrateState, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the instance group. Must be 1-63 characters long and comply with RFC1035. 
Supported characters include lowercase letters, numbers, and hyphens.`, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The zone that this instance group should be created in.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional textual description of the instance group.`, + }, + + "instances": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: tpgresource.SelfLinkRelativePathHash, + Description: `The list of instances in the group, in self_link format. When adding instances they must all be in the same network and zone as the instance group.`, + }, + + "named_port": { + Type: schema.TypeList, + Optional: true, + Description: `The named port configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name which the port will be mapped to.`, + }, + + "port": { + Type: schema.TypeInt, + Required: true, + Description: `The port number to map the name to.`, + }, + }, + }, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + ForceNew: true, + Description: `The URL of the network the instance group is in. If this is different from the network where the instances are in, the creation fails. Defaults to the network where the instances are in (if neither network nor instances is specified, this field will be blank).`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + + "size": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of instances in the group.`, + }, + }, + UseJSONNumber: true, + } +} + +func getInstanceReferences(instanceUrls []string) (refs []*compute.InstanceReference) { + for _, v := range instanceUrls { + refs = append(refs, &compute.InstanceReference{ + Instance: v, + }) + } + return refs +} + +func validInstanceURLs(instanceUrls []string) bool { + for _, v := range instanceUrls { + if !strings.HasPrefix(v, "https://") { + return false + } + } + return true +} + +func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + // Build the parameter + instanceGroup := &compute.InstanceGroup{ + Name: name, + } + + // Set optional fields + if v, ok := d.GetOk("description"); ok { + instanceGroup.Description = v.(string) + } + + if v, ok := d.GetOk("named_port"); ok { + instanceGroup.NamedPorts = getNamedPorts(v.([]interface{})) + } + + if v, ok := d.GetOk("network"); ok { + instanceGroup.Network = v.(string) + } + + log.Printf("[DEBUG] InstanceGroup insert request: %#v", instanceGroup) + op, err := config.NewComputeClient(userAgent).InstanceGroups.Insert( + project, zone, instanceGroup).Do() + if err != nil { + return fmt.Errorf("Error creating InstanceGroup: %s", err) + } + + // It probably maybe worked, so store the ID now + d.SetId(fmt.Sprintf("projects/%s/zones/%s/instanceGroups/%s", project, zone, name)) + + // Wait for the 
operation to complete + err = ComputeOperationWaitTime(config, op, project, "Creating InstanceGroup", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + d.SetId("") + return err + } + + if v, ok := d.GetOk("instances"); ok { + tmpUrls := tpgresource.ConvertStringArr(v.(*schema.Set).List()) + + var instanceUrls []string + for _, v := range tmpUrls { + if strings.HasPrefix(v, "https://") { + instanceUrls = append(instanceUrls, v) + } else { + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}"+v) + if err != nil { + return err + } + instanceUrls = append(instanceUrls, url) + } + } + + addInstanceReq := &compute.InstanceGroupsAddInstancesRequest{ + Instances: getInstanceReferences(instanceUrls), + } + + log.Printf("[DEBUG] InstanceGroup add instances request: %#v", addInstanceReq) + op, err := config.NewComputeClient(userAgent).InstanceGroups.AddInstances( + project, zone, name, addInstanceReq).Do() + if err != nil { + return fmt.Errorf("Error adding instances to InstanceGroup: %s", err) + } + + // Wait for the operation to complete + err = ComputeOperationWaitTime(config, op, project, "Adding instances to InstanceGroup", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + } + + return resourceComputeInstanceGroupRead(d, meta) +} + +func resourceComputeInstanceGroupRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + // retrieve instance group + instanceGroup, err := config.NewComputeClient(userAgent).InstanceGroups.Get( + project, zone, name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance 
Group %q", name)) + } + + // retrieve instance group members + var memberUrls []string + members, err := config.NewComputeClient(userAgent).InstanceGroups.ListInstances( + project, zone, name, &compute.InstanceGroupsListInstancesRequest{ + InstanceState: "ALL", + }).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't have any instances + if err := d.Set("instances", nil); err != nil { + return fmt.Errorf("Error setting instances: %s", err) + } + } else { + // any other errors return them + return fmt.Errorf("Error reading InstanceGroup Members: %s", err) + } + } else { + for _, member := range members.Items { + memberUrls = append(memberUrls, member.Instance) + } + log.Printf("[DEBUG] InstanceGroup members: %v", memberUrls) + if err := d.Set("instances", memberUrls); err != nil { + return fmt.Errorf("Error setting instances: %s", err) + } + } + + if err := d.Set("named_port", flattenNamedPorts(instanceGroup.NamedPorts)); err != nil { + return fmt.Errorf("Error setting named_port: %s", err) + } + if err := d.Set("description", instanceGroup.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + + // Set computed fields + if err := d.Set("network", instanceGroup.Network); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("size", instanceGroup.Size); err != nil { + return fmt.Errorf("Error setting size: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("zone", zone); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("self_link", instanceGroup.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + + return nil +} +func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + + d.Partial(true) + + if d.HasChange("instances") { + // to-do check for no instances + from_, to_ := d.GetChange("instances") + + from := tpgresource.ConvertStringArr(from_.(*schema.Set).List()) + to := tpgresource.ConvertStringArr(to_.(*schema.Set).List()) + + if !validInstanceURLs(from) { + return fmt.Errorf("Error invalid instance URLs: %v", from) + } + if !validInstanceURLs(to) { + return fmt.Errorf("Error invalid instance URLs: %v", to) + } + + add, remove := tpgresource.CalcAddRemove(from, to) + + if len(remove) > 0 { + removeReq := &compute.InstanceGroupsRemoveInstancesRequest{ + Instances: getInstanceReferences(remove), + } + + log.Printf("[DEBUG] InstanceGroup remove instances request: %#v", removeReq) + removeOp, err := config.NewComputeClient(userAgent).InstanceGroups.RemoveInstances( + project, zone, name, removeReq).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Instances already removed from InstanceGroup: %s", remove) + } else { + return fmt.Errorf("Error removing instances from InstanceGroup: %s", err) + } + } else { + // Wait for the operation to complete + err = ComputeOperationWaitTime(config, removeOp, project, "Updating InstanceGroup", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } + + if len(add) > 0 { + + addReq := &compute.InstanceGroupsAddInstancesRequest{ + Instances: getInstanceReferences(add), + } + + log.Printf("[DEBUG] InstanceGroup adding instances request: %#v", addReq) + addOp, err := config.NewComputeClient(userAgent).InstanceGroups.AddInstances( + project, zone, name, addReq).Do() + if err != nil { + return fmt.Errorf("Error adding 
instances to InstanceGroup: %s", err) + } + + // Wait for the operation to complete + err = ComputeOperationWaitTime(config, addOp, project, "Updating InstanceGroup", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } + + if d.HasChange("named_port") { + namedPorts := getNamedPorts(d.Get("named_port").([]interface{})) + + namedPortsReq := &compute.InstanceGroupsSetNamedPortsRequest{ + NamedPorts: namedPorts, + } + + log.Printf("[DEBUG] InstanceGroup updating named ports request: %#v", namedPortsReq) + op, err := config.NewComputeClient(userAgent).InstanceGroups.SetNamedPorts( + project, zone, name, namedPortsReq).Do() + if err != nil { + return fmt.Errorf("Error updating named ports for InstanceGroup: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Updating InstanceGroup", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + return resourceComputeInstanceGroupRead(d, meta) +} + +func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + name := d.Get("name").(string) + op, err := config.NewComputeClient(userAgent).InstanceGroups.Delete(project, zone, name).Do() + if err != nil { + return fmt.Errorf("Error deleting InstanceGroup: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Deleting InstanceGroup", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func resourceComputeInstanceGroupImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if
err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroups/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/zones/{{"{{"}}zone{{"}}"}}/instanceGroups/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, err + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager.go.tmpl new file mode 100644 index 000000000000..3ee029c91947 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager.go.tmpl @@ -0,0 +1,1638 @@ +package compute + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeInstanceGroupManager() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceGroupManagerCreate, + Read: resourceComputeInstanceGroupManagerRead, + Update: resourceComputeInstanceGroupManagerUpdate, + Delete: resourceComputeInstanceGroupManagerDelete, + Importer: &schema.ResourceImporter{ + State: resourceInstanceGroupManagerStateImporter, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(15 * time.Minute), + Update: 
schema.DefaultTimeout(15 * time.Minute), + Delete: schema.DefaultTimeout(15 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.DefaultProviderZone, + ), + Schema: map[string]*schema.Schema{ + "base_instance_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The base instance name to use for instances in this group. The value must be a valid RFC1035 name. Supported characters are lowercase letters, numbers, and hyphens (-). Instances are named by appending a hyphen and a random four-character string to the base instance name.`, + }, + + "version": { + Type: schema.TypeList, + Required: true, + Description: `Application versions managed by this instance group. Each version deals with a specific instance template, allowing canary release scenarios.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Version name.`, + }, + + "instance_template": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkRelativePathsIgnoreParams, + Description: `The full URL to an instance template from which all new instances of this version will be created.`, + }, + + "target_size": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The number of instances calculated as a fixed number or a percentage depending on the settings.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fixed": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of instances which are managed for this version. Conflicts with percent.`, + }, + + "percent": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100), + Description: `The number of instances (calculated as percentage) which are managed for this version. Conflicts with fixed. 
Note that when using percent, rounding will be in favor of explicitly set target_size values; a managed instance group with 2 instances and 2 versions, one of which has a target_size.percent of 60 will create 2 instances of that version.`, + }, + }, + }, + }, + }, + }, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the instance group manager. Must be 1-63 characters long and comply with RFC1035. Supported characters include lowercase letters, numbers, and hyphens.`, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The zone that instances in this group should be created in.`, + }, + + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional textual description of the instance group manager.`, + }, + + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint of the instance group manager.`, + }, + + "instance_group": { + Type: schema.TypeString, + Computed: true, + Description: `The full URL of the instance group created by the manager.`, + }, + + "named_port": { + Type: schema.TypeSet, + Optional: true, + Description: `The named port configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the port.`, + }, + + "port": { + Type: schema.TypeInt, + Required: true, + Description: `The port number.`, + }, + }, + }, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the created resource.`, + }, + + "target_pools": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: tpgresource.SelfLinkRelativePathHash, + Description: `The full URL of all target pools to which new instances in the group are added. Updating the target pools attribute does not affect existing instances.`, + }, + + "target_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of running instances for this managed instance group. This value should always be explicitly set unless this resource is attached to an autoscaler, in which case it should never be set. Defaults to 0.`, + }, + + "list_managed_instances_results": { + Type: schema.TypeString, + Optional: true, + Default: "PAGELESS", + ValidateFunc: validation.StringInSlice([]string{"PAGELESS", "PAGINATED"}, false), + Description: `Pagination behavior of the listManagedInstances API method for this managed instance group. Valid values are: "PAGELESS", "PAGINATED". If PAGELESS (default), Pagination is disabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are ignored and all instances are returned in a single response. If PAGINATED, pagination is enabled, maxResults and pageToken query parameters are respected.`, + }, + + "auto_healing_policies": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The autohealing policies for this managed instance group. 
You can specify only one value.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "health_check": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The health check resource that signals autohealing.`, + }, + + "initial_delay_sec": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 3600), + Description: `The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. Between 0 and 3600.`, + }, + }, + }, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "standby_policy": { + Computed: true, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Standby policy for stopped and suspended instances.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "initial_delay_sec": { + Computed: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 3600), + Description: `Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.`, + }, + + "mode": { + Computed: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"MANUAL", "SCALE_OUT_POOL"}, true), + Description: `Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. 
The default mode is "MANUAL".`, + }, + }, + }, + }, + + "target_suspended_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of suspended instances for this managed instance group.`, + }, + + "target_stopped_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of stopped instances for this managed instance group.`, + }, + {{- end }} + + "update_policy": { + Computed: true, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The update policy for this managed instance group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minimal_action": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"NONE", "REFRESH", "RESTART", "REPLACE"}, false), + Description: `Minimal action to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to update without stopping instances, RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a REFRESH, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.`, + }, + + "most_disruptive_allowed_action": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"NONE", "REFRESH", "RESTART", "REPLACE"}, false), + Description: `Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. 
If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all.`, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), + Description: `The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls).`, + }, + + "max_surge_fixed": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"update_policy.0.max_surge_percent"}, + Description: `The maximum number of instances that can be created above the specified targetSize during the update process. Conflicts with max_surge_percent. If neither is set, defaults to 1`, + }, + + "max_surge_percent": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"update_policy.0.max_surge_fixed"}, + ValidateFunc: validation.IntBetween(0, 100), + Description: `The maximum number of instances(calculated as percentage) that can be created above the specified targetSize during the update process. Conflicts with max_surge_fixed.`, + }, + + "max_unavailable_fixed": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"update_policy.0.max_unavailable_percent"}, + Description: `The maximum number of instances that can be unavailable during the update process. Conflicts with max_unavailable_percent. 
If neither is set, defaults to 1.`, + }, + + "max_unavailable_percent": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"update_policy.0.max_unavailable_fixed"}, + ValidateFunc: validation.IntBetween(0, 100), + Description: `The maximum number of instances(calculated as percentage) that can be unavailable during the update process. Conflicts with max_unavailable_fixed.`, + }, + +{{ if ne $.TargetVersionName `ga` -}} + "min_ready_sec": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum number of seconds to wait for after a newly created instance becomes available. This value must be from range [0, 3600].`, + ValidateFunc: validation.IntBetween(0, 3600), + }, +{{- end }} + "replacement_method": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"RECREATE", "SUBSTITUTE", ""}, false), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("SUBSTITUTE"), + Description: `The instance replacement method for managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. 
You must also set max_unavailable_fixed or max_unavailable_percent to be greater than 0.`, + }, + }, + }, + }, + + "instance_lifecycle_policy": { + Computed: true, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The instance lifecycle policy for this managed instance group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_action_on_failure": { + Type: schema.TypeString, + Default: "REPAIR", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"REPAIR", "DO_NOTHING"}, true), + Description: `Default behavior for all instance or health check failures.`, + }, + "force_update_on_repair": { + Type: schema.TypeString, + Default: "NO", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"YES", "NO"}, true), + Description: `Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: YES, NO. If YES and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. If NO (default), then updates are applied in accordance with the group's update policy type.`, + }, + }, + }, + }, + + "all_instances_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies configuration that overrides the instance template configuration for the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The metadata key-value pairs that you want to patch onto the instance. 
For more information, see Project and instance metadata,`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The label key-value pairs that you want to patch onto the instance,`, + }, + }, + }, + }, + {{- if ne $.TargetVersionName "ga" }} + "params": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Input only additional params for instance group manager creation.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + // This field is intentionally not updatable. The API overrides all existing tags on the field when updated. + ForceNew: true, + Description: `Resource manager tags to bind to the managed instance group. The tags are key-value pairs. Keys must be in the format tagKeys/123 and values in the format tagValues/456.`, + }, + }, + }, + }, + {{- end }} + "wait_for_instances": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether to wait for all instances to be created/updated before returning. Note that if this is set to true and the operation does not succeed, Terraform will continue trying until it times out.`, + }, + "wait_for_instances_status": { + Type: schema.TypeString, + Optional: true, + Default: "STABLE", + ValidateFunc: validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), + Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. 
When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective and all instances configs to be effective as well as all instances to be stable before returning.`, + }, + "stateful_internal_ip": { + Type: schema.TypeList, + Optional: true, + Description: `Internal IPs considered stateful by the instance group. `, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interface_name": { + Type: schema.TypeString, + Optional: true, + Description: `The network interface name`, + }, + "delete_rule": { + Type: schema.TypeString, + Default: "NEVER", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION"}, true), + Description: `A value that prescribes what should happen to an associated static Address resource when a VM instance is permanently deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the IP when the VM is deleted, but do not delete the address resource. ON_PERMANENT_INSTANCE_DELETION will delete the stateful address when the VM is permanently deleted from the instance group. The default is NEVER.`, + }, + }, + }, + }, + "stateful_external_ip": { + Type: schema.TypeList, + Optional: true, + Description: `External IPs considered stateful by the instance group. `, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interface_name": { + Type: schema.TypeString, + Optional: true, + Description: `The network interface name`, + }, + "delete_rule": { + Type: schema.TypeString, + Default: "NEVER", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION"}, true), + Description: `A value that prescribes what should happen to an associated static Address resource when a VM instance is permanently deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION.
NEVER - detach the IP when the VM is deleted, but do not delete the address resource. ON_PERMANENT_INSTANCE_DELETION will delete the stateful address when the VM is permanently deleted from the instance group. The default is NEVER.`, + }, + }, + }, + }, + "stateful_disk": { + Type: schema.TypeSet, + Optional: true, + Description: `Disks created on the instances that will be preserved on instance delete, update, etc.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + Description: `The device name of the disk to be attached.`, + }, + + "delete_rule": { + Type: schema.TypeString, + Default: "NEVER", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION"}, true), + Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the disk when the VM is deleted, but do not delete the disk. ON_PERMANENT_INSTANCE_DELETION will delete the stateful disk when the VM is permanently deleted from the instance group. The default is NEVER.`, + }, + }, + }, + }, + "operation": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeList, + Computed: true, + Description: `The status of this managed instance group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_stable": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether the managed instance group is in a stable state. 
A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.`, + }, + + "version_target": { + Type: schema.TypeList, + Computed: true, + Description: `A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_reached": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager.`, + }, + }, + }, + }, + "all_instances_config": { + Type: schema.TypeList, + Computed: true, + Description: `Status of all-instances configuration on the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether this configuration has been applied to all managed instances in the group.`, + }, + "current_revision": { + Type: schema.TypeString, + Computed: true, + Description: `Current all-instances configuration revision. This value is in RFC3339 text format.`, + }, + }, + }, + }, + "stateful": { + Type: schema.TypeList, + Computed: true, + Description: `Stateful status of the given Instance Group Manager.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "has_stateful_config": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. 
The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions.`, + }, + "per_instance_configs": { + Type: schema.TypeList, + Computed: true, + Description: `Status of per-instance configs on the instances.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "all_effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func parseUniqueId(s string) (string, string) { + splits:= strings.SplitN(s, "?uniqueId=", 2) + if len(splits) == 2 { + return splits[0], splits[1] + } + return s, "" +} + +func compareSelfLinkRelativePathsIgnoreParams(_unused1, old, new string, _unused2 *schema.ResourceData) bool { + oldName, oldUniqueId:= parseUniqueId(old) + newName, newUniqueId:= parseUniqueId(new) + if oldUniqueId != "" && newUniqueId != "" && oldUniqueId != newUniqueId { + return false + } + return tpgresource.CompareSelfLinkRelativePaths(_unused1, oldName, newName, _unused2) +} + +func ConvertToUniqueIdWhenPresent(s string) string { + original, uniqueId:= parseUniqueId(s) + if uniqueId != "" { + splits:= strings.Split(original, "/") + splits[len(splits)-1] = uniqueId + return strings.Join(splits, "/") + } + return s +} + +func getNamedPorts(nps []interface{}) []*compute.NamedPort { + namedPorts := make([]*compute.NamedPort, 0, len(nps)) + for _, v := range nps { + np := v.(map[string]interface{}) + namedPorts = append(namedPorts, &compute.NamedPort{ + Name: np["name"].(string), + Port: int64(np["port"].(int)), + }) + } + + return namedPorts +} + +func getNamedPortsBeta(nps []interface{}) []*compute.NamedPort { + namedPorts := 
make([]*compute.NamedPort, 0, len(nps)) + for _, v := range nps { + np := v.(map[string]interface{}) + namedPorts = append(namedPorts, &compute.NamedPort{ + Name: np["name"].(string), + Port: int64(np["port"].(int)), + }) + } + + return namedPorts +} + +func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + + // Build the parameter + manager := &compute.InstanceGroupManager{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + BaseInstanceName: d.Get("base_instance_name").(string), + TargetSize: int64(d.Get("target_size").(int)), + ListManagedInstancesResults: d.Get("list_managed_instances_results").(string), + NamedPorts: getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()), + TargetPools: tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)), + AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), + Versions: expandVersions(d.Get("version").([]interface{})), + {{- if ne $.TargetVersionName "ga" }} + StandbyPolicy: expandStandbyPolicy(d), + TargetSuspendedSize: int64(d.Get("target_suspended_size").(int)), + TargetStoppedSize: int64(d.Get("target_stopped_size").(int)), + {{- end }} + UpdatePolicy: expandUpdatePolicy(d.Get("update_policy").([]interface{})), + InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})), + AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), + StatefulPolicy: expandStatefulPolicy(d), + {{- if ne $.TargetVersionName "ga" }} + Params: expandInstanceGroupManagerParams(d), + {{- end }} + + // Force 
send TargetSize to allow a value of 0. + ForceSendFields: []string{"TargetSize"}, + } + + log.Printf("[DEBUG] InstanceGroupManager insert request: %#v", manager) + op, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Insert( + project, zone, manager).Do() + + if err != nil { + return fmt.Errorf("Error creating InstanceGroupManager: %s", err) + } + + // It probably maybe worked, so store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/zones/{{"{{"}}zone{{"}}"}}/instanceGroupManagers/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + d.SetId(id) + + // Wait for the operation to complete + err = ComputeOperationWaitTime(config, op, project, "Creating InstanceGroupManager", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + // Check if the create operation failed because Terraform was prematurely terminated. If it was we can persist the + // operation id to state so that a subsequent refresh of this resource will wait until the operation has terminated + // before attempting to Read the state of the manager. This allows a graceful resumption of a Create that was killed + // by the upstream Terraform process exiting early such as a sigterm. 
+ select { + case <-config.Context.Done(): + log.Printf("[DEBUG] Persisting %s so this operation can be resumed \n", op.Name) + if err := d.Set("operation", op.Name); err != nil { + return fmt.Errorf("Error setting operation: %s", err) + } + return nil + default: + // leaving default case to ensure this is non blocking + } + return err + } + + if d.Get("wait_for_instances").(bool) { + err := computeIGMWaitForInstanceStatus(d, meta) + if err != nil { + return err + } + } + + return resourceComputeInstanceGroupManagerRead(d, meta) +} + +func flattenNamedPortsBeta(namedPorts []*compute.NamedPort) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(namedPorts)) + for _, namedPort := range namedPorts { + namedPortMap := make(map[string]interface{}) + namedPortMap["name"] = namedPort.Name + namedPortMap["port"] = namedPort.Port + result = append(result, namedPortMap) + } + return result + +} + +func flattenVersions(versions []*compute.InstanceGroupManagerVersion) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(versions)) + for _, version := range versions { + versionMap := make(map[string]interface{}) + versionMap["name"] = version.Name + versionMap["instance_template"] = tpgresource.ConvertSelfLinkToV1(version.InstanceTemplate) + versionMap["target_size"] = flattenFixedOrPercent(version.TargetSize) + result = append(result, versionMap) + } + + return result +} + +func flattenFixedOrPercent(fixedOrPercent *compute.FixedOrPercent) []map[string]interface{} { + result := make(map[string]interface{}) + if value := fixedOrPercent.Percent; value > 0 { + result["percent"] = value + } else if value := fixedOrPercent.Fixed; value > 0 { + result["fixed"] = fixedOrPercent.Fixed + } else { + return []map[string]interface{}{} + } + return []map[string]interface{}{result} +} + +func getManager(d *schema.ResourceData, meta interface{}) (*compute.InstanceGroupManager, error) { + config := meta.(*transport_tpg.Config) + + project, err 
:= tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + zone, _ := tpgresource.GetZone(d, config) + name := d.Get("name").(string) + + manager, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Get(project, zone, name).Do() + if err != nil { + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance Group Manager %q", name)) + } + + if manager == nil { + log.Printf("[WARN] Removing Instance Group Manager %q because it's gone", d.Get("name").(string)) + + // The resource doesn't exist anymore + d.SetId("") + return nil, nil + } + + return manager, nil +} + +func resourceComputeInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + operation := d.Get("operation").(string) + if operation != "" { + log.Printf("[DEBUG] in progress operation detected at %v, attempting to resume", operation) + zone, _ := tpgresource.GetZone(d, config) + op := &compute.Operation{ + Name: operation, + Zone: zone, + } + if err := d.Set("operation", ""); err != nil { + return fmt.Errorf("Error unsetting operation: %s", err) + } + err = ComputeOperationWaitTime(config, op, project, "Creating InstanceGroupManager", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + // remove from state to allow refresh to finish + log.Printf("[DEBUG] Resumed operation returned an error, removing from state: %s", err) + d.SetId("") + return nil + } + } + + manager, err := getManager(d, meta) + if err != nil { + return err + } + if manager == nil { + log.Printf("[WARN] Instance Group Manager %q not found, removing from state.", d.Id()) + d.SetId("") + return nil + } 
+ + if err := d.Set("base_instance_name", manager.BaseInstanceName); err != nil { + return fmt.Errorf("Error setting base_instance_name: %s", err) + } + if err := d.Set("name", manager.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("zone", tpgresource.GetResourceNameFromSelfLink(manager.Zone)); err != nil { + return fmt.Errorf("Error setting zone: %s", err) + } + if err := d.Set("creation_timestamp", manager.CreationTimestamp); err != nil { + return fmt.Errorf("Error reading creation_timestamp: %s", err) + } + if err := d.Set("description", manager.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("target_size", manager.TargetSize); err != nil { + return fmt.Errorf("Error setting target_size: %s", err) + } + if err := d.Set("list_managed_instances_results", manager.ListManagedInstancesResults); err != nil { + return fmt.Errorf("Error setting list_managed_instances_results: %s", err) + } + if err = d.Set("target_pools", tpgresource.MapStringArr(manager.TargetPools, tpgresource.ConvertSelfLinkToV1)); err != nil { + return fmt.Errorf("Error setting target_pools in state: %s", err.Error()) + } + if err = d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)); err != nil { + return fmt.Errorf("Error setting named_port in state: %s", err.Error()) + } + if err = d.Set("stateful_disk", flattenStatefulPolicy(manager.StatefulPolicy)); err != nil { + return fmt.Errorf("Error setting stateful_disk in state: %s", err.Error()) + } + if err = d.Set("stateful_internal_ip", flattenStatefulPolicyStatefulInternalIps(d, manager.StatefulPolicy)); err != nil { + return fmt.Errorf("Error setting stateful_internal_ip in state: %s", err.Error()) + } + if err = d.Set("stateful_external_ip", flattenStatefulPolicyStatefulExternalIps(d, manager.StatefulPolicy)); err != nil { + 
return fmt.Errorf("Error setting stateful_external_ip in state: %s", err.Error()) + } + if err := d.Set("fingerprint", manager.Fingerprint); err != nil { + return fmt.Errorf("Error setting fingerprint: %s", err) + } + if err := d.Set("instance_group", tpgresource.ConvertSelfLinkToV1(manager.InstanceGroup)); err != nil { + return fmt.Errorf("Error setting instance_group: %s", err) + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(manager.SelfLink)); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + + if err = d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil { + return fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error()) + } + if err := d.Set("version", flattenVersions(manager.Versions)); err != nil { + return err + } + {{- if ne $.TargetVersionName "ga" }} + if err = d.Set("standby_policy", flattenStandbyPolicy(manager.StandbyPolicy)); err != nil { + return fmt.Errorf("Error setting standby_policy in state: %s", err.Error()) + } + if err := d.Set("target_suspended_size", manager.TargetSuspendedSize); err != nil { + return fmt.Errorf("Error setting target_suspended_size: %s", err) + } + if err := d.Set("target_stopped_size", manager.TargetStoppedSize); err != nil { + return fmt.Errorf("Error setting target_stopped_size: %s", err) + } + {{- end }} + if err = d.Set("update_policy", flattenUpdatePolicy(manager.UpdatePolicy)); err != nil { + return fmt.Errorf("Error setting update_policy in state: %s", err.Error()) + } + if err = d.Set("instance_lifecycle_policy", flattenInstanceLifecyclePolicy(manager.InstanceLifecyclePolicy)); err != nil { + return fmt.Errorf("Error setting instance lifecycle policy in state: %s", err.Error()) + } + if manager.AllInstancesConfig != nil { + if err = d.Set("all_instances_config", flattenAllInstancesConfig(manager.AllInstancesConfig)); err != nil { + return fmt.Errorf("Error setting all_instances_config in state: %s", 
err.Error()) + } + } + if err = d.Set("status", flattenStatus(manager.Status)); err != nil { + return fmt.Errorf("Error setting status in state: %s", err.Error()) + } + + // If unset in state set to default value + if d.Get("wait_for_instances_status").(string) == "" { + if err := d.Set("wait_for_instances_status", "STABLE"); err != nil { + return fmt.Errorf("Error setting wait_for_instances_status in state: %s", err.Error()) + } + } + + return nil +} + +func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + + updatedManager := &compute.InstanceGroupManager{ + Fingerprint: d.Get("fingerprint").(string), + } + var change bool + + if d.HasChange("description") { + updatedManager.Description = d.Get("description").(string) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "Description") + change = true + } + + if d.HasChange("target_pools") { + updatedManager.TargetPools = tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetPools") + change = true + } + + if d.HasChange("auto_healing_policies") { + updatedManager.AutoHealingPolicies = expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "AutoHealingPolicies") + change = true + } + + if d.HasChange("version") { + updatedManager.Versions = expandVersions(d.Get("version").([]interface{})) + change = true + } + + {{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("standby_policy") { + updatedManager.StandbyPolicy = expandStandbyPolicy(d) + 
change = true + } + + if d.HasChange("target_suspended_size") { + updatedManager.TargetSuspendedSize = int64(d.Get("target_suspended_size").(int)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetSuspendedSize") + change = true + } + + if d.HasChange("target_stopped_size") { + updatedManager.TargetStoppedSize = int64(d.Get("target_stopped_size").(int)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetStoppedSize") + change = true + } + {{- end }} + + if d.HasChange("update_policy") { + updatedManager.UpdatePolicy = expandUpdatePolicy(d.Get("update_policy").([]interface{})) + change = true + } + + if d.HasChange("instance_lifecycle_policy") { + updatedManager.InstanceLifecyclePolicy = expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})) + change = true + } + + if d.HasChange("all_instances_config") { + oldAic, newAic := d.GetChange("all_instances_config") + if newAic == nil || len(newAic.([]interface{})) == 0 { + updatedManager.NullFields = append(updatedManager.NullFields, "AllInstancesConfig") + } else { + updatedManager.AllInstancesConfig = expandAllInstancesConfig(oldAic.([]interface{}), newAic.([]interface{})) + } + change = true + } + + if d.HasChange("stateful_internal_ip") || d.HasChange("stateful_external_ip") || d.HasChange("stateful_disk") { + updatedManager.StatefulPolicy = expandStatefulPolicy(d) + change = true + } + + if d.HasChange("list_managed_instances_results") { + updatedManager.ListManagedInstancesResults = d.Get("list_managed_instances_results").(string) + change = true + } + + if change { + op, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Patch(project, zone, d.Get("name").(string), updatedManager).Do() + if err != nil { + return fmt.Errorf("Error updating managed group instances: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Updating managed group instances", userAgent, d.Timeout(schema.TimeoutUpdate)) + if 
err != nil { + return err + } + } + + // named ports can't be updated through PATCH + // so we call the update method on the instance group, instead of the igm + if d.HasChange("named_port") { + d.Partial(true) + + // Build the parameters for a "SetNamedPorts" request: + namedPorts := getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()) + setNamedPorts := &compute.InstanceGroupsSetNamedPortsRequest{ + NamedPorts: namedPorts, + } + + // Make the request: + op, err := config.NewComputeClient(userAgent).InstanceGroups.SetNamedPorts( + project, zone, d.Get("name").(string), setNamedPorts).Do() + + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete: + err = ComputeOperationWaitTime(config, op, project, "Updating InstanceGroupManager", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + // target_size should be updated through resize + if d.HasChange("target_size") { + d.Partial(true) + + targetSize := int64(d.Get("target_size").(int)) + op, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Resize( + project, zone, d.Get("name").(string), targetSize).Do() + + if err != nil { + return fmt.Errorf("Error updating InstanceGroupManager: %s", err) + } + + // Wait for the operation to complete + err = ComputeOperationWaitTime(config, op, project, "Updating InstanceGroupManager", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + if d.Get("wait_for_instances").(bool) { + err := computeIGMWaitForInstanceStatus(d, meta) + if err != nil { + return err + } + } + + return resourceComputeInstanceGroupManagerRead(d, meta) +} + +func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := 
tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, _ := tpgresource.GetZone(d, config) + name := d.Get("name").(string) + + op, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Delete(project, zone, name).Do() + attempt := 0 + for err != nil && attempt < 20 { + attempt++ + time.Sleep(2000 * time.Millisecond) + op, err = config.NewComputeClient(userAgent).InstanceGroupManagers.Delete(project, zone, name).Do() + } + + if err != nil { + return fmt.Errorf("Error deleting instance group manager: %s", err) + } + + currentSize := int64(d.Get("target_size").(int)) + + // Wait for the operation to complete + err = ComputeOperationWaitTime(config, op, project, "Deleting InstanceGroupManager", userAgent, d.Timeout(schema.TimeoutDelete)) + + for err != nil && currentSize > 0 { + if !strings.Contains(err.Error(), "timeout") { + return err + } + + instanceGroup, igErr := config.NewComputeClient(userAgent).InstanceGroups.Get( + project, zone, name).Do() + if igErr != nil { + return fmt.Errorf("Error getting instance group size: %s", err) + } + + instanceGroupSize := instanceGroup.Size + + if instanceGroupSize >= currentSize { + return fmt.Errorf("Error, instance group isn't shrinking during delete") + } + + log.Printf("[INFO] timeout occurred, but instance group is shrinking (%d < %d)", instanceGroupSize, currentSize) + currentSize = instanceGroupSize + err = ComputeOperationWaitTime(config, op, project, "Deleting InstanceGroupManager", userAgent, d.Timeout(schema.TimeoutDelete)) + } + + d.SetId("") + return nil +} + +func computeIGMWaitForInstanceStatus(d *schema.ResourceData, meta interface{}) error { + waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" + conf := retry.StateChangeConf{ + Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, + Target: []string{"created"}, + Refresh: waitForInstancesRefreshFunc(getManager, 
waitForUpdates, d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + } + _, err := conf.WaitForState() + if err != nil { + return err + } + return nil +} + +func expandAutoHealingPolicies(configured []interface{}) []*compute.InstanceGroupManagerAutoHealingPolicy { + autoHealingPolicies := make([]*compute.InstanceGroupManagerAutoHealingPolicy, 0, len(configured)) + for _, raw := range configured { + data := raw.(map[string]interface{}) + autoHealingPolicy := compute.InstanceGroupManagerAutoHealingPolicy{ + HealthCheck: data["health_check"].(string), + InitialDelaySec: int64(data["initial_delay_sec"].(int)), + } + + autoHealingPolicies = append(autoHealingPolicies, &autoHealingPolicy) + } + return autoHealingPolicies +} + +func expandStatefulPolicy(d *schema.ResourceData) *compute.StatefulPolicy { + + preservedState := &compute.StatefulPolicyPreservedState{} + + isRemovingAStatefulDisk := false + if d.HasChange("stateful_disk") { + oldDisks, newDisks := d.GetChange("stateful_disk") + preservedState.Disks = expandStatefulDisks(newDisks.(*schema.Set).List()) + // Remove Disks + for _, raw := range oldDisks.(*schema.Set).List() { + data := raw.(map[string]interface{}) + deviceName := data["device_name"].(string) + if _, exist := preservedState.Disks[deviceName]; !exist { + isRemovingAStatefulDisk = true + preservedState.NullFields = append(preservedState.NullFields, "Disks." 
+ deviceName) + } + } + preservedState.ForceSendFields = append(preservedState.ForceSendFields, "Disks") + } + if !isRemovingAStatefulDisk { + preservedState := &compute.StatefulPolicyPreservedState{} + stateful_disks := d.Get("stateful_disk").(*schema.Set).List() + disks := make(map[string]compute.StatefulPolicyPreservedStateDiskDevice) + for _, raw := range stateful_disks { + data := raw.(map[string]interface{}) + disk := compute.StatefulPolicyPreservedStateDiskDevice{ + AutoDelete: data["delete_rule"].(string), + } + disks[data["device_name"].(string)] = disk + } + preservedState.Disks = disks + } + + if d.HasChange("stateful_internal_ip") { + oldInternalIps, newInternalIps := d.GetChange("stateful_internal_ip") + preservedState.InternalIPs = expandStatefulIps(newInternalIps.([]interface{})) + // Remove Internal Ips + for _, raw := range oldInternalIps.([]interface{}) { + data := raw.(map[string]interface{}) + networkIp := data["interface_name"].(string) + if _, exist := preservedState.InternalIPs[networkIp]; !exist { + preservedState.NullFields = append(preservedState.NullFields, "InternalIPs." + networkIp) + } + } + preservedState.ForceSendFields = append(preservedState.ForceSendFields, "InternalIPs") + } + + if d.HasChange("stateful_external_ip") { + oldExternalIps, newExternalIps := d.GetChange("stateful_external_ip") + preservedState.ExternalIPs = expandStatefulIps(newExternalIps.([]interface{})) + // Remove External Ips + for _, raw := range oldExternalIps.([]interface{}) { + data := raw.(map[string]interface{}) + networkIp := data["interface_name"].(string) + if _, exist := preservedState.ExternalIPs[networkIp]; !exist { + preservedState.NullFields = append(preservedState.NullFields, "ExternalIPs." 
+ networkIp) + } + } + preservedState.ForceSendFields = append(preservedState.ForceSendFields, "ExternalIPs") + } + + statefulPolicy := &compute.StatefulPolicy{PreservedState: preservedState} + statefulPolicy.ForceSendFields = append(statefulPolicy.ForceSendFields, "PreservedState") + + return statefulPolicy +} + +func expandStatefulDisks(statefulDisk []interface{}) map[string]compute.StatefulPolicyPreservedStateDiskDevice { + statefulDisksMap := make(map[string]compute.StatefulPolicyPreservedStateDiskDevice) + + for _, raw := range statefulDisk { + data := raw.(map[string]interface{}) + deviceName := compute.StatefulPolicyPreservedStateDiskDevice{ + AutoDelete: data["delete_rule"].(string), + } + statefulDisksMap[data["device_name"].(string)] = deviceName + } + return statefulDisksMap +} + +func expandStatefulIps(statefulIP []interface{}) map[string]compute.StatefulPolicyPreservedStateNetworkIp { + statefulIpsMap := make(map[string]compute.StatefulPolicyPreservedStateNetworkIp) + + for _, raw := range statefulIP { + data := raw.(map[string]interface{}) + networkIp := compute.StatefulPolicyPreservedStateNetworkIp{ + AutoDelete: data["delete_rule"].(string), + } + statefulIpsMap[data["interface_name"].(string)] = networkIp + } + return statefulIpsMap +} + +func expandVersions(configured []interface{}) []*compute.InstanceGroupManagerVersion { + versions := make([]*compute.InstanceGroupManagerVersion, 0, len(configured)) + for _, raw := range configured { + data := raw.(map[string]interface{}) + + version := compute.InstanceGroupManagerVersion{ + Name: data["name"].(string), + InstanceTemplate: ConvertToUniqueIdWhenPresent(data["instance_template"].(string)), + TargetSize: expandFixedOrPercent(data["target_size"].([]interface{})), + } + + versions = append(versions, &version) + } + return versions +} + +func expandFixedOrPercent(configured []interface{}) *compute.FixedOrPercent { + fixedOrPercent := &compute.FixedOrPercent{} + + for _, raw := range configured { + if 
raw != nil { + data := raw.(map[string]interface{}) + if percent := data["percent"]; percent.(int) > 0 { + fixedOrPercent.Percent = int64(percent.(int)) + } else { + fixedOrPercent.Fixed = int64(data["fixed"].(int)) + fixedOrPercent.ForceSendFields = []string{"Fixed"} + } + } + } + return fixedOrPercent +} + +func expandInstanceLifecyclePolicy(configured []interface{}) *compute.InstanceGroupManagerInstanceLifecyclePolicy { + instanceLifecyclePolicy := &compute.InstanceGroupManagerInstanceLifecyclePolicy{} + + for _, raw := range configured { + data := raw.(map[string]interface{}) + instanceLifecyclePolicy.ForceUpdateOnRepair = data["force_update_on_repair"].(string) + instanceLifecyclePolicy.DefaultActionOnFailure = data["default_action_on_failure"].(string) + } + return instanceLifecyclePolicy +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandStandbyPolicy(d *schema.ResourceData) *compute.InstanceGroupManagerStandbyPolicy { + standbyPolicy := &compute.InstanceGroupManagerStandbyPolicy{} + for _, sp := range d.Get("standby_policy").([]any) { + spData := sp.(map[string]any) + standbyPolicy.InitialDelaySec = int64(spData["initial_delay_sec"].(int)) + standbyPolicy.ForceSendFields = []string{"InitialDelaySec"} + standbyPolicy.Mode = spData["mode"].(string) + } + return standbyPolicy +} +{{- end }} + +func expandUpdatePolicy(configured []interface{}) *compute.InstanceGroupManagerUpdatePolicy { + updatePolicy := &compute.InstanceGroupManagerUpdatePolicy{} + + for _, raw := range configured { + data := raw.(map[string]interface{}) + + updatePolicy.MinimalAction = data["minimal_action"].(string) + mostDisruptiveAllowedAction := data["most_disruptive_allowed_action"].(string) + if mostDisruptiveAllowedAction != "" { + updatePolicy.MostDisruptiveAllowedAction = mostDisruptiveAllowedAction + } else { + updatePolicy.NullFields = append(updatePolicy.NullFields, "MostDisruptiveAllowedAction") + } + updatePolicy.Type = data["type"].(string) + updatePolicy.ReplacementMethod 
= data["replacement_method"].(string) +{{- if ne $.TargetVersionName "ga" }} + updatePolicy.MinReadySec = int64(data["min_ready_sec"].(int)) + updatePolicy.ForceSendFields = []string{"MinReadySec"} +{{- end }} + + // percent and fixed values are conflicting + // when the percent values are set, the fixed values will be ignored + if v := data["max_surge_percent"]; v.(int) > 0 { + updatePolicy.MaxSurge = &compute.FixedOrPercent{ + Percent: int64(v.(int)), + NullFields: []string{"Fixed"}, + } + } else { + updatePolicy.MaxSurge = &compute.FixedOrPercent{ + Fixed: int64(data["max_surge_fixed"].(int)), + // allow setting this value to 0 + ForceSendFields: []string{"Fixed"}, + NullFields: []string{"Percent"}, + } + } + + if v := data["max_unavailable_percent"]; v.(int) > 0 { + updatePolicy.MaxUnavailable = &compute.FixedOrPercent{ + Percent: int64(v.(int)), + NullFields: []string{"Fixed"}, + } + } else { + updatePolicy.MaxUnavailable = &compute.FixedOrPercent{ + Fixed: int64(data["max_unavailable_fixed"].(int)), + // allow setting this value to 0 + ForceSendFields: []string{"Fixed"}, + NullFields: []string{"Percent"}, + } + } + } + return updatePolicy +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandInstanceGroupManagerParams(d *schema.ResourceData) *compute.InstanceGroupManagerParams { + params := &compute.InstanceGroupManagerParams{} + + if _, ok := d.GetOk("params.0.resource_manager_tags"); ok { + params.ResourceManagerTags = tpgresource.ExpandStringMap(d, "params.0.resource_manager_tags") + } + + return params +} +{{- end }} + +func flattenAutoHealingPolicies(autoHealingPolicies []*compute.InstanceGroupManagerAutoHealingPolicy) []map[string]interface{} { + autoHealingPoliciesSchema := make([]map[string]interface{}, 0, len(autoHealingPolicies)) + for _, autoHealingPolicy := range autoHealingPolicies { + data := map[string]interface{}{ + "health_check": autoHealingPolicy.HealthCheck, + "initial_delay_sec": autoHealingPolicy.InitialDelaySec, + } + + 
autoHealingPoliciesSchema = append(autoHealingPoliciesSchema, data) + } + return autoHealingPoliciesSchema +} + +func flattenStatefulPolicy(statefulPolicy *compute.StatefulPolicy) []map[string]interface{} { + if statefulPolicy == nil || statefulPolicy.PreservedState == nil || statefulPolicy.PreservedState.Disks == nil { + return make([]map[string]interface{}, 0, 0) + } + result := make([]map[string]interface{}, 0, len(statefulPolicy.PreservedState.Disks)) + for deviceName, disk := range statefulPolicy.PreservedState.Disks { + data := map[string]interface{}{ + "device_name": deviceName, + "delete_rule": disk.AutoDelete, + } + + result = append(result, data) + } + return result +} + +func flattenStatefulPolicyStatefulInternalIps(d *schema.ResourceData, statefulPolicy *compute.StatefulPolicy) []map[string]interface{} { + if statefulPolicy == nil || statefulPolicy.PreservedState == nil || statefulPolicy.PreservedState.InternalIPs == nil { + return make([]map[string]interface{}, 0, 0) + } + + return flattenStatefulPolicyStatefulIps(d, "stateful_internal_ip", statefulPolicy.PreservedState.InternalIPs) +} + +func flattenStatefulPolicyStatefulExternalIps(d *schema.ResourceData, statefulPolicy *compute.StatefulPolicy) []map[string]interface{} { + if statefulPolicy == nil || statefulPolicy.PreservedState == nil || statefulPolicy.PreservedState.ExternalIPs == nil { + return make([]map[string]interface{}, 0) + } + + return flattenStatefulPolicyStatefulIps(d, "stateful_external_ip", statefulPolicy.PreservedState.ExternalIPs) +} + +func flattenStatefulPolicyStatefulIps(d *schema.ResourceData, ipfieldName string, ips map[string]compute.StatefulPolicyPreservedStateNetworkIp) []map[string]interface{} { + // statefulPolicy.PreservedState.ExternalIPs and statefulPolicy.PreservedState.InternalIPs are affected by API-side reordering + // of external/internal IPs, where ordering is done by the interface_name value. + // Below we reorder the IPs to match the order in the config. 
+ // Also, data is converted from a map (client library's statefulPolicy.PreservedState.ExternalIPs, or .InternalIPs) to a slice (stored in state). + // Any IPs found from the API response that aren't in the config are appended to the end of the slice. + configData := []map[string]interface{}{} + for _, item := range d.Get(ipfieldName).([]interface{}) { + configData = append(configData, item.(map[string]interface{})) + } + apiData := []map[string]interface{}{} + for interfaceName, ip := range ips { + data := map[string]interface{}{ + "interface_name": interfaceName, + "delete_rule": ip.AutoDelete, + } + apiData = append(apiData, data) + } + sorted, err := tpgresource.SortMapsByConfigOrder(configData, apiData, "interface_name") + if err != nil { + log.Printf("[ERROR] Could not sort API response for %s: %s", ipfieldName, err) + return apiData + } + return sorted +} + +{{ if ne $.TargetVersionName `ga` -}} +func flattenStandbyPolicy(standbyPolicy *compute.InstanceGroupManagerStandbyPolicy) []map[string]any{ + results := []map[string]any{} + if standbyPolicy != nil { + sp := map[string]any{} + sp["initial_delay_sec"] = standbyPolicy.InitialDelaySec + sp["mode"] = standbyPolicy.Mode + results = append(results, sp) + } + return results +} +{{- end }} + +func flattenUpdatePolicy(updatePolicy *compute.InstanceGroupManagerUpdatePolicy) []map[string]interface{} { + results := []map[string]interface{}{} + if updatePolicy != nil { + up := map[string]interface{}{} + if updatePolicy.MaxSurge != nil { + up["max_surge_fixed"] = updatePolicy.MaxSurge.Fixed + up["max_surge_percent"] = updatePolicy.MaxSurge.Percent + } else { + up["max_surge_fixed"] = 0 + up["max_surge_percent"] = 0 + } + if updatePolicy.MaxUnavailable != nil { + up["max_unavailable_fixed"] = updatePolicy.MaxUnavailable.Fixed + up["max_unavailable_percent"] = updatePolicy.MaxUnavailable.Percent + } else { + up["max_unavailable_fixed"] = 0 + up["max_unavailable_percent"] = 0 + } +{{- if ne $.TargetVersionName "ga" }} 
+ up["min_ready_sec"] = updatePolicy.MinReadySec +{{- end }} + up["minimal_action"] = updatePolicy.MinimalAction + up["most_disruptive_allowed_action"] = updatePolicy.MostDisruptiveAllowedAction + up["type"] = updatePolicy.Type + up["replacement_method"] = updatePolicy.ReplacementMethod + results = append(results, up) + } + return results +} + +func flattenInstanceLifecyclePolicy(instanceLifecyclePolicy *compute.InstanceGroupManagerInstanceLifecyclePolicy) []map[string]interface{} { + results := []map[string]interface{}{} + if instanceLifecyclePolicy != nil { + ilp := map[string]interface{}{} + ilp["force_update_on_repair"] = instanceLifecyclePolicy.ForceUpdateOnRepair + ilp["default_action_on_failure"] = instanceLifecyclePolicy.DefaultActionOnFailure + results = append(results, ilp) + } + return results +} + +func expandAllInstancesConfig(old []interface{}, new []interface{}) *compute.InstanceGroupManagerAllInstancesConfig { + var properties *compute.InstancePropertiesPatch + for _, raw := range new { + properties = &compute.InstancePropertiesPatch{} + if raw != nil { + data := raw.(map[string]interface{}) + properties.Metadata = tpgresource.ConvertStringMap(data["metadata"].(map[string]interface{})) + if len(properties.Metadata) == 0 { + properties.NullFields = append(properties.NullFields, "Metadata") + } + properties.Labels = tpgresource.ConvertStringMap(data["labels"].(map[string]interface{})) + if len(properties.Labels) == 0 { + properties.NullFields = append(properties.NullFields, "Labels") + } + } + } + + if properties != nil { + for _, raw := range old { + if raw != nil { + data := raw.(map[string]interface{}) + for k := range data["metadata"].(map[string]interface{}) { + if _, exist := properties.Metadata[k]; !exist { + properties.NullFields = append(properties.NullFields, fmt.Sprintf("Metadata.%s", k)) + } + } + for k := range data["labels"].(map[string]interface{}) { + if _, exist := properties.Labels[k]; !exist { + properties.NullFields = 
append(properties.NullFields, fmt.Sprintf("Labels.%s", k)) + } + } + } + } + } + if properties != nil { + allInstancesConfig := &compute.InstanceGroupManagerAllInstancesConfig{} + allInstancesConfig.Properties = properties + return allInstancesConfig + } else { + return nil + } +} + +func flattenAllInstancesConfig(allInstancesConfig *compute.InstanceGroupManagerAllInstancesConfig) []map[string]interface{} { + results := []map[string]interface{}{} + props := map[string]interface{}{} + if len(allInstancesConfig.Properties.Metadata) > 0 { + props["metadata"] = allInstancesConfig.Properties.Metadata + } + if len(allInstancesConfig.Properties.Labels) > 0 { + props["labels"] = allInstancesConfig.Properties.Labels + } + results = append(results, props) + return results +} + +func flattenStatus(status *compute.InstanceGroupManagerStatus) []map[string]interface{} { + results := []map[string]interface{}{} + data := map[string]interface{}{ + "is_stable": status.IsStable, + "stateful": flattenStatusStateful(status.Stateful), + "version_target": flattenStatusVersionTarget(status.VersionTarget), + } + if status.AllInstancesConfig != nil { + data["all_instances_config"] = flattenStatusAllInstancesConfig(status.AllInstancesConfig) + } + results = append(results, data) + return results +} + +func flattenStatusStateful(stateful *compute.InstanceGroupManagerStatusStateful) []map[string]interface{} { + results := []map[string]interface{}{} + data := map[string]interface{}{ + "has_stateful_config": stateful.HasStatefulConfig, + "per_instance_configs": flattenStatusStatefulConfigs(stateful.PerInstanceConfigs), + } + results = append(results, data) + return results +} + +func flattenStatusStatefulConfigs(statefulConfigs *compute.InstanceGroupManagerStatusStatefulPerInstanceConfigs) []map[string]interface{} { + results := []map[string]interface{}{} + data := map[string]interface{}{ + "all_effective": statefulConfigs.AllEffective, + } + results = append(results, data) + return results +} + 
+func flattenStatusVersionTarget(versionTarget *compute.InstanceGroupManagerStatusVersionTarget) []map[string]interface{} { + results := []map[string]interface{}{} + data := map[string]interface{}{ + "is_reached": versionTarget.IsReached, + } + results = append(results, data) + return results +} + +func flattenStatusAllInstancesConfig(allInstancesConfig *compute.InstanceGroupManagerStatusAllInstancesConfig) []map[string]interface{} { + results := []map[string]interface{}{} + data := map[string]interface{}{ + "effective": allInstancesConfig.Effective, + "current_revision": allInstancesConfig.CurrentRevision, + } + results = append(results, data) + return results +} + +func resourceInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + if err := d.Set("wait_for_instances", false); err != nil { + return nil, fmt.Errorf("Error setting wait_for_instances: %s", err) + } + if err := d.Set("wait_for_instances_status", "STABLE"); err != nil { + return nil, fmt.Errorf("Error setting wait_for_instances_status: %s", err) + } + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/zones/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/zones/{{"{{"}}zone{{"}}"}}/instanceGroupManagers/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager_internal_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager_internal_test.go.tmpl new file mode 100644 index 000000000000..63a7ee9897de --- 
/dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager_internal_test.go.tmpl @@ -0,0 +1,319 @@ +package compute + +import ( + "reflect" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestInstanceGroupManager_parseUniqueId(t *testing.T) { + expectations := map[string][]string{ + "projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=123": {"projects/imre-test/global/instanceTemplates/example-template-custom", "123"}, + "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=123": {"https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom", "123"}, + "projects/imre-test/global/instanceTemplates/example-template-custom": {"projects/imre-test/global/instanceTemplates/example-template-custom", ""}, + "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom": {"https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom", ""}, + "example-template-custom?uniqueId=123": {"example-template-custom", "123"}, + + // this test demonstrates that uniqueIds can't override each other + "projects/imre-test/global/instanceTemplates/example?uniqueId=123?uniqueId=456": {"projects/imre-test/global/instanceTemplates/example", "123?uniqueId=456"}, + } + + for k, v := range expectations { + aName, aUniqueId := parseUniqueId(k) + if v[0] != aName { + t.Errorf("parseUniqueId failed; name of %v should be %v, not %v", k, v[0], aName) + } + if v[1] != aUniqueId { + t.Errorf("parseUniqueId failed; uniqueId of %v should be %v, not %v", k, v[1], aUniqueId) + } + } +} + +func TestInstanceGroupManager_compareInstanceTemplate(t 
*testing.T) { + shouldAllMatch := []string{ + // uniqueId not present + "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom", + "projects/imre-test/global/instanceTemplates/example-template-custom", + // uniqueId present + "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=123", + "projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=123", + } + shouldNotMatch := map[string]string{ + // mismatching name + "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom": "projects/imre-test/global/instanceTemplates/example-template-custom2", + "projects/imre-test/global/instanceTemplates/example-template-custom": "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom2", + // matching name, but mismatching uniqueId + "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=123": "projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=1234", + "projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=123": "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=1234", + } + for _, v1 := range shouldAllMatch { + for _, v2 := range shouldAllMatch { + if !compareSelfLinkRelativePathsIgnoreParams("", v1, v2, nil) { + t.Fatalf("compareSelfLinkRelativePathsIgnoreParams did not match (and should have) %v and %v", v1, v2) + } + } + } + + for v1, v2 := range shouldNotMatch { + if compareSelfLinkRelativePathsIgnoreParams("", v1, v2, nil) { + t.Fatalf("compareSelfLinkRelativePathsIgnoreParams did match (and shouldn't) %v and %v", v1, v2) + } + } +} + +func TestInstanceGroupManager_convertUniqueId(t *testing.T) { + matches := map[string]string{ + // uniqueId not present (should return 
the same) + "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom": "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom", + "projects/imre-test/global/instanceTemplates/example-template-custom": "projects/imre-test/global/instanceTemplates/example-template-custom", + // uniqueId present (should return the last component replaced) + "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=123": "https://www.googleapis.com/compute/v1/projects/imre-test/global/instanceTemplates/123", + "projects/imre-test/global/instanceTemplates/example-template-custom?uniqueId=123": "projects/imre-test/global/instanceTemplates/123", + "tf-test-igm-8amncgtq22?uniqueId=8361222501423044003": "8361222501423044003", + } + for input, expected := range matches { + actual := ConvertToUniqueIdWhenPresent(input) + if actual != expected { + t.Fatalf("invalid return value by ConvertToUniqueIdWhenPresent for input %v; expected: %v, actual: %v", input, expected, actual) + } + } +} + +func TestFlattenStatefulPolicyStatefulIps(t *testing.T) { + cases := map[string]struct { + ConfigValues []interface{} + Ips map[string]compute.StatefulPolicyPreservedStateNetworkIp + Expected []map[string]interface{} + }{ + "No IPs in config nor API data": { + ConfigValues: []interface{}{}, + Ips: map[string]compute.StatefulPolicyPreservedStateNetworkIp{}, + Expected: []map[string]interface{}{}, + }, + "Single IP (nic0) in config and API data": { + ConfigValues: []interface{}{ + map[string]interface{}{ + "interface_name": "nic0", + "delete_rule": "NEVER", + }, + }, + Ips: map[string]compute.StatefulPolicyPreservedStateNetworkIp{ + "nic0": { + AutoDelete: "NEVER", + }, + }, + Expected: []map[string]interface{}{ + { + "interface_name": "nic0", + "delete_rule": "NEVER", + }, + }, + }, + "Two IPs (nic0, nic1). 
Unordered in config and sorted in API data": { + ConfigValues: []interface{}{ + map[string]interface{}{ + "interface_name": "nic1", + "delete_rule": "NEVER", + }, + map[string]interface{}{ + "interface_name": "nic0", + "delete_rule": "NEVER", + }, + }, + Ips: map[string]compute.StatefulPolicyPreservedStateNetworkIp{ + "nic0": { + AutoDelete: "NEVER", + }, + "nic1": { + AutoDelete: "NEVER", + }, + }, + Expected: []map[string]interface{}{ + { + "interface_name": "nic1", + "delete_rule": "NEVER", + }, + { + "interface_name": "nic0", + "delete_rule": "NEVER", + }, + }, + }, + "Two IPs (nic0, nic1). Only nic0 in config and both stored in API data": { + ConfigValues: []interface{}{ + map[string]interface{}{ + "interface_name": "nic0", + "delete_rule": "NEVER", + }, + }, + Ips: map[string]compute.StatefulPolicyPreservedStateNetworkIp{ + "nic0": { + AutoDelete: "NEVER", + }, + "nic1": { + AutoDelete: "NEVER", + }, + }, + Expected: []map[string]interface{}{ + { + "interface_name": "nic0", + "delete_rule": "NEVER", + }, + { + "interface_name": "nic1", + "delete_rule": "NEVER", + }, + }, + }, + "Five IPs (nic0 - nic4). None stored in config and all stored in API data": { + ConfigValues: []interface{}{}, + Ips: map[string]compute.StatefulPolicyPreservedStateNetworkIp{ + // Out of order here to encourage randomness + "nic3": { + AutoDelete: "NEVER", + }, + "nic0": { + AutoDelete: "NEVER", + }, + "nic1": { + AutoDelete: "NEVER", + }, + "nic4": { + AutoDelete: "NEVER", + }, + "nic2": { + AutoDelete: "NEVER", + }, + }, + Expected: []map[string]interface{}{ + { + "interface_name": "nic0", + "delete_rule": "NEVER", + }, + { + "interface_name": "nic1", + "delete_rule": "NEVER", + }, + { + "interface_name": "nic2", + "delete_rule": "NEVER", + }, + { + "interface_name": "nic3", + "delete_rule": "NEVER", + }, + { + "interface_name": "nic4", + "delete_rule": "NEVER", + }, + }, + }, + "Three IPs (nic0, nic1, nic2). 
Only nic1, nic2 in config and all 3 stored in API data": { + ConfigValues: []interface{}{ + map[string]interface{}{ + "interface_name": "nic1", + "delete_rule": "NEVER", + }, + map[string]interface{}{ + "interface_name": "nic2", + "delete_rule": "NEVER", + }, + }, + Ips: map[string]compute.StatefulPolicyPreservedStateNetworkIp{ + "nic0": { + AutoDelete: "NEVER", + }, + "nic1": { + AutoDelete: "NEVER", + }, + "nic2": { + AutoDelete: "NEVER", + }, + }, + Expected: []map[string]interface{}{ + { + "interface_name": "nic1", + "delete_rule": "NEVER", + }, + { + "interface_name": "nic2", + "delete_rule": "NEVER", + }, + { + "interface_name": "nic0", + "delete_rule": "NEVER", + }, + }, + }, + "Three IPs (nic0, nic1, nic2). Only nic0, nic2 in config and only nic1, nic2 stored in API data": { + ConfigValues: []interface{}{ + map[string]interface{}{ + "interface_name": "nic2", + "delete_rule": "NEVER", + }, + map[string]interface{}{ + "interface_name": "nic0", + "delete_rule": "NEVER", + }, + }, + Ips: map[string]compute.StatefulPolicyPreservedStateNetworkIp{ + "nic1": { + AutoDelete: "NEVER", + }, + "nic2": { + AutoDelete: "NEVER", + }, + }, + Expected: []map[string]interface{}{ + { + "interface_name": "nic2", + "delete_rule": "NEVER", + }, + { + "interface_name": "nic1", + "delete_rule": "NEVER", + }, + }, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Terraform config + schema := ResourceComputeRegionInstanceGroupManager().Schema + config := map[string]interface{}{ + "stateful_external_ip": tc.ConfigValues, + "stateful_internal_ip": tc.ConfigValues, + } + d := tpgresource.SetupTestResourceDataFromConfigMap(t, schema, config) + + // API response + statefulPolicyPreservedState := compute.StatefulPolicyPreservedState{ + ExternalIPs: tc.Ips, + InternalIPs: tc.Ips, + } + statefulPolicy := compute.StatefulPolicy{ + PreservedState: &statefulPolicyPreservedState, + } + + outputExternal := flattenStatefulPolicyStatefulExternalIps(d, 
&statefulPolicy) + if !reflect.DeepEqual(tc.Expected, outputExternal) { + t.Fatalf("expected external IPs output to be %#v, but got %#v", tc.Expected, outputExternal) + } + + outputInternal := flattenStatefulPolicyStatefulInternalIps(d, &statefulPolicy) + if !reflect.DeepEqual(tc.Expected, outputInternal) { + t.Fatalf("expected internal IPs output to be %#v, but got %#v", tc.Expected, outputInternal) + } + }) + } +} + diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager_test.go.tmpl new file mode 100644 index 000000000000..d6e1632ae483 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_manager_test.go.tmpl @@ -0,0 +1,2034 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + {{- if ne $.TargetVersionName "ga" }} + "github.com/hashicorp/terraform-provider-google/google/envvar" + {{- end }} +) + +func TestAccInstanceGroupManager_basic(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + target := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm1 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm2 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_basic(template, target, igm1, igm2), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + ResourceName: "google_compute_instance_group_manager.igm-no-tp", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccInstanceGroupManager_self_link_unique(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + target := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm1 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm2 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_self_link_unique(template, target, igm1, igm2), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + ResourceName: "google_compute_instance_group_manager.igm-no-tp", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccInstanceGroupManager_targetSizeZero(t *testing.T) { + t.Parallel() + + templateName := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igmName := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_targetSizeZero(templateName, igmName), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccInstanceGroupManager_update(t *testing.T) { + t.Parallel() + + template1 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + target1 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + target2 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + template2 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + description := "Manager 1" + description2 := "Manager 2" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_update(template1, target1, description, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "DO_NOTHING"), + ), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_update2(template1, target1, target2, template2, description, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "REPAIR"), + ), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_update3(template1, target1, target2, template2, description2, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_instance_group_manager.igm-update", 
"instance_lifecycle_policy.0.default_action_on_failure", "REPAIR"), + ), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccInstanceGroupManager_updateLifecycle(t *testing.T) { + // Randomness in instance template + acctest.SkipIfVcr(t) + t.Parallel() + + tag1 := "tag1" + tag2 := "tag2" + igm := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_updateLifecycle(tag1, igm), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_updateLifecycle(tag2, igm), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccInstanceGroupManager_updatePolicy(t *testing.T) { + // Randomness in instance template + acctest.SkipIfVcr(t) + t.Parallel() + + igm := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_rollingUpdatePolicy(igm), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-rolling-update-policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { 
+ Config: testAccInstanceGroupManager_rollingUpdatePolicy2(igm), + }, + + { + ResourceName: "google_compute_instance_group_manager.igm-rolling-update-policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_rollingUpdatePolicy3(igm), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-rolling-update-policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_rollingUpdatePolicy4(igm), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-rolling-update-policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_rollingUpdatePolicy5(igm), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-rolling-update-policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccInstanceGroupManager_separateRegions(t *testing.T) { + // Randomness in instance template + acctest.SkipIfVcr(t) + t.Parallel() + + igm1 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm2 := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_separateRegions(igm1, igm2), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic-2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"status"}, + }, + }, + }) +} + +func TestAccInstanceGroupManager_versions(t *testing.T) { + t.Parallel() + + primaryTemplate := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + canaryTemplate := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_versions(primaryTemplate, canaryTemplate, igm), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccInstanceGroupManager_autoHealingPolicies(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + target := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + hck := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_autoHealingPolicies(template, target, igm, hck), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_autoHealingPoliciesRemoved(template, target, igm, hck), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccInstanceGroupManager_stateful(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + target := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + hck := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_stateful(network, template, target, igm, hck), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_statefulUpdated(network, template, target, igm, hck), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_statefulRemoved(network, template, target, igm), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccInstanceGroupManager_stoppedSuspendedTargetSize(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_stoppedSuspendedTargetSize(template, network, igm), + }, + { + ResourceName: "google_compute_instance_group_manager.sr-igm", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccInstanceGroupManager_stoppedSuspendedTargetSizeUpdate(template, network, igm), + }, + { + ResourceName: "google_compute_instance_group_manager.sr-igm", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} +{{- end }} + +func TestAccInstanceGroupManager_waitForStatus(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + target := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + perInstanceConfig := fmt.Sprintf("tf-test-config-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_waitForStatus(template, target, igm, perInstanceConfig), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status", "wait_for_instances_status", "wait_for_instances"}, + }, + { + Config: testAccInstanceGroupManager_waitForStatusUpdated(template, target, igm, perInstanceConfig), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status", "wait_for_instances_status", 
"wait_for_instances"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccInstanceGroupManager_resourceManagerTags(t *testing.T) { + t.Parallel() + + tag_name := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + template_name := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm_name := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + project_id := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccInstanceGroupManager_resourceManagerTags(template_name, tag_name, igm_name, project_id), + }, + { + ResourceName: "google_compute_instance_group_manager.igm-tags", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status", "params"}, + }, + }, + }) +} +{{- end }} + +func testAccCheckInstanceGroupManagerDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_group_manager" { + continue + } + _, err := config.NewComputeClient(config.UserAgent).InstanceGroupManagers.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + if err == nil { + return fmt.Errorf("InstanceGroupManager still exists") + } + } + + return nil + } +} + +func testAccInstanceGroupManager_basic(template, target, igm1, igm2 string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = 
data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-basic.self_link + } + + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + target_size = 2 + list_managed_instances_results = "PAGINATED" +} + +resource "google_compute_instance_group_manager" "igm-no-tp" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-basic.self_link + } + + base_instance_name = "tf-test-igm-no-tp" + zone = "us-central1-c" + target_size = 2 +} +`, template, target, igm1, igm2) +} + +func testAccInstanceGroupManager_self_link_unique(template, target, igm1, igm2 string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = 
"CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-basic.self_link_unique + } + + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + target_size = 2 + list_managed_instances_results = "PAGINATED" +} + +resource "google_compute_instance_group_manager" "igm-no-tp" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-basic.self_link + } + + base_instance_name = "tf-test-igm-no-tp" + zone = "us-central1-c" + target_size = 2 +} +`, template, target, igm1, igm2) +} + +func testAccInstanceGroupManager_targetSizeZero(template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-basic.self_link + } + + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" +} +`, template, igm) +} + +func testAccInstanceGroupManager_update(template, target, description, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = 
"debian-cloud" +} + +resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-update" { + description = "%s" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-update.self_link + } + + target_pools = [google_compute_target_pool.igm-update.self_link] + base_instance_name = "tf-test-igm-update" + zone = "us-central1-c" + target_size = 2 + named_port { + name = "customhttp" + port = 8080 + } + all_instances_config { + metadata = { + foo = "bar" + } + labels = { + doo = "dad" + } + } + + instance_lifecycle_policy { + force_update_on_repair = "YES" + default_action_on_failure = "DO_NOTHING" + } +} +`, template, target, description, igm) +} + +// Change IGM's instance template and target size +func testAccInstanceGroupManager_update2(template1, target1, target2, template2, description, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource 
"google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_target_pool" "igm-update2" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_template" "igm-update2" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_group_manager" "igm-update" { + description = "%s" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-update2.self_link + } + + target_pools = [ + google_compute_target_pool.igm-update.self_link, + google_compute_target_pool.igm-update2.self_link, + ] + base_instance_name = "tf-test-igm-update" + zone = "us-central1-c" + target_size = 3 + list_managed_instances_results = "PAGINATED" + named_port { + name = "customhttp" + port = 8080 + } + named_port { + name = "customhttps" + port = 8443 + } + + all_instances_config { + metadata = { + doo = "dad" + } + labels = { + foo = "bar" + } + } + + instance_lifecycle_policy { + force_update_on_repair = "NO" + default_action_on_failure = "REPAIR" + } +} +`, template1, target1, target2, template2, description, igm) +} + +// Remove target pools +func testAccInstanceGroupManager_update3(template1, target1, target2, template2, description2, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false 
+ tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_target_pool" "igm-update2" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_template" "igm-update2" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_group_manager" "igm-update" { + description = "%s" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-update2.self_link + } + + base_instance_name = "tf-test-igm-update" + zone = "us-central1-c" + target_size = 3 + list_managed_instances_results = "PAGINATED" + named_port { + name = "customhttp" + port = 8080 + } + named_port { + name = "customhttps" + port = 8443 + } +} +`, template1, target1, target2, template2, description2, igm) +} + +func testAccInstanceGroupManager_updateLifecycle(tag, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-update" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["%s"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = 
true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-update.self_link + } + + base_instance_name = "tf-test-igm-update" + zone = "us-central1-c" + target_size = 2 + named_port { + name = "customhttp" + port = 8080 + } +} +`, tag, igm) +} + +func testAccInstanceGroupManager_rollingUpdatePolicy(igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-rolling-update-policy" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["terraform-testing"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_instance_group_manager" "igm-rolling-update-policy" { + description = "Terraform test instance group manager" + name = "%s" + version { + name = "prod" + instance_template = google_compute_instance_template.igm-rolling-update-policy.self_link + } + base_instance_name = "tf-test-igm-rolling-update-policy" + zone = "us-central1-c" + target_size = 3 + update_policy { + type = "PROACTIVE" + minimal_action = "REPLACE" + max_surge_percent = 50 + max_unavailable_percent = 50 + } + named_port { + name = "customhttp" + port = 8080 + } +} +`, igm) +} + +func testAccInstanceGroupManager_rollingUpdatePolicy2(igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = 
"debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-rolling-update-policy" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["terraform-testing"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_instance_group_manager" "igm-rolling-update-policy" { + description = "Terraform test instance group manager" + name = "%s" + version { + name = "prod2" + instance_template = google_compute_instance_template.igm-rolling-update-policy.self_link + } + base_instance_name = "tf-test-igm-rolling-update-policy" + zone = "us-central1-c" + target_size = 3 + update_policy { + type = "PROACTIVE" + minimal_action = "REPLACE" + most_disruptive_allowed_action = "REPLACE" + max_surge_fixed = 2 + max_unavailable_fixed = 2 + } + named_port { + name = "customhttp" + port = 8080 + } +} +`, igm) +} + +func testAccInstanceGroupManager_rollingUpdatePolicy3(igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-rolling-update-policy" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["terraform-testing"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_instance_group_manager" "igm-rolling-update-policy" { + description = "Terraform test instance group manager" + name = "%s" + version { + name = "prod2" + instance_template = google_compute_instance_template.igm-rolling-update-policy.self_link + } + base_instance_name = "tf-test-igm-rolling-update-policy" + zone = "us-central1-c" + target_size = 3 + update_policy { 
+ type = "PROACTIVE" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 2 +{{- if ne $.TargetVersionName "ga" }} + min_ready_sec = 10 +{{- end }} + } + named_port { + name = "customhttp" + port = 8080 + } +} +`, igm) +} + +func testAccInstanceGroupManager_rollingUpdatePolicy4(igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-rolling-update-policy" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["terraform-testing"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_instance_group_manager" "igm-rolling-update-policy" { + description = "Terraform test instance group manager" + name = "%s" + version { + name = "prod2" + instance_template = google_compute_instance_template.igm-rolling-update-policy.self_link + } + base_instance_name = "tf-test-igm-rolling-update-policy" + zone = "us-central1-c" + target_size = 3 + update_policy { + type = "PROACTIVE" + minimal_action = "REPLACE" + max_surge_fixed = 2 + max_unavailable_fixed = 0 + } + named_port { + name = "customhttp" + port = 8080 + } +} +`, igm) +} + +func testAccInstanceGroupManager_rollingUpdatePolicy5(igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-rolling-update-policy" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["terraform-testing"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + lifecycle { + create_before_destroy = true + } +} + +resource 
"google_compute_instance_group_manager" "igm-rolling-update-policy" { + description = "Terraform test instance group manager" + name = "%s" + version { + name = "prod2" + instance_template = google_compute_instance_template.igm-rolling-update-policy.self_link + } + base_instance_name = "tf-test-igm-rolling-update-policy" + zone = "us-central1-c" + target_size = 3 + update_policy { + type = "PROACTIVE" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 2 + replacement_method = "RECREATE" + } + named_port { + name = "customhttp" + port = 8080 + } +} +`, igm) +} + +func testAccInstanceGroupManager_separateRegions(igm1, igm2 string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "prod" + } + + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + target_size = 2 +} + +resource "google_compute_instance_group_manager" "igm-basic-2" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-basic.self_link + } + + base_instance_name = "tf-test-igm-basic-2" + zone = "us-west1-b" + target_size = 2 +} +`, igm1, igm2) +} + +func testAccInstanceGroupManager_autoHealingPolicies(template, target, igm, hck string) string { + return fmt.Sprintf(` +data 
"google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "prod" + } + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + target_size = 2 + auto_healing_policies { + health_check = google_compute_http_health_check.zero.self_link + initial_delay_sec = "10" + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, template, target, igm, hck) +} + +func testAccInstanceGroupManager_autoHealingPoliciesRemoved(template, target, igm, hck string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource 
"google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "prod" + } + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + target_size = 2 +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, template, target, igm, hck) +} + +func testAccInstanceGroupManager_versions(primaryTemplate string, canaryTemplate string, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-primary" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_template" "igm-canary" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + target_size = 2 + + 
version { + name = "primary" + instance_template = google_compute_instance_template.igm-primary.self_link + } + + version { + name = "canary" + instance_template = google_compute_instance_template.igm-canary.self_link + target_size { + fixed = 1 + } + } +} +`, primaryTemplate, canaryTemplate, igm) +} + +func testAccInstanceGroupManager_stateful(network, template, target, igm, hck string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "igm-basic" { + name = "%s" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "my-stateful-disk" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "non-stateful" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "my-stateful-disk2" + } + + network_interface { + network = "default" + } + + network_interface { + network = google_compute_network.igm-basic.self_link + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "prod" + } + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + target_size = 2 + stateful_disk { + device_name = "my-stateful-disk" + 
delete_rule = "ON_PERMANENT_INSTANCE_DELETION" + } + + stateful_internal_ip { + interface_name = "nic0" + delete_rule = "ON_PERMANENT_INSTANCE_DELETION" + } + + stateful_external_ip { + interface_name = "nic0" + delete_rule = "NEVER" + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, network, template, target, igm, hck) +} + +func testAccInstanceGroupManager_statefulUpdated(network, template, target, igm, hck string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "igm-basic" { + name = "%s" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "my-stateful-disk" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "non-stateful" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "my-stateful-disk2" + } + + network_interface { + network = "default" + } + + network_interface { + network = google_compute_network.igm-basic.self_link + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "prod" + } + target_pools = [google_compute_target_pool.igm-basic.self_link] + 
base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + target_size = 2 + stateful_disk { + device_name = "my-stateful-disk" + delete_rule = "NEVER" + } + stateful_disk { + device_name = "my-stateful-disk2" + delete_rule = "ON_PERMANENT_INSTANCE_DELETION" + } + + stateful_internal_ip { + interface_name = "nic0" + delete_rule = "ON_PERMANENT_INSTANCE_DELETION" + } + + stateful_external_ip { + interface_name = "nic0" + delete_rule = "NEVER" + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, network, template, target, igm, hck) +} + +func testAccInstanceGroupManager_statefulRemoved(network, template, target, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "igm-basic" { + name = "%s" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "my-stateful-disk" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "non-stateful" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "my-stateful-disk2" + } + + network_interface { + network = "default" + } + + network_interface { + network = google_compute_network.igm-basic.self_link + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance 
group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "prod" + } + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + target_size = 2 +} +`, network, template, target, igm) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccInstanceGroupManager_stoppedSuspendedTargetSize(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "sr-igm" { + name = "%s" +} + +resource "google_compute_instance_template" "sr-igm" { + name = "%s" + machine_type = "e2-medium" + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + network_interface { + network = "default" + } +} + +resource "google_compute_instance_group_manager" "sr-igm" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + base_instance_name = "tf-test-sr-igm" + zone = "us-central1-c" + target_size = 2 + standby_policy { + initial_delay_sec = 20 + mode = "MANUAL" + } + target_suspended_size = 2 + target_stopped_size = 1 +} +`, network, template, igm) +} + +func testAccInstanceGroupManager_stoppedSuspendedTargetSizeUpdate(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "sr-igm" { + name = "%s" +} + +resource "google_compute_instance_template" "sr-igm" { + name = "%s" + machine_type = "e2-medium" + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + network_interface { + network = 
"default" + } +} + +resource "google_compute_instance_group_manager" "sr-igm" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + base_instance_name = "tf-test-sr-igm" + zone = "us-central1-c" + target_size = 2 + standby_policy { + mode = "SCALE_OUT_POOL" + } + target_suspended_size = 1 +} +`, network, template, igm) +} +{{- end }} + +func testAccInstanceGroupManager_waitForStatus(template, target, igm, perInstanceConfig string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "my-stateful-disk" + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "prod" + } + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + wait_for_instances = true + wait_for_instances_status = "STABLE" +} + +resource "google_compute_per_instance_config" "per-instance" { + instance_group_manager = google_compute_instance_group_manager.igm-basic.name + zone = "us-central1-c" + name = "%s" + remove_instance_state_on_destroy = true + preserved_state 
{ + metadata = { + foo = "bar" + } + } +} +`, template, target, igm, perInstanceConfig) +} + +func testAccInstanceGroupManager_waitForStatusUpdated(template, target, igm, perInstanceConfig string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "my-stateful-disk" + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "prod2" + } + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + zone = "us-central1-c" + update_policy { + type = "PROACTIVE" + minimal_action = "REPLACE" + replacement_method = "RECREATE" + max_surge_fixed = 0 + max_unavailable_percent = 50 + } + all_instances_config { + metadata = { + doo = "dad" + } + labels = { + foo = "bar" + } + } + instance_lifecycle_policy { + force_update_on_repair = "YES" + default_action_on_failure = "REPAIR" + } + wait_for_instances = true + wait_for_instances_status = "UPDATED" +} + +resource "google_compute_per_instance_config" "per-instance" { + instance_group_manager = google_compute_instance_group_manager.igm-basic.name + zone = "us-central1-c" + name = "%s" + remove_instance_state_on_destroy = true + preserved_state { + 
metadata = { + foo = "baz" + } + } +} +`, template, target, igm, perInstanceConfig) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccInstanceGroupManager_resourceManagerTags(template_name, tag_name, igm_name, project_id string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-tags" { + name = "%s" + description = "Terraform test instance template." + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + } +} + +resource "google_tags_tag_key" "igm-key" { + description = "Terraform test tag key." + parent = "projects/%s" + short_name = "%s" +} + +resource "google_tags_tag_value" "igm-value" { + description = "Terraform test tag value." + parent = "tagKeys/${google_tags_tag_key.igm-key.name}" + short_name = "%s" +} + +resource "google_compute_instance_group_manager" "igm-tags" { + description = "Terraform test instance group manager." 
+ name = "%s" + base_instance_name = "tf-igm-tags-test" + zone = "us-central1-a" + target_size = 0 + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-tags.self_link + } + + params { + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.igm-key.name}" = "tagValues/${google_tags_tag_value.igm-value.name}" + } + } +} +`, template_name, project_id, tag_name, tag_name, igm_name) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_membership_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_membership_test.go.tmpl new file mode 100644 index 000000000000..a29af0152a5e --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_membership_test.go.tmpl @@ -0,0 +1,232 @@ +package compute_test +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccComputeInstanceGroupMembership_instanceGroupMembershipBasic(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + suffix := acctest.RandString(t, 10) + context := map[string]interface{}{ + "random_suffix": suffix, + "zone": envvar.GetTestZoneFromEnv(), + } + + igId := fmt.Sprintf("projects/%s/zones/%s/instanceGroups/instance-group-%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), context["random_suffix"]) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one membership resource + Config: 
testAccComputeInstanceGroupMembership_instanceGroupMembershipBasic(context), + }, + { + ResourceName: "google_compute_instance_group_membership.default", + ImportState: true, + ImportStateVerify: true, + }, + { + // Add two new members + Config: testAccComputeInstanceGroupMembership_instanceGroupMembershipAdditional(context), + }, + { + ResourceName: "google_compute_instance_group_membership.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance", "zone", "instance_group"}, + }, + { + ResourceName: "google_compute_instance_group_membership.add1", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance", "zone", "instance_group"}, + }, + { + ResourceName: "google_compute_instance_group_membership.add2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance", "zone", "instance_group"}, + }, + { + // Remove add1 and add2 membership resources + Config: testAccComputeInstanceGroupMembership_instanceGroupMembershipBasic(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceGroupMembershipDestroyed( + t, igId, + testAccComputeInstanceGroupMembershipGetInstanceName("add1-instance", suffix), + testAccComputeInstanceGroupMembershipGetInstanceName("add2-instance", suffix), + ), + ), + }, + { + ResourceName: "google_compute_instance_group_membership.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance", "zone", "instance_group"}, + }, + { + // Delete all membership resources + Config: testAccComputeInstanceGroupMembership_noInstanceGroupMembership(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceGroupMembershipDestroyed( + t, igId, + testAccComputeInstanceGroupMembershipGetInstanceName("default-instance", suffix)), + ), + }, + }, + }) +} + +func testAccComputeInstanceGroupMembership_instanceGroupMembershipBasic(context map[string]interface{}) 
string { + return acctest.Nprintf(` + resource "google_compute_instance_group_membership" "default" { + zone = "%{zone}" + instance_group = google_compute_instance_group.default.name + instance = google_compute_instance.default.self_link + } + `, context) + testAccComputeInstanceGroupMembership_noInstanceGroupMembership(context) +} + +func testAccComputeInstanceGroupMembership_instanceGroupMembershipAdditional(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_instance_group_membership" "add1" { + instance_group = google_compute_instance_group.default.name + instance = google_compute_instance.add1.self_link + } + + resource "google_compute_instance_group_membership" "add2" { + instance_group = google_compute_instance_group.default.name + instance = google_compute_instance.add2.self_link + } + `, context) + testAccComputeInstanceGroupMembership_instanceGroupMembershipBasic(context) +} + +func testAccComputeInstanceGroupMembership_noInstanceGroupMembership(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_network" "default-network" { + name = "default-%{random_suffix}" + } + + resource "google_compute_instance" "default" { + name = "default-instance-%{random_suffix}" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + network_interface { + network = google_compute_network.default-network.name + } + } + + resource "google_compute_instance" "add1" { + name = "add1-instance-%{random_suffix}" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + network_interface { + network = google_compute_network.default-network.name + } + } + + resource "google_compute_instance" "add2" { + name = "add2-instance-%{random_suffix}" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + network_interface { + network = 
google_compute_network.default-network.name + } + } + + resource "google_compute_instance_group" "default" { + name = "instance-group-%{random_suffix}" + } + `, context) +} + +func testAccCheckComputeInstanceGroupMembershipDestroyed(t *testing.T, instanceGroupId string, instances ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundInstances, err := testAccComputeInstanceGroupMembershipListMembership(t, instanceGroupId) + if err != nil { + return fmt.Errorf("unable to confirm instance group members with instances %+v was destroyed: %v", instances, err) + } + for _, p := range instances { + if _, ok := foundInstances[p]; ok { + return fmt.Errorf("instance group with instance %s still exists", p) + } + } + return nil + } +} + +func testAccComputeInstanceGroupMembershipListMembership(t *testing.T, instanceGroupId string) (map[string]struct{}, error) { + config := acctest.GoogleProviderConfig(t) + + {{ if eq $.TargetVersionName `ga` }} + url := fmt.Sprintf("https://www.googleapis.com/compute/v1/%s/listInstances", instanceGroupId) + {{- else }} + url := fmt.Sprintf("https://www.googleapis.com/compute/beta/%s/listInstances", instanceGroupId) + {{- end }} + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: config.UserAgent, + }) + + if err != nil { + return nil, err + } + + v, ok := res["items"] + if !ok || v == nil { + return nil, nil + } + + items := v.([]interface{}) + instances := make(map[string]struct{}) + for _, item := range items { + instanceWithStatus := item.(map[string]interface{}) + v, ok := instanceWithStatus["instance"] + if !ok || v == nil { + continue + } + instance := v.(string) + instances[fmt.Sprintf("%v", instance)] = struct{}{} + } + return instances, nil +} + +func testAccComputeInstanceGroupMembershipGetInstanceName(instanceName string, suffix string) (string) { + return fmt.Sprintf("projects/%s/zones/%s/instances/%s-%s", + 
envvar.GetTestProjectFromEnv(), + envvar.GetTestZoneFromEnv(), + instanceName, + suffix) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_test.go.tmpl new file mode 100644 index 000000000000..d71408949dbe --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_group_test.go.tmpl @@ -0,0 +1,632 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestAccComputeInstanceGroup_basic(t *testing.T) { + t.Parallel() + + var instanceGroup compute.InstanceGroup + var resourceName = "google_compute_instance_group.basic" + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var zone = "us-central1-c" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComputeInstanceGroup_destroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceGroup_basic(zone, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + t, "google_compute_instance_group.basic", &instanceGroup), + testAccComputeInstanceGroup_exists( + t, "google_compute_instance_group.empty", &instanceGroup), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateId: fmt.Sprintf("%s/%s/%s", 
envvar.GetTestProjectFromEnv(), zone, instanceName), + }, + }, + }) +} + +func TestAccComputeInstanceGroup_rename(t *testing.T) { + t.Parallel() + + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var instanceGroupName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var backendName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var healthName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComputeInstanceGroup_destroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceGroup_rename(instanceName, instanceGroupName, backendName, healthName), + }, + { + ResourceName: "google_compute_instance_group.basic", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstanceGroup_rename(instanceName, instanceGroupName+"2", backendName, healthName), + }, + { + ResourceName: "google_compute_instance_group.basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceGroup_update(t *testing.T) { + t.Parallel() + + var instanceGroup compute.InstanceGroup + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComputeInstanceGroup_destroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceGroup_update(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + t, "google_compute_instance_group.update", &instanceGroup), + testAccComputeInstanceGroup_named_ports( + t, + "google_compute_instance_group.update", + map[string]int64{"http": 8080, "https": 8443}, + &instanceGroup), + ), + }, + { + Config: 
testAccComputeInstanceGroup_update2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + t, "google_compute_instance_group.update", &instanceGroup), + testAccComputeInstanceGroup_updated( + t, "google_compute_instance_group.update", 1, &instanceGroup), + testAccComputeInstanceGroup_named_ports( + t, + "google_compute_instance_group.update", + map[string]int64{"http": 8081, "test": 8444}, + &instanceGroup), + ), + }, + }, + }) +} + +func TestAccComputeInstanceGroup_outOfOrderInstances(t *testing.T) { + t.Parallel() + + var instanceGroup compute.InstanceGroup + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComputeInstanceGroup_destroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceGroup_outOfOrderInstances(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + t, "google_compute_instance_group.group", &instanceGroup), + ), + }, + }, + }) +} + +func TestAccComputeInstanceGroup_network(t *testing.T) { + t.Parallel() + + var instanceGroup compute.InstanceGroup + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccComputeInstanceGroup_destroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceGroup_network(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccComputeInstanceGroup_exists( + t, "google_compute_instance_group.with_instance", &instanceGroup), + testAccComputeInstanceGroup_hasCorrectNetwork( + t, "google_compute_instance_group.with_instance", "google_compute_network.ig_network", &instanceGroup), + 
testAccComputeInstanceGroup_exists( + t, "google_compute_instance_group.without_instance", &instanceGroup), + testAccComputeInstanceGroup_hasCorrectNetwork( + t, "google_compute_instance_group.without_instance", "google_compute_network.ig_network", &instanceGroup), + ), + }, + }, + }) +} + +func testAccComputeInstanceGroup_destroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_group" { + continue + } + _, err := config.NewComputeClient(config.UserAgent).InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + if err == nil { + return fmt.Errorf("InstanceGroup still exists") + } + } + + return nil + } +} + +func testAccComputeInstanceGroup_exists(t *testing.T, n string, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewComputeClient(config.UserAgent).InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + *instanceGroup = *found + + return nil + } +} + +func testAccComputeInstanceGroup_updated(t *testing.T, n string, size int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + instanceGroup, err := config.NewComputeClient(config.UserAgent).InstanceGroups.Get( + config.Project, 
rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + // Cannot check the target pool as the instance creation is asynchronous. However, can + // check the target_size. + if instanceGroup.Size != size { + return fmt.Errorf("instance count incorrect. saw real value %v instead of expected value %v", instanceGroup.Size, size) + } + + return nil + } +} + +func testAccComputeInstanceGroup_named_ports(t *testing.T, n string, np map[string]int64, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + instanceGroup, err := config.NewComputeClient(config.UserAgent).InstanceGroups.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + var found bool + for _, namedPort := range instanceGroup.NamedPorts { + found = false + for name, port := range np { + if namedPort.Name == name && namedPort.Port == port { + found = true + } + } + if !found { + return fmt.Errorf("named port incorrect") + } + } + + return nil + } +} + +func testAccComputeInstanceGroup_hasCorrectNetwork(t *testing.T, nInstanceGroup string, nNetwork string, instanceGroup *compute.InstanceGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + rsInstanceGroup, ok := s.RootModule().Resources[nInstanceGroup] + if !ok { + return fmt.Errorf("Not found: %s", nInstanceGroup) + } + if rsInstanceGroup.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + instanceGroup, err := config.NewComputeClient(config.UserAgent).InstanceGroups.Get( + config.Project, rsInstanceGroup.Primary.Attributes["zone"], rsInstanceGroup.Primary.Attributes["name"]).Do() + if err != nil { + 
return err + } + + rsNetwork, ok := s.RootModule().Resources[nNetwork] + if !ok { + return fmt.Errorf("Not found: %s", nNetwork) + } + if rsNetwork.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + network, err := config.NewComputeClient(config.UserAgent).Networks.Get( + config.Project, rsNetwork.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + if instanceGroup.Network != network.SelfLink { + return fmt.Errorf("network incorrect: actual=%s vs expected=%s", instanceGroup.Network, network.SelfLink) + } + + return nil + } +} + +func testAccComputeInstanceGroup_basic(zone, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "ig_instance" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + zone = "us-central1-c" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_group" "basic" { + description = "Terraform test instance group" + name = "%s" + zone = "%s" + instances = [google_compute_instance.ig_instance.id] + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } +} + +resource "google_compute_instance_group" "empty" { + description = "Terraform test instance group empty" + name = "%s-empty" + zone = "%s" + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } +} +`, instance, instance, zone, instance, zone) +} + +func testAccComputeInstanceGroup_rename(instance, instanceGroup, backend, health string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "ig_instance" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + zone = 
"us-central1-c" + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_group" "basic" { + name = "%s" + zone = "us-central1-c" + instances = [google_compute_instance.ig_instance.self_link] + named_port { + name = "http" + port = "8080" + } + + named_port { + name = "https" + port = "8443" + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_backend_service" "default_backend" { + name = "%s" + port_name = "https" + protocol = "HTTPS" + + backend { + group = google_compute_instance_group.basic.self_link + } + + health_checks = [ + google_compute_https_health_check.healthcheck.self_link, + ] +} + +resource "google_compute_https_health_check" "healthcheck" { + name = "%s" + request_path = "/health_check" +} +`, instance, instanceGroup, backend, health) +} + +func testAccComputeInstanceGroup_update(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "ig_instance" { + name = "%s-${count.index}" + machine_type = "e2-medium" + can_ip_forward = false + zone = "us-central1-c" + count = 2 + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_group" "update" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = google_compute_instance.ig_instance.*.self_link + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } +} +`, instance, instance) +} + +// Change IGM's instance template and target size +func testAccComputeInstanceGroup_update2(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project 
= "debian-cloud" +} + +resource "google_compute_instance" "ig_instance" { + name = "%s-${count.index}" + machine_type = "e2-medium" + can_ip_forward = false + zone = "us-central1-c" + count = 1 + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_group" "update" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = google_compute_instance.ig_instance.*.self_link + + named_port { + name = "http" + port = "8081" + } + named_port { + name = "test" + port = "8444" + } +} +`, instance, instance) +} + +func testAccComputeInstanceGroup_outOfOrderInstances(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "ig_instance" { + name = "%s-1" + machine_type = "e2-medium" + can_ip_forward = false + zone = "us-central1-c" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance" "ig_instance_2" { + name = "%s-2" + machine_type = "e2-medium" + can_ip_forward = false + zone = "us-central1-c" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance_group" "group" { + description = "Terraform test instance group" + name = "%s" + zone = "us-central1-c" + instances = [google_compute_instance.ig_instance_2.self_link, google_compute_instance.ig_instance.self_link] + named_port { + name = "http" + port = "8080" + } + named_port { + name = "https" + port = "8443" + } +} +`, instance, instance, instance) +} + +func testAccComputeInstanceGroup_network(instance string) string { + return fmt.Sprintf(` +data 
"google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "ig_network" { + name = "%[1]s" + auto_create_subnetworks = true +} + +resource "google_compute_instance" "ig_instance" { + name = "%[1]s" + machine_type = "e2-medium" + can_ip_forward = false + zone = "us-central1-c" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.ig_network.name + } +} + +resource "google_compute_instance_group" "with_instance" { + description = "Terraform test instance group" + name = "%[1]s-with-instance" + zone = "us-central1-c" + instances = [google_compute_instance.ig_instance.self_link] +} + +resource "google_compute_instance_group" "without_instance" { + description = "Terraform test instance group" + name = "%[1]s-without-instance" + zone = "us-central1-c" + network = google_compute_network.ig_network.self_link +} +`, instance) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_migrate.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_migrate.go.tmpl new file mode 100644 index 000000000000..701298e7c4df --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_migrate.go.tmpl @@ -0,0 +1,529 @@ +package compute + +import ( + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeInstanceMigrateState( + v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if is.Empty() { + log.Println("[DEBUG] 
Empty InstanceState; nothing to migrate.") + return is, nil + } + + var err error + + switch v { + case 0: + log.Println("[INFO] Found Compute Instance State v0; migrating to v1") + is, err = migrateStateV0toV1(is) + if err != nil { + return is, err + } + fallthrough + case 1: + log.Println("[INFO] Found Compute Instance State v1; migrating to v2") + is, err = migrateStateV1toV2(is) + if err != nil { + return is, err + } + fallthrough + case 2: + log.Println("[INFO] Found Compute Instance State v2; migrating to v3") + is, err = migrateStateV2toV3(is) + if err != nil { + return is, err + } + fallthrough + case 3: + log.Println("[INFO] Found Compute Instance State v3; migrating to v4") + is, err = migrateStateV3toV4(is, meta) + if err != nil { + return is, err + } + fallthrough + case 4: + log.Println("[INFO] Found Compute Instance State v4; migrating to v5") + is, err = migrateStateV4toV5(is, meta) + if err != nil { + return is, err + } + fallthrough + case 5: + log.Println("[INFO] Found Compute Instance State v5; migrating to v6") + is, err = migrateStateV5toV6(is) + if err != nil { + return is, err + } + // when adding case 6, make sure to turn this into a fallthrough + return is, err + default: + return is, fmt.Errorf("Unexpected schema version: %d", v) + } +} + +func migrateStateV0toV1(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Delete old count + delete(is.Attributes, "metadata.#") + + newMetadata := make(map[string]string) + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "metadata.") { + continue + } + + // We have a key that looks like "metadata.*" and we know it's not + // metadata.# because we deleted it above, so it must be metadata.. + // from the List of Maps. Just need to convert it to a single Map by + // ditching the '' field. 
+ kParts := strings.SplitN(k, ".", 3) + + // Sanity check: all three parts should be there and should be a number + badFormat := false + if len(kParts) != 3 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found metadata key in unexpected format: %s", k) + } + + // Rejoin as "metadata." + newK := strings.Join([]string{kParts[0], kParts[2]}, ".") + newMetadata[newK] = v + delete(is.Attributes, k) + } + + for k, v := range newMetadata { + is.Attributes[k] = v + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV1toV2(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Maps service account index to list of scopes for that account + newScopesMap := make(map[string][]string) + + for k, v := range is.Attributes { + if !strings.HasPrefix(k, "service_account.") { + continue + } + + if k == "service_account.#" { + continue + } + + if strings.HasSuffix(k, ".scopes.#") { + continue + } + + if strings.HasSuffix(k, ".email") { + continue + } + + // Key is now of the form service_account.%d.scopes.%d + kParts := strings.Split(k, ".") + + // Sanity check: all three parts should be there and should be a number + badFormat := false + if len(kParts) != 4 { + badFormat = true + } else if _, err := strconv.Atoi(kParts[1]); err != nil { + badFormat = true + } + + if badFormat { + return is, fmt.Errorf( + "migration error: found scope key in unexpected format: %s", k) + } + + newScopesMap[kParts[1]] = append(newScopesMap[kParts[1]], v) + + delete(is.Attributes, k) + } + + for service_acct_index, newScopes := range newScopesMap { + for _, newScope := range newScopes { + hash := tpgresource.Hashcode(tpgresource.CanonicalizeServiceScope(newScope)) + newKey := fmt.Sprintf("service_account.%s.scopes.%d", 
service_acct_index, hash) + is.Attributes[newKey] = newScope + } + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV2toV3(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + is.Attributes["create_timeout"] = "4" + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV3toV4(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + + // Read instance from GCP. Since disks are not necessarily returned from the API in the order they were set, + // we have no other way to know which source belongs to which attached disk. + // Also note that the following code modifies the returned instance- if you need immutability, please change + // this to make a copy of the needed data. + config := meta.(*transport_tpg.Config) + instance, err := getInstanceFromInstanceState(config, is) + if err != nil { + return is, fmt.Errorf("migration error: %s", err) + } + diskList, err := getAllDisksFromInstanceState(config, is) + if err != nil { + return is, fmt.Errorf("migration error: %s", err) + } + allDisks := make(map[string]*compute.Disk) + for _, disk := range diskList { + allDisks[disk.Name] = disk + } + + hasBootDisk := is.Attributes["boot_disk.#"] == "1" + + scratchDisks := 0 + if v := is.Attributes["scratch_disk.#"]; v != "" { + scratchDisks, err = strconv.Atoi(v) + if err != nil { + return is, fmt.Errorf("migration error: found scratch_disk.# value in unexpected format: %s", err) + } + } + + attachedDisks := 0 + if v := is.Attributes["attached_disk.#"]; v != "" { + attachedDisks, err = strconv.Atoi(v) + if err != nil { + return is, fmt.Errorf("migration error: found attached_disk.# value in unexpected format: %s", err) + } + } + + disks := 0 + if v := is.Attributes["disk.#"]; v 
!= "" { + disks, err = strconv.Atoi(is.Attributes["disk.#"]) + if err != nil { + return is, fmt.Errorf("migration error: found disk.# value in unexpected format: %s", err) + } + } + + for i := 0; i < disks; i++ { + if !hasBootDisk && i == 0 { + is.Attributes["boot_disk.#"] = "1" + + // Note: the GCP API does not allow for scratch disks to be boot disks, so this situation + // should never occur. + if is.Attributes["disk.0.scratch_disk"] == "true" { + return is, fmt.Errorf("migration error: found scratch disk at index 0") + } + + for _, disk := range instance.Disks { + if disk.Boot { + is.Attributes["boot_disk.0.source"] = tpgresource.GetResourceNameFromSelfLink(disk.Source) + is.Attributes["boot_disk.0.device_name"] = disk.DeviceName + break + } + } + is.Attributes["boot_disk.0.auto_delete"] = is.Attributes["disk.0.auto_delete"] + is.Attributes["boot_disk.0.disk_encryption_key_raw"] = is.Attributes["disk.0.disk_encryption_key_raw"] + is.Attributes["boot_disk.0.disk_encryption_key_sha256"] = is.Attributes["disk.0.disk_encryption_key_sha256"] + + if is.Attributes["disk.0.size"] != "" && is.Attributes["disk.0.size"] != "0" { + is.Attributes["boot_disk.0.initialize_params.#"] = "1" + is.Attributes["boot_disk.0.initialize_params.0.size"] = is.Attributes["disk.0.size"] + } + if is.Attributes["disk.0.type"] != "" { + is.Attributes["boot_disk.0.initialize_params.#"] = "1" + is.Attributes["boot_disk.0.initialize_params.0.type"] = is.Attributes["disk.0.type"] + } + if is.Attributes["disk.0.image"] != "" { + is.Attributes["boot_disk.0.initialize_params.#"] = "1" + is.Attributes["boot_disk.0.initialize_params.0.image"] = is.Attributes["disk.0.image"] + } + } else if is.Attributes[fmt.Sprintf("disk.%d.scratch", i)] == "true" { + // Note: the GCP API does not allow for scratch disks without auto_delete, so this situation + // should never occur. 
+ if is.Attributes[fmt.Sprintf("disk.%d.auto_delete", i)] != "true" { + return is, fmt.Errorf("migration error: attempted to migrate scratch disk where auto_delete is not true") + } + + is.Attributes[fmt.Sprintf("scratch_disk.%d.interface", scratchDisks)] = "SCSI" + + scratchDisks++ + } else { + // If disk is neither boot nor scratch, then it is attached. + + disk, err := getDiskFromAttributes(config, instance, allDisks, is.Attributes, i) + if err != nil { + return is, fmt.Errorf("migration error: %s", err) + } + + is.Attributes[fmt.Sprintf("attached_disk.%d.source", attachedDisks)] = disk.Source + is.Attributes[fmt.Sprintf("attached_disk.%d.device_name", attachedDisks)] = disk.DeviceName + is.Attributes[fmt.Sprintf("attached_disk.%d.disk_encryption_key_raw", attachedDisks)] = is.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)] + is.Attributes[fmt.Sprintf("attached_disk.%d.disk_encryption_key_sha256", attachedDisks)] = is.Attributes[fmt.Sprintf("disk.%d.disk_encryption_key_sha256", i)] + + attachedDisks++ + } + } + + for k := range is.Attributes { + if !strings.HasPrefix(k, "disk.") { + continue + } + + delete(is.Attributes, k) + } + if scratchDisks > 0 { + is.Attributes["scratch_disk.#"] = strconv.Itoa(scratchDisks) + } + if attachedDisks > 0 { + is.Attributes["attached_disk.#"] = strconv.Itoa(attachedDisks) + } + + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} + +func migrateStateV4toV5(is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { + if v := is.Attributes["disk.#"]; v != "" { + return migrateStateV3toV4(is, meta) + } + return is, nil +} + +func getInstanceFromInstanceState(config *transport_tpg.Config, is *terraform.InstanceState) (*compute.Instance, error) { + project, ok := is.Attributes["project"] + if !ok { + if config.Project == "" { + return nil, fmt.Errorf("could not determine 'project'") + } else { + project = config.Project + } + } + + zone, ok := 
is.Attributes["zone"] + if !ok { + if config.Zone == "" { + return nil, fmt.Errorf("could not determine 'zone'") + } else { + zone = config.Zone + } + } + + instance, err := config.NewComputeClient(config.UserAgent).Instances.Get( + project, zone, is.ID).Do() + if err != nil { + return nil, fmt.Errorf("error reading instance: %s", err) + } + + return instance, nil +} + +func getAllDisksFromInstanceState(config *transport_tpg.Config, is *terraform.InstanceState) ([]*compute.Disk, error) { + project, ok := is.Attributes["project"] + if !ok { + if config.Project == "" { + return nil, fmt.Errorf("could not determine 'project'") + } else { + project = config.Project + } + } + + zone, ok := is.Attributes["zone"] + if !ok { + if config.Zone == "" { + return nil, fmt.Errorf("could not determine 'zone'") + } else { + zone = config.Zone + } + } + + diskList := []*compute.Disk{} + token := "" + for { + disks, err := config.NewComputeClient(config.UserAgent).Disks.List(project, zone).PageToken(token).Do() + if err != nil { + return nil, fmt.Errorf("error reading disks: %s", err) + } + diskList = append(diskList, disks.Items...) 
+ token = disks.NextPageToken + if token == "" { + break + } + } + + return diskList, nil +} + +func getDiskFromAttributes(config *transport_tpg.Config, instance *compute.Instance, allDisks map[string]*compute.Disk, attributes map[string]string, i int) (*compute.AttachedDisk, error) { + if diskSource := attributes[fmt.Sprintf("disk.%d.disk", i)]; diskSource != "" { + return getDiskFromSource(instance, diskSource) + } + + if deviceName := attributes[fmt.Sprintf("disk.%d.device_name", i)]; deviceName != "" { + return getDiskFromDeviceName(instance, deviceName) + } + + if encryptionKey := attributes[fmt.Sprintf("disk.%d.disk_encryption_key_raw", i)]; encryptionKey != "" { + return getDiskFromEncryptionKey(instance, encryptionKey) + } + + autoDelete, err := strconv.ParseBool(attributes[fmt.Sprintf("disk.%d.auto_delete", i)]) + if err != nil { + return nil, fmt.Errorf("error parsing auto_delete attribute of disk %d", i) + } + image := attributes[fmt.Sprintf("disk.%d.image", i)] + + // We know project and zone are set because we used them to read the instance + project, ok := attributes["project"] + if !ok { + project = config.Project + } + zone := attributes["zone"] + return getDiskFromAutoDeleteAndImage(config, instance, allDisks, autoDelete, image, project, zone) +} + +func getDiskFromSource(instance *compute.Instance, source string) (*compute.AttachedDisk, error) { + for _, disk := range instance.Disks { + if disk.Boot || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + // we can just compare suffixes because terraform only allows setting "disk" by name and uses + // the zone of the instance so we know there can be no duplicate names. 
+ if strings.HasSuffix(disk.Source, "/"+source) { + return disk, nil + } + } + return nil, fmt.Errorf("could not find attached disk with source %q", source) +} + +func getDiskFromDeviceName(instance *compute.Instance, deviceName string) (*compute.AttachedDisk, error) { + for _, disk := range instance.Disks { + if disk.Boot || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + if disk.DeviceName == deviceName { + return disk, nil + } + } + return nil, fmt.Errorf("could not find attached disk with deviceName %q", deviceName) +} + +func getDiskFromEncryptionKey(instance *compute.Instance, encryptionKey string) (*compute.AttachedDisk, error) { + encryptionSha, err := hash256(encryptionKey) + if err != nil { + return nil, err + } + for _, disk := range instance.Disks { + if disk.Boot || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + if disk.DiskEncryptionKey.Sha256 == encryptionSha { + return disk, nil + } + } + return nil, fmt.Errorf("could not find attached disk with encryption hash %q", encryptionSha) +} + +func getDiskFromAutoDeleteAndImage(config *transport_tpg.Config, instance *compute.Instance, allDisks map[string]*compute.Disk, autoDelete bool, image, project, zone string) (*compute.AttachedDisk, error) { + img, err := ResolveImage(config, project, image, config.UserAgent) + if err != nil { + return nil, err + } + imgParts := strings.Split(img, "/projects/") + canonicalImage := imgParts[len(imgParts)-1] + + for i, disk := range instance.Disks { + if disk.Boot || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + if disk.AutoDelete == autoDelete { + // Read the disk to check if its image matches + fullDisk := allDisks[tpgresource.GetResourceNameFromSelfLink(disk.Source)] + sourceImage, err := tpgresource.GetRelativePath(fullDisk.SourceImage) + if err != nil { 
+ return nil, err + } + if canonicalImage == sourceImage { + // Delete this disk because there might be multiple that match + instance.Disks = append(instance.Disks[:i], instance.Disks[i+1:]...) + return disk, nil + } + } + } + + // We're not done! It's possible the disk was created with an image family rather than the image itself. + // Now, do the exact same iteration but do some prefix matching to check if the families match. + // This assumes that all disks with a given family have a sourceImage whose name starts with the name of + // the image family. + canonicalImage = strings.Replace(canonicalImage, "/family/", "/", -1) + for i, disk := range instance.Disks { + if disk.Boot || disk.Type == "SCRATCH" { + // Ignore boot/scratch disks since this is just for finding attached disks + continue + } + if disk.AutoDelete == autoDelete { + // Read the disk to check if its image matches + fullDisk := allDisks[tpgresource.GetResourceNameFromSelfLink(disk.Source)] + sourceImage, err := tpgresource.GetRelativePath(fullDisk.SourceImage) + if err != nil { + return nil, err + } + + if strings.Contains(sourceImage, "/"+canonicalImage+"-") { + // Delete this disk because there might be multiple that match + instance.Disks = append(instance.Disks[:i], instance.Disks[i+1:]...) 
+ return disk, nil + } + } + } + + return nil, fmt.Errorf("could not find attached disk with image %q", image) +} + +func migrateStateV5toV6(is *terraform.InstanceState) (*terraform.InstanceState, error) { + log.Printf("[DEBUG] Attributes before migration: %#v", is.Attributes) + if is.Attributes["boot_disk.0.initialize_params.#"] == "1" { + if (is.Attributes["boot_disk.0.initialize_params.0.size"] == "0" || + is.Attributes["boot_disk.0.initialize_params.0.size"] == "") && + is.Attributes["boot_disk.0.initialize_params.0.type"] == "" && + is.Attributes["boot_disk.0.initialize_params.0.image"] == "" { + is.Attributes["boot_disk.0.initialize_params.#"] = "0" + delete(is.Attributes, "boot_disk.0.initialize_params.0.size") + delete(is.Attributes, "boot_disk.0.initialize_params.0.type") + delete(is.Attributes, "boot_disk.0.initialize_params.0.image") + } + } + log.Printf("[DEBUG] Attributes after migration: %#v", is.Attributes) + return is, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_migrate_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_migrate_test.go.tmpl new file mode 100644 index 000000000000..24b4216c3610 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_migrate_test.go.tmpl @@ -0,0 +1,962 @@ +package compute_test + +import ( + "context" + "fmt" + "log" + "os" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccComputeInstanceMigrateState(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + cases := map[string]struct { + StateVersion int + Attributes map[string]string + Expected map[string]string + }{ + "v0.4.2 and earlier": { + StateVersion: 0, + Attributes: map[string]string{ + "disk.#": "0", + "metadata.#": "2", + "metadata.0.foo": "bar", + "metadata.1.baz": "qux", + "metadata.2.with.dots": "should.work", + }, + Expected: map[string]string{ + "create_timeout": "4", + "metadata.foo": "bar", + "metadata.baz": "qux", + "metadata.with.dots": "should.work", + }, + }, + "change scope from list to set": { + StateVersion: 1, + Attributes: map[string]string{ + "service_account.#": "1", + "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", + "service_account.0.scopes.#": "4", + "service_account.0.scopes.0": "https://www.googleapis.com/auth/compute", + "service_account.0.scopes.1": "https://www.googleapis.com/auth/datastore", + "service_account.0.scopes.2": "https://www.googleapis.com/auth/devstorage.full_control", + "service_account.0.scopes.3": "https://www.googleapis.com/auth/logging.write", + }, + Expected: map[string]string{ + "create_timeout": "4", + "service_account.#": "1", + "service_account.0.email": "xxxxxx-compute@developer.gserviceaccount.com", + "service_account.0.scopes.#": "4", + "service_account.0.scopes.1693978638": "https://www.googleapis.com/auth/devstorage.full_control", + "service_account.0.scopes.172152165": "https://www.googleapis.com/auth/logging.write", + "service_account.0.scopes.299962681": "https://www.googleapis.com/auth/compute", + "service_account.0.scopes.3435931483": "https://www.googleapis.com/auth/datastore", + }, + }, + "add new create_timeout attribute": { + StateVersion: 2, + Attributes: map[string]string{}, + Expected: map[string]string{ + 
"create_timeout": "4", + }, + }, + "remove empty initialize_params": { + StateVersion: 5, + Attributes: map[string]string{ + "boot_disk.0.initialize_params.#": "1", + "boot_disk.0.initialize_params.0.size": "0", + }, + Expected: map[string]string{ + "boot_disk.0.initialize_params.#": "0", + }, + }, + } + + config := getInitializedConfig(t) + + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + }, + MachineType: "zones/" + config.Zone + "/machineTypes/e2-medium", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err := config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, config.Zone, instance).Do() + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, config.Zone) + + for tn, tc := range cases { + runInstanceMigrateTest(t, instanceName, tn, tc.StateVersion, tc.Attributes, tc.Expected, config) + } +} + +func TestAccComputeInstanceMigrateState_empty(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + var is *terraform.InstanceState + var meta interface{} + + // should handle nil + is, err := tpgcompute.ResourceComputeInstanceMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } + if is != nil { + t.Fatalf("expected nil instancestate, got: %#v", is) + } + + // should handle non-nil but empty + is = &terraform.InstanceState{} + _, err = 
tpgcompute.ResourceComputeInstanceMigrateState(0, is, meta) + + if err != nil { + t.Fatalf("err: %#v", err) + } +} + +func TestAccComputeInstanceMigrateState_bootDisk(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + // Seed test data + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + }, + MachineType: "zones/" + zone + "/machineTypes/e2-medium", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err := config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + attributes := map[string]string{ + "disk.#": "1", + "disk.0.disk": "disk-1", + "disk.0.type": "pd-ssd", + "disk.0.auto_delete": "true", + "disk.0.size": "12", + "disk.0.device_name": "persistent-disk-0", + "disk.0.disk_encryption_key_raw": "encrypt-key", + "disk.0.disk_encryption_key_sha256": "encrypt-key-sha", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "boot_disk.0.auto_delete": "true", + "boot_disk.0.device_name": "persistent-disk-0", + "boot_disk.0.disk_encryption_key_raw": "encrypt-key", + "boot_disk.0.disk_encryption_key_sha256": "encrypt-key-sha", + "boot_disk.0.initialize_params.#": "1", + 
"boot_disk.0.initialize_params.0.size": "12", + "boot_disk.0.initialize_params.0.type": "pd-ssd", + "boot_disk.0.source": instanceName, + "zone": zone, + "create_timeout": "4", + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to boot disk", 2 /* state version */, attributes, expected, config) +} + +func TestAccComputeInstanceMigrateState_v4FixBootDisk(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + // Seed test data + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + }, + MachineType: "zones/" + zone + "/machineTypes/e2-medium", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err := config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + attributes := map[string]string{ + "disk.#": "1", + "disk.0.disk": "disk-1", + "disk.0.type": "pd-ssd", + "disk.0.auto_delete": "true", + "disk.0.size": "12", + "disk.0.device_name": "persistent-disk-0", + "disk.0.disk_encryption_key_raw": "encrypt-key", + "disk.0.disk_encryption_key_sha256": "encrypt-key-sha", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "boot_disk.0.auto_delete": "true", + "boot_disk.0.device_name": 
"persistent-disk-0", + "boot_disk.0.disk_encryption_key_raw": "encrypt-key", + "boot_disk.0.disk_encryption_key_sha256": "encrypt-key-sha", + "boot_disk.0.initialize_params.#": "1", + "boot_disk.0.initialize_params.0.size": "12", + "boot_disk.0.initialize_params.0.type": "pd-ssd", + "boot_disk.0.source": instanceName, + "zone": zone, + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to boot disk", 4 /* state version */, attributes, expected, config) +} + +func TestAccComputeInstanceMigrateState_attachedDiskFromSource(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + // Seed test data + diskName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + disk := &compute.Disk{ + Name: diskName, + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + Zone: zone, + } + op, err := config.NewComputeClient(config.UserAgent).Disks.Insert(config.Project, zone, disk).Do() + if err != nil { + t.Fatalf("Error creating disk: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "disk to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpDisk(config, diskName, zone) + + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + Source: "projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName, + }, + }, + MachineType: "zones/" + zone + "/machineTypes/e2-medium", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err = 
config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr = tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + attributes := map[string]string{ + "boot_disk.#": "1", + "disk.#": "1", + "disk.0.disk": diskName, + "disk.0.device_name": "persistent-disk-1", + "disk.0.disk_encryption_key_raw": "encrypt-key", + "disk.0.disk_encryption_key_sha256": "encrypt-key-sha", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "attached_disk.#": "1", + "attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName, + "attached_disk.0.device_name": "persistent-disk-1", + "attached_disk.0.disk_encryption_key_raw": "encrypt-key", + "attached_disk.0.disk_encryption_key_sha256": "encrypt-key-sha", + "zone": zone, + "create_timeout": "4", + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 2 /* state version */, attributes, expected, config) +} + +func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromSource(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + // Seed test data + diskName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + disk := &compute.Disk{ + Name: diskName, + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + Zone: zone, + } + op, err := config.NewComputeClient(config.UserAgent).Disks.Insert(config.Project, zone, disk).Do() + if err != nil { + t.Fatalf("Error creating disk: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, 
config.Project, "disk to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpDisk(config, diskName, zone) + + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + Source: "projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName, + }, + }, + MachineType: "zones/" + zone + "/machineTypes/e2-medium", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err = config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr = tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + attributes := map[string]string{ + "boot_disk.#": "1", + "disk.#": "1", + "disk.0.disk": diskName, + "disk.0.device_name": "persistent-disk-1", + "disk.0.disk_encryption_key_raw": "encrypt-key", + "disk.0.disk_encryption_key_sha256": "encrypt-key-sha", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "attached_disk.#": "1", + "attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + diskName, + "attached_disk.0.device_name": "persistent-disk-1", + "attached_disk.0.disk_encryption_key_raw": "encrypt-key", + "attached_disk.0.disk_encryption_key_sha256": "encrypt-key-sha", + "zone": zone, + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 4 /* state version */, attributes, expected, config) +} 
+ +func TestAccComputeInstanceMigrateState_attachedDiskFromEncryptionKey(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + DiskEncryptionKey: &compute.CustomerEncryptionKey{ + RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", + }, + }, + }, + MachineType: "zones/" + zone + "/machineTypes/e2-medium", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err := config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + attributes := map[string]string{ + "boot_disk.#": "1", + "disk.#": "1", + "disk.0.image": "projects/debian-cloud/global/images/family/debian-11", + "disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", + "disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "attached_disk.#": "1", + "attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + 
config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1", + "attached_disk.0.device_name": "persistent-disk-1", + "attached_disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", + "attached_disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=", + "zone": zone, + "create_timeout": "4", + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 2 /* state version */, attributes, expected, config) +} + +func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromEncryptionKey(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + DiskEncryptionKey: &compute.CustomerEncryptionKey{ + RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", + }, + }, + }, + MachineType: "zones/" + zone + "/machineTypes/e2-medium", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err := config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + 
attributes := map[string]string{ + "boot_disk.#": "1", + "disk.#": "1", + "disk.0.image": "projects/debian-cloud/global/images/family/debian-11", + "disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", + "disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "attached_disk.#": "1", + "attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1", + "attached_disk.0.device_name": "persistent-disk-1", + "attached_disk.0.disk_encryption_key_raw": "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=", + "attached_disk.0.disk_encryption_key_sha256": "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=", + "zone": zone, + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 4 /* state version */, attributes, expected, config) +} + +func TestAccComputeInstanceMigrateState_attachedDiskFromAutoDeleteAndImage(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/debian-11-bullseye-v20220719", + }, + }, + }, + MachineType: "zones/" + zone + 
"/machineTypes/e2-medium", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err := config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + attributes := map[string]string{ + "boot_disk.#": "1", + "disk.#": "2", + "disk.0.image": "projects/debian-cloud/global/images/debian-11-bullseye-v20220719", + "disk.0.auto_delete": "true", + "disk.1.image": "global/images/family/debian-11", + "disk.1.auto_delete": "true", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "attached_disk.#": "2", + "attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-2", + "attached_disk.0.device_name": "persistent-disk-2", + "attached_disk.1.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1", + "attached_disk.1.device_name": "persistent-disk-1", + "zone": zone, + "create_timeout": "4", + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 2 /* state version */, attributes, expected, config) +} + +func TestAccComputeInstanceMigrateState_v4FixAttachedDiskFromAutoDeleteAndImage(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + 
AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/debian-11-bullseye-v20220719", + }, + }, + }, + MachineType: "zones/" + zone + "/machineTypes/e2-medium", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err := config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + attributes := map[string]string{ + "boot_disk.#": "1", + "disk.#": "2", + "disk.0.image": "projects/debian-cloud/global/images/debian-11-bullseye-v20220719", + "disk.0.auto_delete": "true", + "disk.1.image": "global/images/family/debian-11", + "disk.1.auto_delete": "true", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "attached_disk.#": "2", + "attached_disk.0.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-2", + "attached_disk.0.device_name": "persistent-disk-2", + "attached_disk.1.source": "https://www.googleapis.com/compute/v1/projects/" + config.Project + "/zones/" + zone + "/disks/" + instanceName + "-1", + "attached_disk.1.device_name": "persistent-disk-1", + "zone": zone, + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to attached disk", 4 /* state version */, attributes, 
expected, config) +} + +func TestAccComputeInstanceMigrateState_scratchDisk(t *testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + // Seed test data + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + AutoDelete: true, + Type: "SCRATCH", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskType: "zones/" + zone + "/diskTypes/local-ssd", + }, + }, + }, + // can't be e2 because of local-ssd + MachineType: "zones/" + zone + "/machineTypes/n1-standard-1", + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err := config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + attributes := map[string]string{ + "boot_disk.#": "1", + "disk.#": "1", + "disk.0.auto_delete": "true", + "disk.0.type": "local-ssd", + "disk.0.scratch": "true", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "scratch_disk.#": "1", + "scratch_disk.0.interface": "SCSI", + "zone": zone, + "create_timeout": "4", + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to scratch disk", 2 /* state version */, attributes, expected, config) +} + +func TestAccComputeInstanceMigrateState_v4FixScratchDisk(t 
*testing.T) { + t.Parallel() + + if os.Getenv(envvar.TestEnvVar) == "" { + t.Skipf("Network access not allowed; use %s=1 to enable", envvar.TestEnvVar) + } + config := getInitializedConfig(t) + zone := "us-central1-f" + + // Seed test data + instanceName := fmt.Sprintf("instance-test-%s", acctest.RandString(t, 10)) + instance := &compute.Instance{ + Name: instanceName, + Disks: []*compute.AttachedDisk{ + { + Boot: true, + AutoDelete: true, + InitializeParams: &compute.AttachedDiskInitializeParams{ + SourceImage: "projects/debian-cloud/global/images/family/debian-11", + }, + }, + { + AutoDelete: true, + Type: "SCRATCH", + InitializeParams: &compute.AttachedDiskInitializeParams{ + DiskType: "zones/" + zone + "/diskTypes/local-ssd", + }, + }, + }, + MachineType: "zones/" + zone + "/machineTypes/n1-standard-1", // can't be e2 because of local-ssd + NetworkInterfaces: []*compute.NetworkInterface{ + { + Network: "global/networks/default", + }, + }, + } + op, err := config.NewComputeClient(config.UserAgent).Instances.Insert(config.Project, zone, instance).Do() + if err != nil { + t.Fatalf("Error creating instance: %s", err) + } + waitErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to create", config.UserAgent, 4*time.Minute) + if waitErr != nil { + t.Fatal(waitErr) + } + defer cleanUpInstance(config, instanceName, zone) + + attributes := map[string]string{ + "boot_disk.#": "1", + "disk.#": "1", + "disk.0.auto_delete": "true", + "disk.0.type": "local-ssd", + "disk.0.scratch": "true", + "zone": zone, + } + expected := map[string]string{ + "boot_disk.#": "1", + "scratch_disk.#": "1", + "scratch_disk.0.interface": "SCSI", + "zone": zone, + } + + runInstanceMigrateTest(t, instanceName, "migrate disk to scratch disk", 4 /* state version */, attributes, expected, config) +} + +func runInstanceMigrateTest(t *testing.T, id, testName string, version int, attributes, expected map[string]string, meta interface{}) { + is := &terraform.InstanceState{ + 
ID: id, + Attributes: attributes, + } + _, err := tpgcompute.ResourceComputeInstanceMigrateState(version, is, meta) + if err != nil { + t.Fatal(err) + } + + for k, v := range expected { + // source is the only self link, so compare by relpaths if source is being + // compared + if strings.HasSuffix(k, "source") { + if !tpgresource.CompareSelfLinkOrResourceName("", attributes[k], v, nil) && attributes[k] != v { + t.Fatalf( + "bad uri: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + testName, k, expected[k], k, attributes[k], attributes) + } + } else { + if attributes[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + testName, k, expected[k], k, attributes[k], attributes) + } + } + } + + for k, v := range attributes { + // source is the only self link, so compare by relpaths if source is being + // compared + if strings.HasSuffix(k, "source") { + if !tpgresource.CompareSelfLinkOrResourceName("", expected[k], v, nil) && expected[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + testName, k, expected[k], k, attributes[k], expected) + } + } else { + if expected[k] != v { + t.Fatalf( + "bad: %s\n\n expected: %#v -> %#v\n got: %#v -> %#v\n in: %#v", + testName, k, expected[k], k, attributes[k], expected) + } + } + } +} + +func cleanUpInstance(config *transport_tpg.Config, instanceName, zone string) { + op, err := config.NewComputeClient(config.UserAgent).Instances.Delete(config.Project, zone, instanceName).Do() + if err != nil { + log.Printf("[WARNING] Error deleting instance %q, dangling resources may exist: %s", instanceName, err) + return + } + + // Wait for the operation to complete + opErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "instance to delete", config.UserAgent, 4*time.Minute) + if opErr != nil { + log.Printf("[WARNING] Error deleting instance %q, dangling resources may exist: %s", instanceName, opErr) + } +} + +func cleanUpDisk(config 
*transport_tpg.Config, diskName, zone string) { + op, err := config.NewComputeClient(config.UserAgent).Disks.Delete(config.Project, zone, diskName).Do() + if err != nil { + log.Printf("[WARNING] Error deleting disk %q, dangling resources may exist: %s", diskName, err) + return + } + + // Wait for the operation to complete + opErr := tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "disk to delete", config.UserAgent, 4*time.Minute) + if opErr != nil { + log.Printf("[WARNING] Error deleting disk %q, dangling resources may exist: %s", diskName, opErr) + } +} + +func getInitializedConfig(t *testing.T) *transport_tpg.Config { + // Migrate tests are non standard and handle the config directly + acctest.SkipIfVcr(t) + // Check that all required environment variables are set + acctest.AccTestPreCheck(t) + + config := &transport_tpg.Config{ + Project: envvar.GetTestProjectFromEnv(), + Credentials: envvar.GetTestCredsFromEnv(), + Region: envvar.GetTestRegionFromEnv(), + Zone: envvar.GetTestZoneFromEnv(), + } + + transport_tpg.ConfigureBasePaths(config) + + err := config.LoadAndValidate(context.Background()) + if err != nil { + t.Fatal(err) + } + return config +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go new file mode 100644 index 000000000000..a4064516cf40 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_settings_test.go @@ -0,0 +1,98 @@ +package compute_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeInstanceSettings_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceSettingsDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceSettings_basic(context), + }, + { + ResourceName: "google_compute_instance_settings.gce_instance_settings", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"zone"}, + }, + { + Config: testAccComputeInstanceSettings_update(context), + }, + { + ResourceName: "google_compute_instance_settings.gce_instance_settings", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"zone"}, + }, + { + Config: testAccComputeInstanceSettings_delete(context), + }, + { + ResourceName: "google_compute_instance_settings.gce_instance_settings", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"zone"}, + }, + }, + }) +} + +func testAccComputeInstanceSettings_basic(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_compute_instance_settings" "gce_instance_settings" { + zone = "us-east7-b" + metadata { + items = { + foo = "baz" + } + } +} + +`, context) +} + +func testAccComputeInstanceSettings_update(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_compute_instance_settings" "gce_instance_settings" { + zone = "us-east7-b" + metadata { + items = { + foo = "bar" + baz = "qux" + } + } +} + +`, context) +} + +func testAccComputeInstanceSettings_delete(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_compute_instance_settings" "gce_instance_settings" { + zone = "us-east7-b" + metadata { + items = { + baz = "qux" + } + } +} + +`, context) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl new file mode 100644 
index 000000000000..cd2c9d8bfaf9 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl @@ -0,0 +1,1978 @@ + +package compute + +import ( + "context" + "fmt" + "reflect" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +var ( + schedulingInstTemplateKeys = []string{ + "scheduling.0.on_host_maintenance", + "scheduling.0.automatic_restart", + "scheduling.0.preemptible", + "scheduling.0.node_affinities", + "scheduling.0.min_node_cpus", + "scheduling.0.provisioning_model", + "scheduling.0.instance_termination_action", +{{- if ne $.TargetVersionName "ga" }} + "scheduling.0.max_run_duration", + "scheduling.0.maintenance_interval", + "scheduling.0.on_instance_stop_action", +{{- end }} + "scheduling.0.local_ssd_recovery_timeout", + } + + shieldedInstanceTemplateConfigKeys = []string{ + "shielded_instance_config.0.enable_secure_boot", + "shielded_instance_config.0.enable_vtpm", + "shielded_instance_config.0.enable_integrity_monitoring", + } +) + +var DEFAULT_SCRATCH_DISK_SIZE_GB = 375 +var VALID_SCRATCH_DISK_SIZES_GB [2]int = [2]int{375, 3000} + +func ResourceComputeInstanceTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceTemplateCreate, + Read: resourceComputeInstanceTemplateRead, + Update: resourceComputeInstanceTemplateUpdate, + Delete: 
resourceComputeInstanceTemplateDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeInstanceTemplateImportState, + }, + SchemaVersion: 1, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + resourceComputeInstanceTemplateSourceImageCustomizeDiff, + resourceComputeInstanceTemplateScratchDiskCustomizeDiff, + resourceComputeInstanceTemplateBootDiskCustomizeDiff, + tpgresource.SetLabelsDiff, + ), + MigrateState: resourceComputeInstanceTemplateMigrateState, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + // A compute instance template is more or less a subset of a compute + // instance. Please attempt to maintain consistency with the + // resource_compute_instance schema when updating this one. + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: verify.ValidateGCEName, + Description: `The name of the instance template. If you leave this blank, Terraform will auto-generate a unique name.`, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Creates a unique name beginning with the specified prefix. Conflicts with name.`, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource + // uuid is 26 characters, limit the prefix to 37. + value := v.(string) + if len(value) > 37 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 37 characters, name is limited to 63", k)) + } + return + }, + }, + + "disk": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Disks to attach to instances created from this template. 
This can be specified multiple times for multiple disks.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + Description: `Whether or not the disk should be auto-deleted. This defaults to true.`, + }, + + "boot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + Description: `Indicates that this is a boot disk.`, + }, + + "device_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk.`, + }, + + "disk_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the disk. When not provided, this defaults to the name of the instance.`, + }, + + "disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The size of the image in gigabytes. If not specified, it will inherit the size of its base image. For SCRATCH disks, the size must be one of 375 or 3000 GB, with a default of 375 GB.`, + }, + + "disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The Google Compute Engine disk type. Such as "pd-ssd", "local-ssd", "pd-balanced" or "pd-standard".`, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `A set of key/value label pairs to assign to disks,`, + }, + + "provisioned_iops": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. 
For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk).`, + }, + + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, + }, + + "source_image": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The image from which to initialize this disk. This can be one of: the image's self_link, projects/{project}/global/images/{image}, projects/{project}/global/images/family/{family}, global/images/{image}, global/images/family/{family}, family/{family}, {project}/{family}, {project}/{image}, {family}, or {image}. ~> Note: Either source or source_image is required when creating a new instance except for when creating a local SSD.`, + }, + "source_image_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The customer-supplied encryption key of the source +image. Required if the source image is protected by a +customer-supplied encryption key. + +Instance templates do not store customer-supplied +encryption keys, so you cannot create disks for +instances in a managed instance group if the source +images are encrypted with your own keys.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account being used for the encryption +request for the given KMS key. 
If absent, the Compute +Engine default service account is used.`, + }, + "kms_key_self_link": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The self link of the encryption key that is stored in +Google Cloud KMS.`, + }, + }, + }, + }, + "source_snapshot": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The source snapshot to create this disk. When creating +a new instance, one of initializeParams.sourceSnapshot, +initializeParams.sourceImage, or disks.source is +required except for local SSD.`, + }, + "source_snapshot_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The customer-supplied encryption key of the source snapshot.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account being used for the encryption +request for the given KMS key. If absent, the Compute +Engine default service account is used.`, + }, + "kms_key_self_link": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The self link of the encryption key that is stored in +Google Cloud KMS.`, + }, + }, + }, + }, + + "interface": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `Specifies the disk interface to use for attaching this disk.`, + }, + + "mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If you are attaching or creating a boot disk, this must read-write mode.`, + }, + + "source": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name (not self_link) of the disk (such as those managed by google_compute_disk) to attach. 
~> Note: Either source or source_image is required when creating a new instance except for when creating a local SSD.`, + }, + + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The type of Google Compute Engine disk, can be either "SCRATCH" or "PERSISTENT".`, + }, + + "disk_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `Encrypts or decrypts a disk using a customer-supplied encryption key.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_self_link": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The self link of the encryption key that is stored in Google Cloud KMS.`, + }, + }, + }, + }, + + "resource_policies": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `A list (short name or id) of resource policies to attach to this disk. Currently a max of 1 resource policy is supported.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareResourceNames, + }, + }, + }, + }, + }, + + "machine_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The machine type to create. To create a machine with a custom type (such as extended memory), format the value like custom-VCPUS-MEM_IN_MB like custom-6-20480 for 6 vCPU and 20GB of RAM.`, + }, + + "can_ip_forward": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + Description: `Whether to allow sending and receiving of packets with non-matching source or destination IPs. 
This defaults to false.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A brief description of this resource.`, + }, + +{{ if ne $.TargetVersionName `ga` -}} + "enable_display": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Enable Virtual Displays on this instance. Note: allow_stopping_for_update must be set to true in order to update this field.`, + }, +{{- end }} + + "instance_description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A description of the instance.`, + }, + + "metadata": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Metadata key/value pairs to make available from within instances created from this template.`, + }, + + "metadata_startup_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An alternative to using the startup-script metadata key, mostly to match the compute_instance resource. This replaces the startup-script metadata key on the created instance and thus the two mechanisms are not allowed to be used simultaneously.`, + }, + {{- if ne $.TargetVersionName "ga" }} + "partner_metadata": { + Type: schema.TypeMap, + Optional: true, + DiffSuppressFunc: ComparePartnerMetadataDiff, + DiffSuppressOnRefresh: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Partner Metadata Map made available within the instance.`, + }, + {{- end }} + + "metadata_fingerprint": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The unique fingerprint of the metadata.`, + }, + "network_performance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Configures network performance settings for the instance. 
If not specified, the instance will be created with its default network performance configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_egress_bandwidth_tier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"TIER_1", "DEFAULT"}, false), + Description: `The egress bandwidth tier to enable. Possible values:TIER_1, DEFAULT`, + }, + }, + }, + }, + "network_interface": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Networks to attach to instances created from this template. This can be specified multiple times for multiple networks.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the network to attach this interface to. Use network attribute for Legacy or Auto subnetted networks and subnetwork for custom subnetted networks.`, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the subnetwork to attach this interface to. The subnetwork must exist in the same region this instance will be created in. 
Either network or subnetwork must be provided.`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "network_attachment": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}.`, + }, + {{- end }} + + "subnetwork_project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The ID of the project in which the subnetwork belongs. If it is not provided, the provider project is used.`, + }, + + "network_ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The private IP address to assign to the instance. If empty, the address will be automatically assigned.`, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The name of the network_interface.`, + }, + "nic_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"GVNIC", "VIRTIO_NET"}, false), + Description: `The type of vNIC to be used on this interface. Possible values:GVNIC, VIRTIO_NET`, + }, + "access_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Access configurations, i.e. IPs via which this instance can be accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet (this means that ssh provisioners will not work unless you are running Terraform can send traffic to the instance's network (e.g. via tunnel or because it is running on another cloud instance on that network). 
This block can be repeated multiple times.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The IP address that will be 1:1 mapped to the instance's network ip. If not given, one will be generated.`, + }, + "network_tier": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The networking tier used for configuring this instance template. This field can take the following values: PREMIUM, STANDARD, FIXED_STANDARD. If this field is not specified, it is assumed to be PREMIUM.`, + }, + // Possibly configurable- this was added so we don't break if it's inadvertently set + "public_ptr_domain_name": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The DNS domain name for the public PTR record.The DNS domain name for the public PTR record.`, + }, + }, + }, + }, + + "alias_ip_range": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_cidr_range": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, + Description: `The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. At the time of writing only a netmask (e.g. /24) may be supplied, with a CIDR format resulting in an API error.`, + }, + "subnetwork_range_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. 
If left unspecified, the primary range of the subnetwork will be used.`, + }, + }, + }, + }, + + "stack_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"IPV4_ONLY", "IPV4_IPV6", ""}, false), + Description: `The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used.`, + }, + + "ipv6_access_type": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork.`, + }, + + "ipv6_access_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_tier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The service-level to be provided for IPv6 traffic when the subnet has an external subnet. Only PREMIUM tier is valid for IPv6`, + }, + // Possibly configurable- this was added so we don't break if it's inadvertently set + // (assuming the same ass access config) + "public_ptr_domain_name": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The domain name to be used when creating DNSv6 records for the external IPv6 ranges.`, + }, + "external_ipv6": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. 
The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.`, + }, + "external_ipv6_prefix_length": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The prefix length of the external IPv6 range.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The name of this access configuration.`, + }, + }, + }, + }, + "internal_ipv6_prefix_length": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The prefix length of the primary internal IPv6 range.`, + }, + "ipv6_address": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork.`, + }, + "queue_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. It will be empty if not specified.`, + }, + }, + }, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `An instance template is a global resource that is not bound to a zone or a region. However, you can still specify some regional resources in an instance template, which restricts the template to the region where that resource resides. For example, a custom subnetwork resource is tied to a specific region. 
Defaults to the region of the Provider if no value is given.`, + }, + + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `A map of resource manager tags. + Resource manager tag keys and values have the same definition as resource manager tags. + Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. + The field is ignored (both PUT & PATCH) when empty.`, + }, + + "scheduling": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `The scheduling strategy to use.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preemptible": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Default: false, + ForceNew: true, + Description: `Allows instance to be preempted. This defaults to false.`, + }, + + "automatic_restart": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Default: true, + ForceNew: true, + Description: `Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). 
This defaults to true.`, + }, + + "on_host_maintenance": { + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: schedulingInstTemplateKeys, + ForceNew: true, + Description: `Defines the maintenance behavior for this instance.`, + }, + + "node_affinities": { + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + ForceNew: true, + Elem: instanceSchedulingNodeAffinitiesElemSchema(), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), + Description: `Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems.`, + }, + "min_node_cpus": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Description: `Minimum number of cpus for the instance.`, + }, + "provisioning_model": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Description: `Whether the instance is spot. If this is set as SPOT.`, + }, + "instance_termination_action": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Description: `Specifies the action GCE should take when SPOT VM is preempted.`, + }, +{{- if ne $.TargetVersionName "ga" }} + "max_run_duration" : { + Type: schema.TypeList, + Optional: true, + Description: `The timeout for new network connections to hosts.`, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "seconds": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Span of time at a resolution of a second. +Must be from 0 to 315,576,000,000 inclusive.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Span of time that's a fraction of a second at nanosecond +resolution. 
Durations less than one second are represented +with a 0 seconds field and a positive nanos field. Must +be from 0 to 999,999,999 inclusive.`, + }, + }, + }, + }, + "on_instance_stop_action": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Description: `Defines the behaviour for instances with the instance_termination_action.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "discard_local_ssd": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, the contents of any attached Local SSD disks will be discarded.`, + Default: false, + ForceNew: true, + }, + }, + }, + }, + "maintenance_interval" : { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC`, + }, +{{- end }} + "local_ssd_recovery_timeout" : { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specifies the maximum amount of time a Local Ssd Vm should wait while + recovery of the Local Ssd state is attempted. Its value should be in + between 0 and 168 hours with hour granularity and the default value being 1 + hour.`, + + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "seconds": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Span of time at a resolution of a second. +Must be from 0 to 315,576,000,000 inclusive.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Span of time that's a fraction of a second at nanosecond +resolution. Durations less than one second are represented +with a 0 seconds field and a positive nanos field. 
Must +be from 0 to 999,999,999 inclusive.`, + }, + }, + }, + }, + }, + }, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The URI of the created resource.`, + }, + + "self_link_unique": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `A special URI of the created resource that uniquely identifies this instance template.`, + }, + + "service_account": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Service account to attach to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The service account e-mail address. If not given, the default Google Compute Engine service account is used.`, + }, + + "scopes": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Description: `A list of service scopes. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return tpgresource.CanonicalizeServiceScope(v.(string)) + }, + }, + Set: tpgresource.StringScopeHashcode, + }, + }, + }, + }, + + "shielded_instance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Enable Shielded VM on this instance. Shielded VM provides verifiable integrity to prevent against malware and rootkits. Defaults to disabled. Note: shielded_instance_config can only be used with boot images with shielded vm support.`, + // Since this block is used by the API based on which + // image being used, the field needs to be marked as Computed. 
+ Computed: true, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: false, + ForceNew: true, + Description: `Verify the digital signature of all boot components, and halt the boot process if signature verification fails. Defaults to false.`, + }, + + "enable_vtpm": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: true, + ForceNew: true, + Description: `Use a virtualized trusted platform module, which is a specialized computer chip you can use to encrypt objects like keys and certificates. Defaults to true.`, + }, + + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: true, + ForceNew: true, + Description: `Compare the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. Defaults to true.`, + }, + }, + }, + }, + "confidential_instance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The Confidential VM config being used by the instance. on_host_maintenance has to be set to TERMINATE or this will fail to create.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + {{- if eq $.TargetVersionName "ga" }} + "enable_confidential_compute": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Defines whether the instance should have confidential compute enabled.`, + }, + {{- else }} + "enable_confidential_compute": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Defines whether the instance should have confidential compute enabled. 
Field will be deprecated in a future release.`, + AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, + }, + "confidential_instance_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: ` + Specifies which confidential computing technology to use. + This could be one of the following values: SEV, SEV_SNP. + If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, + AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, + }, + {{- end }} + }, + }, + }, + "advanced_machine_features": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Controls for advanced machine-related behavior features.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_nested_virtualization": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + Description: `Whether to enable nested virtualization or not.`, + }, + "threads_per_core": { + Type: schema.TypeInt, + Optional: true, + Computed: false, + ForceNew: true, + Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, + }, + "visible_core_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. 
If unset, the number of cores is inferred from the instance\'s nominal CPU count and the underlying platform\'s SMT width.`, + }, + }, + }, + }, + "guest_accelerator": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `List of the type and count of accelerator cards attached to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The number of the guest accelerator cards exposed to this instance.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The accelerator type resource to expose to this instance. E.g. nvidia-tesla-k80.`, + }, + }, + }, + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a minimum CPU platform. Applicable values are the friendly names of CPU platforms, such as Intel Haswell or Intel Skylake.`, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `Tags to attach to the instance.`, + }, + + "tags_fingerprint": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The unique fingerprint of the tags.`, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `A set of key/value label pairs to assign to instances created from this template. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+ Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Set: schema.HashString, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Set: schema.HashString, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "resource_policies": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `A list of self_links of resource policies to attach to the instance. Currently a max of 1 resource policy is supported.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareResourceNames, + }, + }, + + "reservation_affinity": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Specifies the reservations that this instance can consume from.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"ANY_RESERVATION", "SPECIFIC_RESERVATION", "NO_RESERVATION"}, false), + Description: `The type of reservation from which this instance can consume resources.`, + }, + + "specific_reservation": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Specifies the label selector for the reservation to use.`, + + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Corresponds to the label key of a reservation resource. 
To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value.`, + }, + "values": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + ForceNew: true, + Description: `Corresponds to the label values of a reservation resource.`, + }, + }, + }, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeInstanceTemplateSourceImageCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + numDisks := diff.Get("disk.#").(int) + for i := 0; i < numDisks; i++ { + key := fmt.Sprintf("disk.%d.source_image", i) + if diff.HasChange(key) { + var err error + old, new := diff.GetChange(key) + if old == "" || new == "" { + continue + } + // project must be retrieved once we know there is a diff to resolve, otherwise it will + // attempt to retrieve project during `plan` before all calculated fields are ready + // see https://github.com/hashicorp/terraform-provider-google/issues/2878 + project, err := tpgresource.GetProjectFromDiff(diff, config) + if err != nil { + return err + } + oldResolved, err := ResolveImage(config, project, old.(string), config.UserAgent) + if err != nil { + return err + } + oldResolved, err = resolveImageRefToRelativeURI(project, oldResolved) + if err != nil { + return err + } + newResolved, err := ResolveImage(config, project, new.(string), config.UserAgent) + if err != nil { + return err + } + newResolved, err = resolveImageRefToRelativeURI(project, newResolved) + if err != nil { + return err + } + if oldResolved != newResolved { + continue + } + err = diff.Clear(key) + if err != nil { + return err + } + } + } + return nil +} + +func resourceComputeInstanceTemplateScratchDiskCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + // separate func to allow unit testing + return 
resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff) +} + +func resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { + numDisks := diff.Get("disk.#").(int) + for i := 0; i < numDisks; i++ { + // misspelled on purpose, type is a special symbol + typee := diff.Get(fmt.Sprintf("disk.%d.type", i)).(string) + diskType := diff.Get(fmt.Sprintf("disk.%d.disk_type", i)).(string) + if typee == "SCRATCH" && diskType != "local-ssd" { + return fmt.Errorf("SCRATCH disks must have a disk_type of local-ssd. disk %d has disk_type %s", i, diskType) + } + + if diskType == "local-ssd" && typee != "SCRATCH" { + return fmt.Errorf("disks with a disk_type of local-ssd must be SCRATCH disks. disk %d is a %s disk", i, typee) + } + + diskSize := diff.Get(fmt.Sprintf("disk.%d.disk_size_gb", i)).(int) + if typee == "SCRATCH" && !(diskSize == 375 || diskSize == 3000) { // see VALID_SCRATCH_DISK_SIZES_GB + return fmt.Errorf("SCRATCH disks must be one of %v GB, disk %d is %d", VALID_SCRATCH_DISK_SIZES_GB, i, diskSize) + } + + interfacee := diff.Get(fmt.Sprintf("disk.%d.interface", i)).(string) + if typee == "SCRATCH" && diskSize == 3000 && interfacee != "NVME" { + return fmt.Errorf("SCRATCH disks with a size of 3000 GB must have an interface of NVME. disk %d has interface %s", i, interfacee) + } + } + + return nil +} + +func resourceComputeInstanceTemplateBootDiskCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + numDisks := diff.Get("disk.#").(int) + // No disk except the first can be the boot disk + for i := 1; i < numDisks; i++ { + key := fmt.Sprintf("disk.%d.boot", i) + if v, ok := diff.GetOk(key); ok { + if v.(bool) { + return fmt.Errorf("Only the first disk specified in instance_template can be the boot disk. 
%s was true", key) + } + } + } + return nil +} + +func buildDisks(d *schema.ResourceData, config *transport_tpg.Config) ([]*compute.AttachedDisk, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + disksCount := d.Get("disk.#").(int) + + disks := make([]*compute.AttachedDisk, 0, disksCount) + for i := 0; i < disksCount; i++ { + prefix := fmt.Sprintf("disk.%d", i) + + // Build the disk + var disk compute.AttachedDisk + disk.Type = "PERSISTENT" + disk.Mode = "READ_WRITE" + disk.Interface = "SCSI" + disk.Boot = i == 0 + disk.AutoDelete = d.Get(prefix + ".auto_delete").(bool) + + if v, ok := d.GetOk(prefix + ".boot"); ok { + disk.Boot = v.(bool) + } + + if v, ok := d.GetOk(prefix + ".device_name"); ok { + disk.DeviceName = v.(string) + } + + if _, ok := d.GetOk(prefix + ".disk_encryption_key"); ok { + disk.DiskEncryptionKey = &compute.CustomerEncryptionKey{} + if v, ok := d.GetOk(prefix + ".disk_encryption_key.0.kms_key_self_link"); ok { + disk.DiskEncryptionKey.KmsKeyName = v.(string) + } + } + // Assign disk.DiskSizeGb and disk.InitializeParams.DiskSizeGb the same value + if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { + disk.DiskSizeGb = int64(v.(int)) + } + if v, ok := d.GetOk(prefix + ".source"); ok { + disk.Source = v.(string) + conflicts := []string{"disk_size_gb", "disk_name", "disk_type", "provisioned_iops", "source_image", "source_snapshot", "labels"} + for _, conflict := range conflicts { + if _, ok := d.GetOk(prefix + "." 
+ conflict); ok { + return nil, fmt.Errorf("Cannot use `source` with any of the fields in %s", conflicts) + } + } + } else { + disk.InitializeParams = &compute.AttachedDiskInitializeParams{} + + if v, ok := d.GetOk(prefix + ".disk_name"); ok { + disk.InitializeParams.DiskName = v.(string) + } + // Assign disk.DiskSizeGb and disk.InitializeParams.DiskSizeGb the same value + if v, ok := d.GetOk(prefix + ".disk_size_gb"); ok { + disk.InitializeParams.DiskSizeGb = int64(v.(int)) + } + disk.InitializeParams.DiskType = "pd-standard" + if v, ok := d.GetOk(prefix + ".disk_type"); ok { + disk.InitializeParams.DiskType = v.(string) + } + if v, ok := d.GetOk(prefix + ".provisioned_iops"); ok { + disk.InitializeParams.ProvisionedIops = int64(v.(int)) + } + if _, ok := d.GetOk(prefix + ".resource_manager_tags"); ok { + disk.InitializeParams.ResourceManagerTags = tpgresource.ExpandStringMap(d, prefix + ".resource_manager_tags") + } + disk.InitializeParams.Labels = tpgresource.ExpandStringMap(d, prefix+".labels") + + if v, ok := d.GetOk(prefix + ".source_image"); ok { + imageName := v.(string) + imageUrl, err := ResolveImage(config, project, imageName, userAgent) + if err != nil { + return nil, fmt.Errorf( + "Error resolving image name '%s': %s", + imageName, err) + } + disk.InitializeParams.SourceImage = imageUrl + } + + if _, ok := d.GetOk(prefix + ".source_image_encryption_key"); ok { + disk.InitializeParams.SourceImageEncryptionKey = &compute.CustomerEncryptionKey{} + if v, ok := d.GetOk(prefix + ".source_image_encryption_key.0.kms_key_self_link"); ok { + disk.InitializeParams.SourceImageEncryptionKey.KmsKeyName = v.(string) + } + if v, ok := d.GetOk(prefix + ".source_image_encryption_key.0.kms_key_service_account"); ok { + disk.InitializeParams.SourceImageEncryptionKey.KmsKeyServiceAccount = v.(string) + } + } + + if v, ok := d.GetOk(prefix + ".source_snapshot"); ok { + disk.InitializeParams.SourceSnapshot = v.(string) + } + + if _, ok := d.GetOk(prefix + 
".source_snapshot_encryption_key"); ok { + disk.InitializeParams.SourceSnapshotEncryptionKey = &compute.CustomerEncryptionKey{} + if v, ok := d.GetOk(prefix + ".source_snapshot_encryption_key.0.kms_key_self_link"); ok { + disk.InitializeParams.SourceSnapshotEncryptionKey.KmsKeyName = v.(string) + } + if v, ok := d.GetOk(prefix + ".source_snapshot_encryption_key.0.kms_key_service_account"); ok { + disk.InitializeParams.SourceSnapshotEncryptionKey.KmsKeyServiceAccount = v.(string) + } + } + + if _, ok := d.GetOk(prefix + ".resource_policies"); ok { + // instance template only supports a resource name here (not uri) + disk.InitializeParams.ResourcePolicies = expandInstanceTemplateResourcePolicies(d, prefix + ".resource_policies") + } + } + + if v, ok := d.GetOk(prefix + ".interface"); ok { + disk.Interface = v.(string) + } + + if v, ok := d.GetOk(prefix + ".mode"); ok { + disk.Mode = v.(string) + } + + if v, ok := d.GetOk(prefix + ".type"); ok { + disk.Type = v.(string) + } + + disks = append(disks, &disk) + } + + return disks, nil +} + +// We don't share this code with compute instances because instances want a +// partial URL, but instance templates want the bare accelerator name (despite +// the docs saying otherwise). +// +// Using a partial URL on an instance template results in: +// Invalid value for field 'resource.properties.guestAccelerators[0].acceleratorType': +// 'zones/us-east1-b/acceleratorTypes/nvidia-tesla-k80'. +// Accelerator type 'zones/us-east1-b/acceleratorTypes/nvidia-tesla-k80' +// must be a valid resource name (not an url). 
+func expandInstanceTemplateGuestAccelerators(d tpgresource.TerraformResourceData, config *transport_tpg.Config) []*compute.AcceleratorConfig { + configs, ok := d.GetOk("guest_accelerator") + if !ok { + return nil + } + accels := configs.([]interface{}) + guestAccelerators := make([]*compute.AcceleratorConfig, 0, len(accels)) + for _, raw := range accels { + data := raw.(map[string]interface{}) + if data["count"].(int) == 0 { + continue + } + guestAccelerators = append(guestAccelerators, &compute.AcceleratorConfig{ + AcceleratorCount: int64(data["count"].(int)), + // We can't use ParseAcceleratorFieldValue here because an instance + // template does not have a zone we can use. + AcceleratorType: data["type"].(string), + }) + } + + return guestAccelerators +} + +func expandInstanceTemplateResourcePolicies(d tpgresource.TerraformResourceData, dataKey string) []string { + return tpgresource.ConvertAndMapStringArr(d.Get(dataKey).([]interface{}), tpgresource.GetResourceNameFromSelfLink) +} + +func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + disks, err := buildDisks(d, config) + if err != nil { + return err + } + + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return err + } + {{- if ne $.TargetVersionName "ga" }} + PartnerMetadata, err := resourceInstancePartnerMetadata(d) + if err != nil { + return err + } + {{- end }} + + networks, err := expandNetworkInterfaces(d, config) + if err != nil { + return err + } + + scheduling, err := expandResourceComputeInstanceTemplateScheduling(d, config) + if err != nil { + return err + } + networkPerformanceConfig, err := expandNetworkPerformanceConfig(d, config) + if err != nil { + return nil + } + reservationAffinity, err := 
expandReservationAffinity(d) + if err != nil { + return err + } + resourcePolicies := expandInstanceTemplateResourcePolicies(d, "resource_policies") + + instanceProperties := &compute.InstanceProperties{ + CanIpForward: d.Get("can_ip_forward").(bool), + Description: d.Get("instance_description").(string), + GuestAccelerators: expandInstanceTemplateGuestAccelerators(d, config), + MachineType: d.Get("machine_type").(string), + MinCpuPlatform: d.Get("min_cpu_platform").(string), + Disks: disks, + Metadata: metadata, + {{- if ne $.TargetVersionName "ga" }} + PartnerMetadata: PartnerMetadata, + {{- end }} + NetworkInterfaces: networks, + NetworkPerformanceConfig: networkPerformanceConfig, + Scheduling: scheduling, + ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), + Tags: resourceInstanceTags(d), + ConfidentialInstanceConfig: expandConfidentialInstanceConfig(d), + ShieldedInstanceConfig: expandShieldedVmConfigs(d), + AdvancedMachineFeatures: expandAdvancedMachineFeatures(d), +{{- if ne $.TargetVersionName "ga" }} + DisplayDevice: expandDisplayDevice(d), +{{- end }} + ResourcePolicies: resourcePolicies, + ReservationAffinity: reservationAffinity, + } + + if _, ok := d.GetOk("effective_labels"); ok { + instanceProperties.Labels = tpgresource.ExpandEffectiveLabels(d) + } + + if _, ok := d.GetOk("resource_manager_tags"); ok { + instanceProperties.ResourceManagerTags = tpgresource.ExpandStringMap(d, "resource_manager_tags") + } + + var itName string + if v, ok := d.GetOk("name"); ok { + itName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + itName = id.PrefixedUniqueId(v.(string)) + } else { + itName = id.UniqueId() + } + instanceTemplate := &compute.InstanceTemplate{ + Description: d.Get("description").(string), + Properties: instanceProperties, + Name: itName, + } + + op, err := config.NewComputeClient(userAgent).InstanceTemplates.Insert(project, instanceTemplate).Do() + if err != nil { + return fmt.Errorf("Error creating 
instance template: %s", err) + } + + // Store the ID now + d.SetId(fmt.Sprintf("projects/%s/global/instanceTemplates/%s", project, instanceTemplate.Name)) + // And also the unique ID + d.Set("self_link_unique", fmt.Sprintf("%v?uniqueId=%v", d.Id(), op.TargetId)) + + err = ComputeOperationWaitTime(config, op, project, "Creating Instance Template", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + return resourceComputeInstanceTemplateRead(d, meta) +} + +func resourceComputeInstanceTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the field "labels" and "terraform_labels" is mutable + return resourceComputeInstanceTemplateRead(d, meta) +} + +type diskCharacteristics struct { + mode string + diskType string + diskSizeGb string + autoDelete bool + sourceImage string + provisionedIops string +} + +func diskCharacteristicsFromMap(m map[string]interface{}) diskCharacteristics { + dc := diskCharacteristics{} + if v := m["mode"]; v == nil || v.(string) == "" { + // mode has an apply-time default of READ_WRITE + dc.mode = "READ_WRITE" + } else { + dc.mode = v.(string) + } + + if v := m["disk_type"]; v != nil { + dc.diskType = v.(string) + } + + if v := m["disk_size_gb"]; v != nil { + // Terraform and GCP return ints as different types (int vs int64), so just + // use strings to compare for simplicity. + dc.diskSizeGb = fmt.Sprintf("%v", v) + } + + if v := m["auto_delete"]; v != nil { + dc.autoDelete = v.(bool) + } + + if v := m["source_image"]; v != nil { + dc.sourceImage = v.(string) + } + + if v := m["provisioned_iops"]; v != nil { + // Terraform and GCP return ints as different types (int vs int64), so just + // use strings to compare for simplicity. 
+ dc.provisionedIops = fmt.Sprintf("%v", v) + } + + return dc +} + +func flattenDisk(disk *compute.AttachedDisk, configDisk map[string]any, defaultProject string) (map[string]interface{}, error) { + diskMap := make(map[string]interface{}) + + + // These values are not returned by the API, so we copy them from the config. + diskMap["source_image_encryption_key"] = configDisk["source_image_encryption_key"] + diskMap["source_snapshot"] = configDisk["source_snapshot"] + diskMap["source_snapshot_encryption_key"] = configDisk["source_snapshot_encryption_key"] + + if disk.InitializeParams != nil { + if disk.InitializeParams.SourceImage != "" { + path, err := resolveImageRefToRelativeURI(defaultProject, disk.InitializeParams.SourceImage) + if err != nil { + return nil, errwrap.Wrapf("Error expanding source image input to relative URI: {{"{{"}}err{{"}}"}}", err) + } + diskMap["source_image"] = path + } else { + diskMap["source_image"] = "" + } + diskMap["disk_type"] = disk.InitializeParams.DiskType + diskMap["provisioned_iops"] = disk.InitializeParams.ProvisionedIops + diskMap["disk_name"] = disk.InitializeParams.DiskName + diskMap["labels"] = disk.InitializeParams.Labels + // The API does not return a disk size value for scratch disks. They are largely only one size, + // so we can assume that size here. Prefer disk.DiskSizeGb over the deprecated + // disk.InitializeParams.DiskSizeGb. 
+ if disk.DiskSizeGb == 0 && disk.InitializeParams.DiskSizeGb == 0 && disk.Type == "SCRATCH" { + diskMap["disk_size_gb"] = DEFAULT_SCRATCH_DISK_SIZE_GB + } else if disk.DiskSizeGb != 0 { + diskMap["disk_size_gb"] = disk.DiskSizeGb + } else { + diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb + } + diskMap["resource_policies"] = disk.InitializeParams.ResourcePolicies + diskMap["resource_manager_tags"] = disk.InitializeParams.ResourceManagerTags + } + + if disk.DiskEncryptionKey != nil { + encryption := make([]map[string]interface{}, 1) + encryption[0] = make(map[string]interface{}) + encryption[0]["kms_key_self_link"] = disk.DiskEncryptionKey.KmsKeyName + diskMap["disk_encryption_key"] = encryption + } + + diskMap["auto_delete"] = disk.AutoDelete + diskMap["boot"] = disk.Boot + diskMap["device_name"] = disk.DeviceName + diskMap["interface"] = disk.Interface + diskMap["source"] = tpgresource.ConvertSelfLinkToV1(disk.Source) + diskMap["mode"] = disk.Mode + diskMap["type"] = disk.Type + + return diskMap, nil +} + +func reorderDisks(configDisks []interface{}, apiDisks []map[string]interface{}) []map[string]interface{} { + if len(apiDisks) != len(configDisks) { + // There are different numbers of disks in state and returned from the API, so it's not + // worth trying to reorder them since it'll be a diff anyway. + return apiDisks + } + + result := make([]map[string]interface{}, len(apiDisks)) + + /* + Disks aren't necessarily returned from the API in the same order they were sent, so gather + information about the ones in state that we can use to map it back. We can't do this by + just looping over all of the disks, because you could end up matching things in the wrong + order. 
For example, if the config disks contain the following disks: + disk 1: auto delete = false, size = 10 + disk 2: auto delete = false, size = 10, device name = "disk 2" + disk 3: type = scratch + And the disks returned from the API are: + disk a: auto delete = false, size = 10, device name = "disk 2" + disk b: auto delete = false, size = 10, device name = "disk 1" + disk c: type = scratch + Then disk a will match disk 1, disk b won't match any disk, and c will match 3, making the + final order a, c, b, which is wrong. To get disk a to match disk 2, we have to go in order + of fields most specifically able to identify a disk to least. + */ + disksByDeviceName := map[string]int{} + scratchDisksByInterface := map[string][]int{} + attachedDisksBySource := map[string]int{} + attachedDisksByDiskName := map[string]int{} + attachedDisksByCharacteristics := []int{} + + for i, d := range configDisks { + if i == 0 { + // boot disk + continue + } + disk := d.(map[string]interface{}) + if v := disk["device_name"]; v.(string) != "" { + disksByDeviceName[v.(string)] = i + } else if v := disk["type"]; v.(string) == "SCRATCH" { + iface := disk["interface"].(string) + if iface == "" { + // apply-time default + iface = "SCSI" + } + scratchDisksByInterface[iface] = append(scratchDisksByInterface[iface], i) + } else if v := disk["source"]; v.(string) != "" { + attachedDisksBySource[v.(string)] = i + } else if v := disk["disk_name"]; v.(string) != "" { + attachedDisksByDiskName[v.(string)] = i + } else { + attachedDisksByCharacteristics = append(attachedDisksByCharacteristics, i) + } + } + + // Align the disks, going from the most specific criteria to the least. + for _, apiDisk := range apiDisks { + // 1. This resource only works if the boot disk is the first one (which should be fixed + // separately), so put the boot disk first. + if apiDisk["boot"].(bool) { + result[0] = apiDisk + + // 2. 
All disks have a unique device name + } else if i, ok := disksByDeviceName[apiDisk["device_name"].(string)]; ok { + result[i] = apiDisk + + // 3. Scratch disks are all the same except device name and interface, so match them by + // interface. + } else if apiDisk["type"].(string) == "SCRATCH" { + iface := apiDisk["interface"].(string) + indexes := scratchDisksByInterface[iface] + if len(indexes) > 0 { + result[indexes[0]] = apiDisk + scratchDisksByInterface[iface] = indexes[1:] + } else { + result = append(result, apiDisk) + } + + // 4. Each attached disk will have a different source, so match by that. + } else if i, ok := attachedDisksBySource[apiDisk["source"].(string)]; ok { + result[i] = apiDisk + + // 5. If a disk was created for this resource via initializeParams, it will have a + // unique name. + } else if v, ok := apiDisk["disk_name"]; ok && attachedDisksByDiskName[v.(string)] != 0 { + result[attachedDisksByDiskName[v.(string)]] = apiDisk + + // 6. If no unique keys exist on this disk, then use a combination of its remaining + // characteristics to see whether it matches exactly. + } else { + found := false + for arrayIndex, i := range attachedDisksByCharacteristics { + configDisk := configDisks[i].(map[string]interface{}) + stateDc := diskCharacteristicsFromMap(configDisk) + readDc := diskCharacteristicsFromMap(apiDisk) + if reflect.DeepEqual(stateDc, readDc) { + result[i] = apiDisk + attachedDisksByCharacteristics = append(attachedDisksByCharacteristics[:arrayIndex], attachedDisksByCharacteristics[arrayIndex+1:]...) 
+ found = true + break + } + } + if !found { + result = append(result, apiDisk) + } + } + } + + // Remove nils from map in case there were disks that could not be matched + ds := []map[string]interface{}{} + for _, d := range result { + if d != nil { + ds = append(ds, d) + } + } + return ds +} + +func flattenDisks(disks []*compute.AttachedDisk, d *schema.ResourceData, defaultProject string) ([]map[string]interface{}, error) { + apiDisks := make([]map[string]interface{}, len(disks)) + + for i, disk := range disks { + configDisk := d.Get(fmt.Sprintf("disk.%d", i)).(map[string]any) + apiDisk, err := flattenDisk(disk, configDisk, defaultProject) + if err != nil { + return nil, err + } + apiDisks[i] = apiDisk + } + + return reorderDisks(d.Get("disk").([]interface{}), apiDisks), nil +} + +func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + idStr := d.Id() + if v, ok := d.GetOk("self_link_unique"); ok && v != "" { + idStr = ConvertToUniqueIdWhenPresent(v.(string)) + } + + splits := strings.Split(idStr, "/") + {{- if eq $.TargetVersionName "ga" }} + instanceTemplate, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, splits[len(splits)-1]).Do() + {{- else }} + instanceTemplate, err := config.NewComputeClient(userAgent).InstanceTemplates.Get(project, splits[len(splits)-1]).View("FULL").Do() + {{- end }} + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Instance Template %q", d.Get("name").(string))) + } + // Set the metadata fingerprint if there is one. 
+ if instanceTemplate.Properties.Metadata != nil { + if err = d.Set("metadata_fingerprint", instanceTemplate.Properties.Metadata.Fingerprint); err != nil { + return fmt.Errorf("Error setting metadata_fingerprint: %s", err) + } + + md := instanceTemplate.Properties.Metadata + + _md := flattenMetadataBeta(md) + + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + if err = d.Set("metadata_startup_script", script); err != nil { + return fmt.Errorf("Error setting metadata_startup_script: %s", err) + } + + delete(_md, "startup-script") + } + + if err = d.Set("metadata", _md); err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + } + + {{ if ne $.TargetVersionName `ga` -}} + if instanceTemplate.Properties.PartnerMetadata != nil { + partnerMetadata, err := flattenPartnerMetadata(instanceTemplate.Properties.PartnerMetadata) + if err != nil { + return fmt.Errorf("Error parsing partner metadata: %s", err) + } + if err = d.Set("partner_metadata", partnerMetadata); err != nil { + return fmt.Errorf("Error setting partner metadata: %s", err) + } + } + {{- end }} + + // Set the tags fingerprint if there is one. 
+ if instanceTemplate.Properties.Tags != nil { + if err = d.Set("tags_fingerprint", instanceTemplate.Properties.Tags.Fingerprint); err != nil { + return fmt.Errorf("Error setting tags_fingerprint: %s", err) + } + } else { + if err := d.Set("tags_fingerprint", ""); err != nil { + return fmt.Errorf("Error setting tags_fingerprint: %s", err) + } + } + if instanceTemplate.Properties.Labels != nil { + if err := tpgresource.SetLabels(instanceTemplate.Properties.Labels, d, "labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + } + if err := tpgresource.SetLabels(instanceTemplate.Properties.Labels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", instanceTemplate.Properties.Labels); err != nil { + return fmt.Errorf("Error setting effective_labels: %s", err) + } + if err = d.Set("self_link", instanceTemplate.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err = d.Set("self_link_unique", fmt.Sprintf("%v?uniqueId=%v", instanceTemplate.SelfLink, instanceTemplate.Id)); err != nil { + return fmt.Errorf("Error setting self_link_unique: %s", err) + } + if err = d.Set("name", instanceTemplate.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if instanceTemplate.Properties.Disks != nil { + disks, err := flattenDisks(instanceTemplate.Properties.Disks, d, project) + if err != nil { + return fmt.Errorf("error flattening disks: %s", err) + } + if err = d.Set("disk", disks); err != nil { + return fmt.Errorf("Error setting disk: %s", err) + } + } + if err = d.Set("description", instanceTemplate.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("machine_type", instanceTemplate.Properties.MachineType); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + if err = d.Set("min_cpu_platform", 
instanceTemplate.Properties.MinCpuPlatform); err != nil { + return fmt.Errorf("Error setting min_cpu_platform: %s", err) + } + + if err = d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward); err != nil { + return fmt.Errorf("Error setting can_ip_forward: %s", err) + } + + if err = d.Set("instance_description", instanceTemplate.Properties.Description); err != nil { + return fmt.Errorf("Error setting instance_description: %s", err) + } + if err = d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("network_performance_config", flattenNetworkPerformanceConfig(instanceTemplate.Properties.NetworkPerformanceConfig)); err != nil { + return err + } + if instanceTemplate.Properties.NetworkInterfaces != nil { + networkInterfaces, region, _, _, err := flattenNetworkInterfaces(d, config, instanceTemplate.Properties.NetworkInterfaces) + if err != nil { + return err + } + if err = d.Set("network_interface", networkInterfaces); err != nil { + return fmt.Errorf("Error setting network_interface: %s", err) + } + // region is where to look up the subnetwork if there is one attached to the instance template + if region != "" { + if err = d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + } + } + if instanceTemplate.Properties.Scheduling != nil { + scheduling := flattenScheduling(instanceTemplate.Properties.Scheduling) + if err = d.Set("scheduling", scheduling); err != nil { + return fmt.Errorf("Error setting scheduling: %s", err) + } + } + if instanceTemplate.Properties.Tags != nil { + if err = d.Set("tags", instanceTemplate.Properties.Tags.Items); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + } else { + if err = d.Set("tags", nil); err != nil { + return fmt.Errorf("Error setting empty tags: %s", err) + } + } + if instanceTemplate.Properties.ServiceAccounts != nil { + if err = d.Set("service_account", 
flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)); err != nil { + return fmt.Errorf("Error setting service_account: %s", err) + } + } + if instanceTemplate.Properties.GuestAccelerators != nil { + if err = d.Set("guest_accelerator", flattenGuestAccelerators(instanceTemplate.Properties.GuestAccelerators)); err != nil { + return fmt.Errorf("Error setting guest_accelerator: %s", err) + } + } + if instanceTemplate.Properties.ShieldedInstanceConfig != nil { + if err = d.Set("shielded_instance_config", flattenShieldedVmConfig(instanceTemplate.Properties.ShieldedInstanceConfig)); err != nil { + return fmt.Errorf("Error setting shielded_instance_config: %s", err) + } + } + + if instanceTemplate.Properties.ConfidentialInstanceConfig != nil { + if err = d.Set("confidential_instance_config", flattenConfidentialInstanceConfig(instanceTemplate.Properties.ConfidentialInstanceConfig)); err != nil { + return fmt.Errorf("Error setting confidential_instance_config: %s", err) + } + } + if instanceTemplate.Properties.AdvancedMachineFeatures != nil { + if err = d.Set("advanced_machine_features", flattenAdvancedMachineFeatures(instanceTemplate.Properties.AdvancedMachineFeatures)); err != nil { + return fmt.Errorf("Error setting advanced_machine_features: %s", err) + } + } +{{- if ne $.TargetVersionName "ga" }} + if instanceTemplate.Properties.DisplayDevice != nil { + if err = d.Set("enable_display", flattenEnableDisplay(instanceTemplate.Properties.DisplayDevice)); err != nil { + return fmt.Errorf("Error setting enable_display: %s", err) + } + } +{{- end }} + + if instanceTemplate.Properties.ResourcePolicies != nil { + if err = d.Set("resource_policies", instanceTemplate.Properties.ResourcePolicies); err != nil { + return fmt.Errorf("Error setting resource_policies: %s", err) + } + } + + if reservationAffinity := instanceTemplate.Properties.ReservationAffinity; reservationAffinity != nil { + if err = d.Set("reservation_affinity", 
flattenReservationAffinity(reservationAffinity)); err != nil { + return fmt.Errorf("Error setting reservation_affinity: %s", err) + } + } + + return nil +} + +func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + splits := strings.Split(d.Id(), "/") + op, err := config.NewComputeClient(userAgent).InstanceTemplates.Delete( + project, splits[len(splits)-1]).Do() + if err != nil { + return fmt.Errorf("Error deleting instance template: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Deleting Instance Template", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + d.SetId("") + return nil +} + +// This wraps the general compute instance helper expandScheduling. +// Default value of OnHostMaintenance depends on the value of Preemptible, +// so we can't set a default in schema +func expandResourceComputeInstanceTemplateScheduling(d *schema.ResourceData, meta interface{}) (*compute.Scheduling, error) { + v, ok := d.GetOk("scheduling") + if !ok || v == nil { + // We can't set defaults for lists (e.g. 
scheduling) + return &compute.Scheduling{ + OnHostMaintenance: "MIGRATE", + }, nil + } + + expanded, err := expandScheduling(v) + if err != nil { + return nil, err + } + + // Make sure we have an appropriate value for OnHostMaintenance if Preemptible + if expanded.Preemptible && expanded.OnHostMaintenance == "" { + expanded.OnHostMaintenance = "TERMINATE" + } + return expanded, nil +} + +func resourceComputeInstanceTemplateImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/global/instanceTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/global/instanceTemplates/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl new file mode 100644 index 000000000000..a2c8876a0639 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl @@ -0,0 +1,4197 @@ + +package compute_test + +import ( +{{- if ne $.TargetVersionName "ga" }} + "encoding/json" +{{- end }} + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + +{{ if eq $.TargetVersionName `ga` }} 
+ "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} + "google.golang.org/api/googleapi" +{{- end }} +) + +const DEFAULT_MIN_CPU_TEST_VALUE = "Intel Haswell" + +func TestAccComputeInstanceTemplate_basic(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_basic(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateTag(&instanceTemplate, "foo"), + testAccCheckComputeInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), + testAccCheckComputeInstanceTemplateContainsLabel(&instanceTemplate, "my_label", "foobar"), + testAccCheckComputeInstanceTemplateLacksShieldedVmConfig(&instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_imageShorthand(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_imageShorthand(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, 
"google_compute_instance_template.foobar", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_preemptible(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_preemptible(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplatePreemptible(&instanceTemplate, true), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstanceTemplate_maintenance_interval(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_maintenance_interval(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateMaintenanceInterval(&instanceTemplate, "PERIODIC"), + ), + }, + { + ResourceName: 
"google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccComputeInstanceTemplate_basic(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateMaintenanceInterval(&instanceTemplate, ""), + ), + }, + }, + }) +} +{{- end }} + +func TestAccComputeInstanceTemplate_IP(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_ip(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_IPv6(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_ipv6(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + ), + }, + { + ResourceName: 
"google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_networkTier(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_networkTier(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_networkIP(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + networkIP := "10.128.0.2" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_networkIP(acctest.RandString(t, 10), networkIP), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate), + testAccCheckComputeInstanceTemplateNetworkIP( + "google_compute_instance_template.foobar", networkIP, &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_networkIPAddress(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + ipAddress := "10.128.0.2" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_networkIPAddress(acctest.RandString(t, 10), ipAddress), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetwork(&instanceTemplate), + testAccCheckComputeInstanceTemplateNetworkIPAddress( + "google_compute_instance_template.foobar", ipAddress, &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_disks(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_disks(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_disksInvalid(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_disksInvalid(acctest.RandString(t, 10)), + ExpectError: regexp.MustCompile("Cannot use `source`.*"), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_regionDisks(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_regionDisks(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_diskIops(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_diskIops(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_subnet_auto(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + network := "tf-test-network-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_subnet_auto(network, acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateNetworkName(&instanceTemplate, network), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_subnet_custom(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, 
+ ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_subnet_custom(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_subnet_xpn(t *testing.T) { + // Randomness + acctest.SkipIfVcr(t) + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + org := envvar.GetTestOrgFromEnv(t) + billingId := envvar.GetTestBillingAccountFromEnv(t) + projectName := fmt.Sprintf("tf-testxpn-%d", time.Now().Unix()) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_subnet_xpn(org, billingId, projectName, acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExistsInProject( + t, "google_compute_instance_template.foobar", fmt.Sprintf("%s-service", projectName), + &instanceTemplate), + testAccCheckComputeInstanceTemplateSubnetwork(&instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_metadata_startup_script(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_startup_script(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateStartupScript(&instanceTemplate, "echo 'Hello'"), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_primaryAliasIpRange(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_primaryAliasIpRange(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasAliasIpRange(&instanceTemplate, "", "/24"), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_secondaryAliasIpRange(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_secondaryAliasIpRange(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasAliasIpRange(&instanceTemplate, "inst-test-secondary", "/24"), + ), + }, + { 
+ ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_guestAccelerator(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_guestAccelerator(acctest.RandString(t, 10), 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasGuestAccelerator(&instanceTemplate, "nvidia-tesla-k80", 1), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) + +} + +func TestAccComputeInstanceTemplate_guestAcceleratorSkip(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_guestAccelerator(acctest.RandString(t, 10), 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateLacksGuestAccelerator(&instanceTemplate), + ), + }, + }, + }) + +} + +func TestAccComputeInstanceTemplate_minCpuPlatform(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_minCpuPlatform(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasMinCpuPlatform(&instanceTemplate, DEFAULT_MIN_CPU_TEST_VALUE), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_EncryptKMS(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + kms := acctest.BootstrapKMSKey(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_encryptionKMS(acctest.RandString(t, 10), kms.CryptoKey.Name), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_soleTenantNodeAffinities(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_soleTenantInstanceTemplate(acctest.RandString(t, 10)), + }, + { + 
ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_instanceResourcePolicies(t *testing.T) { + t.Parallel() + + var template compute.InstanceTemplate + var policyName = "tf-test-policy-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_instanceResourcePolicyCollocated(acctest.RandString(t, 10), policyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &template), + testAccCheckComputeInstanceTemplateHasInstanceResourcePolicies(&template, policyName), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_reservationAffinities(t *testing.T) { + t.Parallel() + + var template compute.InstanceTemplate + var templateName = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_reservationAffinityInstanceTemplate_nonSpecificReservation(templateName, "NO_RESERVATION"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &template), + testAccCheckComputeInstanceTemplateHasReservationAffinity(&template, "NO_RESERVATION"), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: 
true, + }, + { + Config: testAccComputeInstanceTemplate_reservationAffinityInstanceTemplate_nonSpecificReservation(templateName, "ANY_RESERVATION"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &template), + testAccCheckComputeInstanceTemplateHasReservationAffinity(&template, "ANY_RESERVATION"), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstanceTemplate_reservationAffinityInstanceTemplate_specificReservation(templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &template), + testAccCheckComputeInstanceTemplateHasReservationAffinity(&template, "SPECIFIC_RESERVATION", templateName), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_shieldedVmConfig1(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_shieldedVmConfig(acctest.RandString(t, 10), true, true, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasShieldedVmConfig(&instanceTemplate, true, true, true), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_shieldedVmConfig2(t *testing.T) { + t.Parallel() + + var 
instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_shieldedVmConfig(acctest.RandString(t, 10), true, true, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasShieldedVmConfig(&instanceTemplate, true, true, false), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + {{- if ne $.TargetVersionName "ga" }} + var instanceTemplate2 compute.InstanceTemplate + {{- end }} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplateConfidentialInstanceConfigEnable(acctest.RandString(t, 10), "SEV"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, true, "SEV"), + {{- if ne $.TargetVersionName "ga" }} + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar2", &instanceTemplate2), + testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, true, ""), + {{- end }} + ), + }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: 
testAccComputeInstanceTemplateConfidentialInstanceConfigNoEnable(acctest.RandString(t, 10), "AMD Milan", "SEV_SNP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar3", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, false, "SEV_SNP"), + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar4", &instanceTemplate2), + testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), + ), + }, + {{- end }} + }, + }) +} + +func TestAccComputeInstanceTemplate_AdvancedMachineFeatures(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplateAdvancedMachineFeatures(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstanceTemplate_enableDisplay(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_enableDisplay(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ end }} +func TestAccComputeInstanceTemplate_invalidDiskType(t *testing.T) { + t.Parallel() 
+ + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_invalidDiskType(acctest.RandString(t, 10)), + ExpectError: regexp.MustCompile("SCRATCH disks must have a disk_type of local-ssd"), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_withScratchDisk(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_with375GbScratchDisk(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_with18TbScratchDisk(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_with18TbScratchDisk(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_imageResourceTest(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + diskName := "tf-test-disk-" + acctest.RandString(t, 10) + computeImage := "tf-test-image-" + acctest.RandString(t, 10) + imageDesc1 := "Some description" + imageDesc2 := "Some other description" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + 
CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_imageResourceTest(diskName, computeImage, imageDesc1), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + { + Config: testAccComputeInstanceTemplate_imageResourceTest(diskName, computeImage, imageDesc2), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_diskResourcePolicies(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + policyName := "tf-test-policy-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_diskResourcePolicies(acctest.RandString(t, 10), policyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasDiskResourcePolicy(&instanceTemplate, policyName), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_nictype_update(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var instanceTemplateName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_nictype(instanceTemplateName, instanceTemplateName, "GVNIC"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + ), + }, + { + Config: testAccComputeInstanceTemplate_nictype(instanceTemplateName, instanceTemplateName, "VIRTIO_NET"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_queueCount(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var instanceTemplateName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_queueCount(instanceTemplateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_managedEnvoy(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_managedEnvoy(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + 
t, "google_compute_instance_template.foobar", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_spot(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_spot(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplatePreemptible(&instanceTemplate, true), + testAccCheckComputeInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstanceTemplate_spot_maxRunDuration_deleteTerminationAction(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceTemplate_spot + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "DELETE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeInstanceTemplate_spot_maxRunDuration(acctest.RandString(t, 10), instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplatePreemptible(&instanceTemplate, true), + testAccCheckComputeInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), + testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, instanceTerminationAction), + testAccCheckComputeInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_spot_maxRunDuration_stopTerminationAction(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceTemplate_spot + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "STOP" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_spot_maxRunDuration(acctest.RandString(t, 10), instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplatePreemptible(&instanceTemplate, true), + testAccCheckComputeInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), + 
testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, instanceTerminationAction), + testAccCheckComputeInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 600 + var instanceTerminationAction = "STOP" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, instanceTerminationAction), + testAccCheckComputeInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + +func TestAccComputeInstanceTemplate_spot_maxRunDuration(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstanceTemplate_spot + expectedMaxRunDuration.Nanos = 123 + 
expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "DELETE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_spot_maxRunDuration(acctest.RandString(t, 10), instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplatePreemptible(&instanceTemplate, true), + testAccCheckComputeInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), + testAccCheckComputeInstanceTemplateInstanceTerminationAction(&instanceTemplate, instanceTerminationAction), + testAccCheckComputeInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccComputeInstanceTemplate_localSsdRecoveryTimeout(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedLocalSsdRecoveryTimeout = compute.Duration{} + expectedLocalSsdRecoveryTimeout.Nanos = 0 + expectedLocalSsdRecoveryTimeout.Seconds = 3600 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_localSsdRecoveryTimeout(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", 
&instanceTemplate), + testAccCheckComputeInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeInstanceTemplateLocalSsdRecoveryTimeout(&instanceTemplate, expectedLocalSsdRecoveryTimeout), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstanceTemplate_partnerMetadata(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_partnerMetadata(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplatePartnerMetadata(&instanceTemplate, expectedPartnerMetadata), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{fmt.Sprintf("partner_metadata.%s", namespace)}, + }, + }, + }) + +} +{{- end }} + +func TestAccComputeInstanceTemplate_sourceSnapshotEncryptionKey(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + kmsKey := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + + context := map[string]interface{}{ + "kms_ring_name": tpgresource.GetResourceNameFromSelfLink(kmsKey.KeyRing.Name), + 
"kms_key_name": tpgresource.GetResourceNameFromSelfLink(kmsKey.CryptoKey.Name), + "random_suffix": acctest.RandString(t, 10), + } + + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_sourceSnapshotEncryptionKey(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.template", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.template", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"disk.0.source_snapshot", "disk.0.source_snapshot_encryption_key"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_sourceImageEncryptionKey(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + kmsKey := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + + context := map[string]interface{}{ + "kms_ring_name": tpgresource.GetResourceNameFromSelfLink(kmsKey.KeyRing.Name), + "kms_key_name": tpgresource.GetResourceNameFromSelfLink(kmsKey.CryptoKey.Name), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_sourceImageEncryptionKey(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.template", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_instance_template.template", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"disk.0.source_image_encryption_key"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstanceTemplate_NetworkAttachment(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + testNetworkName := acctest.BootstrapSharedTestNetwork(t, "attachment-network") + subnetName := acctest.BootstrapSubnet(t, "tf-test-subnet", testNetworkName) + networkAttachmentName := acctest.BootstrapNetworkAttachment(t, "tf-test-attachment", subnetName) + + // Need to have the full network attachment name in the format project/{project_id}/regions/{region_id}/networkAttachments/{networkAttachmentName} + fullFormNetworkAttachmentName := fmt.Sprintf("projects/%s/regions/%s/networkAttachments/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), networkAttachmentName) + + context := map[string]interface{}{ + "subnet": subnetName, + "suffix": (acctest.RandString(t, 10)), + "network_attachment": fullFormNetworkAttachmentName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_network_attachment(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateHasNetworkAttachment(&instanceTemplate, fmt.Sprintf("https://www.googleapis.com/compute/beta/%s", fullFormNetworkAttachmentName)), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccComputeInstanceTemplate_migration(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var instanceTemplateUpdate 
compute.InstanceTemplate + + suffix := acctest.RandString(t, 10) + oldVersion := map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.84.0", // a version that doesn't separate user defined labels and system labels + Source: "registry.terraform.io/hashicorp/google", + }, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_basic(suffix), + ExternalProviders: oldVersion, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + ), + }, + { + Config: testAccComputeInstanceTemplate_basic(suffix), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplateUpdate), + testAccCheckComputeInstanceTemplateNotRecreated(&instanceTemplate, &instanceTemplateUpdate), + testAccCheckComputeInstanceTemplateTag(&instanceTemplateUpdate, "foo"), + testAccCheckComputeInstanceTemplateMetadata(&instanceTemplateUpdate, "foo", "bar"), + testAccCheckComputeInstanceTemplateContainsLabel(&instanceTemplateUpdate, "my_label", "foobar"), + testAccCheckComputeInstanceTemplateLacksShieldedVmConfig(&instanceTemplateUpdate), + ), + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_withLabels(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var instanceTemplateUpdate compute.InstanceTemplate + suffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ 
+ { + Config: testAccComputeInstanceTemplate_withProviderDefaultLabels(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate), + testAccCheckComputeInstanceTemplateContainsLabel(&instanceTemplate, "my_label", "foobar"), + testAccCheckComputeInstanceTemplateContainsLabel(&instanceTemplate, "env", "test"), + testAccCheckComputeInstanceTemplateContainsLabel(&instanceTemplate, "default_key1", "default_value1"), + resource.TestCheckResourceAttr("google_compute_instance_template.foobar", "labels.my_label", "foobar"), + resource.TestCheckResourceAttr("google_compute_instance_template.foobar", "labels.env", "test"), + resource.TestCheckResourceAttr("google_compute_instance_template.foobar", "terraform_labels.my_label", "foobar"), + resource.TestCheckResourceAttr("google_compute_instance_template.foobar", "terraform_labels.env", "test"), + resource.TestCheckResourceAttr("google_compute_instance_template.foobar", "terraform_labels.default_key1", "default_value1"), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccComputeInstanceTemplate_moveLabelToProvderDefaultLabels(suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplateUpdate), + testAccCheckComputeInstanceTemplateNotRecreated(&instanceTemplate, &instanceTemplateUpdate), + testAccCheckComputeInstanceTemplateContainsLabel(&instanceTemplateUpdate, "my_label", "foobar"), + testAccCheckComputeInstanceTemplateContainsLabel(&instanceTemplateUpdate, "env", "test"), + testAccCheckComputeInstanceTemplateContainsLabel(&instanceTemplateUpdate, "default_key1", "default_value1"), + resource.TestCheckResourceAttr("google_compute_instance_template.foobar", 
"labels.my_label", "foobar"), + resource.TestCheckResourceAttr("google_compute_instance_template.foobar", "terraform_labels.my_label", "foobar"), + resource.TestCheckResourceAttr("google_compute_instance_template.foobar", "terraform_labels.env", "test"), + resource.TestCheckResourceAttr("google_compute_instance_template.foobar", "terraform_labels.default_key1", "default_value1"), + ), + }, + { + ResourceName: "google_compute_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_resourceManagerTags(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var instanceTemplateName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "instance_name": instanceTemplateName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceTemplate_resourceManagerTags(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceTemplateExists( + t, "google_compute_instance_template.foobar", &instanceTemplate)), + }, + }, + }) +} + +func testAccCheckComputeInstanceTemplateDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance_template" { + continue + } + + splits := strings.Split(rs.Primary.ID, "/") + _, err := config.NewComputeClient(config.UserAgent).InstanceTemplates.Get( + config.Project, splits[len(splits)-1]).Do() + if err == nil { + return 
fmt.Errorf("Instance template still exists") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateExists(t *testing.T, n string, instanceTemplate interface{}) resource.TestCheckFunc { + if instanceTemplate == nil { + panic("Attempted to check existence of Instance template that was nil.") + } + + return testAccCheckComputeInstanceTemplateExistsInProject(t, n, envvar.GetTestProjectFromEnv(), instanceTemplate.(*compute.InstanceTemplate)) +} + +func testAccCheckComputeInstanceTemplateExistsInProject(t *testing.T, n, p string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + splits := strings.Split(rs.Primary.ID, "/") + templateName := splits[len(splits)-1] + {{- if eq $.TargetVersionName "ga" }} + found, err := config.NewComputeClient(config.UserAgent).InstanceTemplates.Get( + p, templateName).Do() + {{- else }} + found, err := config.NewComputeClient(config.UserAgent).InstanceTemplates.Get( + p, templateName).View("FULL").Do() + {{- end }} + + if err != nil { + return err + } + + if found.Name != templateName { + return fmt.Errorf("Instance template not found") + } + if strings.Contains(rs.Primary.ID, "uniqueId") { + return fmt.Errorf("unique ID is not supposed to be present in the Terraform resource ID") + } + selfLink:= rs.Primary.Attributes["self_link"] + if strings.Contains(selfLink, "uniqueId") { + return fmt.Errorf("unique ID is not supposed to be present in selfLink") + } + + actualSelfLinkUnique:= rs.Primary.Attributes["self_link_unique"] + foundId:= strconv.FormatUint(found.Id, 10) + expectedSelfLinkUnique:= selfLink + "?uniqueId="+foundId + if actualSelfLinkUnique != expectedSelfLinkUnique { + return fmt.Errorf("self_link_unique should be %v but it is: %v", 
expectedSelfLinkUnique, actualSelfLinkUnique) + } + + *instanceTemplate = *found + + return nil + } +} + +func testAccCheckComputeInstanceTemplateMetadata( + instanceTemplate *compute.InstanceTemplate, + k string, v string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Metadata == nil { + return fmt.Errorf("no metadata") + } + + for _, item := range instanceTemplate.Properties.Metadata.Items { + if k != item.Key { + continue + } + + if item.Value != nil && v == *item.Value { + return nil + } + + return fmt.Errorf("bad value for %s: %s", k, *item.Value) + } + + return fmt.Errorf("metadata not found: %s", k) + } +} + +func testAccCheckComputeInstanceTemplateNetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + for _, c := range i.AccessConfigs { + if c.NatIP == "" { + return fmt.Errorf("no NAT IP") + } + } + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateNetworkName(instanceTemplate *compute.InstanceTemplate, network string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + if !strings.Contains(i.Network, network) { + return fmt.Errorf("Network doesn't match expected value, Expected: %s Actual: %s", network, i.Network[strings.LastIndex("/", i.Network)+1:]) + } + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateSubnetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + if i.Subnetwork == "" { + return fmt.Errorf("no subnet") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateTag(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if 
instanceTemplate.Properties.Tags == nil { + return fmt.Errorf("no tags") + } + + for _, k := range instanceTemplate.Properties.Tags.Items { + if k == n { + return nil + } + } + + return fmt.Errorf("tag not found: %s", n) + } +} + +func testAccCheckComputeInstanceTemplatePreemptible(instanceTemplate *compute.InstanceTemplate, preemptible bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Scheduling.Preemptible != preemptible { + return fmt.Errorf("Expected preemptible value %v, got %v", preemptible, instanceTemplate.Properties.Scheduling.Preemptible) + } + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeInstanceTemplateMaintenanceInterval(instanceTemplate *compute.InstanceTemplate, maintenance_interval string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Scheduling.MaintenanceInterval != maintenance_interval { + return fmt.Errorf("Expected maintenance interval value %v, got %v", maintenance_interval, instanceTemplate.Properties.Scheduling.MaintenanceInterval) + } + return nil + } +} +{{- end }} + +func testAccCheckComputeInstanceTemplateProvisioningModel(instanceTemplate *compute.InstanceTemplate, provisioning_model string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Scheduling.ProvisioningModel != provisioning_model { + return fmt.Errorf("Expected provisioning_model %v, got %v", provisioning_model, instanceTemplate.Properties.Scheduling.ProvisioningModel) + } + return nil + } +} + +func testAccCheckComputeInstanceTemplateInstanceTerminationAction(instanceTemplate *compute.InstanceTemplate, instance_termination_action string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Scheduling.InstanceTerminationAction != instance_termination_action { + return fmt.Errorf("Expected instance_termination_action %v, got %v", 
instance_termination_action, instanceTemplate.Properties.Scheduling.InstanceTerminationAction) + } + return nil + } +} + + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeInstanceTemplateMaxRunDuration(instanceTemplate *compute.InstanceTemplate, instance_max_run_duration_want compute.Duration) resource.TestCheckFunc { + return func(s *terraform.State) error { + if !reflect.DeepEqual(*instanceTemplate.Properties.Scheduling.MaxRunDuration, instance_max_run_duration_want) { + return fmt.Errorf("gExpected instance_termination_action: %#v; got %#v", instance_max_run_duration_want, instanceTemplate.Properties.Scheduling.MaxRunDuration) + } + + return nil + } +} +{{- end }} + + +func testAccCheckComputeInstanceTemplateLocalSsdRecoveryTimeout(instanceTemplate *compute.InstanceTemplate, instance_local_ssd_recovery_timeout_want compute.Duration) resource.TestCheckFunc { + return func(s *terraform.State) error { + if !reflect.DeepEqual(*instanceTemplate.Properties.Scheduling.LocalSsdRecoveryTimeout, instance_local_ssd_recovery_timeout_want) { + return fmt.Errorf("gExpected LocalSsdRecoveryTimeout: %#v; got %#v", instance_local_ssd_recovery_timeout_want, instanceTemplate.Properties.Scheduling.LocalSsdRecoveryTimeout) + } + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeInstanceTemplatePartnerMetadata(instanceTemplate *compute.InstanceTemplate, expectedPartnerMetadata map[string]compute.StructuredEntries) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate == nil { + return fmt.Errorf("instance template is nil") + } + if instanceTemplate.Properties.PartnerMetadata == nil { + return fmt.Errorf("no partner metadata") + } + expectedPartnerMetadataMap := make(map[string]interface{}) + acutalPartnerMetadataMap := make(map[string]interface{}) + for key, value := range instanceTemplate.Properties.PartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + 
acutalPartnerMetadataMap[key] = jsonMap + } + for key, value := range expectedPartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + expectedPartnerMetadataMap[key] = jsonMap + } + if !reflect.DeepEqual(acutalPartnerMetadataMap, expectedPartnerMetadataMap) { + return fmt.Errorf("got the wrong instance partne metadata action: have: %+v; want: %+v", acutalPartnerMetadataMap, expectedPartnerMetadataMap) + } + return nil + + } +} +{{- end }} + +func testAccCheckComputeInstanceTemplateAutomaticRestart(instanceTemplate *compute.InstanceTemplate, automaticRestart bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + ar := instanceTemplate.Properties.Scheduling.AutomaticRestart + if ar == nil { + return fmt.Errorf("Expected to see a value for AutomaticRestart, but got nil") + } + if *ar != automaticRestart { + return fmt.Errorf("Expected automatic restart value %v, got %v", automaticRestart, ar) + } + return nil + } +} + +func testAccCheckComputeInstanceTemplateStartupScript(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Metadata == nil && n == "" { + return nil + } else if instanceTemplate.Properties.Metadata == nil && n != "" { + return fmt.Errorf("Expected metadata.startup-script to be '%s', metadata wasn't set at all", n) + } + for _, item := range instanceTemplate.Properties.Metadata.Items { + if item.Key != "startup-script" { + continue + } + if item.Value != nil && *item.Value == n { + return nil + } else if item.Value == nil && n == "" { + return nil + } else if item.Value == nil && n != "" { + return fmt.Errorf("Expected metadata.startup-script to be '%s', wasn't set", n) + } else if *item.Value != n { + return fmt.Errorf("Expected metadata.startup-script to be '%s', got '%s'", n, *item.Value) + } + } + return fmt.Errorf("This should never be reached.") + } +} + +func 
testAccCheckComputeInstanceTemplateNetworkIP(n, networkIP string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP + err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s) + if err != nil { + return err + } + return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", networkIP)(s) + } +} + +func testAccCheckComputeInstanceTemplateNetworkIPAddress(n, ipAddress string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP + err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s) + if err != nil { + return err + } + return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ipAddress)(s) + } +} + +func testAccCheckComputeInstanceTemplateContainsLabel(instanceTemplate *compute.InstanceTemplate, key string, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + v, ok := instanceTemplate.Properties.Labels[key] + if !ok { + return fmt.Errorf("Expected label with key '%s' not found", key) + } + if v != value { + return fmt.Errorf("Incorrect label value for key '%s': expected '%s' but found '%s'", key, value, v) + } + return nil + } +} + +func testAccCheckComputeInstanceTemplateHasAliasIpRange(instanceTemplate *compute.InstanceTemplate, subnetworkRangeName, iPCidrRange string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, networkInterface := range instanceTemplate.Properties.NetworkInterfaces { + for _, aliasIpRange := range networkInterface.AliasIpRanges { + if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { + return nil + } + } + } 
+ + return fmt.Errorf("Alias ip range with name %s and cidr %s not present", subnetworkRangeName, iPCidrRange) + } +} + +func testAccCheckComputeInstanceTemplateHasGuestAccelerator(instanceTemplate *compute.InstanceTemplate, acceleratorType string, acceleratorCount int64) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(instanceTemplate.Properties.GuestAccelerators) != 1 { + return fmt.Errorf("Expected only one guest accelerator") + } + + if !strings.HasSuffix(instanceTemplate.Properties.GuestAccelerators[0].AcceleratorType, acceleratorType) { + return fmt.Errorf("Wrong accelerator type: expected %v, got %v", acceleratorType, instanceTemplate.Properties.GuestAccelerators[0].AcceleratorType) + } + + if instanceTemplate.Properties.GuestAccelerators[0].AcceleratorCount != acceleratorCount { + return fmt.Errorf("Wrong accelerator acceleratorCount: expected %d, got %d", acceleratorCount, instanceTemplate.Properties.GuestAccelerators[0].AcceleratorCount) + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateLacksGuestAccelerator(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(instanceTemplate.Properties.GuestAccelerators) > 0 { + return fmt.Errorf("Expected no guest accelerators") + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateHasMinCpuPlatform(instanceTemplate *compute.InstanceTemplate, minCpuPlatform string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.MinCpuPlatform != minCpuPlatform { + return fmt.Errorf("Wrong minimum CPU platform: expected %s, got %s", minCpuPlatform, instanceTemplate.Properties.MinCpuPlatform) + } + + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeInstanceTemplateHasNetworkAttachment(instanceTemplate *compute.InstanceTemplate, networkAttachmentName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + 
for _, networkInterface := range instanceTemplate.Properties.NetworkInterfaces { + if networkInterface.NetworkAttachment != "" && networkInterface.NetworkAttachment == networkAttachmentName { + return nil + } + } + return fmt.Errorf("Network Attachment %s, was not found in the instance template", networkAttachmentName) + } +} +{{- end }} + +func testAccCheckComputeInstanceTemplateHasInstanceResourcePolicies(instanceTemplate *compute.InstanceTemplate, resourcePolicy string) resource.TestCheckFunc { + return func(s *terraform.State) error { + resourcePolicyActual := instanceTemplate.Properties.ResourcePolicies[0] + if resourcePolicyActual != resourcePolicy { + return fmt.Errorf("Wrong instance resource policy: expected %s, got %s", resourcePolicy, resourcePolicyActual) + } + + return nil + } + +} + +func testAccCheckComputeInstanceTemplateHasReservationAffinity(instanceTemplate *compute.InstanceTemplate, consumeReservationType string, specificReservationNames ...string) resource.TestCheckFunc { + if len(specificReservationNames) > 1 { + panic("too many specificReservationNames in test") + } + + return func(*terraform.State) error { + if instanceTemplate.Properties.ReservationAffinity == nil { + return fmt.Errorf("expected template to have reservation affinity, but it was nil") + } + + if actualReservationType := instanceTemplate.Properties.ReservationAffinity.ConsumeReservationType; actualReservationType != consumeReservationType { + return fmt.Errorf("Wrong reservationAffinity consumeReservationType: expected %s, got, %s", consumeReservationType, actualReservationType) + } + + if len(specificReservationNames) > 0 { + const reservationNameKey = "compute.googleapis.com/reservation-name" + if actualKey := instanceTemplate.Properties.ReservationAffinity.Key; actualKey != reservationNameKey { + return fmt.Errorf("Wrong reservationAffinity key: expected %s, got, %s", reservationNameKey, actualKey) + } + + reservationAffinityValues := 
instanceTemplate.Properties.ReservationAffinity.Values + if len(reservationAffinityValues) != 1 || reservationAffinityValues[0] != specificReservationNames[0] { + return fmt.Errorf("Wrong reservationAffinity values: expected %s, got, %s", specificReservationNames, reservationAffinityValues) + } + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateHasShieldedVmConfig(instanceTemplate *compute.InstanceTemplate, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) resource.TestCheckFunc { + + return func(s *terraform.State) error { + if instanceTemplate.Properties.ShieldedInstanceConfig.EnableSecureBoot != enableSecureBoot { + return fmt.Errorf("Wrong shieldedVmConfig enableSecureBoot: expected %t, got, %t", enableSecureBoot, instanceTemplate.Properties.ShieldedInstanceConfig.EnableSecureBoot) + } + + if instanceTemplate.Properties.ShieldedInstanceConfig.EnableVtpm != enableVtpm { + return fmt.Errorf("Wrong shieldedVmConfig enableVtpm: expected %t, got, %t", enableVtpm, instanceTemplate.Properties.ShieldedInstanceConfig.EnableVtpm) + } + + if instanceTemplate.Properties.ShieldedInstanceConfig.EnableIntegrityMonitoring != enableIntegrityMonitoring { + return fmt.Errorf("Wrong shieldedVmConfig enableIntegrityMonitoring: expected %t, got, %t", enableIntegrityMonitoring, instanceTemplate.Properties.ShieldedInstanceConfig.EnableIntegrityMonitoring) + } + return nil + } +} + +func testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(instanceTemplate *compute.InstanceTemplate, EnableConfidentialCompute bool, ConfidentialInstanceType string) resource.TestCheckFunc { + + return func(s *terraform.State) error { + if instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute != EnableConfidentialCompute { + return fmt.Errorf("Wrong ConfidentialInstanceConfig EnableConfidentialCompute: expected %t, got, %t", EnableConfidentialCompute, 
instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute) + } + {{- if ne $.TargetVersionName "ga" }} + if instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType != ConfidentialInstanceType { + return fmt.Errorf("Wrong ConfidentialInstanceConfig ConfidentialInstanceType: expected %s, got, %s", ConfidentialInstanceType, instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType) + } + {{- end }} + + return nil + } +} + +func testAccCheckComputeInstanceTemplateLacksShieldedVmConfig(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.ShieldedInstanceConfig != nil { + return fmt.Errorf("Expected no shielded vm config") + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateHasDiskResourcePolicy(instanceTemplate *compute.InstanceTemplate, resourcePolicy string) resource.TestCheckFunc { + return func(s *terraform.State) error { + resourcePolicyActual := instanceTemplate.Properties.Disks[0].InitializeParams.ResourcePolicies[0] + if resourcePolicyActual != resourcePolicy { + return fmt.Errorf("Wrong disk resource policy: expected %s, got %s", resourcePolicy, resourcePolicyActual) + } + + return nil + } +} + +func testAccCheckComputeInstanceTemplateNotRecreated(instanceTemplate *compute.InstanceTemplate, instanceTemplateUpdate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Id != instanceTemplateUpdate.Id { + return fmt.Errorf("The resource has been recreated: expected %d, got %d", instanceTemplate.Id, instanceTemplateUpdate.Id) + } + + return nil + } +} + +func testAccComputeInstanceTemplate_basic(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" 
+ machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_withProviderDefaultLabels(suffix string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + env = "test" + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_moveLabelToProvderDefaultLabels(suffix string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + env = "test" + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + 
network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_imageShorthand(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_image" "foobar" { + name = "tf-test-%s" + description = "description-test" + family = "family-test" + raw_disk { + source = "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" + } + labels = { + my-label = "my-label-value" + } + timeouts { + create = "5m" + } +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = google_compute_image.foobar.name + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, suffix, suffix) +} + +func testAccComputeInstanceTemplate_preemptible(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = true + automatic_restart = false + } + + metadata = { + foo = "bar" + } + + 
service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceTemplate_maintenance_interval(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + maintenance_interval = "PERIODIC" + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, suffix) +} +{{- end }} + +func testAccComputeInstanceTemplate_ip(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_address" "foo" { + name = "tf-test-instance-template-%s" +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + access_config { + nat_ip = google_compute_address.foo.address + } + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix) +} + +func testAccComputeInstanceTemplate_ipv6(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_address" "foo" { + name = "tf-test-instance-template-%s" +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "foo" { + name = 
"tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork-ipv6" { + name = "tf-test-subnetwork-%s" + + ip_cidr_range = "10.0.0.0/22" + region = "us-west2" + + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + + network = google_compute_network.foo.id +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-west2" + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork-ipv6.name + stack_type = "IPV4_IPV6" + ipv6_access_config { + network_tier = "PREMIUM" + } + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix) +} + +func testAccComputeInstanceTemplate_networkTier(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + access_config { + network_tier = "STANDARD" + } + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_networkIP(suffix, networkIP string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + network_ip = "%s" + } + + metadata = { + foo = "bar" + } +} +`, suffix, networkIP) +} + +func testAccComputeInstanceTemplate_networkIPAddress(suffix, ipAddress string) string { + return 
fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + network_ip = "%s" + } + + metadata = { + foo = "bar" + } +} +`, suffix, ipAddress) +} + +func testAccComputeInstanceTemplate_disks(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "tf-test-instance-template-%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + labels = { + foo = "bar" + } + } + + disk { + source = google_compute_disk.foobar.name + auto_delete = false + boot = false + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix) +} + +func testAccComputeInstanceTemplate_disksInvalid(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "tf-test-instance-template-%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = 
true + } + + disk { + source = google_compute_disk.foobar.name + disk_size_gb = 50 + auto_delete = false + boot = false + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix) +} + +func testAccComputeInstanceTemplate_with375GbScratchDisk(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + can_ip_forward = false + disk { + source_image = data.google_compute_image.my_image.name + auto_delete = true + boot = true + } + disk { + auto_delete = true + disk_size_gb = 375 + type = "SCRATCH" + disk_type = "local-ssd" + } + disk { + auto_delete = true + device_name = "test-local-ssd" + disk_size_gb = 375 + type = "SCRATCH" + disk_type = "local-ssd" + } + network_interface { + network = "default" + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_with18TbScratchDisk(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "n2-standard-64" + can_ip_forward = false + disk { + source_image = data.google_compute_image.my_image.name + auto_delete = true + boot = true + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + 
auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + network_interface { + network = "default" + } +}`, suffix) +} + +func testAccComputeInstanceTemplate_regionDisks(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_disk" "foobar" { + name = "tf-test-instance-template-%s" + size = 10 + type = "pd-ssd" + region = "us-central1" + replica_zones = ["us-central1-a", "us-central1-f"] +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + } + + disk { + source = google_compute_region_disk.foobar.name + auto_delete = false + boot = false + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix) +} + +func testAccComputeInstanceTemplate_diskIops(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + provisioned_iops = 10000 + labels = { + foo = "bar" + } + } + + network_interface { + network = "default" + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_subnet_auto(network, suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "auto-network" { + 
name = "%s" + auto_create_subnetworks = true +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + network = google_compute_network.auto-network.name + } + + metadata = { + foo = "bar" + } +} +`, network, suffix) +} + +func testAccComputeInstanceTemplate_subnet_custom(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_network" "network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "subnetwork-%s" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.network.self_link +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork.name + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeInstanceTemplate_subnet_xpn(org, billingId, projectName, suffix string) string { + return fmt.Sprintf(` +resource "google_project" "host_project" { + name = "Test Project XPN Host" + project_id = "%s-host" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "host_project" { + project = google_project.host_project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_shared_vpc_host_project" "host_project" { + project = google_project_service.host_project.project +} + +resource "google_project" "service_project" { + name = 
"Test Project XPN Service" + project_id = "%s-service" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "service_project" { + project = google_project.service_project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_shared_vpc_service_project" "service_project" { + host_project = google_compute_shared_vpc_host_project.host_project.project + service_project = google_project_service.service_project.project +} + +resource "google_compute_network" "network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + project = google_compute_shared_vpc_host_project.host_project.project +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "subnetwork-%s" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.network.self_link + project = google_compute_shared_vpc_host_project.host_project.project +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork.name + subnetwork_project = google_compute_subnetwork.subnetwork.project + } + + metadata = { + foo = "bar" + } + project = google_compute_shared_vpc_service_project.service_project.service_project +} +`, projectName, org, billingId, projectName, org, billingId, suffix, suffix, suffix) +} + +func testAccComputeInstanceTemplate_startup_script(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + 
disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + metadata = { + foo = "bar" + } + + network_interface { + network = "default" + } + + metadata_startup_script = "echo 'Hello'" +} +`, suffix) +} + +func testAccComputeInstanceTemplate_primaryAliasIpRange(i string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + metadata = { + foo = "bar" + } + + network_interface { + network = "default" + alias_ip_range { + ip_cidr_range = "/24" + } + } +} +`, i) +} + +func testAccComputeInstanceTemplate_secondaryAliasIpRange(i string) string { + return fmt.Sprintf(` +resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + metadata = { + foo = "bar" + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + + // Note that unlike compute instances, instance templates seem to be + // only able to specify the netmask here. 
Trying a full CIDR string + // results in: + // Invalid value for field 'resource.properties.networkInterfaces[0].aliasIpRanges[0].ipCidrRange': + // '172.16.0.0/24'. Alias IP CIDR range must be a valid netmask starting with '/' (e.g. '/24') + alias_ip_range { + subnetwork_range_name = google_compute_subnetwork.inst-test-subnetwork.secondary_ip_range[0].range_name + ip_cidr_range = "/24" + } + } +} +`, i, i, i) +} + +func testAccComputeInstanceTemplate_guestAccelerator(i string, count uint8) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + # Instances with guest accelerators do not support live migration. + on_host_maintenance = "TERMINATE" + } + + guest_accelerator { + count = %d + type = "nvidia-tesla-k80" + } +} +`, i, count) +} + +func testAccComputeInstanceTemplate_minCpuPlatform(i string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + # Instances with guest accelerators do not support live migration. 
+ on_host_maintenance = "TERMINATE" + } + + min_cpu_platform = "%s" +} +`, i, DEFAULT_MIN_CPU_TEST_VALUE) +} + +func testAccComputeInstanceTemplate_encryptionKMS(suffix, kmsLink string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + + disk { + source_image = data.google_compute_image.my_image.self_link + disk_encryption_key { + kms_key_self_link = "%s" + } + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, suffix, kmsLink) +} + +func testAccComputeInstanceTemplate_soleTenantInstanceTemplate(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-standard-4" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + node_affinities { + key = "tfacc" + operator = "IN" + values = ["testinstancetemplate"] + } + + min_node_cpus = 2 + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_instanceResourcePolicyCollocated(suffix string, policyName string) string { + return fmt.Sprintf(` +resource "google_compute_resource_policy" "foo" { + name = "%s" + region = "us-central1" + group_placement_policy { + vm_count = 2 + collocation = "COLLOCATED" + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + 
+resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-standard-4" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = false + } + + resource_policies = [google_compute_resource_policy.foo.self_link] + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, policyName, suffix) +} + +func testAccComputeInstanceTemplate_reservationAffinityInstanceTemplate_nonSpecificReservation(templateName, consumeReservationType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instancet-%s" + machine_type = "e2-medium" + can_ip_forward = false + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + reservation_affinity { + type = "%s" + } +} +`, templateName, consumeReservationType) +} + +func testAccComputeInstanceTemplate_reservationAffinityInstanceTemplate_specificReservation(templateName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instancet-%s" + machine_type = "e2-medium" + can_ip_forward = false + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + reservation_affinity { + type = "SPECIFIC_RESERVATION" + + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = ["%s"] + } + } +} +`, templateName, templateName) +} + +func 
testAccComputeInstanceTemplate_shieldedVmConfig(suffix string, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + shielded_instance_config { + enable_secure_boot = %t + enable_vtpm = %t + enable_integrity_monitoring = %t + } +} +`, suffix, enableSecureBoot, enableVtpm, enableIntegrityMonitoring) +} + +func testAccComputeInstanceTemplateConfidentialInstanceConfigEnable(suffix string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "n2d-standard-2" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + confidential_instance_config { + enable_confidential_compute = true +{{- if ne $.TargetVersionName "ga" }} + confidential_instance_type = %q +{{- end }} + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} + +{{ if ne $.TargetVersionName `ga` -}} +resource "google_compute_instance_template" "foobar2" { + name = "tf-test-instance2-template-%s" + machine_type = "n2d-standard-2" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + confidential_instance_config { + enable_confidential_compute = true + } + + scheduling { + on_host_maintenance = 
"TERMINATE" + } + +} +{{- end }} +{{- if eq $.TargetVersionName "ga" }} +`, suffix) +{{- else }} +`, suffix, confidentialInstanceType, suffix) +{{- end }} +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceTemplateConfidentialInstanceConfigNoEnable(suffix string, minCpuPlatform, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image2" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance_template" "foobar3" { + name = "tf-test-instance3-template-%s" + machine_type = "n2d-standard-2" + + disk { + source_image = data.google_compute_image.my_image2.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + min_cpu_platform = %q + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +resource "google_compute_instance_template" "foobar4" { + name = "tf-test-instance4-template-%s" + machine_type = "n2d-standard-2" + + disk { + source_image = data.google_compute_image.my_image2.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + min_cpu_platform = %q + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) +} +{{- end }} + +func testAccComputeInstanceTemplateAdvancedMachineFeatures(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "n2-standard-2" // Nested Virt isn't supported on E2 and N2Ds 
https://cloud.google.com/compute/docs/instances/nested-virtualization/overview#restrictions and https://cloud.google.com/compute/docs/instances/disabling-smt#limitations + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + advanced_machine_features { + threads_per_core = 1 + enable_nested_virtualization = true + visible_core_count = 1 + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, suffix) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceTemplate_enableDisplay(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + enable_display = true +} +`, suffix) +} + +{{ end }} +func testAccComputeInstanceTemplate_invalidDiskType(suffix string) string { + return fmt.Sprintf(` +# Use this datasource instead of hardcoded values when https://github.com/hashicorp/terraform/issues/22679 +# is resolved.
+# data "google_compute_image" "my_image" { +# family = "centos-7" +# project = "centos-cloud" +# } + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + disk { + source_image = "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20210217" + auto_delete = true + boot = true + } + disk { + auto_delete = true + disk_size_gb = 375 + type = "SCRATCH" + disk_type = "local-ssd" + } + disk { + source_image = "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20210217" + auto_delete = true + type = "SCRATCH" + } + network_interface { + network = "default" + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_imageResourceTest(diskName string, imageName string, imageDescription string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "my_disk" { + name = "%s" + zone = "us-central1-a" + image = data.google_compute_image.my_image.self_link +} + +resource "google_compute_image" "diskimage" { + name = "%s" + description = "%s" + source_disk = google_compute_disk.my_disk.self_link +} + +resource "google_compute_instance_template" "foobar" { + name_prefix = "tf-test-instance-template-" + machine_type = "e2-medium" + disk { + source_image = google_compute_image.diskimage.self_link + } + network_interface { + network = "default" + access_config {} + } +} +`, diskName, imageName, imageDescription) +} + +func testAccComputeInstanceTemplate_diskResourcePolicies(suffix string, policyName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + disk { + source_image = 
data.google_compute_image.my_image.self_link + resource_policies = [google_compute_resource_policy.foo.id] + } + network_interface { + network = "default" + } + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + labels = { + my_label = "foobar" + } +} + +resource "google_compute_resource_policy" "foo" { + name = "%s" + region = "us-central1" + snapshot_schedule_policy { + schedule { + daily_schedule { + days_in_cycle = 1 + start_time = "04:00" + } + } + } +} +`, suffix, policyName) +} + +func testAccComputeInstanceTemplate_nictype(image, instance, nictype string) string { + return fmt.Sprintf(` +resource "google_compute_image" "example" { + name = "%s" + raw_disk { + source = "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" + } + + guest_os_features { + type = "SECURE_BOOT" + } + + guest_os_features { + type = "MULTI_IP_SUBNET" + } + + guest_os_features { + type = "GVNIC" + } +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = google_compute_image.example.name + auto_delete = true + boot = true + } + + network_interface { + network = "default" + nic_type = "%s" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, image, instance, nictype) +} + +func testAccComputeInstanceTemplate_queueCount(instanceTemplateName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + network_interface { + network = "default" + access_config {} + 
queue_count = 2 + } + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } +} +`, instanceTemplateName) +} + +func testAccComputeInstanceTemplate_managedEnvoy(suffix string) string { + return fmt.Sprintf(` +data "google_compute_default_service_account" "default" { +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + gce-software-declaration = <<-EOF + { + "softwareRecipes": [{ + "name": "install-gce-service-proxy-agent", + "desired_state": "INSTALLED", + "installSteps": [{ + "scriptRun": { + "script": "#! 
/bin/bash\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\nsudo gsutil cp gs://gce-service-proxy-"$ZONE"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz "$SERVICE_PROXY_AGENT_DIRECTORY" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz "$SERVICE_PROXY_AGENT_DIRECTORY"\nsudo tar -xzf "$SERVICE_PROXY_AGENT_DIRECTORY"/service-proxy-agent-0.2.tgz -C "$SERVICE_PROXY_AGENT_DIRECTORY"\n"$SERVICE_PROXY_AGENT_DIRECTORY"/service-proxy-agent/service-proxy-agent-bootstrap.sh" + } + }] + }] + } + EOF + gce-service-proxy = <<-EOF + { + "api-version": "0.2", + "proxy-spec": { + "proxy-port": 15001, + "network": "my-network", + "tracing": "ON", + "access-log": "/var/log/envoy/access.log" + }, + "service": { + "serving-ports": [80, 81] + }, + "labels": { + "app_name": "bookserver_app", + "app_version": "STABLE" + } + } + EOF + enable-guest-attributes = "true" + enable-osconfig = "true" + + } + + service_account { + email = data.google_compute_default_service_account.default.email + scopes = ["cloud-platform"] + } + + labels = { + gce-service-proxy = "on" + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_spot(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = true + automatic_restart = false + provisioning_model = "SPOT" + instance_termination_action = "STOP" + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = 
["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} + +func testAccComputeInstanceTemplate_spot_maxRunDuration(suffix string, instanceTerminationAction string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = true + automatic_restart = false + provisioning_model = "SPOT" + instance_termination_action = "%s" +{{- if ne $.TargetVersionName "ga" }} + max_run_duration { + nanos = 123 + seconds = 60 + } +{{- end }} + + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix, instanceTerminationAction) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + nanos = 123 + seconds = 600 + } + on_instance_stop_action { + discard_local_ssd = true + } + + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + 
} +} +`, suffix) +} +{{- end }} + +func testAccComputeInstanceTemplate_localSsdRecoveryTimeout(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + automatic_restart = false + local_ssd_recovery_timeout { + nanos = 0 + seconds = 3600 + } + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceTemplate_partnerMetadata(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} +{{- end }} + +func testAccComputeInstanceTemplate_sourceSnapshotEncryptionKey(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_kms_key_ring" "ring" { + name = "%{kms_ring_name}" + location = "us-central1" +} + +data "google_kms_crypto_key" "key" { + name = "%{kms_key_name}" + key_ring = 
data.google_kms_key_ring.ring.id +} + +resource "google_service_account" "test" { + account_id = "tf-test-sa-%{random_suffix}" + display_name = "KMS Ops Account" +} + +resource "google_kms_crypto_key_iam_member" "crypto_key" { + crypto_key_id = data.google_kms_crypto_key.key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:${google_service_account.test.email}" +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "persistent" { + name = "tf-test-debian-disk-%{random_suffix}" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "snapshot" { + name = "tf-test-my-snapshot-%{random_suffix}" + source_disk = google_compute_disk.persistent.id + zone = "us-central1-a" + snapshot_encryption_key { + kms_key_self_link = data.google_kms_crypto_key.key.id + kms_key_service_account = google_service_account.test.email + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key + ] +} + +resource "google_compute_instance_template" "template" { + name = "tf-test-instance-template-%{random_suffix}" + machine_type = "e2-medium" + + disk { + source_snapshot = google_compute_snapshot.snapshot.self_link + source_snapshot_encryption_key { + kms_key_self_link = data.google_kms_crypto_key.key.id + kms_key_service_account = google_service_account.test.email + } + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key + ] +} +`, context) +} + +func testAccComputeInstanceTemplate_sourceImageEncryptionKey(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_kms_key_ring" "ring" { + name = "%{kms_ring_name}" + location = "us-central1" +} + +data "google_kms_crypto_key" "key" { + name = "%{kms_key_name}" + key_ring = data.google_kms_key_ring.ring.id +} + 
+resource "google_service_account" "test" { + account_id = "tf-test-sa-%{random_suffix}" + display_name = "KMS Ops Account" +} + +resource "google_kms_crypto_key_iam_member" "crypto_key" { + crypto_key_id = data.google_kms_crypto_key.key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:${google_service_account.test.email}" +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_image" "image" { + name = "debian-image" + source_image = data.google_compute_image.debian.self_link + image_encryption_key { + kms_key_self_link = data.google_kms_crypto_key.key.id + kms_key_service_account = google_service_account.test.email + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key + ] +} + +resource "google_compute_instance_template" "template" { + name = "tf-test-instance-template-%{random_suffix}" + machine_type = "e2-medium" + + disk { + source_image = google_compute_image.image.self_link + source_image_encryption_key { + kms_key_self_link = data.google_kms_crypto_key.key.id + kms_key_service_account = google_service_account.test.email + } + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key + ] +} +`, context) +} + +func testAccComputeInstanceTemplate_resourceManagerTags(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_tags_tag_key" "key" { + parent = "projects/%{project}" + short_name = "foobarbaz%{random_suffix}" + description = "For foo/bar resources." +} + +resource "google_tags_tag_value" "value" { + parent = "tagKeys/${google_tags_tag_key.key.name}" + short_name = "foo%{random_suffix}" + description = "For foo resources." 
+} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "%{instance_name}" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + } + } + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + } + + network_interface { + network = "default" + } +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceTemplate_network_attachment(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + + +resource "google_compute_instance_template" "foobar" { + name = "tf-test-instance-template-%{suffix}" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + network = "default" + } + + network_interface { + network_attachment = "%{network_attachment}" + } + + metadata = { + foo = "bar" + } +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl new file mode 100644 index 000000000000..3a9d5d2bc89b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl @@ -0,0 +1,9534 @@ +package compute_test + +import ( + {{- if ne $.TargetVersionName "ga" }} + "encoding/json" + {{- end }} + "fmt" + {{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" + {{- end }} + "reflect" + "regexp" + "sort" + 
"strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestMinCpuPlatformDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "state: empty, conf: AUTOMATIC": { + Old: "", + New: "AUTOMATIC", + ExpectDiffSuppress: true, + }, + "state: empty, conf: automatic": { + Old: "", + New: "automatic", + ExpectDiffSuppress: true, + }, + "state: empty, conf: AuToMaTiC": { + Old: "", + New: "AuToMaTiC", + ExpectDiffSuppress: true, + }, + "state: empty, conf: Intel Haswell": { + Old: "", + New: "Intel Haswell", + ExpectDiffSuppress: false, + }, + // This case should never happen due to the field being + // Optional + Computed; however, including for completeness. + "state: Intel Haswell, conf: empty": { + Old: "Intel Haswell", + New: "", + ExpectDiffSuppress: false, + }, + // These cases should never happen given current API behavior; testing + // in case API behavior changes in the future. 
+ "state: AUTOMATIC, conf: Intel Haswell": { + Old: "AUTOMATIC", + New: "Intel Haswell", + ExpectDiffSuppress: false, + }, + "state: Intel Haswell, conf: AUTOMATIC": { + Old: "Intel Haswell", + New: "AUTOMATIC", + ExpectDiffSuppress: false, + }, + "state: AUTOMATIC, conf: empty": { + Old: "AUTOMATIC", + New: "", + ExpectDiffSuppress: true, + }, + "state: automatic, conf: empty": { + Old: "automatic", + New: "", + ExpectDiffSuppress: true, + }, + "state: AuToMaTiC, conf: empty": { + Old: "AuToMaTiC", + New: "", + ExpectDiffSuppress: true, + }, + } + + for tn, tc := range cases { + if tpgcompute.ComputeInstanceMinCpuPlatformEmptyOrAutomaticDiffSuppress("min_cpu_platform", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Errorf("bad: %s, %q => %q expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + +func computeInstanceImportStep(zone, instanceName string, additionalImportIgnores []string) resource.TestStep { + // metadata is only read into state if set in the config + // importing doesn't know whether metadata.startup_script vs metadata_startup_script is set in the config, + // it always takes metadata.startup-script + ignores := []string{"metadata.%", "metadata.startup-script", "metadata_startup_script", "boot_disk.0.initialize_params.0.resource_manager_tags.%", "params.0.resource_manager_tags.%"} + + return resource.TestStep{ + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateId: fmt.Sprintf("%s/%s/%s", envvar.GetTestProjectFromEnv(), zone, instanceName), + ImportStateVerify: true, + ImportStateVerifyIgnore: append(ignores, additionalImportIgnores...), + } +} + +func TestAccComputeInstance_basic1(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + 
CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasInstanceId(&instance, "google_compute_instance.foobar"), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceLabel(&instance, "my_key", "my_value"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceMetadata(&instance, "baz", "qux"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "current_status", "RUNNING"), + + // by default, DeletionProtection is implicitly false. This should be false on any + // instance resource without an explicit deletion_protection = true declaration. + // Other tests check explicit true/false configs: TestAccComputeInstance_deletionProtectionExplicit[True | False] + testAccCheckComputeInstanceHasConfiguredDeletionProtection(&instance, false), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"metadata.baz", "metadata.foo", "desired_status", "current_status", "labels", "terraform_labels"}), + }, + }) +} + +func TestAccComputeInstance_basic2(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, 
"foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic3(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic3(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic4(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic4(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_basic5(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic5(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTag(&instance, "foo"), + testAccCheckComputeInstanceMetadata(&instance, "foo", "bar"), + testAccCheckComputeInstanceDisk(&instance, instanceName, true, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_resourceManagerTags(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "instance_name": instanceName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_resourceManagerTags(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance)), + }, + { + Config: testAccComputeInstance_resourceManagerTagsUpdate(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance)), + }, + }, + }) +} + +func TestAccComputeInstance_machineTypeUrl(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var machineTypeUrl = "zones/us-central1-a/machineTypes/e2-medium" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_machineType(instanceName, machineTypeUrl), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "description", "old_desc"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_descriptionUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_description(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "description", "old_desc"), + ), + }, + { + Config: testAccComputeInstance_descriptionUpdate(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + resource.TestCheckResourceAttr("google_compute_instance.foobar", "description", "new_desc"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_IP(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var ipName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_ip(ipName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceAccessConfigHasNatIP(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_IPv6(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var ipName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var ptrName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_ipv6(ipName, instanceName, ptrName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceIpv6AccessConfigHasExternalIPv6(&instance), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstance_ipv6ExternalReservation(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_ipv6ExternalReservation(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + 
computeInstanceImportStep("us-west2-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_internalIPv6(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var ipName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_internalIpv6(ipName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceIpv6AccessConfigHasInternalIPv6(&instance), + ), + }, + computeInstanceImportStep("us-west2-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_PTRRecord(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var ptrName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var ipName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_PTRRecord(ptrName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceAccessConfigHasPTR(&instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"metadata.baz", "metadata.foo"}), + { + Config: testAccComputeInstance_ip(ipName, instanceName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceAccessConfigHasNatIP(&instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"metadata.baz", "metadata.foo"}), + }, + }) +} + +func TestAccComputeInstance_networkTier(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_networkTier(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceAccessConfigHasNatIP(&instance), + testAccCheckComputeInstanceHasAssignedNatIP, + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_diskEncryption(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bootEncryptionKey := "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + bootEncryptionKeyHash := "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=" + diskNameToEncryptionKey := map[string]*compute.CustomerEncryptionKey{ + fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)): { + RawKey: "Ym9vdDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI=", + Sha256: "awJ7p57H+uVZ9axhJjl1D3lfC2MgA/wnt/z88Ltfvss=", + }, + fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)): { + RawKey: "c2Vjb25kNzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI=", + Sha256: "7TpIwUdtCOJpq2m+3nt8GFgppu6a2Xsj1t0Gexk13Yc=", + }, + fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)): { + RawKey: "dGhpcmQ2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI=", + Sha256: 
"b3pvaS7BjDbCKeLPPTx7yXBuQtxyMobCHN1QJR43xeM=", + }, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_disks_encryption(bootEncryptionKey, diskNameToEncryptionKey, instanceName, acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDiskEncryptionKey("google_compute_instance.foobar", &instance, bootEncryptionKeyHash, diskNameToEncryptionKey), + ), + }, + }, + }) +} + +func TestAccComputeInstance_diskEncryptionRestart(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bootEncryptionKey := "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + bootEncryptionKeyHash := "esTuF7d4eatX4cnc4JsiEiaI+Rff78JgPhA/v1zxX9E=" + diskNameToEncryptionKey := map[string]*compute.CustomerEncryptionKey{ + fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)): { + RawKey: "Ym9vdDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI=", + Sha256: "awJ7p57H+uVZ9axhJjl1D3lfC2MgA/wnt/z88Ltfvss=", + }, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_disks_encryption_restart(bootEncryptionKey, diskNameToEncryptionKey, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDiskEncryptionKey("google_compute_instance.foobar", &instance, bootEncryptionKeyHash, diskNameToEncryptionKey), + ), + }, + { + Config: 
testAccComputeInstance_disks_encryption_restartUpdate(bootEncryptionKey, diskNameToEncryptionKey, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDiskEncryptionKey("google_compute_instance.foobar", &instance, bootEncryptionKeyHash, diskNameToEncryptionKey), + ), + }, + }, + }) +} + +func TestAccComputeInstance_kmsDiskEncryption(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + kms := acctest.BootstrapKMSKey(t) + + bootKmsKeyName := kms.CryptoKey.Name + diskNameToEncryptionKey := map[string]*compute.CustomerEncryptionKey{ + fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)): { + KmsKeyName: kms.CryptoKey.Name, + }, + fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)): { + KmsKeyName: kms.CryptoKey.Name, + }, + fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)): { + KmsKeyName: kms.CryptoKey.Name, + }, + } + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_disks_kms(bootKmsKeyName, diskNameToEncryptionKey, instanceName, acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDiskKmsEncryptionKey("google_compute_instance.foobar", &instance, bootKmsKeyName, diskNameToEncryptionKey), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func 
TestAccComputeInstance_resourcePolicyUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var scheduleName1 = fmt.Sprintf("tf-tests-%s", acctest.RandString(t, 10)) + var scheduleName2 = fmt.Sprintf("tf-tests-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_instanceSchedule(instanceName, scheduleName1), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeResourcePolicy(&instance, "", 0), + ), + }, + // check adding + { + Config: testAccComputeInstance_addResourcePolicy(instanceName, scheduleName1), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeResourcePolicy(&instance, scheduleName1, 1), + ), + }, + // check updating + { + Config: testAccComputeInstance_updateResourcePolicy(instanceName, scheduleName1, scheduleName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeResourcePolicy(&instance, scheduleName2, 1), + ), + }, + // check removing + { + Config: testAccComputeInstance_removeResourcePolicy(instanceName, scheduleName1, scheduleName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeResourcePolicy(&instance, "", 0), + ), + }, + }, + }) +} + +func TestAccComputeInstance_attachedDisk(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", 
acctest.RandString(t, 10)) + var diskName = fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_attachedDisk(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, diskName, false, false), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_attachedDisk_sourceUrl(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskName = fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_attachedDisk_sourceUrl(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, diskName, false, false), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_attachedDisk_modeRo(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskName = fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_attachedDisk_modeRo(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, diskName, false, false), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_attachedDiskUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskName = fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)) + var diskName2 = fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_attachedDisk(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, diskName, false, false), + ), + }, + // check attaching + { + Config: testAccComputeInstance_addAttachedDisk(diskName, diskName2, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, diskName, false, false), + testAccCheckComputeInstanceDisk(&instance, diskName2, false, false), + ), + }, + // check detaching + { + Config: testAccComputeInstance_detachDisk(diskName, diskName2, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + 
testAccCheckComputeInstanceDisk(&instance, diskName, false, false), + ), + }, + // check updating + { + Config: testAccComputeInstance_updateAttachedDiskEncryptionKey(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceDisk(&instance, diskName, false, false), + ), + }, + }, + }) +} + +func TestAccComputeInstance_bootDisk_source(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_bootDisk_source(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceBootDisk(&instance, diskName), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_bootDisk_sourceUrl(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_bootDisk_sourceUrl(diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), 
+ testAccCheckComputeInstanceBootDisk(&instance, diskName), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_bootDisk_type(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskType = "pd-ssd" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_bootDisk_type(instanceName, diskType), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceBootDiskType(t, instanceName, diskType), + ), + }, + }, + }) +} + +func TestAccComputeInstance_bootDisk_mode(t *testing.T) { + t.Parallel() + + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskMode = "READ_WRITE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_bootDisk_mode(instanceName, diskMode), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_with375GbScratchDisk(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeInstance_with375GbScratchDisk(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceScratchDisk(&instance, []map[string]string{ + { + "interface": "NVME", + }, + { + "interface": "SCSI", + }, + { + "interface": "NVME", + "deviceName": "nvme-local-ssd", + }, + { + "interface": "SCSI", + "deviceName": "scsi-local-ssd", + }, + }), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_with18TbScratchDisk(t *testing.T) { + // Skip this test until the quota for the GitHub presubmit GCP project is increased + // to handle the size of the resource this test spins up. + t.Skip() + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_with18TbScratchDisk(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceScratchDisk(&instance, []map[string]string{ + { + "interface": "NVME", + }, + { + "interface": "NVME", + }, + { + "interface": "NVME", + }, + { + "interface": "NVME", + }, + { + "interface": "NVME", + }, + { + "interface": "NVME", + }, + }), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_forceNewAndChangeMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + { + Config: testAccComputeInstance_forceNewAndChangeMetadata(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "qux", "true"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + { + Config: testAccComputeInstance_update(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "bar", "baz"), + testAccCheckComputeInstanceLabel(&instance, "only_me", "nothing_else"), + testAccCheckComputeInstanceTag(&instance, "baz"), + testAccCheckComputeInstanceAccessConfig(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_stopInstanceToUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + // Set fields that require stopping the instance + { + Config: testAccComputeInstance_stopInstanceToUpdate(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + // Check that updating them works + { + Config: testAccComputeInstance_stopInstanceToUpdate2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + // Check that removing them works + { + Config: testAccComputeInstance_stopInstanceToUpdate3(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_serviceAccount(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_serviceAccount(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceServiceAccount(&instance, + 
"https://www.googleapis.com/auth/compute.readonly"), + testAccCheckComputeInstanceServiceAccount(&instance, + "https://www.googleapis.com/auth/devstorage.read_only"), + testAccCheckComputeInstanceServiceAccount(&instance, + "https://www.googleapis.com/auth/userinfo.email"), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_serviceAccount_update0(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceScopes(&instance, 0), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_serviceAccount_update01(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceScopes(&instance, 0), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_serviceAccount_update02(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceScopes(&instance, 0), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_serviceAccount_update3(instanceName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceScopes(&instance, 3), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_serviceAccount_update01(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceScopes(&instance, 0), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_serviceAccount_update4(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceScopes(&instance, 1), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_serviceAccount_update01(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceScopes(&instance, 0), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_scheduling(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = 
fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_scheduling(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + { + Config: testAccComputeInstance_schedulingUpdated(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_advancedMachineFeatures(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_advancedMachineFeatures(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_advancedMachineFeaturesUpdated(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func 
TestAccComputeInstance_soleTenantNodeAffinities(t *testing.T) { + t.Parallel() + + var instanceName = fmt.Sprintf("tf-test-soletenant-%s", acctest.RandString(t, 10)) + var templateName = fmt.Sprintf("tf-test-nodetmpl-%s", acctest.RandString(t, 10)) + var groupName = fmt.Sprintf("tf-test-nodegroup-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_withoutNodeAffinities(instanceName, templateName, groupName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_soleTenantNodeAffinities(instanceName, templateName, groupName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_soleTenantNodeAffinitiesUpdated(instanceName, templateName, groupName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_soleTenantNodeAffinitiesReduced(instanceName, templateName, groupName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + + +func TestAccComputeInstance_reservationAffinities(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-resaffinity-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_reservationAffinity_nonSpecificReservationConfig(instanceName, 
"NO_RESERVATION"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasReservationAffinity(&instance, "NO_RESERVATION"), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + { + Config: testAccComputeInstance_reservationAffinity_nonSpecificReservationConfig(instanceName, "ANY_RESERVATION"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasReservationAffinity(&instance, "ANY_RESERVATION"), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + { + Config: testAccComputeInstance_reservationAffinity_specificReservationConfig(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasReservationAffinity(&instance, "SPECIFIC_RESERVATION", instanceName), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_subnet_auto(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_subnet_auto(acctest.RandString(t, 10), instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasSubnet(&instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_subnet_custom(t *testing.T) { + 
t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_subnet_custom(acctest.RandString(t, 10), instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasSubnet(&instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_subnet_xpn(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + org := envvar.GetTestOrgFromEnv(t) + billingId := envvar.GetTestBillingAccountFromEnv(t) + projectName := fmt.Sprintf("tf-test-xpn-%d", time.Now().Unix()) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_subnet_xpn(org, billingId, projectName, instanceName, acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExistsInProject( + t, "google_compute_instance.foobar", fmt.Sprintf("%s-service", projectName), + &instance), + testAccCheckComputeInstanceHasSubnet(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_networkIPAuto(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: 
func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_networkIPAuto(acctest.RandString(t, 10), instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAnyNetworkIP(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_network_ip_custom(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var ipAddress = "10.0.200.200" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_network_ip_custom(acctest.RandString(t, 10), instanceName, ipAddress), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkIP(&instance, ipAddress), + ), + }, + }, + }) +} + +func TestAccComputeInstance_private_image_family(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskName = fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)) + var familyName = fmt.Sprintf("tf-testf-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_private_image_family(diskName, familyName, instanceName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_networkPerformanceConfig(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskName = fmt.Sprintf("tf-testd-%s", acctest.RandString(t, 10)) + var imageName = fmt.Sprintf("tf-testf-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_networkPerformanceConfig(imageName, diskName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkPerformanceConfig(&instance, "DEFAULT"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_forceChangeMachineTypeManually(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceUpdateMachineType(t, "google_compute_instance.foobar"), + ), + ExpectNonEmptyPlan: true, + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"metadata.baz", "metadata.foo", "desired_status", "current_status", "labels", "terraform_labels"}), + }, + }) +} 
+ +func TestAccComputeInstance_multiNic(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetworkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_multiNic(instanceName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMultiNic(&instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_nictype_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nictype(instanceName, instanceName, "GVNIC"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + { + Config: testAccComputeInstance_nictype(instanceName, instanceName, "VIRTIO_NET"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_guestAccelerator(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", 
acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_guestAccelerator(instanceName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasGuestAccelerator(&instance, "nvidia-tesla-t4", 1), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"metadata.baz", "metadata.foo"}), + }, + }) + +} + +func TestAccComputeInstance_guestAcceleratorSkip(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_guestAccelerator(instanceName, 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceLacksGuestAccelerator(&instance), + ), + }, + }, + }) + +} + +func TestAccComputeInstance_minCpuPlatform(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_minCpuPlatform(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, 
"google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMinCpuPlatform(&instance, "Intel Haswell"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_minCpuPlatform_remove(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMinCpuPlatform(&instance, ""), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_deletionProtectionExplicitFalse(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic_deletionProtectionFalse(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasConfiguredDeletionProtection(&instance, false), + ), + }, + }, + }) +} + +func TestAccComputeInstance_deletionProtectionExplicitTrueAndUpdateFalse(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic_deletionProtectionTrue(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( 
+ t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasConfiguredDeletionProtection(&instance, true), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"metadata.foo"}), + // Update deletion_protection to false, otherwise the test harness can't delete the instance + { + Config: testAccComputeInstance_basic_deletionProtectionFalse(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasConfiguredDeletionProtection(&instance, false), + ), + }, + }, + }) +} + +func TestAccComputeInstance_primaryAliasIpRange(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_primaryAliasIpRange(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "", "/24"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_secondaryAliasIpRange(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + networkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccComputeInstance_secondaryAliasIpRange(networkName, subnetName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "inst-test-secondary", "172.16.0.0/24"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), + { + Config: testAccComputeInstance_secondaryAliasIpRangeUpdate(networkName, subnetName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAliasIpRange(&instance, "", "10.0.1.0/24"), + ), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_hostname(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_hostname(instanceName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_compute_instance.foobar", "hostname"), + testAccCheckComputeInstanceLacksShieldedVmConfig(&instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_shieldedVmConfig(t *testing.T) { + t.Parallel() + + var instance compute.Instance + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + 
Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_shieldedVmConfig(instanceName, true, true, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasShieldedVmConfig(&instance, true, true, true), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_shieldedVmConfig(instanceName, true, true, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasShieldedVmConfig(&instance, true, true, false), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstanceConfidentialInstanceConfigMain(t *testing.T) { + t.Parallel() + + var instance compute.Instance + {{- if ne $.TargetVersionName "ga" }} + var instance2 compute.Instance + {{- end }} + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceConfidentialInstanceConfigEnable(instanceName, "SEV"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, true, "SEV"), + {{- if ne $.TargetVersionName "ga" }} + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar2", &instance2), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, true, ""), + {{- end }} + ), + }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: 
testAccComputeInstanceConfidentialInstanceConfigNoEnable(instanceName, "AMD Milan", "SEV_SNP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar3", &instance), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, false, "SEV_SNP"), + testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar4", &instance2), + testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, false, "SEV_SNP"), + ), + }, + {{- end }} + }, + }) +} + +func TestAccComputeInstance_confidentialHyperDiskBootDisk(t *testing.T) { + t.Parallel() + kms := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us-central1", "tf-bootstrap-hyperdisk-key1") + + context_1 := map[string]interface{}{ + "instance_name": fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + "confidential_compute": true, + "key_ring": kms.KeyRing.Name, + "key_name": kms.CryptoKey.Name, + "zone": "us-central1-a", + "machine_type": "n2-standard-16", + + } + + context_2 := map[string]interface{}{ + "instance_name": context_1["instance_name"], + "confidential_compute": false, + "key_ring" : context_1["key_ring"], + "key_name": context_1["key_name"], + "zone": context_1["zone"], + "machine_type": "c3d-standard-16", + } + + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceConfidentialHyperDiskBootDisk(context_1), + }, + computeInstanceImportStep(context_1["zone"].(string), context_1["instance_name"].(string), []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstanceConfidentialHyperDiskBootDisk(context_2), + }, + computeInstanceImportStep(context_2["zone"].(string), context_2["instance_name"].(string), []string{"allow_stopping_for_update"}), + }, + }) +} + +func 
TestAccComputeInstance_hyperdiskBootDisk_provisioned_iops_throughput(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "instance_name": fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + "zone": "us-central1-a", + "provisioned_iops": 12000, + "provisioned_throughput": 200, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstanceHyperDiskBootDiskProvisionedIopsThroughput(context), + }, + computeInstanceImportStep(context["zone"].(string), context["instance_name"].(string), []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_enableDisplay(t *testing.T) { + t.Parallel() + + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_enableDisplay(instanceName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_enableDisplayUpdated(instanceName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_desiredStatusOnCreation(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + 
{ + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "TERMINATED", false), + ExpectError: regexp.MustCompile("When creating an instance, desired_status can only accept RUNNING value"), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "RUNNING", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_desiredStatusUpdateBasic(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "RUNNING", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_basic2(instanceName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "RUNNING", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_desiredStatusTerminatedUpdateFields(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_desiredStatusTerminatedUpdate(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMetadata( + &instance, "bar", "baz"), + testAccCheckComputeInstanceLabel(&instance, "only_me", "nothing_else"), + testAccCheckComputeInstanceTag(&instance, "baz"), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), 
+ ), + }, + }, + }) +} + +func TestAccComputeInstance_updateRunning_desiredStatusRunning_allowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "RUNNING", true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMachineType(&instance, "e2-standard-2"), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + }, + }) +} + +const errorAllowStoppingMsg = "please set allow_stopping_for_update" + +func TestAccComputeInstance_updateRunning_desiredStatusNotSet_notAllowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + 
Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "", false), + ExpectError: regexp.MustCompile(errorAllowStoppingMsg), + }, + }, + }) +} + +func TestAccComputeInstance_updateRunning_desiredStatusRunning_notAllowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "RUNNING", false), + ExpectError: regexp.MustCompile(errorAllowStoppingMsg), + }, + }, + }) +} + +func TestAccComputeInstance_updateRunning_desiredStatusTerminated_allowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, 
"e2-standard-2", "TERMINATED", true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMachineType(&instance, "e2-standard-2"), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_updateRunning_desiredStatusTerminated_notAllowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMachineType(&instance, "e2-standard-2"), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_updateTerminated_desiredStatusNotSet_allowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "", true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMachineType(&instance, "e2-standard-2"), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_updateTerminated_desiredStatusTerminated_allowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", 
&instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "TERMINATED", true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMachineType(&instance, "e2-standard-2"), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_updateTerminated_desiredStatusNotSet_notAllowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMachineType(&instance, "e2-standard-2"), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, 
+ }, + }) +} + +func TestAccComputeInstance_updateTerminated_desiredStatusTerminated_notAllowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMachineType(&instance, "e2-standard-2"), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_updateTerminated_desiredStatusRunning_allowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "RUNNING", true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMachineType(&instance, "e2-standard-2"), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_updateTerminated_desiredStatusRunning_notAllowStoppingForUpdate(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_basic2(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-medium", "TERMINATED", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, 
"google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate(instanceName, "e2-standard-2", "RUNNING", false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasMachineType(&instance, "e2-standard-2"), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + }, + }) +} + +func TestAccComputeInstance_resourcePolicyCollocate(t *testing.T) { + t.Parallel() + + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_resourcePolicyCollocate(instanceName, acctest.RandString(t, 10)), + }, + computeInstanceImportStep("us-east4-b", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_subnetworkUpdate(t *testing.T) { + t.Parallel() + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + suffix := fmt.Sprintf("%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_subnetworkUpdate(suffix, instanceName), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_subnetworkUpdateTwo(suffix, instanceName), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + { + 
Config: testAccComputeInstance_subnetworkUpdate(suffix, instanceName), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_queueCount(t *testing.T) { + t.Parallel() + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_queueCountSet(instanceName), + }, + computeInstanceImportStep("us-east1-d", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + + +func TestAccComputeInstance_spotVM(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_spotVM(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + + +func TestAccComputeInstance_spotVM_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_scheduling(instanceName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + { + Config: testAccComputeInstance_spotVM(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstance_maxRunDuration_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-mrd-%s", acctest.RandString(t, 10)) + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstance_standardVM_maxRunDurationUpdated + expectedMaxRunDuration.Nanos = 456 + expectedMaxRunDuration.Seconds = 60 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_standardVM_maxRunDuration(instanceName, "STOP"), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_standardVM_maxRunDurationUpdated(instanceName, "STOP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTerminationAction(&instance, "STOP"), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func TestAccComputeInstance_standardVM_maxRunDuration_stopTerminationAction(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var 
instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstance_standardVM_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "STOP" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_standardVM_maxRunDuration(instanceName, instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTerminationAction(&instance, instanceTerminationAction), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_localSsdVM_maxRunDuration_stopTerminationAction(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstance_localSsdVM_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 180 + var instanceTerminationAction = "STOP" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_localSsdVM_maxRunDuration(instanceName, instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + 
testAccCheckComputeInstanceTerminationAction(&instance, instanceTerminationAction), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_spotVM_maxRunDuration_deleteTerminationAction(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstance_spotVM_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "DELETE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_spotVM_maxRunDuration(instanceName, instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTerminationAction(&instance, instanceTerminationAction), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_standardVM_maxRunDuration_deleteTerminationAction(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeInstance_standardVM_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + var instanceTerminationAction = "DELETE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_standardVM_maxRunDuration(instanceName, instanceTerminationAction), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceTerminationAction(&instance, instanceTerminationAction), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_spotVM_maxRunDuration_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + // Define in testAccComputeInstance_spotVM_maxRunDuration + var expectedMaxRunDuration = compute.Duration{} + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_scheduling(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + { + Config: testAccComputeInstance_spotVM_maxRunDuration(instanceName, "DELETE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMaxRunDuration(&instance, expectedMaxRunDuration), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} +{{- end }} + +func 
TestAccComputeInstance_localSsdRecoveryTimeout(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var expectedLocalSsdRecoveryTimeout = compute.Duration{} + // Define in testAccComputeInstance_localSsdRecoveryTimeout + expectedLocalSsdRecoveryTimeout.Nanos = 0 + expectedLocalSsdRecoveryTimeout.Seconds = 3600 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_localSsdRecoveryTimeout(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceLocalSsdRecoveryTimeout(&instance, expectedLocalSsdRecoveryTimeout), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_localSsdRecoveryTimeout_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + // Define in testAccComputeInstance_localSsdRecoveryTimeout + var expectedLocalSsdRecoveryTimeout = compute.Duration{} + expectedLocalSsdRecoveryTimeout.Nanos = 0 + expectedLocalSsdRecoveryTimeout.Seconds = 3600 + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_scheduling(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + { + Config: 
testAccComputeInstance_localSsdRecoveryTimeout(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceLocalSsdRecoveryTimeout(&instance, expectedLocalSsdRecoveryTimeout), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstance_partnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_partnerMetadata(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{fmt.Sprintf("partner_metadata.%s", namespace)}), + }, + }) +} + +func TestAccComputeInstance_partnerMetadata_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 
2,"key3": {"key31":"value31"}}`), + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_partnerMetadata_empty(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + + { + Config: testAccComputeInstance_partnerMetadata(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{fmt.Sprintf("partner_metadata.%s", namespace)}), + }, + }) +} + +func TestAccComputeInstance_partnerMetadata_deletePartnerMetadata(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_partnerMetadata(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstancePartnerMetadata(&instance, expectedPartnerMetadata), + ), + }, + + { + Config: 
testAccComputeInstance_partnerMetadata_empty(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{fmt.Sprintf("partner_metadata.%s", namespace)}), + }, + }) +} +{{- end }} + +func TestAccComputeInstance_metadataStartupScript_update(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_metadataStartupScript(instanceName, "e2-medium", "abc"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + { + Config: testAccComputeInstance_metadataStartupScript(instanceName, "e2-standard-4", "xyz"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_regionBootDisk(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var diskName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_regionBootDisk(instanceName, diskName, suffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, 
"google_compute_instance.regional_vm_instance", &instance), + testAccCheckComputeInstanceBootDisk(&instance, diskName), + ), + + }, + }, + }) +} + +func TestAccComputeInstance_creationOnlyAttributionLabel(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_attributionLabelCreate(instanceName, "true", "CREATION_ONLY"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceLabel(&instance, "user_label", "foo"), + testAccCheckComputeInstanceAttributionLabel(&instance, true), + ), + }, + { + Config: testAccComputeInstance_attributionLabelUpdate(instanceName, "true", "CREATION_ONLY"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceLabel(&instance, "user_label", "bar"), + testAccCheckComputeInstanceAttributionLabel(&instance, true), + ), + }, + }, + }) +} + +func TestAccComputeInstance_creationOnlyAttributionLabelConfiguredOnUpdate(t *testing.T) { + // VCR tests cache provider configuration between steps, this test changes provider configuration and fails under VCR. 
+ acctest.SkipIfVcr(t) + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_attributionLabelCreate(instanceName, "false", "CREATION_ONLY"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceLabel(&instance, "user_label", "foo"), + testAccCheckComputeInstanceAttributionLabel(&instance, false), + ), + }, + { + Config: testAccComputeInstance_attributionLabelUpdate(instanceName, "true", "CREATION_ONLY"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceLabel(&instance, "user_label", "bar"), + testAccCheckComputeInstanceAttributionLabel(&instance, false), + ), + }, + }, + }) +} + +func TestAccComputeInstance_proactiveAttributionLabel(t *testing.T) { + // VCR tests cache provider configuration between steps, this test changes provider configuration and fails under VCR. 
+ acctest.SkipIfVcr(t) + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_attributionLabelCreate(instanceName, "false", "PROACTIVE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceLabel(&instance, "user_label", "foo"), + testAccCheckComputeInstanceAttributionLabel(&instance, false), + ), + }, + { + Config: testAccComputeInstance_attributionLabelUpdate(instanceName, "true", "PROACTIVE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceLabel(&instance, "user_label", "bar"), + testAccCheckComputeInstanceAttributionLabel(&instance, true), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +// The tests related to security_policy use network_edge_security_service resource +// which can only exist one per region. Because of that, all the following tests must run serially. 
+func TestAccComputeInstanceNetworkInterfaceWithSecurityPolicy(t *testing.T) { + testCases := map[string]func(t *testing.T){ + "two_access_config": testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigs, + "two_nics_access_config_with_empty_nil_security_policy": testAccComputeInstance_nic_securityPolicyCreateWithEmptyAndNullSecurityPolicies, + "two_nics_two_access_configs_update_one_policy": testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateOnlyOnePolicy, + "two_access_config_update_policy_with_stopped_machine": testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsUpdateSecurityPoliciesWithStoppedMachine, + "two_nics_two_access_configs_update_remove_access_config": testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateRemoveAccessConfig, + "two_nics_two_access_configs_update_two_policies": testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateSwapPolicies, + "access_config_update_access_config": testAccComputeInstance_nic_securityPolicyCreateWithAccessConfigUpdateAccessConfig, + "with_no_access_config": testAccComputeInstance_nic_securityPolicyCreateWithoutAccessConfig, + } + + for name, tc := range testCases { + // shadow the tc variable into scope so that when + // the loop continues, if t.Run hasn't executed tc(t) + // yet, we don't have a race condition + // see https://github.com/golang/go/wiki/CommonMistakes#using-goroutines-on-loop-iterator-variables + tc := tc + t.Run(name, func(t *testing.T) { + tc(t) + }) + } +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigs(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithOneNicAndTwoAccessConfigs(suffix, policyName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithEmptyAndNullSecurityPolicies(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndAccessConfigsWithEmptyAndNullSecurityPolicies(suffix, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasNoSecurityPolicy(&instance), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateOnlyOnePolicy(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var policyName2 = fmt.Sprintf("tf-test-policy2-%s", 
acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance2.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName2), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsUpdateSecurityPoliciesWithStoppedMachine(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsWithTwoSecurityPoliciesAndStatus(suffix, policyName, instanceName, "\"\"", "RUNNING"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "RUNNING"), + ), + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsWithTwoSecurityPoliciesAndStatus(suffix, policyName, instanceName, "\"\"", "TERMINATED"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasStatus(&instance, "TERMINATED"), + ), + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsWithTwoSecurityPoliciesAndStatus(suffix, policyName, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "TERMINATED"), + ExpectError: regexp.MustCompile("Error to update security policy because the current instance status must be \"RUNNING\". 
The security policy or some access config may have changed which requires the security policy to be re-applied"), + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateRemoveAccessConfig(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var policyName2 = fmt.Sprintf("tf-test-policy2-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPoliciesRemoveAccessConfig(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + 
ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateSwapPolicies(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var policyName2 = fmt.Sprintf("tf-test-policy2-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "\"\"", "\"\""), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance.self_link", "google_compute_region_security_policy.policyforinstance2.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName2), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "google_compute_region_security_policy.policyforinstance2.self_link", "google_compute_region_security_policy.policyforinstance.self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName2), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policyName, policyName2, instanceName, "\"\"", "\"\""), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasNoSecurityPolicy(&instance), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithAccessConfigUpdateAccessConfig(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithOneNicAndTwoAccessConfigs(suffix, policyName, instanceName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsUpdateAccessConfig(suffix, policyName, instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(&instance, policyName), + ), + }, + { + ResourceName: "google_compute_instance.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsRemoveAccessConfig(suffix, policyName, instanceName), + ExpectError: regexp.MustCompile("Error setting security policy to the instance since at least one access config must exist"), + }, + }, + }) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithoutAccessConfig(t *testing.T) { + var instanceName = fmt.Sprintf("tf-test-instance-%s", acctest.RandString(t, 10)) + var policyName = fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + var suffix = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsRemoveAccessConfig(suffix, policyName, instanceName), + ExpectError: regexp.MustCompile("Error setting security policy to the instance since at least one access config must exist"), + }, + }, + }) +} + +{{ end }} + +func testAccCheckComputeInstanceUpdateMachineType(t *testing.T, n string) resource.TestCheckFunc { + return func(s 
*terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + op, err := config.NewComputeClient(config.UserAgent).Instances.Stop(config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + if err != nil { + return fmt.Errorf("Could not stop instance: %s", err) + } + err = tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "Waiting on stop", config.UserAgent, 20*time.Minute) + if err != nil { + return fmt.Errorf("Could not stop instance: %s", err) + } + + machineType := compute.InstancesSetMachineTypeRequest{ + MachineType: "zones/us-central1-a/machineTypes/f1-micro", + } + + op, err = config.NewComputeClient(config.UserAgent).Instances.SetMachineType( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"], &machineType).Do() + if err != nil { + return fmt.Errorf("Could not change machine type: %s", err) + } + err = tpgcompute.ComputeOperationWaitTime(config, op, config.Project, "Waiting machine type change", config.UserAgent, 20*time.Minute) + if err != nil { + return fmt.Errorf("Could not change machine type: %s", err) + } + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeInstance_NetworkAttachment(t *testing.T) { + t.Parallel() + suffix := fmt.Sprintf("%s", acctest.RandString(t, 10)) + var instance compute.Instance + + testNetworkAttachmentName := fmt.Sprintf("tf-test-network-attachment-%s", suffix) + + // Need to have the full network attachment name in the format project/{project_id}/regions/{region_id}/networkAttachments/{testNetworkAttachmentName} + fullFormNetworkAttachmentName := fmt.Sprintf("projects/%s/regions/%s/networkAttachments/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), testNetworkAttachmentName) + + context := map[string]interface{}{ + "suffix": 
(acctest.RandString(t, 10)), + "network_attachment_name": testNetworkAttachmentName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_networkAttachment(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasNetworkAttachment(&instance, fmt.Sprintf("https://www.googleapis.com/compute/beta/%s", fullFormNetworkAttachmentName)), + ), + }, + }, + }) +} + +func TestAccComputeInstance_NetworkAttachmentUpdate(t *testing.T) { + t.Parallel() + suffix := acctest.RandString(t, 10) + envRegion := envvar.GetTestRegionFromEnv() + instanceName := fmt.Sprintf("tf-test-compute-instance-%s", suffix) + + networkAttachmentSelflink1 := "google_compute_network_attachment.test_network_attachment_1.self_link" + networkAttachmentSelflink2 := "google_compute_network_attachment.test_network_attachment_2.self_link" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_networkAttachmentUpdate(networkAttachmentSelflink1, envRegion, suffix), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_networkAttachmentUpdate(networkAttachmentSelflink2, envRegion, suffix), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_networkAttachmentUpdate(networkAttachmentSelflink1, envRegion, suffix), + }, + computeInstanceImportStep("us-central1-a", 
instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} +{{- end }} + +func TestAccComputeInstance_NicStackTypeUpdate(t *testing.T) { + t.Parallel() + suffix := acctest.RandString(t, 10) + envRegion := envvar.GetTestRegionFromEnv() + instanceName := fmt.Sprintf("tf-test-compute-instance-%s", suffix) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_nicStackTypeUpdate(suffix, envRegion, "IPV4_ONLY", instanceName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_nicStackTypeUpdate(suffix, envRegion, "IPV4_IPV6", instanceName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + { + Config: testAccComputeInstance_nicStackTypeUpdate(suffix, envRegion, "IPV4_ONLY", instanceName), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{"allow_stopping_for_update"}), + }, + }) +} + +func testAccCheckComputeInstanceDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance" { + continue + } + + _, err := config.NewComputeClient(config.UserAgent).Instances.Get( + config.Project, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + if err == nil { + return fmt.Errorf("Instance still exists") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceExists(t *testing.T, n string, instance interface{}) resource.TestCheckFunc { + if instance == nil { + panic("Attempted to check existence of Instance that was nil.") + } + + return 
testAccCheckComputeInstanceExistsInProject(t, n, envvar.GetTestProjectFromEnv(), instance.(*compute.Instance)) +} + +func testAccCheckComputeInstanceExistsInProject(t *testing.T, n, p string, instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + {{- if eq $.TargetVersionName "ga" }} + found, err := config.NewComputeClient(config.UserAgent).Instances.Get( + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).Do() + {{- else }} + found, err := config.NewComputeClient(config.UserAgent).Instances.Get( + p, rs.Primary.Attributes["zone"], rs.Primary.Attributes["name"]).View("FULL").Do() + {{- end }} + if err != nil { + return err + } + + if found.Name != rs.Primary.Attributes["name"] { + return fmt.Errorf("Instance not found") + } + + *instance = *found + + return nil + } +} + +func testAccCheckComputeInstanceMetadata( + instance *compute.Instance, + k string, v string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Metadata == nil { + return fmt.Errorf("no metadata") + } + + for _, item := range instance.Metadata.Items { + if k != item.Key { + continue + } + + if item.Value != nil && v == *item.Value { + return nil + } + + return fmt.Errorf("bad value for %s: %s", k, *item.Value) + } + + return fmt.Errorf("metadata not found: %s", k) + } +} + +func testAccCheckComputeInstanceAccessConfig(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if len(i.AccessConfigs) == 0 { + return fmt.Errorf("no access_config") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceAccessConfigHasNatIP(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error 
{ + for _, i := range instance.NetworkInterfaces { + for _, c := range i.AccessConfigs { + if c.NatIP == "" { + return fmt.Errorf("no NAT IP") + } + } + } + + return nil + } +} + +func testAccCheckComputeInstanceIpv6AccessConfigHasExternalIPv6(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + for _, c := range i.Ipv6AccessConfigs { + if c.ExternalIpv6 == "" { + return fmt.Errorf("no External IPv6") + } + } + } + + return nil + } +} + +func testAccCheckComputeInstanceIpv6AccessConfigHasInternalIPv6(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.Ipv6Address == "" { + return fmt.Errorf("no internal IPv6 address") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceAccessConfigHasPTR(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + for _, c := range i.AccessConfigs { + if c.PublicPtrDomainName == "" { + return fmt.Errorf("no PTR Record") + } + } + } + + return nil + } +} + +func testAccCheckComputeResourcePolicy(instance *compute.Instance, scheduleName string, resourcePolicyCountWant int) resource.TestCheckFunc { + return func(s *terraform.State) error { + resourcePoliciesCountHave := len(instance.ResourcePolicies) + if resourcePoliciesCountHave != resourcePolicyCountWant { + return fmt.Errorf("number of resource polices does not match: have: %d; want: %d", resourcePoliciesCountHave, resourcePolicyCountWant) + } + + if resourcePoliciesCountHave == 1 && !strings.Contains(instance.ResourcePolicies[0], scheduleName) { + return fmt.Errorf("got the wrong schedule: have: %s; want: %s", instance.ResourcePolicies[0], scheduleName) + } + + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeInstanceMaxRunDuration(instance *compute.Instance, 
instanceMaxRunDurationWant compute.Duration) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if instance == nil {
+			return fmt.Errorf("instance is nil")
+		}
+		if instance.Scheduling == nil {
+			return fmt.Errorf("no scheduling")
+		}
+		// Guard the pointer before dereferencing it in the comparison below.
+		if instance.Scheduling.MaxRunDuration == nil {
+			return fmt.Errorf("no max run duration")
+		}
+
+		if !reflect.DeepEqual(*instance.Scheduling.MaxRunDuration, instanceMaxRunDurationWant) {
+			return fmt.Errorf("got the wrong instance max run duration action: have: %#v; want: %#v", *instance.Scheduling.MaxRunDuration, instanceMaxRunDurationWant)
+		}
+
+		return nil
+	}
+}
+{{- end }}
+
+// testAccCheckComputeInstanceLocalSsdRecoveryTimeout verifies that the instance's
+// scheduling block carries the expected local SSD recovery timeout.
+func testAccCheckComputeInstanceLocalSsdRecoveryTimeout(instance *compute.Instance, instanceLocalSsdRecoveryTimeoutWant compute.Duration) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if instance == nil {
+			return fmt.Errorf("instance is nil")
+		}
+		if instance.Scheduling == nil {
+			return fmt.Errorf("no scheduling")
+		}
+		// Guard the pointer before dereferencing it in the comparison below.
+		if instance.Scheduling.LocalSsdRecoveryTimeout == nil {
+			return fmt.Errorf("no local ssd recovery timeout")
+		}
+
+		if !reflect.DeepEqual(*instance.Scheduling.LocalSsdRecoveryTimeout, instanceLocalSsdRecoveryTimeoutWant) {
+			return fmt.Errorf("got the wrong instance local ssd recovery timeout action: have: %#v; want: %#v", *instance.Scheduling.LocalSsdRecoveryTimeout, instanceLocalSsdRecoveryTimeoutWant)
+		}
+
+		return nil
+	}
+}
+
+{{ if ne $.TargetVersionName `ga` -}}
+// testAccCheckComputeInstancePartnerMetadata verifies that the instance's partner
+// metadata matches the expected entries, comparing the decoded JSON payloads.
+func testAccCheckComputeInstancePartnerMetadata(instance *compute.Instance, expectedPartnerMetadata map[string]compute.StructuredEntries) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if instance == nil {
+			return fmt.Errorf("instance is nil")
+		}
+		if instance.PartnerMetadata == nil {
+			return fmt.Errorf("no partner metadata")
+		}
+		expectedPartnerMetadataMap := make(map[string]interface{})
+		actualPartnerMetadataMap := make(map[string]interface{})
+		for key, value := range instance.PartnerMetadata {
+			var jsonMap map[string]interface{}
+			// json.Unmarshal needs a pointer; passing the map by value leaves it
+			// nil, which made this check compare nil against nil and always pass.
+			if err := json.Unmarshal(value.Entries, &jsonMap); err != nil {
+				return fmt.Errorf("unmarshalling partner metadata entries for %q: %s", key, err)
+			}
+			actualPartnerMetadataMap[key] = jsonMap
+		}
+		for key, value := range expectedPartnerMetadata {
+			var jsonMap map[string]interface{}
+			if err := json.Unmarshal(value.Entries, &jsonMap); err != nil {
+				return fmt.Errorf("unmarshalling expected partner metadata entries for %q: %s", key, err)
+			}
+			expectedPartnerMetadataMap[key] = jsonMap
+		}
+		if !reflect.DeepEqual(actualPartnerMetadataMap, expectedPartnerMetadataMap) {
+			return fmt.Errorf("got the wrong instance partner metadata: have: %+v; want: %+v", actualPartnerMetadataMap, expectedPartnerMetadataMap)
+		}
+		return nil
+	}
+}
+{{- end }}
+
+// testAccCheckComputeInstanceTerminationAction verifies the instance's scheduled
+// termination action.
+func testAccCheckComputeInstanceTerminationAction(instance *compute.Instance, instanceTerminationActionWant string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if instance == nil {
+			return fmt.Errorf("instance is nil")
+		}
+		if instance.Scheduling == nil {
+			return fmt.Errorf("no scheduling")
+		}
+
+		if instance.Scheduling.InstanceTerminationAction != instanceTerminationActionWant {
+			return fmt.Errorf("got the wrong instance termination action: have: %s; want: %s", instance.Scheduling.InstanceTerminationAction, instanceTerminationActionWant)
+		}
+
+		return nil
+	}
+}
+
+// testAccCheckComputeInstanceDisk verifies that the instance has a disk whose
+// source ends in "/<source>" with matching auto-delete and boot flags.
+func testAccCheckComputeInstanceDisk(instance *compute.Instance, source string, delete bool, boot bool) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if instance.Disks == nil {
+			return fmt.Errorf("no disks")
+		}
+
+		for _, disk := range instance.Disks {
+			if strings.HasSuffix(disk.Source, "/"+source) && disk.AutoDelete == delete && disk.Boot == boot {
+				return nil
+			}
+		}
+
+		return fmt.Errorf("Disk not found: %s", source)
+	}
+}
+
+// testAccCheckComputeInstanceHasInstanceId verifies that the instance_id stored in
+// Terraform state matches the numeric ID reported by the API.
+func testAccCheckComputeInstanceHasInstanceId(instance *compute.Instance, n string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		rs, ok := s.RootModule().Resources[n]
+		if !ok {
+			return fmt.Errorf("Not found: %s", n)
+		}
+
+		remote := fmt.Sprintf("%d", instance.Id)
+		local := rs.Primary.Attributes["instance_id"]
+
+		if remote != local {
+			return fmt.Errorf("Instance id stored does not match: remote has %#v but local has %#v", remote,
+				local)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckComputeInstanceBootDisk(instance *compute.Instance, source 
string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if instance.Disks == nil {
+			return fmt.Errorf("no disks")
+		}
+
+		for _, disk := range instance.Disks {
+			// Only boot disks are candidates; match on the source suffix.
+			if disk.Boot {
+				if strings.HasSuffix(disk.Source, source) {
+					return nil
+				}
+			}
+		}
+
+		return fmt.Errorf("Boot disk not found with source %q", source)
+	}
+}
+
+// testAccCheckComputeInstanceBootDiskType verifies the type of the instance's boot
+// disk by looking the disk up via the API.
+func testAccCheckComputeInstanceBootDiskType(t *testing.T, instanceName string, diskType string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		config := acctest.GoogleProviderConfig(t)
+
+		// boot disk is named the same as the Instance
+		disk, err := config.NewComputeClient(config.UserAgent).Disks.Get(config.Project, "us-central1-a", instanceName).Do()
+		if err != nil {
+			return err
+		}
+		if strings.Contains(disk.Type, diskType) {
+			return nil
+		}
+
+		return fmt.Errorf("Boot disk not found with type %q", diskType)
+	}
+}
+
+// testAccCheckComputeInstanceScratchDisk verifies that the instance's SCRATCH disks
+// match the expected interfaces (and optional device names), in order.
+func testAccCheckComputeInstanceScratchDisk(instance *compute.Instance, interfaces []map[string]string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		if instance.Disks == nil {
+			return fmt.Errorf("no disks")
+		}
+
+		i := 0
+		for _, disk := range instance.Disks {
+			if disk.Type == "SCRATCH" {
+				if i >= len(interfaces) {
+					return fmt.Errorf("Expected %d scratch disks, found more", len(interfaces))
+				}
+				if disk.Interface != interfaces[i]["interface"] {
+					// Report the expected interface string, not the whole map
+					// (the original passed interfaces[i] to %q).
+					return fmt.Errorf("Mismatched interface on scratch disk #%d, expected: %q, found: %q",
+						i, interfaces[i]["interface"], disk.Interface)
+				}
+				if deviceName, ok := interfaces[i]["deviceName"]; ok {
+					if disk.DeviceName != deviceName {
+						return fmt.Errorf("Mismatched device name on scratch disk #%d, expected: %q, found: %q",
+							i, deviceName, disk.DeviceName)
+					}
+				}
+
+				i++
+			}
+		}
+
+		if i != len(interfaces) {
+			return fmt.Errorf("Expected %d scratch disks, found %d", len(interfaces), i)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckComputeInstanceDiskEncryptionKey(n string, instance *compute.Instance, 
bootDiskEncryptionKey string, diskNameToEncryptionKey map[string]*compute.CustomerEncryptionKey) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + for i, disk := range instance.Disks { + if disk.Boot { + attr := rs.Primary.Attributes["boot_disk.0.disk_encryption_key_sha256"] + if attr != bootDiskEncryptionKey { + return fmt.Errorf("Boot disk has wrong encryption key in state.\nExpected: %s\nActual: %s", bootDiskEncryptionKey, attr) + } + if disk.DiskEncryptionKey == nil && attr != "" { + return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: ", i, attr) + } + if disk.DiskEncryptionKey != nil && attr != disk.DiskEncryptionKey.Sha256 { + return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: %+v", + i, attr, disk.DiskEncryptionKey.Sha256) + } + } else { + if disk.DiskEncryptionKey != nil { + expectedKey := diskNameToEncryptionKey[tpgresource.GetResourceNameFromSelfLink(disk.Source)].Sha256 + if disk.DiskEncryptionKey.Sha256 != expectedKey { + return fmt.Errorf("Disk %d has unexpected encryption key in GCP.\nExpected: %s\nActual: %s", i, expectedKey, disk.DiskEncryptionKey.Sha256) + } + } + } + } + + numAttachedDisks, err := strconv.Atoi(rs.Primary.Attributes["attached_disk.#"]) + if err != nil { + return fmt.Errorf("Error converting value of attached_disk.#") + } + for i := 0; i < numAttachedDisks; i++ { + diskName := tpgresource.GetResourceNameFromSelfLink(rs.Primary.Attributes[fmt.Sprintf("attached_disk.%d.source", i)]) + encryptionKey := rs.Primary.Attributes[fmt.Sprintf("attached_disk.%d.disk_encryption_key_sha256", i)] + if key, ok := diskNameToEncryptionKey[diskName]; ok { + expectedEncryptionKey := key.Sha256 + if encryptionKey != expectedEncryptionKey { + return fmt.Errorf("Attached disk %d has unexpected encryption key in state.\nExpected: %s\nActual: %s", i, expectedEncryptionKey, 
encryptionKey) + } + } + } + return nil + } +} + +func testAccCheckComputeInstanceDiskKmsEncryptionKey(n string, instance *compute.Instance, bootDiskEncryptionKey string, diskNameToEncryptionKey map[string]*compute.CustomerEncryptionKey) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + for i, disk := range instance.Disks { + if disk.Boot { + attr := rs.Primary.Attributes["boot_disk.0.kms_key_self_link"] + if attr != bootDiskEncryptionKey { + return fmt.Errorf("Boot disk has wrong encryption key in state.\nExpected: %s\nActual: %s", bootDiskEncryptionKey, attr) + } + if disk.DiskEncryptionKey == nil && attr != "" { + return fmt.Errorf("Disk %d has mismatched encryption key.\nTF State: %+v\nGCP State: ", i, attr) + } + } else { + if disk.DiskEncryptionKey != nil { + expectedKey := diskNameToEncryptionKey[tpgresource.GetResourceNameFromSelfLink(disk.Source)].KmsKeyName + // The response for crypto keys often includes the version of the key which needs to be removed + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + actualKey := strings.Split(disk.DiskEncryptionKey.KmsKeyName, "/cryptoKeyVersions")[0] + if actualKey != expectedKey { + return fmt.Errorf("Disk %d has unexpected encryption key in GCP.\nExpected: %s\nActual: %s", i, expectedKey, actualKey) + } + } + } + } + + numAttachedDisks, err := strconv.Atoi(rs.Primary.Attributes["attached_disk.#"]) + if err != nil { + return fmt.Errorf("Error converting value of attached_disk.#") + } + for i := 0; i < numAttachedDisks; i++ { + diskName := tpgresource.GetResourceNameFromSelfLink(rs.Primary.Attributes[fmt.Sprintf("attached_disk.%d.source", i)]) + kmsKeyName := rs.Primary.Attributes[fmt.Sprintf("attached_disk.%d.kms_key_self_link", i)] + if key, ok := diskNameToEncryptionKey[diskName]; ok { + expectedEncryptionKey := key.KmsKeyName + if kmsKeyName != expectedEncryptionKey { + 
return fmt.Errorf("Attached disk %d has unexpected encryption key in state.\nExpected: %s\nActual: %s", i, expectedEncryptionKey, kmsKeyName) + } + } + } + return nil + } +} + +func testAccCheckComputeInstanceTag(instance *compute.Instance, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Tags == nil { + return fmt.Errorf("no tags") + } + + for _, k := range instance.Tags.Items { + if k == n { + return nil + } + } + + return fmt.Errorf("tag not found: %s", n) + } +} + +func testAccCheckComputeInstanceLabel(instance *compute.Instance, key string, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Labels == nil { + return fmt.Errorf("no labels found on instance %s", instance.Name) + } + + v, ok := instance.Labels[key] + if !ok { + return fmt.Errorf("No label found with key %s on instance %s", key, instance.Name) + } + if v != value { + return fmt.Errorf("Expected value '%s' but found value '%s' for label '%s' on instance %s", value, v, key, instance.Name) + } + + return nil + } +} + +func testAccCheckComputeInstanceAttributionLabel(instance *compute.Instance, present bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Labels == nil { + if present { + return fmt.Errorf("no labels found on instance %s", instance.Name) + } + return nil + } + + _, ok := instance.Labels["goog-terraform-provisioned"] + if ok { + if !present { + return fmt.Errorf("Attribution label found on instance %s", instance.Name) + } + } + + return nil + } +} + +func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if count := len(instance.ServiceAccounts); count != 1 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 1, got %d", count) + } + + for _, val := range instance.ServiceAccounts[0].Scopes { + if val == scope { + return nil + } + } + + return 
fmt.Errorf("Scope not found: %s", scope) + } +} + +func testAccCheckComputeInstanceScopes(instance *compute.Instance, scopeCount int) resource.TestCheckFunc { + return func(s *terraform.State) error { + + if count := len(instance.ServiceAccounts); count == 0 { + if scopeCount == 0 { + return nil + } else { + return fmt.Errorf("Scope count expected: %s, but got %s", fmt.Sprint(scopeCount), fmt.Sprint(count)) + } + } else { + if count := len(instance.ServiceAccounts); count != 1 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 1, got %d", count) + } + + if scount := len(instance.ServiceAccounts[0].Scopes); scount == scopeCount { + return nil + } else { + return fmt.Errorf("Scope count expected: %s, but got %s", fmt.Sprint(scopeCount), fmt.Sprint(scount)) + } + } + } +} + +func testAccCheckComputeInstanceHasSubnet(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.Subnetwork == "" { + return fmt.Errorf("no subnet") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceHasAnyNetworkIP(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.NetworkIP == "" { + return fmt.Errorf("no network_ip") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceHasNetworkIP(instance *compute.Instance, networkIP string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.NetworkIP != networkIP { + return fmt.Errorf("Wrong network_ip found: expected %v, got %v", networkIP, i.NetworkIP) + } + } + + return nil + } +} + +func testAccCheckComputeInstanceHasNetworkPerformanceConfig(instance *compute.Instance, bandwidthTier string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.NetworkPerformanceConfig == nil { + return fmt.Errorf("Expected instance to have 
network performance config, but it was nil") + } + if instance.NetworkPerformanceConfig.TotalEgressBandwidthTier != bandwidthTier { + return fmt.Errorf("Incorrect network_performance_config.total_egress_bandwidth_tier found: expected %v, got %v", bandwidthTier, instance.NetworkPerformanceConfig.TotalEgressBandwidthTier) + } + + return nil + } +} + +func testAccCheckComputeInstanceHasMultiNic(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(instance.NetworkInterfaces) < 2 { + return fmt.Errorf("only saw %d nics", len(instance.NetworkInterfaces)) + } + + return nil + } +} + +func testAccCheckComputeInstanceHasGuestAccelerator(instance *compute.Instance, acceleratorType string, acceleratorCount int64) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(instance.GuestAccelerators) != 1 { + return fmt.Errorf("Expected only one guest accelerator") + } + + if !strings.HasSuffix(instance.GuestAccelerators[0].AcceleratorType, acceleratorType) { + return fmt.Errorf("Wrong accelerator type: expected %v, got %v", acceleratorType, instance.GuestAccelerators[0].AcceleratorType) + } + + if instance.GuestAccelerators[0].AcceleratorCount != acceleratorCount { + return fmt.Errorf("Wrong accelerator acceleratorCount: expected %d, got %d", acceleratorCount, instance.GuestAccelerators[0].AcceleratorCount) + } + + return nil + } +} + +func testAccCheckComputeInstanceLacksGuestAccelerator(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(instance.GuestAccelerators) > 0 { + return fmt.Errorf("Expected no guest accelerators") + } + + return nil + } +} + +func testAccCheckComputeInstanceHasMinCpuPlatform(instance *compute.Instance, minCpuPlatform string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.MinCpuPlatform != minCpuPlatform { + return fmt.Errorf("Wrong minimum CPU platform: expected %s, got %s", minCpuPlatform, 
instance.MinCpuPlatform) + } + + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeInstanceHasNetworkAttachment(instance *compute.Instance, networkAttachmentName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, networkInterface := range instance.NetworkInterfaces { + if networkInterface.NetworkAttachment != "" && networkInterface.NetworkAttachment == networkAttachmentName { + return nil + } + } + return fmt.Errorf("Network Attachment %s, was not found in the instance template", networkAttachmentName) + } +} +{{- end }} + +func testAccCheckComputeInstanceHasMachineType(instance *compute.Instance, machineType string) resource.TestCheckFunc { + return func(s *terraform.State) error { + instanceMachineType := tpgresource.GetResourceNameFromSelfLink(instance.MachineType) + if instanceMachineType != machineType { + return fmt.Errorf("Wrong machine type: expected %s, got %s", machineType, instanceMachineType) + } + + return nil + } +} + +func testAccCheckComputeInstanceHasAliasIpRange(instance *compute.Instance, subnetworkRangeName, iPCidrRange string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, networkInterface := range instance.NetworkInterfaces { + for _, aliasIpRange := range networkInterface.AliasIpRanges { + if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { + return nil + } + } + } + + return fmt.Errorf("Alias ip range with name %s and cidr %s not present", subnetworkRangeName, iPCidrRange) + } +} + +func testAccCheckComputeInstanceHasAssignedNatIP(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_instance" { + continue + } + ip := rs.Primary.Attributes["network_interface.0.access_config.0.nat_ip"] + if ip == "" { + return fmt.Errorf("No assigned NatIP for 
instance %s", rs.Primary.Attributes["name"]) + } + } + return nil +} + +func testAccCheckComputeInstanceHasConfiguredDeletionProtection(instance *compute.Instance, configuredDeletionProtection bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.DeletionProtection != configuredDeletionProtection { + return fmt.Errorf("Wrong deletion protection flag: expected %t, got %t", configuredDeletionProtection, instance.DeletionProtection) + } + + return nil + } +} + +func testAccCheckComputeInstanceHasReservationAffinity(instance *compute.Instance, reservationType string, specificReservationNames ...string) resource.TestCheckFunc { + if len(specificReservationNames) > 1 { + panic("too many specificReservationNames provided in test") + } + + return func(*terraform.State) error { + if instance.ReservationAffinity == nil { + return fmt.Errorf("expected instance to have reservation affinity, but it was nil") + } + + if instance.ReservationAffinity.ConsumeReservationType != reservationType { + return fmt.Errorf("Wrong reservationAffinity consumeReservationType: expected %s, got, %s", reservationType, instance.ReservationAffinity.ConsumeReservationType) + } + + if len(specificReservationNames) > 0 { + const reservationNameKey = "compute.googleapis.com/reservation-name" + if instance.ReservationAffinity.Key != reservationNameKey { + return fmt.Errorf("Wrong reservationAffinity key: expected %s, got, %s", reservationNameKey, instance.ReservationAffinity.Key) + } + if len(instance.ReservationAffinity.Values) != 1 || instance.ReservationAffinity.Values[0] != specificReservationNames[0] { + return fmt.Errorf("Wrong reservationAffinity values: expected %s, got, %s", specificReservationNames, instance.ReservationAffinity.Values) + } + } + + return nil + } +} + +func testAccCheckComputeInstanceHasShieldedVmConfig(instance *compute.Instance, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) resource.TestCheckFunc { + + return func(s 
*terraform.State) error { + if instance.ShieldedInstanceConfig.EnableSecureBoot != enableSecureBoot { + return fmt.Errorf("Wrong shieldedVmConfig enableSecureBoot: expected %t, got, %t", enableSecureBoot, instance.ShieldedInstanceConfig.EnableSecureBoot) + } + + if instance.ShieldedInstanceConfig.EnableVtpm != enableVtpm { + return fmt.Errorf("Wrong shieldedVmConfig enableVtpm: expected %t, got, %t", enableVtpm, instance.ShieldedInstanceConfig.EnableVtpm) + } + + if instance.ShieldedInstanceConfig.EnableIntegrityMonitoring != enableIntegrityMonitoring { + return fmt.Errorf("Wrong shieldedVmConfig enableIntegrityMonitoring: expected %t, got, %t", enableIntegrityMonitoring, instance.ShieldedInstanceConfig.EnableIntegrityMonitoring) + } + return nil + } +} + +func testAccCheckComputeInstanceHasConfidentialInstanceConfig(instance *compute.Instance, EnableConfidentialCompute bool, ConfidentialInstanceType string) resource.TestCheckFunc { + + return func(s *terraform.State) error { + if instance.ConfidentialInstanceConfig.EnableConfidentialCompute != EnableConfidentialCompute { + return fmt.Errorf("Wrong ConfidentialInstanceConfig EnableConfidentialCompute: expected %t, got, %t", EnableConfidentialCompute, instance.ConfidentialInstanceConfig.EnableConfidentialCompute) + } + {{- if ne $.TargetVersionName "ga" }} + if instance.ConfidentialInstanceConfig.ConfidentialInstanceType != ConfidentialInstanceType { + return fmt.Errorf("Wrong ConfidentialInstanceConfig ConfidentialInstanceType: expected %s, got, %s", ConfidentialInstanceType, instance.ConfidentialInstanceConfig.ConfidentialInstanceType) + } + {{- end }} + + return nil + } +} + +func testAccCheckComputeInstanceLacksShieldedVmConfig(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.ShieldedInstanceConfig != nil { + return fmt.Errorf("Expected no shielded vm config") + } + + return nil + } +} + +func testAccCheckComputeInstanceHasStatus(instance 
*compute.Instance, status string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Status != status { + return fmt.Errorf("Instance has not status %s, status: %s", status, instance.Status) + } + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeInstanceNicAccessConfigHasSecurityPolicy(instance *compute.Instance, securityPolicy string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, networkInterface := range instance.NetworkInterfaces { + for _, accessConfig := range networkInterface.AccessConfigs { + if strings.Contains(accessConfig.SecurityPolicy, securityPolicy) { + return nil + } + } + + for _, accessConfigIpv6 := range networkInterface.Ipv6AccessConfigs { + if strings.Contains(accessConfigIpv6.SecurityPolicy, securityPolicy) { + return nil + } + } + } + + return fmt.Errorf("Security Policy with name %s not present", securityPolicy) + } +} + +func testAccCheckComputeInstanceNicAccessConfigHasNoSecurityPolicy(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, networkInterface := range instance.NetworkInterfaces { + for _, accessConfig := range networkInterface.AccessConfigs { + if accessConfig.SecurityPolicy != "" { + return fmt.Errorf("Security Policy with name %s is present", accessConfig.SecurityPolicy) + } + + } + + for _, accessConfigIpv6 := range networkInterface.Ipv6AccessConfigs { + if accessConfigIpv6.SecurityPolicy != "" { + return fmt.Errorf("Security Policy with name %s is present", accessConfigIpv6.SecurityPolicy) + } + } + } + + return nil + } +} +{{- end }} + +func testAccComputeInstance_basic(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + 
desired_status = "RUNNING" + + //deletion_protection = false is implicit in this config due to default value + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + baz = "qux" + startup-script = "echo Hello" + } + + labels = { + my_key = "my_value" + my_other_key = "my_other_value" + } +} +`, instance) +} + +func testAccComputeInstance_basic2(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, instance) +} + +func testAccComputeInstance_basic3(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, instance) +} + +func testAccComputeInstance_basic4(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + 
network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, instance) +} + +func testAccComputeInstance_basic5(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, instance) +} + +func testAccComputeInstance_machineType(instance string, machineType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "%s" + zone = "us-central1-a" + description = "old_desc" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} +`, instance, machineType) +} + +func testAccComputeInstance_description(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + description = "old_desc" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} +`, instance) +} + +func testAccComputeInstance_descriptionUpdate(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + 
description = "new_desc" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} +`, instance) +} + +func testAccComputeInstance_resourceManagerTags(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_tags_tag_key" "key" { + parent = "projects/%{project}" + short_name = "foobarbaz%{random_suffix}" + description = "For foo/bar resources." +} + +resource "google_tags_tag_value" "value" { + parent = "tagKeys/${google_tags_tag_key.key.name}" + short_name = "foo%{random_suffix}" + description = "For foo resources." +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%{instance_name}" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + } + } + } + + params { + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + } + } + + network_interface { + network = "default" + } +} +`, context) +} + +func testAccComputeInstance_resourceManagerTagsUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_tags_tag_key" "key" { + parent = "projects/%{project}" + short_name = "foobarbaz%{random_suffix}" + description = "For foo/bar resources." +} + +resource "google_tags_tag_value" "value" { + parent = "tagKeys/${google_tags_tag_key.key.name}" + short_name = "foo%{random_suffix}" + description = "For foo resources." +} + +resource "google_tags_tag_key" "key_new" { + parent = "projects/%{project}" + short_name = "foobarbaznew%{random_suffix}" + description = "New key for foo/bar resources." 
+} + +resource "google_tags_tag_value" "value_new" { + parent = "tagKeys/${google_tags_tag_key.key_new.name}" + short_name = "foonew%{random_suffix}" + description = "New value for foo resources." +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%{instance_name}" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + } + } + } + + params { + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + "tagKeys/${google_tags_tag_key.key_new.name}" = "tagValues/${google_tags_tag_value.value_new.name}" + } + } + + network_interface { + network = "default" + } +} +`, context) +} + +func testAccComputeInstance_basic_deletionProtectionFalse(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + deletion_protection = false + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} +`, instance) +} + +func testAccComputeInstance_basic_deletionProtectionTrue(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + deletion_protection = true + + boot_disk { + initialize_params { + image = 
data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} +`, instance) +} + +// Update zone to ForceNew, and change metadata k/v entirely +// Generates diff mismatch +func testAccComputeInstance_forceNewAndChangeMetadata(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-b" + tags = ["baz"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + access_config { + } + } + + metadata = { + qux = "true" + } +} +`, instance) +} + +// Update metadata, tags, and network_interface +func testAccComputeInstance_update(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = true + tags = ["baz"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + access_config { + } + } + + metadata = { + bar = "baz" + startup-script = "echo Hello" + } + + labels = { + only_me = "nothing_else" + } +} +`, instance) +} + +func testAccComputeInstance_ip(ip, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_address" "foo" { + name = "%s" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = 
"default" + access_config { + nat_ip = google_compute_address.foo.address + } + } + + metadata = { + foo = "bar" + } +} +`, ip, instance) +} + +func testAccComputeInstance_ipv6(ip, instance, record string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_subnetwork" "subnetwork-ipv6" { + name = "%s-subnetwork" + + ip_cidr_range = "10.0.0.0/22" + region = "us-west2" + + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + + network = google_compute_network.custom-test.id +} + +resource "google_compute_network" "custom-test" { + name = "%s-network" + auto_create_subnetworks = false +} + +resource "google_compute_address" "foo" { + name = "%s" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-west2-a" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork-ipv6.name + stack_type = "IPV4_IPV6" + ipv6_access_config { + network_tier = "PREMIUM" + public_ptr_domain_name = "%s.gcp.tfacc.hashicorptest.com." 
+ } + } + + metadata = { + foo = "bar" + } +} +`, instance, instance, ip, instance, record) +} + +func testAccComputeInstance_internalIpv6(ip, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_subnetwork" "subnetwork_ipv6" { + name = "%s-subnetwork" + + ip_cidr_range = "10.0.0.0/22" + region = "us-west2" + + stack_type = "IPV4_IPV6" + ipv6_access_type = "INTERNAL" + + network = google_compute_network.custom-test.id + } + + resource "google_compute_network" "custom-test" { + name = "%s-network" + enable_ula_internal_ipv6 = true + auto_create_subnetworks = false + } + + resource "google_compute_address" "ipv6" { + name = "%s" + region = "us-west2" + address_type = "INTERNAL" + purpose = "GCE_ENDPOINT" + subnetwork = google_compute_subnetwork.subnetwork_ipv6.id + ip_version = "IPV6" + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-west2-a" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork_ipv6.name + stack_type = "IPV4_IPV6" + ipv6_address = google_compute_address.ipv6.address + } + + metadata = { + foo = "bar" + } + } + `, instance, instance, ip, instance) +} + +func testAccComputeInstance_ipv6ExternalReservation(instance string) string { + return fmt.Sprintf(` +resource "google_compute_address" "ipv6-address" { + region = "us-west2" + name = "%s-address" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnetwork-ipv6.name +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_subnetwork" "subnetwork-ipv6" { + name = "%s-subnetwork" + + ip_cidr_range = "10.0.0.0/22" + 
region = "us-west2" + + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + + network = google_compute_network.custom-test.id +} + +resource "google_compute_network" "custom-test" { + name = "%s-network" + auto_create_subnetworks = false +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-west2-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork-ipv6.name + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + } +} +`, instance, instance, instance, instance) +} + +func testAccComputeInstance_PTRRecord(record, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + access_config { + public_ptr_domain_name = "%s.gcp.tfacc.hashicorptest.com." 
+ } + } + + metadata = { + foo = "bar" + } +} +`, instance, record) +} + +func testAccComputeInstance_networkTier(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + access_config { + network_tier = "STANDARD" + } + } +} +`, instance) +} + +func testAccComputeInstance_disks_encryption(bootEncryptionKey string, diskNameToEncryptionKey map[string]*compute.CustomerEncryptionKey, instance, suffix string) string { + diskNames := []string{} + for k := range diskNameToEncryptionKey { + diskNames = append(diskNames, k) + } + sort.Strings(diskNames) + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + + disk_encryption_key { + raw_key = "%s" + } +} + +resource "google_compute_disk" "foobar2" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + + disk_encryption_key { + raw_key = "%s" + } +} + +resource "google_compute_disk" "foobar3" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + + disk_encryption_key { + raw_key = "%s" + } +} + +resource "google_compute_disk" "foobar4" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + disk_encryption_key_raw = "%s" + } + + attached_disk { + source = google_compute_disk.foobar.self_link + disk_encryption_key_raw = "%s" + } + + 
attached_disk { + source = google_compute_disk.foobar2.self_link + disk_encryption_key_raw = "%s" + } + + attached_disk { + source = google_compute_disk.foobar4.self_link + } + + attached_disk { + source = google_compute_disk.foobar3.self_link + disk_encryption_key_raw = "%s" + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + allow_stopping_for_update = true +} +`, diskNames[0], diskNameToEncryptionKey[diskNames[0]].RawKey, + diskNames[1], diskNameToEncryptionKey[diskNames[1]].RawKey, + diskNames[2], diskNameToEncryptionKey[diskNames[2]].RawKey, + "tf-testd-"+suffix, + instance, bootEncryptionKey, + diskNameToEncryptionKey[diskNames[0]].RawKey, diskNameToEncryptionKey[diskNames[1]].RawKey, diskNameToEncryptionKey[diskNames[2]].RawKey) +} + +func testAccComputeInstance_disks_encryption_restart(bootEncryptionKey string, diskNameToEncryptionKey map[string]*compute.CustomerEncryptionKey, instance string) string { + diskNames := []string{} + for k := range diskNameToEncryptionKey { + diskNames = append(diskNames, k) + } + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + + disk_encryption_key { + raw_key = "%s" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + disk_encryption_key_raw = "%s" + } + + attached_disk { + source = google_compute_disk.foobar.self_link + disk_encryption_key_raw = "%s" + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + allow_stopping_for_update = true +} +`, diskNames[0], diskNameToEncryptionKey[diskNames[0]].RawKey, + instance, bootEncryptionKey, + diskNameToEncryptionKey[diskNames[0]].RawKey) +} + +func 
testAccComputeInstance_disks_encryption_restartUpdate(bootEncryptionKey string, diskNameToEncryptionKey map[string]*compute.CustomerEncryptionKey, instance string) string { + diskNames := []string{} + for k := range diskNameToEncryptionKey { + diskNames = append(diskNames, k) + } + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + + disk_encryption_key { + raw_key = "%s" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-standard-2" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + disk_encryption_key_raw = "%s" + } + + attached_disk { + source = google_compute_disk.foobar.self_link + disk_encryption_key_raw = "%s" + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } + + allow_stopping_for_update = true +} +`, diskNames[0], diskNameToEncryptionKey[diskNames[0]].RawKey, + instance, bootEncryptionKey, + diskNameToEncryptionKey[diskNames[0]].RawKey) +} + +func testAccComputeInstance_disks_kms(bootEncryptionKey string, diskNameToEncryptionKey map[string]*compute.CustomerEncryptionKey, instance, suffix string) string { + diskNames := []string{} + for k := range diskNameToEncryptionKey { + diskNames = append(diskNames, k) + } + sort.Strings(diskNames) + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + + disk_encryption_key { + kms_key_self_link = "%s" + } +} + +resource "google_compute_disk" "foobar2" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + + disk_encryption_key { + kms_key_self_link = "%s" + } +} + +resource "google_compute_disk" 
"foobar3" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + + disk_encryption_key { + kms_key_self_link = "%s" + } +} + +resource "google_compute_disk" "foobar4" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + kms_key_self_link = "%s" + } + + attached_disk { + source = google_compute_disk.foobar.self_link + kms_key_self_link = "%s" + } + + attached_disk { + source = google_compute_disk.foobar2.self_link + kms_key_self_link = "%s" + } + + attached_disk { + source = google_compute_disk.foobar4.self_link + } + + attached_disk { + source = google_compute_disk.foobar3.self_link + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, diskNames[0], diskNameToEncryptionKey[diskNames[0]].KmsKeyName, + diskNames[1], diskNameToEncryptionKey[diskNames[1]].KmsKeyName, + diskNames[2], diskNameToEncryptionKey[diskNames[2]].KmsKeyName, + "tf-testd-"+suffix, + instance, bootEncryptionKey, + diskNameToEncryptionKey[diskNames[0]].KmsKeyName, diskNameToEncryptionKey[diskNames[1]].KmsKeyName) +} + +func testAccComputeInstance_instanceSchedule(instance, schedule string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_resource_policy" "instance_schedule" { + name = "%s" + region = "us-central1" + instance_schedule_policy { + vm_start_schedule { + schedule = "1 1 1 1 1" + } + vm_stop_schedule { + schedule = "2 2 2 2 2" 
+ } + time_zone = "UTC" + } +} +`, instance, schedule) +} + +func testAccComputeInstance_addResourcePolicy(instance, schedule string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + resource_policies = [google_compute_resource_policy.instance_schedule.self_link] +} + +resource "google_compute_resource_policy" "instance_schedule" { + name = "%s" + region = "us-central1" + instance_schedule_policy { + vm_start_schedule { + schedule = "1 1 1 1 1" + } + vm_stop_schedule { + schedule = "2 2 2 2 2" + } + time_zone = "UTC" + } +} +`, instance, schedule) +} + +func testAccComputeInstance_updateResourcePolicy(instance, schedule1, schedule2 string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + resource_policies = [google_compute_resource_policy.instance_schedule2.self_link] +} + +resource "google_compute_resource_policy" "instance_schedule" { + name = "%s" + region = "us-central1" + instance_schedule_policy { + vm_start_schedule { + schedule = "1 1 1 1 1" + } + vm_stop_schedule { + schedule = "2 2 2 2 2" + } + time_zone = "UTC" + } +} + +resource "google_compute_resource_policy" "instance_schedule2" { + name = "%s" + region = "us-central1" + instance_schedule_policy { + vm_start_schedule { + schedule = "2 2 2 2 2" + } + vm_stop_schedule { + schedule = "3 3 3 3 3" + } + time_zone = 
"UTC" + } +} +`, instance, schedule1, schedule2) +} + +func testAccComputeInstance_removeResourcePolicy(instance, schedule1, schedule2 string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + resource_policies = null +} + +resource "google_compute_resource_policy" "instance_schedule" { + name = "%s" + region = "us-central1" + instance_schedule_policy { + vm_start_schedule { + schedule = "1 1 1 1 1" + } + vm_stop_schedule { + schedule = "2 2 2 2 2" + } + time_zone = "UTC" + } +} + +resource "google_compute_resource_policy" "instance_schedule2" { + name = "%s" + region = "us-central1" + instance_schedule_policy { + vm_start_schedule { + schedule = "2 2 2 2 2" + } + vm_stop_schedule { + schedule = "3 3 3 3 3" + } + time_zone = "UTC" + } +} +`, instance, schedule1, schedule2) +} + +func testAccComputeInstance_attachedDisk(disk, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + attached_disk { + source = google_compute_disk.foobar.name + } + + network_interface { + network = "default" + } +} +`, disk, instance) +} + +func testAccComputeInstance_attachedDisk_sourceUrl(disk, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = 
"debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + attached_disk { + source = google_compute_disk.foobar.self_link + } + + network_interface { + network = "default" + } +} +`, disk, instance) +} + +func testAccComputeInstance_attachedDisk_modeRo(disk, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + attached_disk { + source = google_compute_disk.foobar.self_link + mode = "READ_ONLY" + } + + network_interface { + network = "default" + } +} +`, disk, instance) +} + +func testAccComputeInstance_addAttachedDisk(disk, disk2, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_disk" "foobar2" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + attached_disk { + source = google_compute_disk.foobar.name + } + + attached_disk { + source = google_compute_disk.foobar2.self_link 
+ } + + network_interface { + network = "default" + } +} +`, disk, disk2, instance) +} + +func testAccComputeInstance_detachDisk(disk, disk2, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_disk" "foobar2" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + attached_disk { + source = google_compute_disk.foobar.name + } + + network_interface { + network = "default" + } +} +`, disk, disk2, instance) +} + +func testAccComputeInstance_updateAttachedDiskEncryptionKey(disk, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" + disk_encryption_key { + raw_key = "c2Vjb25kNzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + attached_disk { + source = google_compute_disk.foobar.name + disk_encryption_key_raw = "c2Vjb25kNzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI" + } + + network_interface { + network = "default" + } +} +`, disk, instance) +} + +func testAccComputeInstance_bootDisk_source(disk, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + 
zone = "us-central1-a" + image = data.google_compute_image.my_image.self_link +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + source = google_compute_disk.foobar.name + } + + network_interface { + network = "default" + } +} +`, disk, instance) +} + +func testAccComputeInstance_bootDisk_sourceUrl(disk, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + image = data.google_compute_image.my_image.self_link +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + source = google_compute_disk.foobar.self_link + } + + network_interface { + network = "default" + } +} +`, disk, instance) +} + +func testAccComputeInstance_bootDisk_type(instance string, diskType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + type = "%s" + } + } + + network_interface { + network = "default" + } +} +`, instance, diskType) +} + +func testAccComputeInstance_bootDisk_mode(instance string, diskMode string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + type = "pd-ssd" + } + + mode = "%s" + } + + network_interface { + network = "default" + } +} +`, instance, diskMode) +} + 
+func testAccComputeInstance_with375GbScratchDisk(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + scratch_disk { + interface = "NVME" + } + + scratch_disk { + interface = "SCSI" + } + + scratch_disk { + interface = "NVME" + device_name = "nvme-local-ssd" + } + + scratch_disk { + interface = "SCSI" + device_name = "scsi-local-ssd" + } + + network_interface { + network = "default" + } +} +`, instance) +} + +func testAccComputeInstance_with18TbScratchDisk(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n2-standard-64" // must be a large n2 to be paired with 18Tb local-ssd + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + scratch_disk { + interface = "NVME" + size = 3000 + } + + scratch_disk { + interface = "NVME" + size = 3000 + } + + scratch_disk { + interface = "NVME" + size = 3000 + } + + scratch_disk { + interface = "NVME" + size = 3000 + } + + scratch_disk { + interface = "NVME" + size = 3000 + } + + scratch_disk { + interface = "NVME" + size = 3000 + } + + network_interface { + network = "default" + } +}`, instance) +} + +func testAccComputeInstance_serviceAccount(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + 
image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + scopes = [ + "userinfo-email", + "compute-ro", + "storage-ro", + ] + } +} +`, instance) +} + +func testAccComputeInstance_serviceAccount_update0(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + allow_stopping_for_update = true +} +`, instance) +} + +func testAccComputeInstance_serviceAccount_update01(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + scopes = [] + } + allow_stopping_for_update = true +} + +data "google_compute_default_service_account" "default" { +} +`, instance) +} + +func testAccComputeInstance_serviceAccount_update02(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + email = data.google_compute_default_service_account.default.email + scopes = [] + } + allow_stopping_for_update = true +} + +data 
"google_compute_default_service_account" "default" { +} +`, instance) +} + +func testAccComputeInstance_serviceAccount_update3(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + scopes = [ + "userinfo-email", + "compute-ro", + "storage-ro", + ] + } + + allow_stopping_for_update = true +} +`, instance) +} + +func testAccComputeInstance_serviceAccount_update4(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + network_interface { + network = "default" + } + service_account { + scopes = [ + "userinfo-email", + ] + } + allow_stopping_for_update = true +} +`, instance) +} + +func testAccComputeInstance_scheduling(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + automatic_restart = false + } +} +`, instance) +} + +func testAccComputeInstance_schedulingUpdated(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource 
"google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + automatic_restart = false + preemptible = true + } +} +`, instance) +} + +func testAccComputeInstance_advancedMachineFeatures(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-10" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-2" // Nested Virt isn't supported on E2 and N2Ds https://cloud.google.com/compute/docs/instances/nested-virtualization/overview#restrictions and https://cloud.google.com/compute/docs/instances/disabling-smt#limitations + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + allow_stopping_for_update = true + +} +`, instance) +} + +func testAccComputeInstance_advancedMachineFeaturesUpdated(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-10" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-2" // Nested Virt isn't supported on E2 and N2Ds https://cloud.google.com/compute/docs/instances/nested-virtualization/overview#restrictions and https://cloud.google.com/compute/docs/instances/disabling-smt#limitations + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + advanced_machine_features { + threads_per_core = 1 + enable_nested_virtualization = true + visible_core_count = 1 + } + allow_stopping_for_update = true +} +`, instance) +} + +func 
testAccComputeInstance_subnet_auto(suffix, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + + auto_create_subnetworks = true +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.inst-test-network.name + access_config { + } + } +} +`, suffix, instance) +} + +func testAccComputeInstance_subnet_custom(suffix, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.inst-test-network.self_link +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + access_config { + } + } +} +`, suffix, suffix, instance) +} + +func testAccComputeInstance_subnet_xpn(org, billingId, projectName, instance, suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_project" "host_project" { + name = "Test Project XPN Host" + project_id = "%s-host" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" 
"host_project" { + project = google_project.host_project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_shared_vpc_host_project" "host_project" { + project = google_project_service.host_project.project +} + +resource "google_project" "service_project" { + name = "Test Project XPN Service" + project_id = "%s-service" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "service_project" { + project = google_project.service_project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_shared_vpc_service_project" "service_project" { + host_project = google_compute_shared_vpc_host_project.host_project.project + service_project = google_project_service.service_project.project +} + +resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + project = google_compute_shared_vpc_host_project.host_project.project + + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.inst-test-network.self_link + project = google_compute_shared_vpc_host_project.host_project.project +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + project = google_compute_shared_vpc_service_project.service_project.service_project + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.name + subnetwork_project = google_compute_subnetwork.inst-test-subnetwork.project + access_config { + } + } +} +`, projectName, org, billingId, projectName, org, billingId, suffix, suffix, instance) +} + +func testAccComputeInstance_networkIPAuto(suffix, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + 
family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.inst-test-network.self_link +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.name + access_config { + } + } +} +`, suffix, suffix, instance) +} + +func testAccComputeInstance_network_ip_custom(suffix, instance, ipAddress string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.inst-test-network.self_link +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.name + network_ip = "%s" + access_config { + } + } +} +`, suffix, suffix, instance, ipAddress) +} + +func testAccComputeInstance_private_image_family(disk, family, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + image = 
data.google_compute_image.my_image.self_link +} + +resource "google_compute_image" "foobar" { + name = "%s-1" + source_disk = google_compute_disk.foobar.self_link + family = "%s" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = google_compute_image.foobar.family + } + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, disk, family, family, instance) +} + +func testAccComputeInstance_networkPerformanceConfig(disk string, image string, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + zone = "us-central1-a" + image = data.google_compute_image.my_image.self_link +} + +resource "google_compute_image" "foobar" { + name = "%s" + source_disk = google_compute_disk.foobar.self_link + guest_os_features { + type = "GVNIC" + } + guest_os_features { + type = "VIRTIO_SCSI_MULTIQUEUE" + } + guest_os_features { + type = "UEFI_COMPATIBLE" + } + guest_os_features { + type = "SEV_CAPABLE" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n2-standard-2" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = google_compute_image.foobar.self_link + } + } + + network_interface { + network = "default" + access_config { + // Ephemeral IP + } + } + + network_performance_config { + total_egress_bandwidth_tier = "DEFAULT" + } +} +`, disk, image, instance) +} + +func testAccComputeInstance_multiNic(instance, network, subnetwork string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = 
data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.name + access_config { + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_network" "inst-test-network" { + name = "%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.inst-test-network.self_link +} +`, instance, network, subnetwork) +} + +func testAccComputeInstance_nictype(image, instance, nictype string) string { + return fmt.Sprintf(` +resource "google_compute_image" "example" { + name = "%s" + raw_disk { + source = "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" + } + + guest_os_features { + type = "SECURE_BOOT" + } + + guest_os_features { + type = "MULTI_IP_SUBNET" + } + + guest_os_features { + type = "GVNIC" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + //deletion_protection = false is implicit in this config due to default value + + boot_disk { + initialize_params { + image = google_compute_image.example.id + } + } + + network_interface { + network = "default" + nic_type = "%s" + } + + metadata = { + foo = "bar" + baz = "qux" + startup-script = "echo Hello" + } + + labels = { + my_key = "my_value" + my_other_key = "my_other_value" + } +} +`, image, instance, nictype) +} + +func testAccComputeInstance_guestAccelerator(instance string, count uint8) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of guest_accelerator + zone = "us-east1-d" + + boot_disk { + 
initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + # Instances with guest accelerators do not support live migration. + on_host_maintenance = "TERMINATE" + } + + guest_accelerator { + count = %d + type = "nvidia-tesla-t4" + } +} +`, instance, count) +} + +func testAccComputeInstance_minCpuPlatform(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of min_cpu_platform + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + min_cpu_platform = "Intel Haswell" + allow_stopping_for_update = true +} +`, instance) +} + +func testAccComputeInstance_minCpuPlatform_remove(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-micro" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + min_cpu_platform = "AuToMaTiC" + allow_stopping_for_update = true +} +`, instance) +} + +func testAccComputeInstance_primaryAliasIpRange(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + + alias_ip_range { + ip_cidr_range = 
"/24" + } + } +} +`, instance) +} + +func testAccComputeInstance_secondaryAliasIpRange(network, subnet, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.0.0/16" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + + alias_ip_range { + subnetwork_range_name = google_compute_subnetwork.inst-test-subnetwork.secondary_ip_range[0].range_name + ip_cidr_range = "172.16.0.0/24" + } + + alias_ip_range { + subnetwork_range_name = google_compute_subnetwork.inst-test-subnetwork.secondary_ip_range[1].range_name + ip_cidr_range = "10.1.0.0/20" + } + } +} +`, network, subnet, instance) +} + +func testAccComputeInstance_secondaryAliasIpRangeUpdate(network, subnet, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "inst-test-network" { + name = "%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + 
secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.0.0/16" + } +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + alias_ip_range { + ip_cidr_range = "10.0.1.0/24" + } + } +} +`, network, subnet, instance) +} + +func testAccComputeInstance_hostname(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + hostname = "%s.test" +} +`, instance, instance) +} + +// Set fields that require stopping the instance: machine_type, min_cpu_platform, and service_account +func testAccComputeInstance_stopInstanceToUpdate(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" // can't be e2 because of min_cpu_platform + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + min_cpu_platform = "Intel Broadwell" + service_account { + scopes = [ + "userinfo-email", + "compute-ro", + "storage-ro", + ] + } + + allow_stopping_for_update = true +} +`, instance) +} + +// Update fields that require stopping the instance: machine_type, min_cpu_platform, and service_account +func 
testAccComputeInstance_stopInstanceToUpdate2(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-2" // can't be e2 because of min_cpu_platform + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + min_cpu_platform = "Intel Skylake" + service_account { + scopes = [ + "userinfo-email", + "compute-ro", + ] + } + + allow_stopping_for_update = true +} +`, instance) +} + +// Remove fields that require stopping the instance: min_cpu_platform and service_account (machine_type is Required) +func testAccComputeInstance_stopInstanceToUpdate3(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-2" // can't be e2 because of min_cpu_platform + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + allow_stopping_for_update = true +} +`, instance) +} + +func testAccComputeInstance_withoutNodeAffinities(instance, nodeTemplate, nodeGroup string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-8" // can't be e2 because of sole tenancy + zone = "us-central1-a" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_node_template" "nodetmpl" { + name = 
"%s" + region = "us-central1" + + node_affinity_labels = { + tfacc = "test" + } + + node_type = "n1-node-96-624" + + cpu_overcommit_type = "ENABLED" +} + +resource "google_compute_node_group" "nodes" { + name = "%s" + zone = "us-central1-a" + + initial_size = 1 + node_template = google_compute_node_template.nodetmpl.self_link +} +`, instance, nodeTemplate, nodeGroup) +} + +func testAccComputeInstance_soleTenantNodeAffinities(instance, nodeTemplate, nodeGroup string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-8" // can't be e2 because of sole tenancy + zone = "us-central1-a" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + node_affinities { + key = "tfacc" + operator = "IN" + values = ["test"] + } + + node_affinities { + key = "tfacc" + operator = "NOT_IN" + values = ["not_here"] + } + + node_affinities { + key = "compute.googleapis.com/node-group-name" + operator = "IN" + values = [google_compute_node_group.nodes.name] + } + + min_node_cpus = 4 + } +} + +resource "google_compute_node_template" "nodetmpl" { + name = "%s" + region = "us-central1" + + node_affinity_labels = { + tfacc = "test" + } + + node_type = "n1-node-96-624" + + cpu_overcommit_type = "ENABLED" +} + +resource "google_compute_node_group" "nodes" { + name = "%s" + zone = "us-central1-a" + + initial_size = 1 + node_template = google_compute_node_template.nodetmpl.self_link +} +`, instance, nodeTemplate, nodeGroup) +} + +func testAccComputeInstance_soleTenantNodeAffinitiesUpdated(instance, nodeTemplate, nodeGroup string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource 
"google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-8" // can't be e2 because of sole tenancy + zone = "us-central1-a" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + node_affinities { + key = "tfacc" + operator = "IN" + values = ["test", "updatedlabel"] + } + + node_affinities { + key = "tfacc" + operator = "NOT_IN" + values = ["not_here"] + } + + node_affinities { + key = "compute.googleapis.com/node-group-name" + operator = "IN" + values = [google_compute_node_group.nodes.name] + } + + min_node_cpus = 6 + } +} + +resource "google_compute_node_template" "nodetmpl" { + name = "%s" + region = "us-central1" + + node_affinity_labels = { + tfacc = "test" + } + + node_type = "n1-node-96-624" + + cpu_overcommit_type = "ENABLED" +} + +resource "google_compute_node_group" "nodes" { + name = "%s" + zone = "us-central1-a" + + initial_size = 1 + node_template = google_compute_node_template.nodetmpl.self_link +} +`, instance, nodeTemplate, nodeGroup) +} + +func testAccComputeInstance_soleTenantNodeAffinitiesReduced(instance, nodeTemplate, nodeGroup string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-8" // can't be e2 because of sole tenancy + zone = "us-central1-a" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + node_affinities { + key = "tfacc" + operator = "IN" + values = ["test", "updatedlabel"] + } + + node_affinities { + key = "compute.googleapis.com/node-group-name" + operator = "IN" + values = [google_compute_node_group.nodes.name] + } + + min_node_cpus = 6 + 
} +} + +resource "google_compute_node_template" "nodetmpl" { + name = "%s" + region = "us-central1" + + node_affinity_labels = { + tfacc = "test" + } + + node_type = "n1-node-96-624" + + cpu_overcommit_type = "ENABLED" +} + +resource "google_compute_node_group" "nodes" { + name = "%s" + zone = "us-central1-a" + + initial_size = 1 + node_template = google_compute_node_template.nodetmpl.self_link +} +`, instance, nodeTemplate, nodeGroup) +} + +func testAccComputeInstance_reservationAffinity_nonSpecificReservationConfig(instanceName, reservationType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + reservation_affinity { + type = "%s" + } +}`, instanceName, reservationType) +} + +func testAccComputeInstance_reservationAffinity_specificReservationConfig(instanceName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_reservation" "reservation" { + name = "%s" + zone = "us-central1-a" + + specific_reservation { + count = 1 + instance_properties { + machine_type = "n1-standard-1" + } + } + specific_reservation_required = true +} + +resource "google_compute_instance" "foobar" { + name = "%[1]s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + reservation_affinity { + type = "SPECIFIC_RESERVATION" + + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = ["%[1]s"] + } + } +}`, instanceName) +} + +func 
testAccComputeInstance_shieldedVmConfig(instance string, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + shielded_instance_config { + enable_secure_boot = %t + enable_vtpm = %t + enable_integrity_monitoring = %t + } + + allow_stopping_for_update = true +} +`, instance, enableSecureBoot, enableVtpm, enableIntegrityMonitoring) +} + +func testAccComputeInstanceConfidentialInstanceConfigEnable(instance string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n2d-standard-2" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + confidential_instance_config { + enable_confidential_compute = true +{{- if ne $.TargetVersionName "ga" }} + confidential_instance_type = %q +{{- end }} + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} + +{{ if ne $.TargetVersionName `ga` -}} +resource "google_compute_instance" "foobar2" { + name = "%s2" + machine_type = "n2d-standard-2" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + confidential_instance_config { + enable_confidential_compute = true + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +{{- end }} +{{- if eq $.TargetVersionName "ga" }} 
+`, instance) +{{- else }} +`, instance, confidentialInstanceType, instance) +{{- end }} +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstanceConfidentialInstanceConfigNoEnable(instance string, minCpuPlatform, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image2" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar3" { + name = "%s3" + machine_type = "n2d-standard-2" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image2.self_link + } + } + + network_interface { + network = "default" + } + + min_cpu_platform = %q + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +resource "google_compute_instance" "foobar4" { + name = "%s4" + machine_type = "n2d-standard-2" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image2.self_link + } + } + + network_interface { + network = "default" + } + + min_cpu_platform = %q + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, instance, minCpuPlatform, confidentialInstanceType, instance, minCpuPlatform, confidentialInstanceType) +} +{{- end }} + +func testAccComputeInstance_attributionLabelCreate(instance, add, strategy string) string { + return fmt.Sprintf(` +provider "google" { + add_terraform_attribution_label = %s + terraform_attribution_label_addition_strategy = %q +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + desired_status = "RUNNING" + + boot_disk { + initialize_params { + 
image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + labels = { + user_label = "foo" + } +} +`, add, strategy, instance) +} + +func testAccComputeInstance_attributionLabelUpdate(instance, add, strategy string) string { + return fmt.Sprintf(` +provider "google" { + add_terraform_attribution_label = %s + terraform_attribution_label_addition_strategy = %q +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + desired_status = "RUNNING" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + labels = { + user_label = "bar" + } +} +`, add, strategy, instance) +} + +func testAccComputeInstanceConfidentialHyperDiskBootDisk(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2204-lts" + project = "ubuntu-os-cloud" +} + +data "google_project" "project" {} + +resource "google_kms_crypto_key_iam_member" "crypto_key" { + crypto_key_id = "%{key_name}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:${data.google_project.project.number}-compute@developer.gserviceaccount.com" +} + +resource "google_compute_instance" "foobar" { + name = "%{instance_name}" + machine_type = "%{machine_type}" + zone = "%{zone}" + + boot_disk { + + initialize_params { + image = data.google_compute_image.my_image.self_link + enable_confidential_compute = %{confidential_compute} + type = "hyperdisk-balanced" + } + + kms_key_self_link = "%{key_name}" + } + + network_interface { + network = "default" + } + depends_on = [google_kms_crypto_key_iam_member.crypto_key] + +} +`, context) +} + +func 
testAccComputeInstanceHyperDiskBootDiskProvisionedIopsThroughput(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2204-lts" + project = "ubuntu-os-cloud" +} + +data "google_project" "project" {} + +resource "google_compute_instance" "foobar" { + name = "%{instance_name}" + machine_type = "h3-standard-88" + zone = "%{zone}" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + provisioned_iops = %{provisioned_iops} + provisioned_throughput = %{provisioned_throughput} + type = "hyperdisk-balanced" + size = 100 + } + } + + network_interface { + network = "default" + } +} +`, context) +} + +func testAccComputeInstance_enableDisplay(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + enable_display = true + + allow_stopping_for_update = true +} +`, instance) +} + +func testAccComputeInstance_enableDisplayUpdated(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + enable_display = false + + allow_stopping_for_update = true +} +`, instance) +} + +func testAccComputeInstance_machineType_desiredStatus_allowStoppingForUpdate( + instance, machineType, desiredStatus string, + allowStoppingForUpdate bool, +) string { + desiredStatusConfigSection 
:= "" + if desiredStatus != "" { + desiredStatusConfigSection = fmt.Sprintf( + "desired_status = \"%s\"", + desiredStatus, + ) + } + + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "%s" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params{ + image = "${data.google_compute_image.my_image.self_link}" + } + } + + network_interface { + network = "default" + } + + %s + + metadata = { + foo = "bar" + } + + allow_stopping_for_update = %t +} +`, instance, machineType, desiredStatusConfigSection, allowStoppingForUpdate) +} + +func testAccComputeInstance_desiredStatusTerminatedUpdate(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = false + tags = ["baz"] + + boot_disk { + initialize_params{ + image = "${data.google_compute_image.my_image.self_link}" + } + } + + network_interface { + network = "default" + } + + desired_status = "TERMINATED" + + metadata = { + bar = "baz" + } + + labels = { + only_me = "nothing_else" + } +} +`, instance) +} + +func testAccComputeInstance_resourcePolicyCollocate(instance, suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "c2-standard-4" + zone = "us-east4-b" + can_ip_forward = false + tags = ["foo", "bar"] + + //deletion_protection = false is implicit in this config due to default value + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + 
scheduling { + # Instances with resource policies do not support live migration. + on_host_maintenance = "TERMINATE" + automatic_restart = false + } + + resource_policies = [google_compute_resource_policy.foo.self_link] +} + +resource "google_compute_instance" "second" { + name = "%s-2" + machine_type = "c2-standard-4" + zone = "us-east4-b" + can_ip_forward = false + tags = ["foo", "bar"] + + //deletion_protection = false is implicit in this config due to default value + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + # Instances with resource policies do not support live migration. + on_host_maintenance = "TERMINATE" + automatic_restart = false + } + + resource_policies = [google_compute_resource_policy.foo.self_link] +} + +resource "google_compute_resource_policy" "foo" { + name = "tf-test-policy-%s" + region = "us-east4" + group_placement_policy { + vm_count = 2 + collocation = "COLLOCATED" + } +} + +`, instance, instance, suffix) +} + +func testAccComputeInstance_subnetworkUpdate(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_network" "inst-test-network2" { + name = "tf-test-network2-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = "10.1.0.0/16" + } + } + + resource "google_compute_subnetwork" 
"inst-test-subnetwork2" { + name = "tf-test-compute-subnet2-%s" + ip_cidr_range = "10.3.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network2.id + secondary_ip_range { + range_name = "inst-test-secondary2" + ip_cidr_range = "173.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary2" + ip_cidr_range = "10.4.0.0/16" + } + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.id + access_config { + network_tier = "STANDARD" + } + alias_ip_range { + subnetwork_range_name = google_compute_subnetwork.inst-test-subnetwork.secondary_ip_range[0].range_name + ip_cidr_range = "172.16.0.0/24" + } + + alias_ip_range { + subnetwork_range_name = google_compute_subnetwork.inst-test-subnetwork.secondary_ip_range[1].range_name + ip_cidr_range = "10.1.0.0/20" + } + } + } +`, suffix, suffix, suffix, suffix, instance) +} + +func testAccComputeInstance_subnetworkUpdateTwo(suffix, instance string) string { + return fmt.Sprintf(` + data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + } + + resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + } + + resource "google_compute_network" "inst-test-network2" { + name = "tf-test-network2-%s" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "tf-test-compute-subnet-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network.id + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary" + ip_cidr_range = 
"10.1.0.0/16" + } + } + + resource "google_compute_subnetwork" "inst-test-subnetwork2" { + name = "tf-test-compute-subnet2-%s" + ip_cidr_range = "10.3.0.0/16" + region = "us-east1" + network = google_compute_network.inst-test-network2.id + secondary_ip_range { + range_name = "inst-test-secondary2" + ip_cidr_range = "173.16.0.0/20" + } + secondary_ip_range { + range_name = "inst-test-tertiary2" + ip_cidr_range = "10.4.0.0/16" + } + } + + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork2.id + network_ip = "10.3.0.3" + access_config { + network_tier = "STANDARD" + } + alias_ip_range { + subnetwork_range_name = google_compute_subnetwork.inst-test-subnetwork2.secondary_ip_range[0].range_name + ip_cidr_range = "173.16.0.0/24" + } + } + } +`, suffix, suffix, suffix, suffix, instance) +} + +func testAccComputeInstance_queueCountSet(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-east1-d" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + network = "default" + queue_count = 2 + } +} +`, instance) +} + +func testAccComputeInstance_spotVM(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + 
+ network_interface { + network = "default" + } + + scheduling { + provisioning_model = "SPOT" + automatic_restart = false + preemptible = true + instance_termination_action = "STOP" + } +} +`, instance) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstance_standardVM_maxRunDuration(instance string, instanceTerminationAction string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + provisioning_model = "STANDARD" + automatic_restart = false + instance_termination_action = "%s" + max_run_duration { + nanos = 123 + seconds = 60 + } + } +} +`, instance, instanceTerminationAction) +} + +func testAccComputeInstance_standardVM_maxRunDurationUpdated(instance string, instanceTerminationAction string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + provisioning_model = "STANDARD" + automatic_restart = false + instance_termination_action = "%s" + max_run_duration { + nanos = 456 + seconds = 60 + } + } +} +`, instance, instanceTerminationAction) +} + +func testAccComputeInstance_localSsdVM_maxRunDuration(instance string, instanceTerminationAction string) string { + return fmt.Sprintf(` + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n2-standard-8" + zone = "us-central1-a" + + boot_disk { + 
initialize_params { + image = "debian-cloud/debian-11" + } + } + + # Local SSD interface type; NVME for image with optimized NVMe drivers or SCSI + # Local SSD are 375 GiB in size + scratch_disk { + interface = "SCSI" + } + + network_interface { + network = "default" + access_config {} + } + + scheduling { + provisioning_model = "STANDARD" + automatic_restart = false + instance_termination_action = "%s" + max_run_duration { + nanos = 123 + seconds = 180 + } + on_instance_stop_action { + discard_local_ssd = true + } + } +} +`, instance, instanceTerminationAction) +} +{{- end }} + + +func testAccComputeInstance_spotVM_maxRunDuration(instance string, instanceTerminationAction string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + provisioning_model = "SPOT" + automatic_restart = false + preemptible = true + instance_termination_action = "%s" +{{- if ne $.TargetVersionName "ga" }} + max_run_duration { + nanos = 123 + seconds = 60 + } +{{- end }} + } +} +`, instance, instanceTerminationAction) +} + +func testAccComputeInstance_localSsdRecoveryTimeout(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + scheduling { + local_ssd_recovery_timeout { + nanos = 0 + seconds = 3600 + } + } + +} +`, instance) +} + +{{ if ne $.TargetVersionName 
`ga` -}} +func testAccComputeInstance_partnerMetadata_empty(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +}`, instance) +} + +func testAccComputeInstance_partnerMetadata(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } +}`, instance) +} +{{- end }} + +func testAccComputeInstance_metadataStartupScript(instance, machineType, metadata string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "%s" + zone = "us-central1-a" + can_ip_forward = false + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + metadata = { + foo = "%s" + } + metadata_startup_script = "echo hi > /test.txt" + allow_stopping_for_update = true +} +`, instance, machineType, metadata) +} + + +func testAccComputeInstance_regionBootDisk(instance, diskName, suffix string) string { + return fmt.Sprintf(` +resource 
"google_compute_instance" "regional_vm_instance" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-c" + + boot_disk { + source = google_compute_region_disk.regionaldisk.self_link + } + network_interface { + network = google_compute_network.vpc_network.name + access_config {} + } +} + +resource "google_compute_region_disk" "regionaldisk" { + name = "%s" + type = "pd-ssd" + region = "us-central1" + replica_zones = ["us-central1-c", "us-central1-a"] + size = 50 + snapshot = google_compute_snapshot.debian.id +} + +resource "google_compute_network" "vpc_network" { + name = "tf-test-%s" +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_snapshot" "debian" { + name = "tf-test-%s" + source_disk = google_compute_disk.debian.id +} + +resource "google_compute_disk" "debian" { + name = "tf-test-%s" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-c" +} +`, instance, diskName, suffix, suffix, suffix) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstance_nic_securityPolicyCreateWithOneNicAndTwoAccessConfigs(suffix, policy, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = 
google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + access_config { + network_tier = "STANDARD" + } + security_policy = google_compute_region_security_policy.policyforinstance.self_link + } + + 
metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, policy, instance) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsWithTwoSecurityPoliciesAndStatus(suffix, policy, instance, policyToSetOne, desiredStatus string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = 
google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet.self_link + access_config { + network_tier = "STANDARD" + } + security_policy = %s + } + + metadata = { + foo = "bar" + } + + desired_status = "%s" +} +`, suffix, suffix, suffix, suffix, policy, instance, policyToSetOne, desiredStatus) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPolicies(suffix, policy, policy2, instance, policyToSetOne, policyToSetTwo string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_address" "normal-address" { + region = 
"europe-west1" + name = "tf-test-addr-normal-%s" +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_network" "net2" { + name = "tf-test-network2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet2" { + region = "europe-west1" + name = "tf-test-subnet2-%s" + ip_cidr_range = "192.170.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net2.id +} + +resource "google_compute_subnetwork" "subnet-ipv62" { + region = "europe-west1" + name = "tf-test-subnet-ip62-%s" + ip_cidr_range = "10.10.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net2.id +} + +resource "google_compute_address" "normal-address2" { + region = "europe-west1" + name = "tf-test-addr-normal2-%s" +} + +resource "google_compute_address" "ipv6-address2" { + region = "europe-west1" + name = "tf-test-addr-ipv62-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv62.name +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_region_security_policy" "policyforinstance2" { + region = "europe-west1" + name = "%s" + description = "region security policy 2 to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = 
"%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + access_config { + network_tier = "PREMIUM" + nat_ip = google_compute_address.normal-address.address + } + security_policy = %s + } + + network_interface { + network = google_compute_network.net2.self_link + subnetwork = google_compute_subnetwork.subnet-ipv62.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address2.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + access_config { + network_tier = "PREMIUM" + nat_ip = google_compute_address.normal-address2.address + } + security_policy = %s + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, policy, policy2, instance, policyToSetOne, policyToSetTwo) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndTwoAccessConfigsUpdateTwoPoliciesRemoveAccessConfig(suffix, policy, policy2, instance, policyToSetOne, policyToSetTwo string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + 
ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_address" "normal-address" { + region = "europe-west1" + name = "tf-test-addr-normal-%s" +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_network" "net2" { + name = "tf-test-network2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet2" { + region = "europe-west1" + name = "tf-test-subnet2-%s" + ip_cidr_range = "192.170.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net2.id +} + +resource "google_compute_subnetwork" "subnet-ipv62" { + region = "europe-west1" + name = "tf-test-subnet-ip62-%s" + ip_cidr_range = "10.10.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = 
google_compute_network.net2.id +} + +resource "google_compute_address" "normal-address2" { + region = "europe-west1" + name = "tf-test-addr-normal2-%s" +} + +resource "google_compute_address" "ipv6-address2" { + region = "europe-west1" + name = "tf-test-addr-ipv62-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv62.name +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_region_security_policy" "policyforinstance2" { + region = "europe-west1" + name = "%s" + description = "region security policy 2 to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + # access config removed + security_policy = %s + } + + network_interface { + network = google_compute_network.net2.self_link + subnetwork = google_compute_subnetwork.subnet-ipv62.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address2.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + 
network_tier = "PREMIUM" + } + # access config removed + security_policy = %s + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, suffix, policy, policy2, instance, policyToSetOne, policyToSetTwo) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoNicsAndAccessConfigsWithEmptyAndNullSecurityPolicies(suffix, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_network" "net2" { + name = "tf-test-network2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet2" { + region = "europe-west1" + name = "tf-test-subnet2-%s" + ip_cidr_range = "192.170.0.0/20" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net2.id +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet.self_link + access_config { + network_tier = "STANDARD" + } + security_policy = "" + } + + network_interface { + network = google_compute_network.net2.self_link + subnetwork = google_compute_subnetwork.subnet2.self_link + access_config { + network_tier = "STANDARD" + } + security_policy = null + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, instance) +} + 
+func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsUpdateAccessConfig(suffix, policy, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_address" "normal-address" { + region = "europe-west1" + name = "tf-test-addr-normal-%s" +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_region_security_policy" 
"policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + ipv6_access_config { + external_ipv6 = google_compute_address.ipv6-address.address + external_ipv6_prefix_length = 96 + name = "external-ipv6-access-config" + network_tier = "PREMIUM" + } + access_config { + network_tier = "PREMIUM" + nat_ip = google_compute_address.normal-address.address + } + security_policy = google_compute_region_security_policy.policyforinstance.self_link + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, suffix, policy, instance) +} + +func testAccComputeInstance_nic_securityPolicyCreateWithTwoAccessConfigsRemoveAccessConfig(suffix, policy, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +# First activate advanced network DDoS protection for the desired region +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "europe-west1" + name = "tf-test-policyddosprotection-%s" + description = "region security policy for instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "europe-west1" + name = "tf-test-edgesecservice-%s" + description = "My basic resource using security policy" + security_policy = 
google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + region = "europe-west1" + name = "tf-test-subnet-%s" + ip_cidr_range = "192.168.0.0/16" + purpose = "PRIVATE" + stack_type = "IPV4_ONLY" + network = google_compute_network.net.id +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "europe-west1" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_address" "normal-address" { + region = "europe-west1" + name = "tf-test-addr-normal-%s" +} + +resource "google_compute_address" "ipv6-address" { + region = "europe-west1" + name = "tf-test-addr-ipv6-%s" + address_type = "EXTERNAL" + ip_version = "IPV6" + network_tier = "PREMIUM" + ipv6_endpoint_type = "VM" + subnetwork = google_compute_subnetwork.subnet-ipv6.name +} + +resource "google_compute_region_security_policy" "policyforinstance" { + region = "europe-west1" + name = "%s" + description = "region security policy to set to instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "europe-west1-b" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "IPV4_IPV6" + # remove all access config + security_policy = google_compute_region_security_policy.policyforinstance.self_link + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix, suffix, suffix, suffix, 
policy, instance) +} + +{{ end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeInstance_networkAttachment(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "test-network"{ + name = "tf-test-network-%{suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "test-subnetwork" { + name = "tf-test-compute-subnet-%{suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.test-network.id +} + +resource "google_compute_network_attachment" "test_network_attachment" { + name = "%{network_attachment_name}" + region = "us-central1" + description = "network attachment description" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.test-subnetwork.self_link + ] +} + +resource "google_compute_instance" "foobar" { + name = "tf-test-instance-%{suffix}" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.id + } + } + + network_interface { + network = "default" + } + + network_interface{ + network_attachment = google_compute_network_attachment.test_network_attachment.self_link + } + + metadata = { + foo = "bar" + } +} +`, context) +} + +func testAccComputeInstance_networkAttachmentUpdate(networkAttachment, region, suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "consumer_vpc_1" { + name = "tf-test-consumer-net-1-%s" + auto_create_subnetworks = false +} + +resource "google_compute_network" "consumer_vpc_2" { + name = "tf-test-consumer-net-2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "consumer_subnet_1" { + name = "tf-test-consumer-subnet-1-%s" + ip_cidr_range = "10.0.0.0/16" + region 
= "%s" + network = google_compute_network.consumer_vpc_1.id +} + +resource "google_compute_subnetwork" "consumer_subnet_2" { + name = "tf-test-consumer-subnet-2-%s" + ip_cidr_range = "10.3.0.0/16" + region = "%s" + network = google_compute_network.consumer_vpc_2.id +} + +resource "google_compute_network_attachment" "test_network_attachment_1" { + name = "tf-test-network-attachment-1-%s" + region = "%s" + description = "network attachment 1 description" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.consumer_subnet_1.self_link + ] +} + +resource "google_compute_network_attachment" "test_network_attachment_2" { + name = "tf-test-network-attachment-2-%s" + region = "%s" + description = "network attachment 2 description" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.consumer_subnet_2.self_link + ] +} + +resource "google_compute_instance" "foobar" { + name = "tf-test-compute-instance-%s" + machine_type = "e2-medium" + zone = "%s-a" + allow_stopping_for_update = true + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + network_interface{ + network_attachment = %s + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, region, suffix, region, suffix, region, suffix, region, suffix, region, networkAttachment) +} +{{- end }} + +func testAccComputeInstance_nicStackTypeUpdate(suffix, region, stack_type, instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "net" { + name = "tf-test-network-%s" + enable_ula_internal_ipv6 = true + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet-ipv6" { + region = "%s" + name = "tf-test-subnet-ip6-%s" + ip_cidr_range = "10.0.0.0/22" + purpose = "PRIVATE" + stack_type = "IPV4_IPV6" 
+ ipv6_access_type = "INTERNAL" + network = google_compute_network.net.id +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "%s-a" + tags = ["foo", "bar"] + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = google_compute_network.net.self_link + subnetwork = google_compute_subnetwork.subnet-ipv6.self_link + stack_type = "%s" + } + + metadata = { + foo = "bar" + } +} +`, suffix, region, suffix, instance, region, stack_type) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_edge_security_service_sweeper.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_edge_security_service_sweeper.go.tmpl new file mode 100644 index 000000000000..3bbedc7fee20 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_edge_security_service_sweeper.go.tmpl @@ -0,0 +1,64 @@ +package compute +{{- if ne $.TargetVersionName "ga" }} + +import ( + "context" + "log" + + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func init() { + sweeper.AddTestSweepers("ComputeNetworkEdgeSecurityService", testSweepComputeNetworkEdgeSecurityService) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepComputeNetworkEdgeSecurityService(region string) error { + resourceName := "ComputeNetworkEdgeSecurityService" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + found, err := 
config.NewComputeClient(config.UserAgent).NetworkEdgeSecurityServices.AggregatedList(config.Project).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request: %s", err) + return nil + } + + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for zone, itemList := range found.Items { + for _, tp := range itemList.NetworkEdgeSecurityServices { + if !sweeper.IsSweepableTestResource(tp.Name) { + nonPrefixCount++ + continue + } + + // Don't wait on operations as we may have a lot to delete + _, err := config.NewComputeClient(config.UserAgent).NetworkEdgeSecurityServices.Delete(config.Project, tpgresource.GetResourceNameFromSelfLink(zone), tp.Name).Do() + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting %s resource %s : %s", resourceName, tp.Name, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, tp.Name) + } + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_edge_security_service_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_edge_security_service_test.go.tmpl new file mode 100644 index 000000000000..e0110fcc32b8 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_edge_security_service_test.go.tmpl @@ -0,0 +1,79 @@ +package compute_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeNetworkEdgeSecurityService_update(t *testing.T) { + t.Parallel() + + pName := fmt.Sprintf("tf-test-security-policy-%s", acctest.RandString(t, 10)) + nesName := fmt.Sprintf("tf-test-edge-security-services-%s", 
acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkEdgeSecurityServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkEdgeSecurityService_basic(pName, nesName), + }, + { + ResourceName: "google_compute_network_edge_security_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNetworkEdgeSecurityService_update(pName, nesName), + }, + { + ResourceName: "google_compute_network_edge_security_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccNetworkEdgeSecurityService_basic(pName, nesName string) string { + return fmt.Sprintf(` +resource "google_compute_region_security_policy" "foobar" { + name = "%s" + description = "basic region security policy" + type = "CLOUD_ARMOR_NETWORK" + # can only exist one of this resource per region + region = "us-central1" +} + +resource "google_compute_network_edge_security_service" "foobar" { + name = "%s" + region = "us-central1" + description = "My basic resource using security policy" + security_policy = google_compute_region_security_policy.foobar.self_link +} +`, pName, nesName) +} + +func testAccNetworkEdgeSecurityService_update(pName, nesName string) string { + return fmt.Sprintf(` +resource "google_compute_region_security_policy" "foobar" { + name = "%s" + description = "basic region security policy" + type = "CLOUD_ARMOR_NETWORK" + region = "us-central1" +} + +resource "google_compute_network_edge_security_service" "foobar" { + name = "%s" + region = "us-central1" + description = "My basic updated resource using security policy" + security_policy = google_compute_region_security_policy.foobar.self_link +} +`, pName, nesName) +} +{{- end }} \ No newline at end of file diff --git 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoint_group_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoint_group_test.go new file mode 100644 index 000000000000..a42926815467 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoint_group_test.go @@ -0,0 +1,124 @@ +package compute_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeNetworkEndpointGroup_networkEndpointGroup(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkEndpointGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkEndpointGroup_networkEndpointGroup(context), + }, + { + ResourceName: "google_compute_network_endpoint_group.neg", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network", "subnetwork", "zone"}, + }, + }, + }) +} + +func TestAccComputeNetworkEndpointGroup_internalEndpoint(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkEndpointGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkEndpointGroup_internalEndpoint(context), + }, + { + ResourceName: "google_compute_network_endpoint_group.neg", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network", "subnetwork", 
"zone"}, + }, + }, + }) +} + +func testAccComputeNetworkEndpointGroup_networkEndpointGroup(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_endpoint_group" "neg" { + name = "tf-test-my-lb-neg%{random_suffix}" + network = google_compute_network.default.id + default_port = "90" + zone = "us-central1-a" +} + +resource "google_compute_network" "default" { + name = "tf-test-neg-network%{random_suffix}" + auto_create_subnetworks = true +} +`, context) +} + +func testAccComputeNetworkEndpointGroup_internalEndpoint(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_endpoint_group" "neg" { + name = "tf-test-my-lb-neg%{random_suffix}" + network = google_compute_network.internal.id + subnetwork = google_compute_subnetwork.internal.id + zone = "us-central1-a" + network_endpoint_type = "GCE_VM_IP" +} + +resource "google_compute_network_endpoint" "endpoint" { + network_endpoint_group = google_compute_network_endpoint_group.neg.name + #ip_address = "127.0.0.1" + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip +} + +resource "google_compute_network" "internal" { + name = "tf-test-neg-network%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "internal"{ + name = "tf-test-my-subnetwork%{random_suffix}" + network = google_compute_network.internal.id + ip_cidr_range = "10.128.0.0/20" + region = "us-central1" + private_ip_google_access= true +} + +resource "google_compute_instance" "default" { + name = "tf-test-neg-%{random_suffix}" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = "debian-8-jessie-v20160803" + } + } + + network_interface { + subnetwork = google_compute_subnetwork.internal.self_link + access_config { + } + } +} + +`, context) +} diff --git 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoint_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoint_test.go.tmpl new file mode 100644 index 000000000000..c562bb634bcf --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoint_test.go.tmpl @@ -0,0 +1,239 @@ +package compute_test +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccComputeNetworkEndpoint_networkEndpointsBasic(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "default_port": 90, + "modified_port": 100, + "add1_port": 101, + "add2_port": 102, + } + negId := fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/tf-test-neg-%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), context["random_suffix"]) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one endpoint + Config: testAccComputeNetworkEndpoint_networkEndpointsBasic(context), + }, + { + ResourceName: "google_compute_network_endpoint.default", + ImportState: true, + ImportStateVerify: true, + }, + { + // Force-recreate old endpoint + Config: testAccComputeNetworkEndpoint_networkEndpointsModified(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkEndpointWithPortsDestroyed(t, negId, "90"), + ), + }, + { + ResourceName: "google_compute_network_endpoint.default", + ImportState: true, 
+ ImportStateVerify: true, + }, + { + // Add two new endpoints + Config: testAccComputeNetworkEndpoint_networkEndpointsAdditional(context), + }, + { + ResourceName: "google_compute_network_endpoint.default", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_network_endpoint.add1", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_network_endpoint.add2", + ImportState: true, + ImportStateVerify: true, + }, + { + // delete all endpoints + Config: testAccComputeNetworkEndpoint_noNetworkEndpoints(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkEndpointWithPortsDestroyed(t, negId, "100"), + ), + }, + }, + }) +} + +func testAccComputeNetworkEndpoint_networkEndpointsBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_endpoint" "default" { + zone = "us-central1-a" + network_endpoint_group = google_compute_network_endpoint_group.neg.id + + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = google_compute_network_endpoint_group.neg.default_port +} +`, context) + testAccComputeNetworkEndpoint_noNetworkEndpoints(context) +} + +func testAccComputeNetworkEndpoint_networkEndpointsModified(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_endpoint" "default" { + zone = "us-central1-a" + network_endpoint_group = google_compute_network_endpoint_group.neg.name + + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{modified_port}" +} +`, context) + testAccComputeNetworkEndpoint_noNetworkEndpoints(context) +} + +func testAccComputeNetworkEndpoint_networkEndpointsAdditional(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_endpoint" "default" { + zone = 
"us-central1-a" + network_endpoint_group = google_compute_network_endpoint_group.neg.id + + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{modified_port}" +} + +resource "google_compute_network_endpoint" "add1" { + zone = "us-central1-a" + network_endpoint_group = google_compute_network_endpoint_group.neg.id + + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{add1_port}" +} + +resource "google_compute_network_endpoint" "add2" { + zone = "us-central1-a" + network_endpoint_group = google_compute_network_endpoint_group.neg.name + + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{add2_port}" +} +`, context) + testAccComputeNetworkEndpoint_noNetworkEndpoints(context) +} + +func testAccComputeNetworkEndpoint_noNetworkEndpoints(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_endpoint_group" "neg" { + name = "tf-test-neg-%{random_suffix}" + zone = "us-central1-a" + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link + default_port = "%{default_port}" +} + +resource "google_compute_network" "default" { + name = "tf-test-neg-network-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + name = "tf-test-neg-subnetwork-%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.self_link +} + +resource "google_compute_instance" "default" { + name = "tf-test-neg-%{random_suffix}" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.default.self_link 
+ access_config { + } + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} +`, context) +} + +// testAccCheckComputeNetworkEndpointDestroyed makes sure the endpoint with +// given Terraform resource name and previous information (obtained from Exists) +// was destroyed properly. +func testAccCheckComputeNetworkEndpointWithPortsDestroyed(t *testing.T, negId string, ports ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundPorts, err := testAccComputeNetworkEndpointListEndpointPorts(t, negId) + if err != nil { + return fmt.Errorf("unable to confirm endpoints with ports %+v was destroyed: %v", ports, err) + } + for _, p := range ports { + if _, ok := foundPorts[p]; ok { + return fmt.Errorf("network endpoint with port %s still exists", p) + } + } + + return nil + } +} + +func testAccComputeNetworkEndpointListEndpointPorts(t *testing.T, negId string) (map[string]struct{}, error) { + config := acctest.GoogleProviderConfig(t) + + {{ if eq $.TargetVersionName `ga` }} + url := fmt.Sprintf("https://www.googleapis.com/compute/v1/%s/listNetworkEndpoints", negId) + {{- else }} + url := fmt.Sprintf("https://www.googleapis.com/compute/beta/%s/listNetworkEndpoints", negId) + {{- end }} + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: config.UserAgent, + }) + if err != nil { + return nil, err + } + + v, ok := res["items"] + if !ok || v == nil { + return nil, nil + } + items := v.([]interface{}) + ports := make(map[string]struct{}) + for _, item := range items { + endptWithHealth := item.(map[string]interface{}) + v, ok := endptWithHealth["networkEndpoint"] + if !ok || v == nil { + continue + } + endpt := v.(map[string]interface{}) + ports[fmt.Sprintf("%v", endpt["port"])] = struct{}{} + } + return ports, nil +} diff --git 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoints_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoints_test.go.tmpl new file mode 100644 index 000000000000..ede80521b872 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_endpoints_test.go.tmpl @@ -0,0 +1,326 @@ +package compute_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccComputeNetworkEndpoints_networkEndpointsBasic(t *testing.T) { + t.Parallel() + + // detachNetworkEndpoints call ordering is not guaranteed, causing VCR to rerecord + acctest.SkipIfVcr(t) + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "default_port": 90, + "modified_port": 100, + "add1_port": 101, + "add2_port": 102, + "add3_port": 103, + "add4_port": 104, + "add5_port": 105, + } + negId := fmt.Sprintf("projects/%s/zones/%s/networkEndpointGroups/tf-test-neg-%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), context["random_suffix"]) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one endpoint + Config: testAccComputeNetworkEndpoints_networkEndpointsBase(context), + }, + { + ResourceName: "google_compute_network_endpoints.default", + ImportState: true, + ImportStateVerify: true, + ImportStateId: negId, + }, + { + // Force-recreate old endpoint + Config: testAccComputeNetworkEndpoints_networkEndpointsModified(context), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeNetworkEndpointsWithPortsDestroyed(t, negId, "90"), + ), + }, + { + ResourceName: "google_compute_network_endpoints.default", + ImportState: true, + ImportStateVerify: true, + ImportStateId: negId, + }, + { + // Add four new endpoints + Config: testAccComputeNetworkEndpoints_networkEndpointsAdditional(context), + }, + { + ResourceName: "google_compute_network_endpoints.default", + ImportState: true, + ImportStateVerify: true, + ImportStateId: negId, + }, + { + // Add enough endpoints to trigger pagination + Config: testAccComputeNetworkEndpoints_networkEndpointsPaginated(context, 100, 1300), + }, + { + ResourceName: "google_compute_network_endpoints.default", + ImportState: true, + ImportStateVerify: true, + ImportStateId: negId, + }, + { + // Remove enough endpoints to trigger pagination + Config: testAccComputeNetworkEndpoints_networkEndpointsPaginated(context, 700, 1900), + }, + { + ResourceName: "google_compute_network_endpoints.default", + ImportState: true, + ImportStateVerify: true, + ImportStateId: negId, + }, + { + // delete all endpoints + Config: testAccComputeNetworkEndpoints_noNetworkEndpoints(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkEndpointsWithAllEndpointsDestroyed(t, negId), + ), + }, + }, + }) +} + +func testAccComputeNetworkEndpoints_networkEndpointsBase(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_endpoints" "default" { + zone = "us-central1-a" + network_endpoint_group = google_compute_network_endpoint_group.neg.id + + network_endpoints { + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = google_compute_network_endpoint_group.neg.default_port + } +} +`, context) + testAccComputeNetworkEndpoints_noNetworkEndpoints(context) +} + +func testAccComputeNetworkEndpoints_networkEndpointsModified(context map[string]interface{}) string { + return 
acctest.Nprintf(` +resource "google_compute_network_endpoints" "default" { + zone = "us-central1-a" + network_endpoint_group = google_compute_network_endpoint_group.neg.name + + network_endpoints { + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{modified_port}" + } +} +`, context) + testAccComputeNetworkEndpoints_noNetworkEndpoints(context) +} + +func testAccComputeNetworkEndpoints_networkEndpointsAdditional(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_endpoints" "default" { + zone = "us-central1-a" + network_endpoint_group = google_compute_network_endpoint_group.neg.id + + network_endpoints { + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{modified_port}" + } + network_endpoints { + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{add1_port}" + } + network_endpoints { + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{add2_port}" + } + network_endpoints { + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{add3_port}" + } + network_endpoints { + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{add4_port}" + } + network_endpoints { + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = "%{add5_port}" + } +} +`, context) + testAccComputeNetworkEndpoints_noNetworkEndpoints(context) +} + +func testAccComputeNetworkEndpoints_networkEndpointsPaginated(context map[string]interface{}, lower, upper 
int) string { + context["for_each"] = networkEndpointsGenerateRanges(lower, upper) + return acctest.Nprintf(` +resource "google_compute_network_endpoints" "default" { + zone = "us-central1-a" + network_endpoint_group = google_compute_network_endpoint_group.neg.name + + dynamic "network_endpoints" { + for_each = %{for_each} + content { + instance = google_compute_instance.default.name + ip_address = google_compute_instance.default.network_interface[0].network_ip + port = network_endpoints.value + } + } +} +`, context) + testAccComputeNetworkEndpoints_noNetworkEndpoints(context) +} + +// Terraform `range` can only generate a list of 1024 elements, so we need to +// concat them to get a longer list +func networkEndpointsGenerateRanges(lower, upper int) string { + var ranges []string + l := lower + for l < upper { + u := l + 1024 + if u > upper { + u = upper + } + ranges = append(ranges, fmt.Sprintf("range(%d, %d)", l, u)) + l += 1024 + } + return fmt.Sprintf("concat(%s)", strings.Join(ranges, ", ")) +} + +func testAccComputeNetworkEndpoints_noNetworkEndpoints(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_endpoint_group" "neg" { + name = "tf-test-neg-%{random_suffix}" + zone = "us-central1-a" + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link + default_port = "%{default_port}" +} + +resource "google_compute_network" "default" { + name = "tf-test-neg-network-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + name = "tf-test-neg-subnetwork-%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.self_link +} + +resource "google_compute_instance" "default" { + name = "tf-test-neg-%{random_suffix}" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + 
network_interface { + subnetwork = google_compute_subnetwork.default.self_link + access_config { + } + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} +`, context) +} + +// testAccCheckComputeNetworkEndpointDestroyed makes sure the endpoint with +// given Terraform resource name and previous information (obtained from Exists) +// was destroyed properly. +func testAccCheckComputeNetworkEndpointsWithPortsDestroyed(t *testing.T, negId string, ports ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundPorts, err := testAccComputeNetworkEndpointsListEndpointPorts(t, negId) + if err != nil { + return fmt.Errorf("unable to confirm endpoints with ports %+v was destroyed: %v", ports, err) + } + for _, p := range ports { + if _, ok := foundPorts[p]; ok { + return fmt.Errorf("network endpoint with port %s still exists", p) + } + } + + return nil + } +} + +func testAccCheckComputeNetworkEndpointsWithAllEndpointsDestroyed(t *testing.T, negId string) resource.TestCheckFunc { + return func(s *terraform.State) error { + endpoints, err := testAccComputeNetworkEndpointsListEndpoints(t, negId) + if err != nil { + return fmt.Errorf("unable to confirm all endpoints were destroyed: %v", err) + } + if len(endpoints) > 0 { + return fmt.Errorf("Not all network endpoints were deleted: %v", endpoints) + } + return nil + } +} + +func testAccComputeNetworkEndpointsListEndpoints(t *testing.T, negId string) ([]interface{}, error) { + config := acctest.GoogleProviderConfig(t) + + {{ if eq $.TargetVersionName `ga` }} + url := fmt.Sprintf("https://www.googleapis.com/compute/v1/%s/listNetworkEndpoints", negId) + {{- else }} + url := fmt.Sprintf("https://www.googleapis.com/compute/beta/%s/listNetworkEndpoints", negId) + {{- end }} + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: config.UserAgent, + }) + if err != nil { + return 
nil, err + } + + v, ok := res["items"] + if !ok || v == nil { + return nil, nil + } + return v.([]interface{}), nil +} + +func testAccComputeNetworkEndpointsListEndpointPorts(t *testing.T, negId string) (map[string]struct{}, error) { + items, err := testAccComputeNetworkEndpointsListEndpoints(t, negId) + if err != nil { + return nil, err + } + ports := make(map[string]struct{}) + for _, item := range items { + endptWithHealth := item.(map[string]interface{}) + v, ok := endptWithHealth["networkEndpoint"] + if !ok || v == nil { + continue + } + endpt := v.(map[string]interface{}) + ports[fmt.Sprintf("%v", endpt["port"])] = struct{}{} + } + return ports, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go new file mode 100644 index 000000000000..769a3ce51970 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_firewall_policy_rule_test.go @@ -0,0 +1,665 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccComputeNetworkFirewallPolicyRule_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_name": fmt.Sprintf("organizations/%s", envvar.GetTestOrgFromEnv(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkFirewallPolicyRule_start(context), + }, + { + ResourceName: "google_compute_network_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using 
ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: testAccComputeNetworkFirewallPolicyRule_update(context), + }, + { + ResourceName: "google_compute_network_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: testAccComputeNetworkFirewallPolicyRule_removeConfigs(context), + }, + { + ResourceName: "google_compute_network_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: testAccComputeNetworkFirewallPolicyRule_start(context), + }, + { + ResourceName: "google_compute_network_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + }, + }) +} + +func TestAccComputeNetworkFirewallPolicyRule_multipleRules(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_name": fmt.Sprintf("organizations/%s", envvar.GetTestOrgFromEnv(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkFirewallPolicyRule_multiple(context), + }, + { + ResourceName: "google_compute_network_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + ResourceName: "google_compute_network_firewall_policy_rule.fw_policy_rule2", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + 
ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: testAccComputeNetworkFirewallPolicyRule_multipleAdd(context), + }, + { + ResourceName: "google_compute_network_firewall_policy_rule.fw_policy_rule3", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: testAccComputeNetworkFirewallPolicyRule_multipleRemove(context), + }, + }, + }) +} + +func TestAccComputeNetworkFirewallPolicyRule_securityProfileGroup_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_name": fmt.Sprintf("organizations/%s", envvar.GetTestOrgFromEnv(t)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetworkFirewallPolicyRule_securityProfileGroup_basic(context), + }, + { + ResourceName: "google_compute_network_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + { + Config: testAccComputeNetworkFirewallPolicyRule_securityProfileGroup_update(context), + }, + { + ResourceName: "google_compute_network_firewall_policy_rule.fw_policy_rule1", + ImportState: true, + ImportStateVerify: true, + // Referencing using ID causes import to fail + ImportStateVerifyIgnore: []string{"firewall_policy"}, + }, + }, + }) +} + +func testAccComputeNetworkFirewallPolicyRule_securityProfileGroup_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network1" { + name = "tf-test-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_network_security_security_profile" "security_profile" { + name = 
"tf-test-my-sp%{random_suffix}" + type = "THREAT_PREVENTION" + parent = "%{org_name}" + location = "global" +} + +resource "google_network_security_security_profile_group" "security_profile_group" { + name = "tf-test-my-spg%{random_suffix}" + parent = "%{org_name}" + location = "global" + description = "My security profile group." + threat_prevention_profile = google_network_security_security_profile.security_profile.id +} + +resource "google_compute_network_firewall_policy" "fw_policy" { + name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_compute_network_firewall_policy_association" "fw_policy_a" { + name = "tf-test-policy-a-%{random_suffix}" + attachment_target = google_compute_network.network1.id + firewall_policy = google_compute_network_firewall_policy.fw_policy.id +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "apply_security_profile_group" + security_profile_group = "//networksecurity.googleapis.com/${google_network_security_security_profile_group.security_profile_group.id}" + direction = "INGRESS" + disabled = false + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + src_ip_ranges = ["11.100.0.1/32"] + } +} +`, context) +} + +func testAccComputeNetworkFirewallPolicyRule_securityProfileGroup_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network1" { + name = "tf-test-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_network_security_security_profile" "security_profile" { + name = "tf-test-my-sp%{random_suffix}" + type = "THREAT_PREVENTION" + parent = "%{org_name}" + location = "global" +} + +resource 
"google_network_security_security_profile_group" "security_profile_group" { + name = "tf-test-my-spg%{random_suffix}" + parent = "%{org_name}" + location = "global" + description = "My security profile group." + threat_prevention_profile = google_network_security_security_profile.security_profile.id +} + +resource "google_network_security_security_profile_group" "security_profile_group_updated" { + name = "tf-test-my-spg-updated%{random_suffix}" + parent = "%{org_name}" + location = "global" + description = "My updated security profile group." + threat_prevention_profile = google_network_security_security_profile.security_profile.id +} + +resource "google_compute_network_firewall_policy" "fw_policy" { + name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_compute_network_firewall_policy_association" "fw_policy_a" { + name = "tf-test-policy-a-%{random_suffix}" + attachment_target = google_compute_network.network1.id + firewall_policy = google_compute_network_firewall_policy.fw_policy.id +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "apply_security_profile_group" + security_profile_group = "//networksecurity.googleapis.com/${google_network_security_security_profile_group.security_profile_group_updated.id}" + direction = "INGRESS" + disabled = false + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + src_ip_ranges = ["11.100.0.1/32"] + } +} +`, context) +} + +func testAccComputeNetworkFirewallPolicyRule_start(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "service_account1" { + account_id = "tf-test-sa-%{random_suffix}" +} + +resource "google_service_account" "service_account2" { + 
account_id = "tf-test-sa2-%{random_suffix}" +} + +resource "google_compute_network" "network1" { + name = "tf-test-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network2" { + name = "tf-test-2-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_network_firewall_policy" "fw_policy" { + name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = [] + dest_region_codes = [] + dest_threat_intelligences = [] + dest_address_groups = [google_network_security_address_group.address_group.id] + } +} +`, context) +} + +func testAccComputeNetworkFirewallPolicyRule_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "service_account1" { + account_id = "tf-test-sa-%{random_suffix}" +} + +resource "google_service_account" "service_account2" { + account_id = "tf-test-sa2-%{random_suffix}" +} + +resource "google_compute_network" "network1" { + name = "tf-test-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network2" { + name = "tf-test-2-%{random_suffix}" + auto_create_subnetworks = false +} + +resource 
"google_compute_network_firewall_policy" "fw_policy" { + name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + target_service_accounts = [google_service_account.service_account1.email] + + match { + layer4_configs { + ip_protocol = "tcp" + ports = [8080] + } + layer4_configs { + ip_protocol = "udp" + ports = [22] + } + dest_ip_ranges = ["11.100.0.1/32", "10.0.0.0/24"] + dest_fqdns = ["google.com"] + dest_region_codes = ["US"] + dest_threat_intelligences = ["iplist-known-malicious-ips"] + src_address_groups = [] + dest_address_groups = [google_network_security_address_group.address_group.id] + } +} +`, context) +} + +func testAccComputeNetworkFirewallPolicyRule_removeConfigs(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "service_account1" { + account_id = "tf-test-sa-%{random_suffix}" +} + +resource "google_service_account" "service_account2" { + account_id = "tf-test-sa2-%{random_suffix}" +} + +resource "google_compute_network" "network1" { + name = "tf-test-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network2" { + name = "tf-test-2-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_network_firewall_policy" "fw_policy" { + name = "tf-test-policy-%{random_suffix}" + 
description = "Resource created for Terraform acceptance testing" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Test description" + priority = 9000 + enable_logging = false + action = "deny" + direction = "INGRESS" + disabled = true + target_service_accounts = [ + google_service_account.service_account1.email, + google_service_account.service_account2.email + ] + + match { + layer4_configs { + ip_protocol = "udp" + ports = [22] + } + src_ip_ranges = ["11.100.0.1/32", "10.0.0.0/24"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + } +} +`, context) +} + +func testAccComputeNetworkFirewallPolicyRule_multiple(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network1" { + name = "tf-test-%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_network_firewall_policy" "fw_policy" { + name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_compute_network_firewall_policy_association" "fw_policy_a" { + name = "tf-test-policy-a-%{random_suffix}" + attachment_target = google_compute_network.network1.id + firewall_policy = google_compute_network_firewall_policy.fw_policy.id +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + 
capacity = 100 +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = ["google.com"] + dest_region_codes = ["US"] + dest_threat_intelligences = ["iplist-known-malicious-ips"] + dest_address_groups = [google_network_security_address_group.address_group.id] + } +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule2" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9001 + enable_logging = false + action = "deny" + direction = "INGRESS" + disabled = false + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + layer4_configs { + ip_protocol = "all" + } + src_ip_ranges = ["11.100.0.1/32"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + src_address_groups = [google_network_security_address_group.address_group.id] + } +} +`, context) +} + +func testAccComputeNetworkFirewallPolicyRule_multipleAdd(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network_firewall_policy" "fw_policy" { + name = "tf-test-policy-%{random_suffix}" + description = "Description Update" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule1" 
{ + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + match { + layer4_configs { + ip_protocol = "tcp" + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = ["google.com"] + dest_region_codes = ["US"] + dest_threat_intelligences = ["iplist-known-malicious-ips"] + dest_address_groups = [google_network_security_address_group.address_group.id] + } +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule2" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9001 + enable_logging = false + action = "deny" + direction = "INGRESS" + disabled = false + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + layer4_configs { + ip_protocol = "all" + } + src_ip_ranges = ["11.100.0.1/32"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + src_address_groups = [google_network_security_address_group.address_group.id] + } +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule3" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 40 + enable_logging = true + action = "allow" + direction = "INGRESS" + disabled = true + match { + layer4_configs { + ip_protocol = "udp" + ports = [8000] + } + src_ip_ranges = ["11.100.0.1/32", "10.0.0.0/24"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + src_address_groups = [google_network_security_address_group.address_group.id] + } +} +`, context) +} + +func testAccComputeNetworkFirewallPolicyRule_multipleRemove(context map[string]interface{}) string { + 
return acctest.Nprintf(` +resource "google_compute_network_firewall_policy" "fw_policy" { + name = "tf-test-policy-%{random_suffix}" + description = "Resource created for Terraform acceptance testing" +} + +resource "google_network_security_address_group" "address_group" { + name = "tf-test-policy%{random_suffix}" + parent = "%{org_name}" + description = "Sample global networksecurity_address_group" + location = "global" + items = ["208.80.154.224/32"] + type = "IPV4" + capacity = 100 +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule1" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 9000 + enable_logging = true + action = "allow" + direction = "EGRESS" + disabled = false + match { + layer4_configs { + ip_protocol = "tcp" + ports = [80, 8080] + } + dest_ip_ranges = ["11.100.0.1/32"] + dest_fqdns = ["google.com"] + dest_region_codes = ["US"] + dest_threat_intelligences = ["iplist-known-malicious-ips"] + } +} + +resource "google_compute_network_firewall_policy_rule" "fw_policy_rule3" { + firewall_policy = google_compute_network_firewall_policy.fw_policy.id + description = "Resource created for Terraform acceptance testing" + priority = 40 + enable_logging = true + action = "allow" + direction = "INGRESS" + disabled = true + match { + layer4_configs { + ip_protocol = "udp" + ports = [8000] + } + src_ip_ranges = ["11.100.0.1/32", "10.0.0.0/24"] + src_fqdns = ["google.com"] + src_region_codes = ["US"] + src_threat_intelligences = ["iplist-known-malicious-ips"] + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_peering.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_peering.go.tmpl new file mode 100644 index 000000000000..5540d7f4fb4c --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_peering.go.tmpl @@ -0,0 
+1,382 @@ +package compute + +import ( + "fmt" + "log" + "sort" + "strings" + "time" + "reflect" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +const peerNetworkLinkRegex = "projects/(" + verify.ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" + +func ResourceComputeNetworkPeering() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeNetworkPeeringCreate, + Read: resourceComputeNetworkPeeringRead, + Update: resourceComputeNetworkPeeringUpdate, + Delete: resourceComputeNetworkPeeringDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeNetworkPeeringImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `Name of the peering.`, + }, + + "network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(peerNetworkLinkRegex), + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The primary network of the peering.`, + }, + + "peer_network": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(peerNetworkLinkRegex), + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The peer network in the peering. 
The peer network may belong to a different project.`, + }, + + "export_custom_routes": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether to export the custom routes to the peer network. Defaults to false.`, + }, + + "import_custom_routes": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether to export the custom routes from the peer network. Defaults to false.`, + }, + + "export_subnet_routes_with_public_ip": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + Default: true, + }, + + "import_subnet_routes_with_public_ip": { + Type: schema.TypeBool, + ForceNew: true, + Optional: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State for the peering, either ACTIVE or INACTIVE. The peering is ACTIVE when there's a matching configuration in the peer network.`, + }, + + "state_details": { + Type: schema.TypeString, + Computed: true, + Description: `Details about the current state of the peering.`, + }, + + "stack_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4_ONLY", "IPV4_IPV6"}), + Description: `Which IP version(s) of traffic and routes are allowed to be imported or exported between peer networks. The default value is IPV4_ONLY. 
Possible values: ["IPV4_ONLY", "IPV4_IPV6"]`, + Default: "IPV4_ONLY", + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeNetworkPeeringCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + peerNetworkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) + if err != nil { + return err + } + + request := &compute.NetworksAddPeeringRequest{} + request.NetworkPeering = expandNetworkPeering(d) + + // Only one peering operation at a time can be performed for a given network. + // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. + peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) + for _, kn := range peeringLockNames { + transport_tpg.MutexStore.Lock(kn) + defer transport_tpg.MutexStore.Unlock(kn) + } + + addOp, err := config.NewComputeClient(userAgent).Networks.AddPeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() + if err != nil { + return fmt.Errorf("Error adding network peering: %s", err) + } + + err = ComputeOperationWaitTime(config, addOp, networkFieldValue.Project, "Adding Network Peering", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", networkFieldValue.Name, d.Get("name").(string))) + + return resourceComputeNetworkPeeringRead(d, meta) +} + +func resourceComputeNetworkPeeringRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + peeringName := d.Get("name").(string) + networkFieldValue, err := 
tpgresource.ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + + network, err := config.NewComputeClient(userAgent).Networks.Get(networkFieldValue.Project, networkFieldValue.Name).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Network %q", networkFieldValue.Name)) + } + + peering := findPeeringFromNetwork(network, peeringName) + if peering == nil { + log.Printf("[WARN] Removing network peering %s from network %s because it's gone", peeringName, network.Name) + d.SetId("") + return nil + } + + if err := d.Set("peer_network", peering.Network); err != nil { + return fmt.Errorf("Error setting peer_network: %s", err) + } + if err := d.Set("name", peering.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("import_custom_routes", peering.ImportCustomRoutes); err != nil { + return fmt.Errorf("Error setting import_custom_routes: %s", err) + } + if err := d.Set("export_custom_routes", peering.ExportCustomRoutes); err != nil { + return fmt.Errorf("Error setting export_custom_routes: %s", err) + } + if err := d.Set("import_subnet_routes_with_public_ip", peering.ImportSubnetRoutesWithPublicIp); err != nil { + return fmt.Errorf("Error setting import_subnet_routes_with_public_ip: %s", err) + } + if err := d.Set("export_subnet_routes_with_public_ip", peering.ExportSubnetRoutesWithPublicIp); err != nil { + return fmt.Errorf("Error setting export_subnet_routes_with_public_ip: %s", err) + } + if err := d.Set("state", peering.State); err != nil { + return fmt.Errorf("Error setting state: %s", err) + } + if err := d.Set("state_details", peering.StateDetails); err != nil { + return fmt.Errorf("Error setting state_details: %s", err) + } + if err := d.Set("stack_type", flattenNetworkPeeringStackType(peering.StackType, d, config)); err != nil { + return fmt.Errorf("Error setting stack_type: %s", err) + } + + return nil +} + +func 
resourceComputeNetworkPeeringUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + peerNetworkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) + if err != nil { + return err + } + + request := &compute.NetworksUpdatePeeringRequest{} + request.NetworkPeering = expandNetworkPeering(d) + + // Only one peering operation at a time can be performed for a given network. + // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. + peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) + for _, kn := range peeringLockNames { + transport_tpg.MutexStore.Lock(kn) + defer transport_tpg.MutexStore.Unlock(kn) + } + + updateOp, err := config.NewComputeClient(userAgent).Networks.UpdatePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() + if err != nil { + return fmt.Errorf("Error updating network peering: %s", err) + } + + err = ComputeOperationWaitTime(config, updateOp, networkFieldValue.Project, "Updating Network Peering", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + return resourceComputeNetworkPeeringRead(d, meta) +} + +func resourceComputeNetworkPeeringDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Remove the `network` to `peer_network` peering + name := d.Get("name").(string) + networkFieldValue, err := tpgresource.ParseNetworkFieldValue(d.Get("network").(string), d, config) + if err != nil { + return err + } + peerNetworkFieldValue, err := 
tpgresource.ParseNetworkFieldValue(d.Get("peer_network").(string), d, config) + if err != nil { + return err + } + + request := &compute.NetworksRemovePeeringRequest{ + Name: name, + } + + // Only one peering operation at a time can be performed for a given network. + // Lock on both networks, sorted so we don't deadlock for A <--> B peering pairs. + peeringLockNames := sortedNetworkPeeringMutexKeys(networkFieldValue, peerNetworkFieldValue) + for _, kn := range peeringLockNames { + transport_tpg.MutexStore.Lock(kn) + defer transport_tpg.MutexStore.Unlock(kn) + } + + removeOp, err := config.NewComputeClient(userAgent).Networks.RemovePeering(networkFieldValue.Project, networkFieldValue.Name, request).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Peering `%s` already removed from network `%s`", name, networkFieldValue.Name) + } else { + return fmt.Errorf("Error removing peering `%s` from network `%s`: %s", name, networkFieldValue.Name, err) + } + } else { + err = ComputeOperationWaitTime(config, removeOp, networkFieldValue.Project, "Removing Network Peering", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + } + + return nil +} + +func findPeeringFromNetwork(network *compute.Network, peeringName string) *compute.NetworkPeering { + for _, p := range network.Peerings { + if p.Name == peeringName { + return p + } + } + return nil +} +func expandNetworkPeering(d *schema.ResourceData) *compute.NetworkPeering { + return &compute.NetworkPeering{ + ExchangeSubnetRoutes: true, + Name: d.Get("name").(string), + Network: d.Get("peer_network").(string), + ExportCustomRoutes: d.Get("export_custom_routes").(bool), + ImportCustomRoutes: d.Get("import_custom_routes").(bool), + ExportSubnetRoutesWithPublicIp: d.Get("export_subnet_routes_with_public_ip").(bool), + ImportSubnetRoutesWithPublicIp: d.Get("import_subnet_routes_with_public_ip").(bool), + StackType: d.Get("stack_type").(string), + 
ForceSendFields: []string{"ExportSubnetRoutesWithPublicIp", "ImportCustomRoutes", "ExportCustomRoutes"}, + } +} + +func flattenNetworkPeeringStackType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // To prevent the perma-diff caused by the absence of `stack_type` in API responses for older resource + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "IPV4_ONLY" + } + + return v +} + +func sortedNetworkPeeringMutexKeys(networkName, peerNetworkName *tpgresource.GlobalFieldValue) []string { + // Whether you delete the peering from network A to B or the one from B to A, they + // cannot happen at the same time. + networks := []string{ + fmt.Sprintf("%s/peerings", networkName.RelativeLink()), + fmt.Sprintf("%s/peerings", peerNetworkName.RelativeLink()), + } + sort.Strings(networks) + return networks +} + +func resourceComputeNetworkPeeringImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + splits := strings.Split(d.Id(), "/") + if len(splits) != 3 { + return nil, fmt.Errorf("Error parsing network peering import format, expected: {project}/{network}/{name}") + } + project := splits[0] + network := splits[1] + name := splits[2] + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + // Since the format of the network URL in the peering might be different depending on the ComputeBasePath, + // just read the network self link from the API. 
+ net, err := config.NewComputeClient(userAgent).Networks.Get(project, network).Do() + if err != nil { + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Network %q", splits[1])) + } + + if err := d.Set("network", tpgresource.ConvertSelfLinkToV1(net.SelfLink)); err != nil { + return nil, fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("name", name); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + + // Replace import id for the resource id + id := fmt.Sprintf("%s/%s", network, name) + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_network_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_test.go.tmpl new file mode 100644 index 000000000000..19fa69959f83 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_network_test.go.tmpl @@ -0,0 +1,465 @@ +package compute_test + +import ( + "fmt" + "regexp" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestAccComputeNetwork_explicitAutoSubnet(t *testing.T) { + t.Parallel() + + var network compute.Network + suffixName := acctest.RandString(t, 10) + networkName := fmt.Sprintf("tf-test-network-basic-%s", suffixName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_basic(networkName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + t, "google_compute_network.bar", &network), + testAccCheckComputeNetworkIsAutoSubnet( + t, "google_compute_network.bar", &network), + ), + }, + { + ResourceName: "google_compute_network.bar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeNetwork_customSubnet(t *testing.T) { + t.Parallel() + + var network compute.Network + suffixName := acctest.RandString(t, 10) + networkName := fmt.Sprintf("tf-test-network-custom-sn-%s", suffixName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_custom_subnet(networkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + t, "google_compute_network.baz", &network), + testAccCheckComputeNetworkIsCustomSubnet( + t, "google_compute_network.baz", &network), + ), + }, + { + ResourceName: "google_compute_network.baz", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeNetwork_routingModeAndUpdate(t *testing.T) { + t.Parallel() + + var network compute.Network + suffixName := acctest.RandString(t, 10) + networkName := fmt.Sprintf("tf-test-network-routing-mode-%s", suffixName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_routing_mode(networkName, "GLOBAL"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + t, "google_compute_network.acc_network_routing_mode", &network), + testAccCheckComputeNetworkHasRoutingMode( + t, "google_compute_network.acc_network_routing_mode", 
&network, "GLOBAL"), + ), + }, + // Test updating the routing field (only updatable field). + { + Config: testAccComputeNetwork_routing_mode(networkName, "REGIONAL"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + t, "google_compute_network.acc_network_routing_mode", &network), + testAccCheckComputeNetworkHasRoutingMode( + t, "google_compute_network.acc_network_routing_mode", &network, "REGIONAL"), + ), + }, + }, + }) +} + +func TestAccComputeNetwork_numericId(t *testing.T) { + t.Parallel() + suffixName := acctest.RandString(t, 10) + networkName := fmt.Sprintf("tf-test-network-basic-%s", suffixName) + projectId := envvar.GetTestProjectFromEnv() + networkId := fmt.Sprintf("projects/%v/global/networks/%v", projectId, networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_basic(networkName), + Check: resource.ComposeTestCheckFunc( + resource.TestMatchResourceAttr("google_compute_network.bar", "numeric_id",regexp.MustCompile("^\\d{1,}$")), + resource.TestCheckResourceAttr("google_compute_network.bar", "id", networkId), + ), + }, + { + ResourceName: "google_compute_network.bar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeNetwork_default_routing_mode(t *testing.T) { + t.Parallel() + + var network compute.Network + suffixName := acctest.RandString(t, 10) + networkName := fmt.Sprintf("tf-test-network-network-default-routes-%s", suffixName) + + expectedRoutingMode := "REGIONAL" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeNetwork_basic(networkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + t, "google_compute_network.bar", &network), + testAccCheckComputeNetworkHasRoutingMode( + t, "google_compute_network.bar", &network, expectedRoutingMode), + ), + }, + }, + }) +} + +func TestAccComputeNetwork_networkDeleteDefaultRoute(t *testing.T) { + t.Parallel() + + var network compute.Network + suffixName := acctest.RandString(t, 10) + networkName := fmt.Sprintf("tf-test-network-network-default-routes-%s", suffixName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_deleteDefaultRoute(networkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + t, "google_compute_network.bar", &network), + testAccCheckComputeNetworkDefaultRoutesDeleted( + t, "google_compute_network.bar", &network), + ), + }, + }, + }) +} + +func TestAccComputeNetwork_networkFirewallPolicyEnforcementOrderAndUpdate(t *testing.T) { + t.Parallel() + + var network compute.Network + var updatedNetwork compute.Network + suffixName := acctest.RandString(t, 10) + networkName := fmt.Sprintf("tf-test-network-firewall-policy-enforcement-order-%s", suffixName) + + defaultNetworkFirewallPolicyEnforcementOrder := "AFTER_CLASSIC_FIREWALL" + explicitNetworkFirewallPolicyEnforcementOrder := "BEFORE_CLASSIC_FIREWALL" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNetwork_networkFirewallPolicyEnforcementOrderDefault(networkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + 
t, "google_compute_network.acc_network_firewall_policy_enforcement_order", &network), + testAccCheckComputeNetworkHasNetworkFirewallPolicyEnforcementOrder( + t, "google_compute_network.acc_network_firewall_policy_enforcement_order", &network, defaultNetworkFirewallPolicyEnforcementOrder), + ), + }, + { + ResourceName: "google_compute_network.acc_network_firewall_policy_enforcement_order", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + // Test updating the enforcement order works and updates in-place + { + Config: testAccComputeNetwork_networkFirewallPolicyEnforcementOrderUpdate(networkName, explicitNetworkFirewallPolicyEnforcementOrder), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNetworkExists( + t, "google_compute_network.acc_network_firewall_policy_enforcement_order", &updatedNetwork), + testAccCheckComputeNetworkHasNetworkFirewallPolicyEnforcementOrder( + t, "google_compute_network.acc_network_firewall_policy_enforcement_order", &updatedNetwork, explicitNetworkFirewallPolicyEnforcementOrder), + testAccCheckComputeNetworkWasUpdated(&updatedNetwork, &network), + ), + }, + { + ResourceName: "google_compute_network.acc_network_firewall_policy_enforcement_order", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func testAccCheckComputeNetworkExists(t *testing.T, n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewComputeClient(config.UserAgent).Networks.Get( + config.Project, rs.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.Attributes["name"] { + return 
fmt.Errorf("Network not found") + } + + *network = *found + + return nil + } +} + +func testAccCheckComputeNetworkDefaultRoutesDeleted(t *testing.T, n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + routes, err := config.NewComputeClient(config.UserAgent).Routes.List(config.Project).Filter(fmt.Sprintf("(network=\"%s\") AND (destRange=\"0.0.0.0/0\")", network.SelfLink)).Do() + if err != nil { + return err + } + + if len(routes.Items) > 0 { + return fmt.Errorf("Default routes were not deleted") + } + + return nil + } +} + +func testAccCheckComputeNetworkIsAutoSubnet(t *testing.T, n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewComputeClient(config.UserAgent).Networks.Get( + config.Project, network.Name).Do() + if err != nil { + return err + } + + if !found.AutoCreateSubnetworks { + return fmt.Errorf("should have AutoCreateSubnetworks = true") + } + + if found.IPv4Range != "" { + return fmt.Errorf("should not have IPv4Range") + } + + return nil + } +} + +func testAccCheckComputeNetworkIsCustomSubnet(t *testing.T, n string, network *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewComputeClient(config.UserAgent).Networks.Get( + config.Project, network.Name).Do() + if err != nil { + return err + } + + if found.AutoCreateSubnetworks { + return fmt.Errorf("should have AutoCreateSubnetworks = false") + } + + if found.IPv4Range != "" { + return fmt.Errorf("should not have IPv4Range") + } + + return nil + } +} + +func testAccCheckComputeNetworkHasRoutingMode(t 
*testing.T, n string, network *compute.Network, routingMode string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.Attributes["routing_mode"] == "" { + return fmt.Errorf("Routing mode not found on resource") + } + + found, err := config.NewComputeClient(config.UserAgent).Networks.Get( + config.Project, network.Name).Do() + if err != nil { + return err + } + + foundRoutingMode := found.RoutingConfig.RoutingMode + + if routingMode != foundRoutingMode { + return fmt.Errorf("Expected routing mode %s to match actual routing mode %s", routingMode, foundRoutingMode) + } + + return nil + } +} + +func testAccCheckComputeNetworkHasNetworkFirewallPolicyEnforcementOrder(t *testing.T, n string, network *compute.Network, order string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.Attributes["network_firewall_policy_enforcement_order"] == "" { + return fmt.Errorf("Network firewall policy enforcement order not found on resource") + } + + found, err := config.NewComputeClient(config.UserAgent).Networks.Get( + config.Project, network.Name).Do() + if err != nil { + return err + } + + foundNetworkFirewallPolicyEnforcementOrder := found.NetworkFirewallPolicyEnforcementOrder + + if order != foundNetworkFirewallPolicyEnforcementOrder { + return fmt.Errorf("Expected network firewall policy enforcement order %s to match %s", order, foundNetworkFirewallPolicyEnforcementOrder) + } + + return nil + } +} + +func testAccCheckComputeNetworkWasUpdated(newNetwork *compute.Network, oldNetwork *compute.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + if oldNetwork.CreationTimestamp != newNetwork.CreationTimestamp { + 
return fmt.Errorf("expected compute network to have been updated (had same creation time), instead was recreated - old creation time %s, new creation time %s", oldNetwork.CreationTimestamp, newNetwork.CreationTimestamp) + } + return nil + } +} + +func testAccComputeNetwork_basic(networkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "bar" { + name = "%s" + auto_create_subnetworks = true +} +`, networkName) +} + +func testAccComputeNetwork_custom_subnet(networkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "baz" { + name = "%s" + auto_create_subnetworks = false +} +`, networkName) +} + +func testAccComputeNetwork_routing_mode(networkName, routingMode string) string { + return fmt.Sprintf(` +resource "google_compute_network" "acc_network_routing_mode" { + name = "%s" + routing_mode = "%s" +} +`, networkName, routingMode) +} + +func testAccComputeNetwork_deleteDefaultRoute(networkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "bar" { + name = "%s" + delete_default_routes_on_create = true + auto_create_subnetworks = false +} +`, networkName) +} + +func testAccComputeNetwork_networkFirewallPolicyEnforcementOrderDefault(networkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "acc_network_firewall_policy_enforcement_order" { + name = "%s" +} +`, networkName) +} + +func testAccComputeNetwork_networkFirewallPolicyEnforcementOrderUpdate(networkName, order string) string { + return fmt.Sprintf(` +resource "google_compute_network" "acc_network_firewall_policy_enforcement_order" { + name = "%s" + network_firewall_policy_enforcement_order = "%s" +} +`, networkName, order) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_node_group_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_node_group_test.go new file mode 100644 index 000000000000..690d749f3eb3 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/compute/go/resource_compute_node_group_test.go @@ -0,0 +1,184 @@ +package compute_test + +import ( + "fmt" + "testing" + + "strings" + "time" + + "regexp" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeNodeGroup_update(t *testing.T) { + t.Parallel() + + groupName := fmt.Sprintf("group--%d", acctest.RandInt(t)) + tmplPrefix := fmt.Sprintf("tmpl--%d", acctest.RandInt(t)) + + var timeCreated time.Time + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNodeGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeNodeGroup_update(groupName, tmplPrefix, "tmpl1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNodeGroupCreationTimeBefore(&timeCreated), + ), + }, + { + ResourceName: "google_compute_node_group.nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_size"}, + }, + { + Config: testAccComputeNodeGroup_update2(groupName, tmplPrefix, "tmpl2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeNodeGroupCreationTimeBefore(&timeCreated), + ), + }, + { + ResourceName: "google_compute_node_group.nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"initial_size"}, + }, + }, + }) +} + +func TestAccComputeNodeGroup_fail(t *testing.T) { + t.Parallel() + + groupName := fmt.Sprintf("group--%d", acctest.RandInt(t)) + tmplPrefix := fmt.Sprintf("tmpl--%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeNodeGroupDestroyProducer(t), + 
Steps: []resource.TestStep{ + { + Config: testAccComputeNodeGroup_fail(groupName, tmplPrefix, "tmpl1"), + ExpectError: regexp.MustCompile("An initial_size or autoscaling_policy must be configured on node group creation."), + }, + }, + }) +} + +func testAccCheckComputeNodeGroupCreationTimeBefore(prevTimeCreated *time.Time) resource.TestCheckFunc { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_node_group" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + timestampRaw, ok := rs.Primary.Attributes["creation_timestamp"] + if !ok { + return fmt.Errorf("expected creation_timestamp to be set in node group's state") + } + creationTimestamp, err := time.Parse(time.RFC3339Nano, timestampRaw) + if err != nil { + return fmt.Errorf("unexpected error while parsing creation_timestamp: %v", err) + } + + if prevTimeCreated.IsZero() { + *prevTimeCreated = creationTimestamp + return nil + } + + if creationTimestamp.After(prevTimeCreated.Add(time.Millisecond * 100)) { + return fmt.Errorf( + "Creation timestamp %q was after expected previous time of creation %q", + timestampRaw, prevTimeCreated.Format(time.RFC3339Nano)) + } + } + + return nil + } +} + +func testAccComputeNodeGroup_update(groupName, tmplPrefix, tmplToUse string) string { + return fmt.Sprintf(` +resource "google_compute_node_template" "tmpl1" { + name = "%s-first" + region = "us-central1" + node_type = "n1-node-96-624" +} + +resource "google_compute_node_template" "tmpl2" { + name = "%s-second" + region = "us-central1" + node_type = "n1-node-96-624" +} + +resource "google_compute_node_group" "nodes" { + name = "%s" + zone = "us-central1-a" + description = "example google_compute_node_group for Terraform Google Provider" + + initial_size = 1 + node_template = google_compute_node_template.%s.self_link +} + +`, tmplPrefix, tmplPrefix, groupName, tmplToUse) +} + +func testAccComputeNodeGroup_update2(groupName, tmplPrefix, 
tmplToUse string) string { + return fmt.Sprintf(` +resource "google_compute_node_template" "tmpl1" { + name = "%s-first" + region = "us-central1" + node_type = "n1-node-96-624" +} + +resource "google_compute_node_template" "tmpl2" { + name = "%s-second" + region = "us-central1" + node_type = "n1-node-96-624" +} + +resource "google_compute_node_group" "nodes" { + name = "%s" + zone = "us-central1-a" + description = "example google_compute_node_group for Terraform Google Provider" + + autoscaling_policy { + mode = "ONLY_SCALE_OUT" + min_nodes = 1 + max_nodes = 10 + } + node_template = google_compute_node_template.%s.self_link +} + +`, tmplPrefix, tmplPrefix, groupName, tmplToUse) +} + +func testAccComputeNodeGroup_fail(groupName, tmplPrefix, tmplToUse string) string { + return fmt.Sprintf(` +resource "google_compute_node_template" "tmpl1" { + name = "%s-first" + region = "us-central1" + node_type = "n1-node-96-624" +} + +resource "google_compute_node_group" "nodes" { + name = "%s" + zone = "us-central1-a" + description = "example google_compute_node_group for Terraform Google Provider" + + node_template = google_compute_node_template.%s.self_link +} + +`, tmplPrefix, groupName, tmplToUse) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_organization_security_policy_rule_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_organization_security_policy_rule_test.go.tmpl new file mode 100644 index 000000000000..f94f48d7fb6e --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_organization_security_policy_rule_test.go.tmpl @@ -0,0 +1,111 @@ +package compute_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func 
TestAccComputeOrganizationSecurityPolicyRule_organizationSecurityPolicyRuleUpdateExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeOrganizationSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeOrganizationSecurityPolicyRule_organizationSecurityPolicyRulePreUpdateExample(context), + }, + { + ResourceName: "google_compute_organization_security_policy_rule.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeOrganizationSecurityPolicyRule_organizationSecurityPolicyRulePostUpdateExample(context), + }, + { + ResourceName: "google_compute_organization_security_policy_rule.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeOrganizationSecurityPolicyRule_organizationSecurityPolicyRulePreUpdateExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "security_policy_target" { + display_name = "tf-test-secpol-%{random_suffix}" + parent = "organizations/%{org_id}" +} + +resource "google_compute_organization_security_policy" "policy" { + display_name = "tf-test%{random_suffix}" + parent = google_folder.security_policy_target.name +} +resource "google_compute_organization_security_policy_rule" "policy" { + policy_id = google_compute_organization_security_policy.policy.id + action = "allow" + + direction = "INGRESS" + enable_logging = true + match { + config { + src_ip_ranges = ["192.168.0.0/16", "10.0.0.0/8"] + layer4_config { + ip_protocol = "tcp" + ports = ["22"] + } + layer4_config { + ip_protocol = "icmp" + } + } + } + priority = 100 +} +`, context) +} + +func 
testAccComputeOrganizationSecurityPolicyRule_organizationSecurityPolicyRulePostUpdateExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_folder" "security_policy_target" { + display_name = "tf-test-secpol-%{random_suffix}" + parent = "organizations/%{org_id}" +} + +resource "google_compute_organization_security_policy" "policy" { + display_name = "tf-test%{random_suffix}" + parent = google_folder.security_policy_target.name +} + +resource "google_compute_organization_security_policy_rule" "policy" { + policy_id = google_compute_organization_security_policy.policy.id + action = "deny" + + direction = "INGRESS" + enable_logging = false + description = "Updated description" + match { + config { + src_ip_ranges = ["172.16.0.0/12"] + layer4_config { + ip_protocol = "udp" + ports = ["53"] + } + } + } + priority = 100 +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_organization_security_policy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_organization_security_policy_test.go.tmpl new file mode 100644 index 000000000000..819603c7b2f6 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_organization_security_policy_test.go.tmpl @@ -0,0 +1,63 @@ +package compute_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeOrganizationSecurityPolicy_organizationSecurityPolicyUpdateExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeOrganizationSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeOrganizationSecurityPolicy_organizationSecurityPolicyPreUpdateExample(context), + }, + { + ResourceName: "google_compute_organization_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeOrganizationSecurityPolicy_organizationSecurityPolicyPostUpdateExample(context), + }, + { + ResourceName: "google_compute_organization_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeOrganizationSecurityPolicy_organizationSecurityPolicyPreUpdateExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_organization_security_policy" "policy" { + display_name = "tf-test%{random_suffix}" + parent = "organizations/%{org_id}" +} +`, context) +} + +func testAccComputeOrganizationSecurityPolicy_organizationSecurityPolicyPostUpdateExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_organization_security_policy" "policy" { + display_name = "tf-test%{random_suffix}" + parent = "organizations/%{org_id}" + description = "Updated description." 
+} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_per_instance_config_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_per_instance_config_test.go new file mode 100644 index 000000000000..f4a8a997e3a7 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_per_instance_config_test.go @@ -0,0 +1,738 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccComputePerInstanceConfig_statefulBasic(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + suffix := acctest.RandString(t, 10) + igmName := fmt.Sprintf("tf-test-igm-%s", suffix) + context := map[string]interface{}{ + "igm_name": igmName, + "random_suffix": suffix, + "config_name": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name2": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name3": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name4": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + } + igmId := fmt.Sprintf("projects/%s/zones/%s/instanceGroupManagers/%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), igmName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one endpoint + Config: testAccComputePerInstanceConfig_statefulBasic(context), + }, + { + ResourceName: "google_compute_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "zone"}, + }, + { + // Force-recreate old config + Config: testAccComputePerInstanceConfig_statefulModified(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputePerInstanceConfigDestroyed(t, igmId, context["config_name"].(string)), + ), + }, + { + ResourceName: "google_compute_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "zone"}, + }, + { + // Add two new endpoints + Config: testAccComputePerInstanceConfig_statefulAdditional(context), + }, + { + ResourceName: "google_compute_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "zone"}, + }, + { + ResourceName: "google_compute_per_instance_config.with_disks", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"most_disruptive_allowed_action", "minimal_action", "remove_instance_state_on_destroy"}, + }, + { + ResourceName: "google_compute_per_instance_config.add2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "zone"}, + }, + { + // delete all configs + Config: testAccComputePerInstanceConfig_igm(context), + Check: resource.ComposeTestCheckFunc( + // Config with remove_instance_state_on_destroy = false won't be destroyed (config4) + testAccCheckComputePerInstanceConfigDestroyed(t, igmId, context["config_name2"].(string)), + testAccCheckComputePerInstanceConfigDestroyed(t, igmId, context["config_name3"].(string)), + ), + }, + }, + }) +} + +func TestAccComputePerInstanceConfig_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "igm_name": fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)), + "config_name": fmt.Sprintf("instance-%s", 
acctest.RandString(t, 10)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one config + Config: testAccComputePerInstanceConfig_statefulBasic(context), + }, + { + ResourceName: "google_compute_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "zone"}, + }, + { + // Update an existing config + Config: testAccComputePerInstanceConfig_update(context), + }, + { + ResourceName: "google_compute_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "zone"}, + }, + }, + }) +} + +func TestAccComputePerInstanceConfig_statefulIps(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "igm_name": fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)), + "config_name": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "network": fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)), + "subnetwork": fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)), + "address1": fmt.Sprintf("tf-test-igm-address%s", acctest.RandString(t, 10)), + "address2": fmt.Sprintf("tf-test-igm-address%s", acctest.RandString(t, 10)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one config + Config: testAccComputePerInstanceConfig_statefulIpsBasic(context), + }, + { + ResourceName: "google_compute_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "zone"}, + }, + { + // Update an existing config + Config: 
testAccComputePerInstanceConfig_statefulIpsUpdate(context), + }, + { + ResourceName: "google_compute_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "zone"}, + }, + }, + }) +} + +func TestAccComputePerInstanceConfig_removeInstanceOnDestroy(t *testing.T) { + t.Parallel() + + igmName := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "igm_name": igmName, + "config_name": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name2": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "network": fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)), + "subnetwork": fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)), + "address1": fmt.Sprintf("tf-test-igm-address%s", acctest.RandString(t, 10)), + "address2": fmt.Sprintf("tf-test-igm-address%s", acctest.RandString(t, 10)), + } + igmId := fmt.Sprintf("projects/%s/zones/%s/instanceGroupManagers/%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestZoneFromEnv(), igmName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputePerInstanceConfig_removeInstanceOnDestroyBefore(context), + }, + { + ResourceName: "google_compute_per_instance_config.config_one", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + ResourceName: "google_compute_per_instance_config.config_two", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + Config: testAccComputePerInstanceConfig_removeInstanceOnDestroyAfter(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputePerInstanceConfigDestroyed(t, 
igmId, context["config_name"].(string)), + testAccCheckComputePerInstanceConfigInstanceDestroyed(t, igmId, context["config_name"].(string)), + ), + }, + { + ResourceName: "google_compute_per_instance_config.config_two", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + // delete all configs + Config: testAccComputePerInstanceConfig_igm(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputePerInstanceConfigDestroyed(t, igmId, context["config_name2"].(string)), + testAccCheckComputePerInstanceConfigInstanceDestroyed(t, igmId, context["config_name2"].(string)), + ), + }, + }, + }) +} + +func testAccComputePerInstanceConfig_statefulBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_per_instance_config" "default" { + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + } + } +} +`, context) + testAccComputePerInstanceConfig_igm(context) +} + +func testAccComputePerInstanceConfig_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_per_instance_config" "default" { + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + update = "12345" + } + } +} +`, context) + testAccComputePerInstanceConfig_igm(context) +} + +func testAccComputePerInstanceConfig_statefulModified(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_per_instance_config" "default" { + zone = google_compute_instance_group_manager.igm.zone + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name2}" + remove_instance_state_on_destroy = true + preserved_state { 
+ metadata = { + asdf = "asdf" + } + } +} +`, context) + testAccComputePerInstanceConfig_igm(context) +} + +func testAccComputePerInstanceConfig_statefulAdditional(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_per_instance_config" "default" { + zone = google_compute_instance_group_manager.igm.zone + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name2}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + } + } +} + +resource "google_compute_per_instance_config" "with_disks" { + zone = google_compute_instance_group_manager.igm.zone + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name3}" + most_disruptive_allowed_action = "REFRESH" + minimal_action = "REFRESH" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + meta = "123" + } + + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + + disk { + device_name = "my-stateful-disk3" + source = google_compute_disk.disk2.id + } + } +} + +resource "google_compute_per_instance_config" "add2" { + zone = google_compute_instance_group_manager.igm.zone + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name4}" + preserved_state { + metadata = { + foo = "abc" + } + } +} + +resource "google_compute_disk" "disk" { + name = "test-disk-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk2" 
{ + name = "test-disk3-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20210217" + physical_block_size_bytes = 4096 +} +`, context) + testAccComputePerInstanceConfig_igm(context) +} + +func testAccComputePerInstanceConfig_igm(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "tf-test-igm-%{random_suffix}" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "my-stateful-disk" + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_group_manager" "igm" { + description = "Terraform test instance group manager" + name = "%{igm_name}" + + version { + name = "prod" + instance_template = google_compute_instance_template.igm-basic.self_link + } + + base_instance_name = "tf-test-igm-no-tp" +} +`, context) +} + +func testAccComputePerInstanceConfig_removeInstanceOnDestroyBefore(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "%{network}" +} + +resource "google_compute_subnetwork" "default" { + name = "%{subnetwork}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id +} + +resource "google_compute_address" "static_internal_ip" { + name = "%{address1}" + address_type = "INTERNAL" +} + +resource "google_compute_address" "static_external_ip" { + name = "%{address2}" + address_type = "EXTERNAL" +} + +resource "google_compute_per_instance_config" "config_one" { + 
instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-one" + } + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + internal_ip { + ip_address { + address = google_compute_address.static_internal_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + external_ip { + ip_address { + address = google_compute_address.static_external_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + } +} + +resource "google_compute_disk" "disk" { + name = "test-disk-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} + +resource "google_compute_per_instance_config" "config_two" { + zone = google_compute_instance_group_manager.igm.zone + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name2}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-two" + } + } +} +`, context) + testAccComputePerInstanceConfig_igm(context) +} + +func testAccComputePerInstanceConfig_removeInstanceOnDestroyAfter(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_per_instance_config" "config_two" { + zone = google_compute_instance_group_manager.igm.zone + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name2}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-two" + } + } +} +`, 
context) + testAccComputePerInstanceConfig_igm(context) +} + +func testAccComputePerInstanceConfig_statefulIpsBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "%{network}" +} + +resource "google_compute_subnetwork" "default" { + name = "%{subnetwork}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id +} + +resource "google_compute_address" "static_internal_ip" { + name = "%{address1}" + address_type = "INTERNAL" +} + +resource "google_compute_address" "static_external_ip" { + name = "%{address2}" + address_type = "EXTERNAL" +} + +resource "google_compute_per_instance_config" "default" { + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + } + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + internal_ip { + ip_address { + address = google_compute_address.static_internal_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + external_ip { + ip_address { + address = google_compute_address.static_external_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + } +} + +resource "google_compute_disk" "disk" { + name = "test-disk-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} +`, context) + testAccComputePerInstanceConfig_igm(context) +} + +func 
testAccComputePerInstanceConfig_statefulIpsUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "%{network}" +} + +resource "google_compute_subnetwork" "default" { + name = "%{subnetwork}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id +} + +resource "google_compute_address" "static_internal_ip" { + name = "%{address1}" + address_type = "INTERNAL" +} + +resource "google_compute_address" "static_external_ip" { + name = "%{address2}" + address_type = "EXTERNAL" +} + +resource "google_compute_per_instance_config" "default" { + instance_group_manager = google_compute_instance_group_manager.igm.name + name = "%{config_name}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + } + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + internal_ip { + ip_address { + address = google_compute_address.static_internal_ip.self_link + } + auto_delete = "ON_PERMANENT_INSTANCE_DELETION" + interface_name = "nic0" + } + external_ip { + ip_address { + address = google_compute_address.static_external_ip.self_link + } + auto_delete = "ON_PERMANENT_INSTANCE_DELETION" + interface_name = "nic0" + } + } +} + +resource "google_compute_disk" "disk" { + name = "test-disk-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = google_compute_instance_group_manager.igm.zone + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} +`, context) + testAccComputePerInstanceConfig_igm(context) +} + +// Checks that the per instance config with the given name was 
destroyed +func testAccCheckComputePerInstanceConfigDestroyed(t *testing.T, igmId, configName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundNames, err := testAccComputePerInstanceConfigListNames(t, igmId) + if err != nil { + return fmt.Errorf("unable to confirm config with name %s was destroyed: %v", configName, err) + } + if _, ok := foundNames[configName]; ok { + return fmt.Errorf("config with name %s still exists", configName) + } + + return nil + } +} + +// Checks that the instance with the given name was destroyed. +func testAccCheckComputePerInstanceConfigInstanceDestroyed(t *testing.T, igmId, configName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundNames, err := testAccComputePerInstanceConfigListInstances(t, igmId) + if err != nil { + return fmt.Errorf("unable to confirm instance with name %s was destroyed: %v", configName, err) + } + if _, ok := foundNames[configName]; ok { + return fmt.Errorf("instance with name %s still exists", configName) + } + + return nil + } +} + +func testAccComputePerInstanceConfigListInstances(t *testing.T, igmId string) (map[string]struct{}, error) { + config := acctest.GoogleProviderConfig(t) + + url := fmt.Sprintf("%s%s/listManagedInstances", config.ComputeBasePath, igmId) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: config.UserAgent, + }) + if err != nil { + return nil, err + } + + v, ok := res["managedInstances"] + if !ok || v == nil { + return nil, nil + } + items := v.([]interface{}) + instances := make(map[string]struct{}) + for _, item := range items { + instance := item.(map[string]interface{}) + instances[fmt.Sprintf("%v", instance["name"])] = struct{}{} + } + return instances, nil +} + +func testAccComputePerInstanceConfigListNames(t *testing.T, igmId string) (map[string]struct{}, error) { + config := acctest.GoogleProviderConfig(t) + + url := 
fmt.Sprintf("%s%s/listPerInstanceConfigs", config.ComputeBasePath, igmId) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: config.UserAgent, + }) + if err != nil { + return nil, err + } + + v, ok := res["items"] + if !ok || v == nil { + return nil, nil + } + items := v.([]interface{}) + instanceConfigs := make(map[string]struct{}) + for _, item := range items { + perInstanceConfig := item.(map[string]interface{}) + instanceConfigs[fmt.Sprintf("%v", perInstanceConfig["name"])] = struct{}{} + } + return instanceConfigs, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_project_default_network_tier.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_project_default_network_tier.go.tmpl new file mode 100644 index 000000000000..8c8b92d23236 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_project_default_network_tier.go.tmpl @@ -0,0 +1,125 @@ +package compute + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeProjectDefaultNetworkTier() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeProjectDefaultNetworkTierCreateOrUpdate, + Read: resourceComputeProjectDefaultNetworkTierRead, + Update: resourceComputeProjectDefaultNetworkTierCreateOrUpdate, + Delete: resourceComputeProjectDefaultNetworkTierDelete, + Importer: &schema.ResourceImporter{ + State: 
schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + SchemaVersion: 0, + + Schema: map[string]*schema.Schema{ + "network_tier": { + Type: schema.TypeString, + Required: true, + Description: `The default network tier to be configured for the project. This field can take the following values: PREMIUM or STANDARD.`, + ValidateFunc: validation.StringInSlice([]string{"PREMIUM", "STANDARD"}, false), + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeProjectDefaultNetworkTierCreateOrUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + projectID, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + request := &compute.ProjectsSetDefaultNetworkTierRequest{ + NetworkTier: d.Get("network_tier").(string), + } + op, err := config.NewComputeClient(userAgent).Projects.SetDefaultNetworkTier(projectID, request).Do() + if err != nil { + return fmt.Errorf("SetDefaultNetworkTier failed: %s", err) + } + + log.Printf("[DEBUG] SetDefaultNetworkTier: %d (%s)", op.Id, op.SelfLink) + err = ComputeOperationWaitTime(config, op, projectID, "SetDefaultNetworkTier", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("SetDefaultNetworkTier failed: %s", err) + } + + d.SetId(projectID) + + return resourceComputeProjectDefaultNetworkTierRead(d, meta) +} + +func resourceComputeProjectDefaultNetworkTierRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) 
+ userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + projectId := d.Id() + + project, err := config.NewComputeClient(userAgent).Projects.Get(projectId).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project data for project %q", projectId)) + } + + err = d.Set("network_tier", project.DefaultNetworkTier) + if err != nil { + return fmt.Errorf("Error setting default network tier: %s", err) + } + + if err := d.Set("project", projectId); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + return nil +} + +func resourceComputeProjectDefaultNetworkTierDelete(d *schema.ResourceData, meta interface{}) error { + + log.Printf("[WARNING] Default Network Tier will be only removed from Terraform state, but will be left intact on GCP.") + + return schema.RemoveFromState(d, meta) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_project_metadata.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_project_metadata.go.tmpl new file mode 100644 index 000000000000..82f4dabed42b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_project_metadata.go.tmpl @@ -0,0 +1,164 @@ +package compute + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeProjectMetadata() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeProjectMetadataCreateOrUpdate, + Read: resourceComputeProjectMetadataRead, + Update: 
resourceComputeProjectMetadataCreateOrUpdate, + Delete: resourceComputeProjectMetadataDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + SchemaVersion: 0, + + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeMap, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A series of key value pairs.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeProjectMetadataCreateOrUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + projectID, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + md := &compute.Metadata{ + Items: expandComputeMetadata(d.Get("metadata").(map[string]interface{})), + } + + err = resourceComputeProjectMetadataSet(projectID, userAgent, config, md, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) + } + + d.SetId(projectID) + + return resourceComputeProjectMetadataRead(d, meta) +} + +func resourceComputeProjectMetadataRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // At import time, we have no state to draw from. 
We'll wrongly pull the + // provider default project if we use a normal GetProject, so we need to + // rely on the `id` field being set to the project. + // At any other time we can use GetProject, as state will have the correct + // value; the project pulled from config / the provider / at import time. + // + // Note that if a user imports a project other than their provider project + // and has left the project field unspecified, Terraform will not see a diff + // but would create metadata for the provider project on a destroy/create. + projectId := d.Id() + + project, err := config.NewComputeClient(userAgent).Projects.Get(projectId).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project metadata for project %q", projectId)) + } + + err = d.Set("metadata", FlattenMetadata(project.CommonInstanceMetadata)) + if err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + + if err := d.Set("project", projectId); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + return nil +} + +func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + projectID, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + md := &compute.Metadata{} + err = resourceComputeProjectMetadataSet(projectID, userAgent, config, md, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) + } + + return resourceComputeProjectMetadataRead(d, meta) +} + +func resourceComputeProjectMetadataSet(projectID, userAgent string, config *transport_tpg.Config, md *compute.Metadata, timeout time.Duration) error { + createMD := func() error { + log.Printf("[DEBUG] Loading project service: %s", projectID) + project, err := 
config.NewComputeClient(userAgent).Projects.Get(projectID).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", projectID, err) + } + + md.Fingerprint = project.CommonInstanceMetadata.Fingerprint + op, err := config.NewComputeClient(userAgent).Projects.SetCommonInstanceMetadata(projectID, md).Do() + if err != nil { + return fmt.Errorf("SetCommonInstanceMetadata failed: %s", err) + } + + log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) + return ComputeOperationWaitTime(config, op, project.Name, "SetCommonMetadata", userAgent, timeout) + } + + err := transport_tpg.MetadataRetryWrapper(createMD) + return err +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_project_metadata_item.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_project_metadata_item.go.tmpl new file mode 100644 index 000000000000..68f502cd4e9c --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_project_metadata_item.go.tmpl @@ -0,0 +1,240 @@ +package compute + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +type metadataPresentBehavior bool + +const ( + failIfPresent metadataPresentBehavior = true + overwritePresent metadataPresentBehavior = false +) + +func ResourceComputeProjectMetadataItem() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeProjectMetadataItemCreate, + Read: resourceComputeProjectMetadataItemRead, + Update: resourceComputeProjectMetadataItemUpdate, + Delete: resourceComputeProjectMetadataItemDelete, + Importer: 
&schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The metadata key to set.`, + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: `The value to set for the given metadata key.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(7 * time.Minute), + Update: schema.DefaultTimeout(7 * time.Minute), + Delete: schema.DefaultTimeout(7 * time.Minute), + }, + UseJSONNumber: true, + } +} + +func resourceComputeProjectMetadataItemCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + projectID, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + key := d.Get("key").(string) + val := d.Get("value").(string) + + err = updateComputeCommonInstanceMetadata(config, projectID, key, userAgent, &val, d.Timeout(schema.TimeoutCreate), failIfPresent) + if err != nil { + return err + } + + d.SetId(key) + + return nil +} + +func resourceComputeProjectMetadataItemRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + projectID, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + log.Printf("[DEBUG] Loading project metadata: %s", projectID) + project, err := config.NewComputeClient(userAgent).Projects.Get(projectID).Do() + if 
err != nil { + return fmt.Errorf("Error loading project '%s': %s", projectID, err) + } + + md := FlattenMetadata(project.CommonInstanceMetadata) + val, ok := md[d.Id()] + if !ok { + // Resource no longer exists + d.SetId("") + return nil + } + + if err := d.Set("project", projectID); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("key", d.Id()); err != nil { + return fmt.Errorf("Error setting key: %s", err) + } + if err := d.Set("value", val); err != nil { + return fmt.Errorf("Error setting value: %s", err) + } + + return nil +} + +func resourceComputeProjectMetadataItemUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + projectID, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + if d.HasChange("value") { + key := d.Get("key").(string) + _, n := d.GetChange("value") + new := n.(string) + + err = updateComputeCommonInstanceMetadata(config, projectID, key, userAgent, &new, d.Timeout(schema.TimeoutUpdate), overwritePresent) + if err != nil { + return err + } + } + return nil +} + +func resourceComputeProjectMetadataItemDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + projectID, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + key := d.Get("key").(string) + + err = updateComputeCommonInstanceMetadata(config, projectID, key, userAgent, nil, d.Timeout(schema.TimeoutDelete), overwritePresent) + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func updateComputeCommonInstanceMetadata(config *transport_tpg.Config, projectID, key, userAgent string, afterVal *string, timeout time.Duration, failIfPresent metadataPresentBehavior) error 
{ + updateMD := func() error { + lockName := fmt.Sprintf("projects/%s/commoninstancemetadata", projectID) + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + log.Printf("[DEBUG] Loading project metadata: %s", projectID) + project, err := config.NewComputeClient(userAgent).Projects.Get(projectID).Do() + if err != nil { + return fmt.Errorf("Error loading project '%s': %s", projectID, err) + } + + md := FlattenMetadata(project.CommonInstanceMetadata) + + val, ok := md[key] + + if !ok { + if afterVal == nil { + // Asked to set no value and we didn't find one - we're done + return nil + } + } else { + if failIfPresent { + return fmt.Errorf("key %q already present in metadata for project %q. Use `terraform import` to manage it with Terraform", key, projectID) + } + if afterVal != nil && *afterVal == val { + // Asked to set a value and it's already set - we're done. + return nil + } + } + + if afterVal == nil { + delete(md, key) + } else { + md[key] = *afterVal + } + + // Attempt to write the new value now + op, err := config.NewComputeClient(userAgent).Projects.SetCommonInstanceMetadata( + projectID, + &compute.Metadata{ + Fingerprint: project.CommonInstanceMetadata.Fingerprint, + Items: expandComputeMetadata(md), + }, + ).Do() + + if err != nil { + return err + } + + log.Printf("[DEBUG] SetCommonInstanceMetadata: %d (%s)", op.Id, op.SelfLink) + + return ComputeOperationWaitTime(config, op, project.Name, "SetCommonInstanceMetadata", userAgent, timeout) + } + + return transport_tpg.MetadataRetryWrapper(updateMD) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_autoscaler_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_autoscaler_test.go.tmpl new file mode 100644 index 000000000000..0054483dd2a7 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_autoscaler_test.go.tmpl @@ -0,0 +1,298 @@ +package compute_test + 
+import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeRegionAutoscaler_update(t *testing.T) { + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-region-autoscaler-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionAutoscaler_basic(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_region_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionAutoscaler_update(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_region_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionAutoscaler_scaleDownControl(t *testing.T) { + t.Parallel() + + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-region-autoscaler-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionAutoscaler_scaleDownControl(itName, tpName, igmName, autoscalerName), + }, + { + 
ResourceName: "google_compute_region_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionAutoscaler_scalingSchedule(t *testing.T) { + t.Parallel() + + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-region-autoscaler-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionAutoscaler_scalingSchedule(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_region_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionAutoscaler_scaleInControl(t *testing.T) { + t.Parallel() + + var itName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var tpName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var igmName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var autoscalerName = fmt.Sprintf("tf-test-region-autoscaler-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionAutoscalerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionAutoscaler_scaleInControl(itName, tpName, igmName, autoscalerName), + }, + { + ResourceName: "google_compute_region_autoscaler.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionAutoscaler_scaffolding(itName, tpName, igmName string) string { + return 
fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_region_instance_group_manager" "foobar" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + target_pools = [google_compute_target_pool.foobar.self_link] + base_instance_name = "tf-test-foobar" + region = "us-central1" +} + +`, itName, tpName, igmName) +} + +func testAccComputeRegionAutoscaler_basic(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeRegionAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_region_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + region = "us-central1" + target = google_compute_region_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 5 + min_replicas = 0 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } + } +} +`, autoscalerName) +} + +func testAccComputeRegionAutoscaler_update(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeRegionAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_region_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" 
+ name = "%s" + region = "us-central1" + target = google_compute_region_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } + } +} +`, autoscalerName) +} + +func testAccComputeRegionAutoscaler_scaleDownControl(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeRegionAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_region_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + region = "us-central1" + target = google_compute_region_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + predictive_method = "OPTIMIZE_AVAILABILITY" + } +{{- if ne $.TargetVersionName "ga" }} + scale_down_control { + max_scaled_down_replicas { + percent = 80 + } + time_window_sec = 300 + } +{{- end }} + } +} +`, autoscalerName) +} + +func testAccComputeRegionAutoscaler_scaleInControl(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeRegionAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_region_autoscaler" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + region = "us-central1" + target = google_compute_region_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } + scale_in_control { + max_scaled_in_replicas { + percent = 80 + } + time_window_sec = 300 + } + } +} +`, autoscalerName) +} + +func testAccComputeRegionAutoscaler_scalingSchedule(itName, tpName, igmName, autoscalerName string) string { + return testAccComputeRegionAutoscaler_scaffolding(itName, tpName, igmName) + fmt.Sprintf(` +resource "google_compute_region_autoscaler" 
"foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + region = "us-central1" + target = google_compute_region_instance_group_manager.foobar.self_link + autoscaling_policy { + max_replicas = 10 + min_replicas = 1 + cooldown_period = 60 + cpu_utilization { + target = 0.5 + } +{{- if ne $.TargetVersionName "ga" }} + scale_down_control { + max_scaled_down_replicas { + percent = 80 + } + time_window_sec = 300 + } +{{- end }} + scaling_schedules { + name = "every-weekday-morning" + description = "Increase to 2 every weekday at 7AM for 6 hours." + min_required_replicas = 0 + schedule = "0 7 * * MON-FRI" + time_zone = "America/New_York" + duration_sec = 21600 + } + scaling_schedules { + name = "every-weekday-afternoon" + description = "Increase to 2 every weekday at 7PM for 6 hours." + min_required_replicas = 2 + schedule = "0 19 * * MON-FRI" + time_zone = "America/New_York" + duration_sec = 21600 + } + } +} +`, autoscalerName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl new file mode 100644 index 000000000000..10ed4ec0052d --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_backend_service_test.go.tmpl @@ -0,0 +1,1149 @@ +package compute_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeRegionBackendService_basic(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + extraCheckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_basic(serviceName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionBackendService_basicModified( + serviceName, checkName, extraCheckName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionBackendService_ilbBasic_withUnspecifiedProtocol(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_ilbBasic_withUnspecifiedProtocol(serviceName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionBackendService_withBackendInternal(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccComputeRegionBackendService_withInvalidInternalBackend( + serviceName, igName, itName, checkName), + ExpectError: regexp.MustCompile(`capacity_scaler" cannot be set for non-managed backend service`), + }, + { + Config: testAccComputeRegionBackendService_withBackend( + serviceName, igName, itName, checkName, 10), + }, + { + ResourceName: "google_compute_region_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionBackendService_withBackend( + serviceName, igName, itName, checkName, 20), + }, + { + ResourceName: "google_compute_region_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionBackendService_withBackendInternalManaged(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + igmName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + hcName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_internalManagedMultipleBackends(serviceName, igmName, hcName), + }, + { + ResourceName: "google_compute_region_backend_service.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionBackendService_withBackendMultiNic(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + net1Name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + net2Name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := 
fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_withBackendMultiNic( + serviceName, net1Name, net2Name, igName, itName, checkName, 10), + }, + { + ResourceName: "google_compute_region_backend_service.lipsum", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionBackendService_withConnectionDrainingAndUpdate(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_withConnectionDraining(serviceName, checkName, 10), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionBackendService_basic(serviceName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionBackendService_ilbUpdateBasic(t *testing.T) { + t.Parallel() + + backendName := fmt.Sprintf("foo-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("bar-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), 
+ Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_ilbBasic(backendName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionBackendService_ilbUpdateBasic(backendName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeRegionBackendService_ilbUpdateFull(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) + + backendName := fmt.Sprintf("foo-%s", randString) + checkName := fmt.Sprintf("bar-%s", randString) + igName := fmt.Sprintf("tf-test-%s", randString) + instanceName := fmt.Sprintf("tf-test-%s", randString) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_ilbFull(backendName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionBackendService_ilbUpdateFull(backendName, igName, instanceName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccComputeRegionBackendService_withBackendAndIAP(t *testing.T) { + backendName := fmt.Sprintf("foo-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("bar-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_ilbBasicwithIAP(backendName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"iap.0.oauth2_client_secret"}, + }, + { + Config: testAccComputeRegionBackendService_ilbBasic(backendName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionBackendService_UDPFailOverPolicyUpdate(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_UDPFailOverPolicyHasDrain(serviceName, "TCP", "true", checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionBackendService_UDPFailOverPolicyHasDrain(serviceName, "TCP", "false", checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionBackendService_UDPFailOverPolicy(serviceName, "UDP", "false", checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeRegionBackendService_subsettingUpdate(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) 
+ + backendName := fmt.Sprintf("foo-%s", randString) + checkName := fmt.Sprintf("bar-%s", randString) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_ilbWithSubsetting(backendName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionBackendService_ilbNoSubsetting(backendName, checkName), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func testAccComputeRegionBackendService_ilbBasic_withUnspecifiedProtocol(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + protocol = "UNSPECIFIED" + load_balancing_scheme = "INTERNAL" + region = "us-central1" +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} + +func testAccComputeRegionBackendService_ilbBasic(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + port_name = "http" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + locality_lb_policy = "RING_HASH" + circuit_breakers { + max_connections = 10 + } + consistent_hash { + http_cookie { + ttl { + seconds = 11 + nanos = 1234 + } + name = "mycookie" + } + } + outlier_detection { + consecutive_errors = 2 + } +} + +resource "google_compute_health_check" "health_check" { + 
name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} + +func testAccComputeRegionBackendService_ilbUpdateBasic(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + port_name = "https" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + locality_lb_policy = "RANDOM" + circuit_breakers { + max_connections = 10 + } + outlier_detection { + consecutive_errors = 2 + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRegionBackendService_ilbFull(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + port_name = "http" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + locality_lb_policy = "MAGLEV" + circuit_breakers { + max_connections = 10 + } + consistent_hash { + http_cookie { + ttl { + seconds = 11 + nanos = 1234 + } + name = "mycookie" + } + } + outlier_detection { + consecutive_errors = 2 + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRegionBackendService_ilbUpdateFull(serviceName, igName, instanceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + port_name = "https" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + locality_lb_policy = "MAGLEV" + backend { + balancing_mode = "UTILIZATION" + 
capacity_scaler = 0.5 + description = "The backend" + group = google_compute_instance_group.group.self_link + max_rate = 6 + max_utilization = 0.5 + } + circuit_breakers { + connect_timeout { + seconds = 3 + nanos = 4 + } + max_connections = 11 + max_requests_per_connection = 12 + max_pending_requests = 13 + max_requests = 14 + max_retries = 15 + } + consistent_hash { + http_cookie { + ttl { + seconds = 12 + } + name = "mycookie2" + path = "mycookie2/path" + } + minimum_ring_size = 16 + } + log_config { + enable = true + sample_rate = 0.5 + } + outlier_detection { + base_ejection_time { + seconds = 0 + nanos = 5 + } + consecutive_errors = 1 + consecutive_gateway_failure = 3 + enforcing_consecutive_errors = 4 + enforcing_consecutive_gateway_failure = 5 + enforcing_success_rate = 6 + interval { + seconds = 7 + } + max_ejection_percent = 99 + success_rate_minimum_hosts = 98 + success_rate_request_volume = 97 + success_rate_stdev_factor = 1800 + } +} + +resource "google_compute_instance_group" "group" { + name = "%s" + instances = [google_compute_instance.ig_instance.self_link] + + named_port { + name = "http" + port = "8080" + } + + named_port { + name = "https" + port = "8443" + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "ig_instance" { + name = "%s" + machine_type = "e2-medium" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, igName, instanceName, checkName) +} +{{- end }} + +func testAccComputeRegionBackendService_UDPFailOverPolicy(serviceName, protocol, failover, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = 
[google_compute_health_check.zero.self_link] + region = "us-central1" + + protocol = "%s" + failover_policy { + # Disable connection drain on failover cannot be set when the protocol is UDP + drop_traffic_if_unhealthy = "%s" + } +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +`, serviceName, protocol, failover, checkName) +} + +func testAccComputeRegionBackendService_UDPFailOverPolicyHasDrain(serviceName, protocol, failover, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.zero.self_link] + region = "us-central1" + + protocol = "%s" + failover_policy { + # Disable connection drain on failover cannot be set when the protocol is UDP + drop_traffic_if_unhealthy = "%s" + disable_connection_drain_on_failover = "%s" + } +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +`, serviceName, protocol, failover, failover, checkName) +} + + +func testAccComputeRegionBackendService_basic(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.zero.self_link] + region = "us-central1" +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +`, serviceName, checkName) +} + +func testAccComputeRegionBackendService_basicModified(serviceName, checkOne, checkTwo string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.one.self_link] + region = "us-central1" +} + +resource "google_compute_health_check" "zero" { + name = "%s" + 
check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = 443 + } +} + +resource "google_compute_health_check" "one" { + name = "%s" + check_interval_sec = 30 + timeout_sec = 30 + + tcp_health_check { + port = 443 + } +} +`, serviceName, checkOne, checkTwo) +} + +func testAccComputeRegionBackendService_withBackend( + serviceName, igName, itName, checkName string, timeout int64) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + protocol = "TCP" + region = "us-central1" + timeout_sec = %v + + backend { + group = google_compute_instance_group_manager.foobar.instance_group +{{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} + failover = true + } + + failover_policy { + disable_connection_drain_on_failover = true + drop_traffic_if_unhealthy = true + failover_ratio = 0.4 +{{- end }} + } + + health_checks = [google_compute_health_check.default.self_link] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + base_instance_name = "tf-test-foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } +} + +resource "google_compute_health_check" "default" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = 443 + } +} +`, serviceName, timeout, igName, itName, checkName) +} + +func testAccComputeRegionBackendService_withBackendMultiNic( + serviceName, net1Name, net2Name, igName, itName, checkName string, timeout int64) string { + 
return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + protocol = "TCP" + region = "us-central1" + timeout_sec = %v + + backend { + group = google_compute_instance_group_manager.foobar.instance_group +{{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} + failover = true + } + + failover_policy { + disable_connection_drain_on_failover = true + drop_traffic_if_unhealthy = true + failover_ratio = 0.4 +{{- end }} + } + + network = google_compute_network.network2.self_link + + health_checks = [google_compute_health_check.default.self_link] +} + +resource "google_compute_network" "network1" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet1" { + name = "%s" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + private_ip_google_access = true + network = google_compute_network.network1.self_link +} + +resource "google_compute_network" "network2" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet2" { + name = "%s" + ip_cidr_range = "10.0.2.0/24" + region = "us-central1" + private_ip_google_access = true + network = google_compute_network.network2.self_link +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + base_instance_name = "tf-test-foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + subnetwork = google_compute_subnetwork.subnet1.self_link + } + + network_interface { + subnetwork = google_compute_subnetwork.subnet2.self_link + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete 
= true + boot = true + } +} + +resource "google_compute_health_check" "default" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = 443 + } +} +`, serviceName, timeout, net1Name, net1Name, net2Name, net2Name, igName, itName, checkName) +} + +func testAccComputeRegionBackendService_withInvalidInternalBackend( + serviceName, igName, itName, checkName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_backend_service" "lipsum" { + name = "%s" + description = "Hello World 1234" + port_name = "http" + protocol = "TCP" + region = "us-central1" + + backend { + group = google_compute_instance_group_manager.foobar.instance_group + capacity_scaler = 1.0 + } + + health_checks = [google_compute_health_check.default.self_link] +} + +resource "google_compute_instance_group_manager" "foobar" { + name = "%s" + version { + instance_template = google_compute_instance_template.foobar.self_link + name = "primary" + } + base_instance_name = "tf-test-foobar" + zone = "us-central1-f" + target_size = 1 +} + +resource "google_compute_instance_template" "foobar" { + name = "%s" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } +} + +resource "google_compute_health_check" "default" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + + tcp_health_check { + port = 443 + } +} +`, serviceName, igName, itName, checkName) +} + +func testAccComputeRegionBackendService_internalManagedMultipleBackends(serviceName, igmName, hcName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "default" { + name = "%s" + load_balancing_scheme = "INTERNAL_MANAGED" + + backend { + group = google_compute_region_instance_group_manager.rigm1.instance_group + balancing_mode = 
"UTILIZATION" + } + + backend { + group = google_compute_region_instance_group_manager.rigm2.instance_group + balancing_mode = "UTILIZATION" + capacity_scaler = 1.0 + } + + region = "us-central1" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_region_health_check.default.self_link] +} + +data "google_compute_image" "debian_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_group_manager" "rigm1" { + name = "%s-1" + region = "us-central1" + version { + instance_template = google_compute_instance_template.instance_template.self_link + name = "primary" + } + base_instance_name = "tf-test-internal-glb" + target_size = 1 +} + +resource "google_compute_region_instance_group_manager" "rigm2" { + name = "%s-2" + region = "us-central1" + version { + instance_template = google_compute_instance_template.instance_template.self_link + name = "primary" + } + base_instance_name = "tf-test-internal-glb" + target_size = 1 +} + +resource "google_compute_instance_template" "instance_template" { + name = "%s-template" + machine_type = "e2-medium" + + network_interface { + network = "default" + } + + disk { + source_image = data.google_compute_image.debian_image.self_link + auto_delete = true + boot = true + } +} + +resource "google_compute_region_health_check" "default" { + name = "%s" + region = "us-central1" + + http_health_check { + port_specification = "USE_SERVING_PORT" + } +} +`, serviceName, igmName, igmName, igmName, hcName) +} + +func testAccComputeRegionBackendService_withConnectionDraining(serviceName, checkName string, drainingTimeout int64) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.zero.self_link] + region = "us-central1" + connection_draining_timeout_sec = %v +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + 
timeout_sec = 1 + + tcp_health_check { + port = "80" + } +} +`, serviceName, drainingTimeout, checkName) +} + +func testAccComputeRegionBackendService_ilbBasicwithIAP(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + port_name = "http" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + locality_lb_policy = "RING_HASH" + circuit_breakers { + max_connections = 10 + } + consistent_hash { + http_cookie { + ttl { + seconds = 11 + nanos = 1234 + } + name = "mycookie" + } + } + outlier_detection { + consecutive_errors = 2 + } + + iap { + oauth2_client_id = "test" + oauth2_client_secret = "test" + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRegionBackendService_ilbWithSubsetting(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + protocol = "TCP" + load_balancing_scheme = "INTERNAL" + subsetting { + policy = "CONSISTENT_HASH_SUBSETTING" + } +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} + +func testAccComputeRegionBackendService_ilbNoSubsetting(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_health_check.health_check.self_link] + protocol = "TCP" + load_balancing_scheme = "INTERNAL" +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + http_health_check { + port = 80 + } +} +`, serviceName, checkName) +} +{{- end }} + +{{ if ne 
$.TargetVersionName `ga` -}} +func TestAccComputeRegionBackendService_withSecurityPolicy(t *testing.T) { + t.Parallel() + + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + polName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeBackendServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionBackendService_withSecurityPolicy(serviceName, polName, "google_compute_region_security_policy.policy.self_link"), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionBackendService_withSecurityPolicy(serviceName, polName, "\"\""), + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionBackendService_withSecurityPolicy(serviceName, polName, polLink string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + region = "us-central1" + security_policy = %s + load_balancing_scheme = "EXTERNAL_MANAGED" +} + +resource "google_compute_region_security_policy" "policy" { + name = "%s" + region = "us-central1" + description = "basic security policy" + type = "CLOUD_ARMOR" +} +`, serviceName, polLink, polName) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_disk_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_disk_test.go.tmpl new file mode 100644 index 000000000000..07d3d55a164d --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_disk_test.go.tmpl @@ -0,0 +1,566 @@ + +package compute_test + +import ( + "fmt" + "strconv" + "testing" + + 
"github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestAccComputeRegionDisk_basic(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + var disk compute.Disk + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionDiskDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionDisk_basic(diskName, "self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + ), + }, + { + ResourceName: "google_compute_region_disk.regiondisk", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccComputeRegionDisk_basic(diskName, "name"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + ), + }, + { + ResourceName: "google_compute_region_disk.regiondisk", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeRegionDisk_basicUpdate(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + var disk compute.Disk + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionDiskDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionDisk_basic(diskName, "self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + ), + }, + { + ResourceName: "google_compute_region_disk.regiondisk", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccComputeRegionDisk_basicUpdated(diskName, "self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + resource.TestCheckResourceAttr("google_compute_region_disk.regiondisk", "size", "100"), + testAccCheckComputeRegionDiskHasLabel(&disk, "my-label", "my-updated-label-value"), + testAccCheckComputeRegionDiskHasLabel(&disk, "a-new-label", "a-new-label-value"), + testAccCheckComputeRegionDiskHasLabelFingerprint(&disk, "google_compute_region_disk.regiondisk"), + ), + }, + { + ResourceName: "google_compute_region_disk.regiondisk", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeRegionDisk_encryption(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var disk compute.Disk + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionDiskDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionDisk_encryption(diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + testAccCheckRegionDiskEncryptionKey( + "google_compute_region_disk.regiondisk", 
&disk), + ), + }, + }, + }) +} + +func TestAccComputeRegionDisk_deleteDetach(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + regionDiskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + regionDiskName2 := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + var disk compute.Disk + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionDiskDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + ), + }, + // this needs to be an additional step so we refresh and see the instance + // listed as attached to the disk; the instance is created after the + // disk. 
and the disk's properties aren't refreshed unless there's + // another step + { + Config: testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + testAccCheckComputeRegionDiskInstances( + "google_compute_region_disk.regiondisk", &disk), + ), + }, + // Change the disk name to destroy it, which detaches it from the instance + { + Config: testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + ), + }, + // Add the extra step like before + { + Config: testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + testAccCheckComputeRegionDiskInstances( + "google_compute_region_disk.regiondisk", &disk), + ), + }, + }, + }) +} + +func TestAccComputeRegionDisk_cloneDisk(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + var disk compute.Disk + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionDiskDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionDisk_diskClone(diskName, "self_link"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk-clone", &disk), + ), + }, + { + ResourceName: "google_compute_region_disk.regiondisk-clone", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionDisk_featuresUpdated(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", 
acctest.RandString(t, 10)) + + var disk compute.Disk + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionDiskDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionDisk_features(diskName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionDiskExists( + t, "google_compute_region_disk.regiondisk", &disk), + ), + }, + { + ResourceName: "google_compute_region_disk.regiondisk", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionDisk_featuresUpdated(diskName), + }, + { + ResourceName: "google_compute_region_disk.regiondisk", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + +func testAccCheckComputeRegionDiskExists(t *testing.T, n string, disk *compute.Disk) resource.TestCheckFunc { + return func(s *terraform.State) error { + p := envvar.GetTestProjectFromEnv() + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.Attributes["name"] == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewComputeClient(config.UserAgent).RegionDisks.Get( + p, rs.Primary.Attributes["region"], rs.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.Attributes["name"] { + return fmt.Errorf("RegionDisk not found") + } + + *disk = *found + + return nil + } +} + +func testAccCheckComputeRegionDiskHasLabel(disk *compute.Disk, key, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + val, ok := disk.Labels[key] + if !ok { + return fmt.Errorf("Label with key %s not found", key) + } + + if val != value { + return fmt.Errorf("Label value did not match for key %s: expected %s but found %s", key, value, val) + } + return nil + } +} + +func 
testAccCheckComputeRegionDiskHasLabelFingerprint(disk *compute.Disk, resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + state := s.RootModule().Resources[resourceName] + if state == nil { + return fmt.Errorf("Unable to find resource named %s", resourceName) + } + + labelFingerprint := state.Primary.Attributes["label_fingerprint"] + if labelFingerprint != disk.LabelFingerprint { + return fmt.Errorf("Label fingerprints do not match: api returned %s but state has %s", + disk.LabelFingerprint, labelFingerprint) + } + + return nil + } +} + +func testAccCheckRegionDiskEncryptionKey(n string, disk *compute.Disk) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + attr := rs.Primary.Attributes["disk_encryption_key.0.sha256"] + if disk.DiskEncryptionKey == nil { + return fmt.Errorf("RegionDisk %s has mismatched encryption key.\nTF State: %+v\nGCP State: ", n, attr) + } else if attr != disk.DiskEncryptionKey.Sha256 { + return fmt.Errorf("RegionDisk %s has mismatched encryption key.\nTF State: %+v.\nGCP State: %+v", + n, attr, disk.DiskEncryptionKey.Sha256) + } + return nil + } +} + +func testAccCheckComputeRegionDiskInstances(n string, disk *compute.Disk) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + attr := rs.Primary.Attributes["users.#"] + if strconv.Itoa(len(disk.Users)) != attr { + return fmt.Errorf("RegionDisk %s has mismatched users.\nTF State: %+v\nGCP State: %+v", n, rs.Primary.Attributes["users"], disk.Users) + } + + for pos, user := range disk.Users { + if tpgresource.ConvertSelfLinkToV1(rs.Primary.Attributes["users."+strconv.Itoa(pos)]) != tpgresource.ConvertSelfLinkToV1(user) { + return fmt.Errorf("RegionDisk %s has mismatched users.\nTF State: %+v.\nGCP State: %+v", + n, 
rs.Primary.Attributes["users"], disk.Users) + } + } + return nil + } +} + +func testAccComputeRegionDisk_basic(diskName, refSelector string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "disk" { + name = "%s" + image = "debian-cloud/debian-11" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "snapdisk" { + name = "%s" + source_disk = google_compute_disk.disk.name + zone = "us-central1-a" +} + +resource "google_compute_region_disk" "regiondisk" { + name = "%s" + snapshot = google_compute_snapshot.snapdisk.%s + type = "pd-ssd" + replica_zones = ["us-central1-a", "us-central1-f"] +} +`, diskName, diskName, diskName, refSelector) +} + +func testAccComputeRegionDisk_basicUpdated(diskName, refSelector string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "disk" { + name = "%s" + image = "debian-cloud/debian-11" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "snapdisk" { + name = "%s" + source_disk = google_compute_disk.disk.name + zone = "us-central1-a" +} + +resource "google_compute_region_disk" "regiondisk" { + name = "%s" + snapshot = google_compute_snapshot.snapdisk.%s + type = "pd-ssd" + region = "us-central1" + + replica_zones = ["us-central1-a", "us-central1-f"] + + size = 100 + labels = { + my-label = "my-updated-label-value" + a-new-label = "a-new-label-value" + } +} +`, diskName, diskName, diskName, refSelector) +} + +func testAccComputeRegionDisk_encryption(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "disk" { + name = "%s" + image = "debian-cloud/debian-11" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "snapdisk" { + name = "%s" + zone = "us-central1-a" + + source_disk = google_compute_disk.disk.name +} + +resource "google_compute_region_disk" "regiondisk" { + name = "%s" + snapshot = google_compute_snapshot.snapdisk.self_link + type = "pd-ssd" 
+ + replica_zones = ["us-central1-a", "us-central1-f"] + + disk_encryption_key { + raw_key = "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=" + } +} +`, diskName, diskName, diskName) +} + +func testAccComputeRegionDisk_deleteDetach(instanceName, diskName, regionDiskName string) string { + return fmt.Sprintf(` +resource "google_compute_disk" "disk" { + name = "%s" + image = "debian-cloud/debian-11" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "snapdisk" { + name = "%s" + source_disk = google_compute_disk.disk.name + zone = "us-central1-a" +} + +resource "google_compute_region_disk" "regiondisk" { + name = "%s" + snapshot = google_compute_snapshot.snapdisk.self_link + type = "pd-ssd" + + replica_zones = ["us-central1-a", "us-central1-f"] +} + +resource "google_compute_instance" "inst" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + attached_disk { + source = google_compute_region_disk.regiondisk.self_link + } + + network_interface { + network = "default" + } +} +`, diskName, diskName, regionDiskName, instanceName) +} + +func testAccComputeRegionDisk_diskClone(diskName, refSelector string) string { + return fmt.Sprintf(` + resource "google_compute_region_disk" "regiondisk" { + name = "%s" + snapshot = google_compute_snapshot.snapdisk.id + type = "pd-ssd" + region = "us-central1" + physical_block_size_bytes = 4096 + + replica_zones = ["us-central1-a", "us-central1-f"] + } + + resource "google_compute_disk" "disk" { + name = "%s" + image = "debian-11-bullseye-v20220719" + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + } + + resource "google_compute_snapshot" "snapdisk" { + name = "%s" + source_disk = google_compute_disk.disk.name + zone = "us-central1-a" + } + + resource "google_compute_region_disk" "regiondisk-clone" { + name = "%s" + source_disk = google_compute_region_disk.regiondisk.%s + type = "pd-ssd" + 
region = "us-central1" + physical_block_size_bytes = 4096 + + replica_zones = ["us-central1-a", "us-central1-f"] + } + `, diskName, diskName, diskName, diskName+"-clone", refSelector) +} + +func testAccComputeRegionDisk_features(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_region_disk" "regiondisk" { + name = "%s" + type = "pd-ssd" + size = 50 + region = "us-central1" + + guest_os_features { + type = "SECURE_BOOT" + } + + replica_zones = ["us-central1-a", "us-central1-f"] +} +`, diskName) +} + +func testAccComputeRegionDisk_featuresUpdated(diskName string) string { + return fmt.Sprintf(` +resource "google_compute_region_disk" "regiondisk" { + name = "%s" + type = "pd-ssd" + size = 50 + region = "us-central1" + + guest_os_features { + type = "SECURE_BOOT" + } + + guest_os_features { + type = "MULTI_IP_SUBNET" + } + + replica_zones = ["us-central1-a", "us-central1-f"] +} +`, diskName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_health_check_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_health_check_test.go new file mode 100644 index 000000000000..0ce408784596 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_health_check_test.go @@ -0,0 +1,385 @@ +package compute_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeRegionHealthCheck_tcp_update(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionHealthCheck_tcp(hckName), + }, + { + 
ResourceName: "google_compute_region_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionHealthCheck_tcp_update(hckName), + }, + { + ResourceName: "google_compute_region_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionHealthCheck_ssl_port_spec(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionHealthCheck_ssl_fixed_port(hckName), + }, + { + ResourceName: "google_compute_region_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionHealthCheck_http_port_spec(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionHealthCheck_http_port_spec(hckName), + ExpectError: regexp.MustCompile("Error in http_health_check: Must specify port_name when using USE_NAMED_PORT as port_specification."), + }, + { + Config: testAccComputeRegionHealthCheck_http_named_port(hckName), + }, + { + ResourceName: "google_compute_region_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionHealthCheck_https_serving_port(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionHealthCheck_https_serving_port(hckName), + }, + { + ResourceName: "google_compute_region_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionHealthCheck_typeTransition(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionHealthCheck_https(hckName), + }, + { + Config: testAccComputeRegionHealthCheck_http(hckName), + }, + { + Config: testAccComputeRegionHealthCheck_ssl(hckName), + }, + { + Config: testAccComputeRegionHealthCheck_tcp(hckName), + }, + { + Config: testAccComputeRegionHealthCheck_http2(hckName), + }, + { + Config: testAccComputeRegionHealthCheck_https(hckName), + }, + }, + }) +} + +func TestAccComputeRegionHealthCheck_tcpAndSsl_shouldFail(t *testing.T) { + // This is essentially a unit test, no interactions + acctest.SkipIfVcr(t) + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionHealthCheck_tcpAndSsl_shouldFail(hckName), + ExpectError: regexp.MustCompile("only one of\n`grpc_health_check,http2_health_check,http_health_check,https_health_check,ssl_health_check,tcp_health_check`\ncan be specified, but 
`ssl_health_check,tcp_health_check` were specified"), + + }, + }, + }) +} + +func TestAccComputeRegionHealthCheck_logConfigDisabled(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionHealthCheck_logConfigDisabled(hckName), + }, + { + ResourceName: "google_compute_region_health_check.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionHealthCheck_logConfigDisabled(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http2_health_check { + port = "443" + } + log_config { + enable = false + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_tcp(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + tcp_health_check { + port = 443 + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_tcp_update(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + healthy_threshold = 10 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 10 + tcp_health_check { + port = "8080" + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_ssl(hckName string) string { + return fmt.Sprintf(` +resource 
"google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + ssl_health_check { + port = "443" + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_ssl_fixed_port(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + ssl_health_check { + port = "443" + port_specification = "USE_FIXED_PORT" + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_http(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http_health_check { + port = "80" + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_http_port_spec(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http_health_check { + port_specification = "USE_NAMED_PORT" + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_http_named_port(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http_health_check { + port_name = "http" + port_specification = 
"USE_NAMED_PORT" + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_https(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + https_health_check { + port = "443" + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_https_serving_port(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + https_health_check { + port_specification = "USE_SERVING_PORT" + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_http2(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + http2_health_check { + port = "443" + } +} +`, hckName) +} + +func testAccComputeRegionHealthCheck_tcpAndSsl_shouldFail(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_region_health_check" "foobar" { + check_interval_sec = 3 + description = "Resource created for Terraform acceptance testing" + healthy_threshold = 3 + name = "health-test-%s" + timeout_sec = 2 + unhealthy_threshold = 3 + + tcp_health_check { + port = 443 + } + ssl_health_check { + port = 443 + } +} +`, hckName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_group_manager.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_group_manager.go.tmpl new file mode 100644 index 
000000000000..296bee5c2361 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_group_manager.go.tmpl @@ -0,0 +1,1207 @@ +package compute + +import ( + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeRegionInstanceGroupManager() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionInstanceGroupManagerCreate, + Read: resourceComputeRegionInstanceGroupManagerRead, + Update: resourceComputeRegionInstanceGroupManagerUpdate, + Delete: resourceComputeRegionInstanceGroupManagerDelete, + Importer: &schema.ResourceImporter{ + State: resourceRegionInstanceGroupManagerStateImporter, + }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(15 * time.Minute), + Update: schema.DefaultTimeout(15 * time.Minute), + Delete: schema.DefaultTimeout(15 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.DefaultProviderRegion, + ), + + Schema: map[string]*schema.Schema{ + "base_instance_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The base instance name to use for instances in this group. The value must be a valid RFC1035 name. Supported characters are lowercase letters, numbers, and hyphens (-). 
Instances are named by appending a hyphen and a random four-character string to the base instance name.`, + }, + + "version": { + Type: schema.TypeList, + Required: true, + Description: `Application versions managed by this instance group. Each version deals with a specific instance template, allowing canary release scenarios.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Version name.`, + }, + + "instance_template": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: compareSelfLinkRelativePathsIgnoreParams, + Description: `The full URL to an instance template from which all new instances of this version will be created.`, + }, + + "target_size": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The number of instances calculated as a fixed number or a percentage depending on the settings.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fixed": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of instances which are managed for this version. Conflicts with percent.`, + }, + + "percent": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 100), + Description: `The number of instances (calculated as percentage) which are managed for this version. Conflicts with fixed. Note that when using percent, rounding will be in favor of explicitly set target_size values; a managed instance group with 2 instances and 2 versions, one of which has a target_size.percent of 60 will create 2 instances of that version.`, + }, + }, + }, + }, + }, + }, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the instance group manager. Must be 1-63 characters long and comply with RFC1035. 
Supported characters include lowercase letters, numbers, and hyphens.`, + }, + + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: `The region where the managed instance group resides.`, + }, + + "creation_timestamp": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp in RFC3339 text format.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An optional textual description of the instance group manager.`, + }, + + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint of the instance group manager.`, + }, + + "instance_group": { + Type: schema.TypeString, + Computed: true, + Description: `The full URL of the instance group created by the manager.`, + }, + + "named_port": { + Type: schema.TypeSet, + Optional: true, + Description: `The named port configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the port.`, + }, + + "port": { + Type: schema.TypeInt, + Required: true, + Description: `The port number.`, + }, + }, + }, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URL of the created resource.`, + }, + + "target_pools": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: tpgresource.SelfLinkRelativePathHash, + Description: `The full URL of all target pools to which new instances in the group are added. 
Updating the target pools attribute does not affect existing instances.`, + }, + "target_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of running instances for this managed instance group. This value should always be explicitly set unless this resource is attached to an autoscaler, in which case it should never be set. Defaults to 0.`, + }, + + "list_managed_instances_results": { + Type: schema.TypeString, + Optional: true, + Default: "PAGELESS", + ValidateFunc: validation.StringInSlice([]string{"PAGELESS", "PAGINATED"}, false), + Description: `Pagination behavior of the listManagedInstances API method for this managed instance group. Valid values are: "PAGELESS", "PAGINATED". If PAGELESS (default), Pagination is disabled for the group's listManagedInstances API method. maxResults and pageToken query parameters are ignored and all instances are returned in a single response. If PAGINATED, pagination is enabled, maxResults and pageToken query parameters are respected.`, + }, + + // If true, the resource will report ready only after no instances are being created. + // This will not block future reads if instances are being recreated, and it respects + // the "createNoRetry" parameter that's available for this resource. + "wait_for_instances": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether to wait for all instances to be created/updated before returning. Note that if this is set to true and the operation does not succeed, Terraform will continue trying until it times out.`, + }, + "wait_for_instances_status": { + Type: schema.TypeString, + Optional: true, + Default: "STABLE", + ValidateFunc: validation.StringInSlice([]string{"STABLE", "UPDATED"}, false), + Description: `When used with wait_for_instances specifies the status to wait for. When STABLE is specified this resource will wait until the instances are stable before returning. 
When UPDATED is set, it will wait for the version target to be reached and any per instance configs to be effective and all instances configs to be effective as well as all instances to be stable before returning.`, + }, + + "auto_healing_policies": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The autohealing policies for this managed instance group. You can specify only one value.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "health_check": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The health check resource that signals autohealing.`, + }, + + "initial_delay_sec": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 3600), + Description: `The number of seconds that the managed instance group waits before it applies autohealing policies to new instances or recently recreated instances. Between 0 and 3600.`, + }, + }, + }, + }, + + "distribution_policy_zones": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The distribution policy for this managed instance group. 
You can specify one or more values.`, + Set: hashZoneFromSelfLinkOrResourceName, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + }, + + "distribution_policy_target_shape": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The shape to which the group converges either proactively or on resize events (depending on the value set in updatePolicy.instanceRedistributionType).`, + }, + + "instance_lifecycle_policy": { + Computed: true, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The instance lifecycle policy for this managed instance group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_action_on_failure": { + Type: schema.TypeString, + Default: "REPAIR", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"REPAIR", "DO_NOTHING"}, true), + Description: `Default behavior for all instance or health check failures.`, + }, + "force_update_on_repair": { + Type: schema.TypeString, + Default: "NO", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"YES", "NO"}, false), + Description: `Specifies whether to apply the group's latest configuration when repairing a VM. Valid options are: YES, NO. If YES and you updated the group's instance template or per-instance configurations after the VM was created, then these changes are applied when VM is repaired. 
If NO (default), then updates are applied in accordance with the group's update policy type.`, + }, + }, + }, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "standby_policy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + MaxItems: 1, + Description: `Standby policy for stopped and suspended instances.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "initial_delay_sec": { + Computed: true, + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 3600), + Description: `Specifies the number of seconds that the MIG should wait to suspend or stop a VM after that VM was created. The initial delay gives the initialization script the time to prepare your VM for a quick scale out. The value of initial delay must be between 0 and 3600 seconds. The default value is 0.`, + }, + + "mode": { + Computed: true, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"MANUAL", "SCALE_OUT_POOL"}, true), + Description: `Defines how a MIG resumes or starts VMs from a standby pool when the group scales out. The default mode is "MANUAL".`, + }, + }, + }, + }, + + "target_suspended_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of suspended instances for this managed instance group.`, + }, + + "target_stopped_size": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `The target number of stopped instances for this managed instance group.`, + }, + {{- end }} + + "update_policy": { + Type: schema.TypeList, + Computed: true, + Optional: true, + MaxItems: 1, + Description: `The update policy for this managed instance group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "minimal_action": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"NONE", "REFRESH", "RESTART", "REPLACE"}, false), + Description: `Minimal action to be taken on an instance. 
You can specify either NONE to forbid any actions, REFRESH to update without stopping instances, RESTART to restart existing instances or REPLACE to delete and create new instances from the target template. If you specify a REFRESH, the Updater will attempt to perform that action only. However, if the Updater determines that the minimal action you specify is not enough to perform the update, it might perform a more disruptive action.`, + }, + + "most_disruptive_allowed_action": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"NONE", "REFRESH", "RESTART", "REPLACE"}, false), + Description: `Most disruptive action that is allowed to be taken on an instance. You can specify either NONE to forbid any actions, REFRESH to allow actions that do not need instance restart, RESTART to allow actions that can be applied without instance replacing or REPLACE to allow all possible actions. If the Updater determines that the minimal update action needed is more disruptive than most disruptive allowed action you specify it will not perform the update at all.`, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"OPPORTUNISTIC", "PROACTIVE"}, false), + Description: `The type of update process. You can specify either PROACTIVE so that the instance group manager proactively executes actions in order to bring instances to their target versions or OPPORTUNISTIC so that no action is proactively executed but the update will be performed as part of other actions (for example, resizes or recreateInstances calls).`, + }, + + "max_surge_fixed": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ConflictsWith: []string{"update_policy.0.max_surge_percent"}, + Description: `The maximum number of instances that can be created above the specified targetSize during the update process. Conflicts with max_surge_percent. 
It has to be either 0 or at least equal to the number of zones. If fixed values are used, at least one of max_unavailable_fixed or max_surge_fixed must be greater than 0.`, + }, + + "max_surge_percent": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"update_policy.0.max_surge_fixed"}, + Description: `The maximum number of instances(calculated as percentage) that can be created above the specified targetSize during the update process. Conflicts with max_surge_fixed. Percent value is only allowed for regional managed instance groups with size at least 10.`, + ValidateFunc: validation.IntBetween(0, 100), + }, + + "max_unavailable_fixed": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: `The maximum number of instances that can be unavailable during the update process. Conflicts with max_unavailable_percent. It has to be either 0 or at least equal to the number of zones. If fixed values are used, at least one of max_unavailable_fixed or max_surge_fixed must be greater than 0.`, + ConflictsWith: []string{"update_policy.0.max_unavailable_percent"}, + }, + + "max_unavailable_percent": { + Type: schema.TypeInt, + Optional: true, + ConflictsWith: []string{"update_policy.0.max_unavailable_fixed"}, + ValidateFunc: validation.IntBetween(0, 100), + Description: `The maximum number of instances(calculated as percentage) that can be unavailable during the update process. Conflicts with max_unavailable_fixed. Percent value is only allowed for regional managed instance groups with size at least 10.`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "min_ready_sec": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 3600), + Description: `Minimum number of seconds to wait for after a newly created instance becomes available. 
This value must be from range [0, 3600].`, + }, + {{- end }} + "instance_redistribution_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"PROACTIVE", "NONE", ""}, false), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("PROACTIVE"), + Description: `The instance redistribution policy for regional managed instance groups. Valid values are: "PROACTIVE", "NONE". If PROACTIVE (default), the group attempts to maintain an even distribution of VM instances across zones in the region. If NONE, proactive redistribution is disabled.`, + }, + "replacement_method": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"RECREATE", "SUBSTITUTE", ""}, false), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("SUBSTITUTE"), + Description: `The instance replacement method for regional managed instance groups. Valid values are: "RECREATE", "SUBSTITUTE". If SUBSTITUTE (default), the group replaces VM instances with new instances that have randomly generated names. If RECREATE, instance names are preserved. You must also set max_unavailable_fixed or max_unavailable_percent to be greater than 0.`, + }, + }, + }, + }, + "all_instances_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies configuration that overrides the instance template configuration for the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metadata": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The metadata key-value pairs that you want to patch onto the instance. 
For more information, see Project and instance metadata,`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `The label key-value pairs that you want to patch onto the instance,`, + }, + }, + }, + }, + "stateful_internal_ip": { + Type: schema.TypeList, + Optional: true, + Description: `External IPs considered stateful by the instance group. `, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interface_name": { + Type: schema.TypeString, + Optional: true, + Description: `The network interface name`, + }, + "delete_rule": { + Type: schema.TypeString, + Default: "NEVER", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION"}, true), + Description: `A value that prescribes what should happen to an associated static Address resource when a VM instance is permanently deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the IP when the VM is deleted, but do not delete the address resource. ON_PERMANENT_INSTANCE_DELETION will delete the stateful address when the VM is permanently deleted from the instance group. The default is NEVER.`, + }, + }, + }, + }, + "stateful_external_ip": { + Type: schema.TypeList, + Optional: true, + Description: `External IPs considered stateful by the instance group. `, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "interface_name": { + Type: schema.TypeString, + Optional: true, + Description: `The network interface name`, + }, + "delete_rule": { + Type: schema.TypeString, + Default: "NEVER", + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION"}, true), + Description: `A value that prescribes what should happen to an associated static Address resource when a VM instance is permanently deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. 
NEVER - detach the IP when the VM is deleted, but do not delete the address resource. ON_PERMANENT_INSTANCE_DELETION will delete the stateful address when the VM is permanently deleted from the instance group. The default is NEVER.`, + }, + }, + }, + }, + "stateful_disk": { + Type: schema.TypeSet, + Optional: true, + Description: `Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the official documentation. Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the update_policy.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "device_name": { + Type: schema.TypeString, + Required: true, + Description: `The device name of the disk to be attached.`, + }, + + "delete_rule": { + Type: schema.TypeString, + Default: "NEVER", + Optional: true, + Description: `A value that prescribes what should happen to the stateful disk when the VM instance is deleted. The available options are NEVER and ON_PERMANENT_INSTANCE_DELETION. NEVER - detach the disk when the VM is deleted, but do not delete the disk. ON_PERMANENT_INSTANCE_DELETION will delete the stateful disk when the VM is permanently deleted from the instance group. The default is NEVER.`, + ValidateFunc: validation.StringInSlice([]string{"NEVER", "ON_PERMANENT_INSTANCE_DELETION"}, true), + }, + }, + }, + }, + "status": { + Type: schema.TypeList, + Computed: true, + Description: `The status of this managed instance group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_stable": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether the managed instance group is in a stable state. 
A stable state means that: none of the instances in the managed instance group is currently undergoing any type of change (for example, creation, restart, or deletion); no future changes are scheduled for instances in the managed instance group; and the managed instance group itself is not being modified.`, + }, + + "version_target": { + Type: schema.TypeList, + Computed: true, + Description: `A status of consistency of Instances' versions with their target version specified by version field on Instance Group Manager.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_reached": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether version target has been reached in this managed instance group, i.e. all instances are in their target version. Instances' target version are specified by version field on Instance Group Manager.`, + }, + }, + }, + }, + "all_instances_config": { + Type: schema.TypeList, + Computed: true, + Description: `Status of all-instances configuration on the group.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether this configuration has been applied to all managed instances in the group.`, + }, + "current_revision": { + Type: schema.TypeString, + Computed: true, + Description: `Current all-instances configuration revision. This value is in RFC3339 text format.`, + }, + }, + }, + }, + "stateful": { + Type: schema.TypeList, + Computed: true, + Description: `Stateful status of the given Instance Group Manager.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "has_stateful_config": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating whether the managed instance group has stateful configuration, that is, if you have configured any items in a stateful policy or in per-instance configs. 
The group might report that it has no stateful config even when there is still some preserved state on a managed instance, for example, if you have deleted all PICs but not yet applied those deletions.`, + }, + "per_instance_configs": { + Type: schema.TypeList, + Computed: true, + Description: `Status of per-instance configs on the instances.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "all_effective": { + Type: schema.TypeBool, + Computed: true, + Description: `A bit indicating if all of the group's per-instance configs (listed in the output of a listPerInstanceConfigs API call) have status EFFECTIVE or there are no per-instance-configs.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + {{- if ne $.TargetVersionName "ga" }} + "params": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Input only additional params for instance group manager creation.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + // This field is intentionally not updatable. The API overrides all existing tags on the field when updated. + ForceNew: true, + Description: `Resource manager tags to bind to the managed instance group. The tags are key-value pairs. 
Keys must be in the format tagKeys/123 and values in the format tagValues/456.`, + }, + }, + }, + }, + {{- end }} + }, + UseJSONNumber: true, + } +} + +func resourceComputeRegionInstanceGroupManagerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + manager := &compute.InstanceGroupManager{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + BaseInstanceName: d.Get("base_instance_name").(string), + TargetSize: int64(d.Get("target_size").(int)), + ListManagedInstancesResults: d.Get("list_managed_instances_results").(string), + NamedPorts: getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()), + TargetPools: tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)), + AutoHealingPolicies: expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})), + Versions: expandVersions(d.Get("version").([]interface{})), + {{- if ne $.TargetVersionName "ga" }} + StandbyPolicy: expandStandbyPolicy(d), + TargetSuspendedSize: int64(d.Get("target_suspended_size").(int)), + TargetStoppedSize: int64(d.Get("target_stopped_size").(int)), + {{- end }} + UpdatePolicy: expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})), + InstanceLifecyclePolicy: expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})), + AllInstancesConfig: expandAllInstancesConfig(nil, d.Get("all_instances_config").([]interface{})), + DistributionPolicy: expandDistributionPolicy(d), + StatefulPolicy: expandStatefulPolicy(d), + {{- if ne $.TargetVersionName "ga" }} + Params: expandInstanceGroupManagerParams(d), + {{- end }} + // Force send TargetSize to allow size of 0. 
+ ForceSendFields: []string{"TargetSize"}, + } + + op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Insert(project, region, manager).Do() + + if err != nil { + return fmt.Errorf("Error creating RegionInstanceGroupManager: %s", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceGroupManagers/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Wait for the operation to complete + err = ComputeOperationWaitTime(config, op, project, "Creating InstanceGroupManager", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + if d.Get("wait_for_instances").(bool) { + err := computeRIGMWaitForInstanceStatus(d, meta) + if err != nil { + return err + } + } + + return resourceComputeRegionInstanceGroupManagerRead(d, config) +} + +func computeRIGMWaitForInstanceStatus(d *schema.ResourceData, meta interface{}) error { + waitForUpdates := d.Get("wait_for_instances_status").(string) == "UPDATED" + conf := retry.StateChangeConf{ + Pending: []string{"creating", "error", "updating per instance configs", "reaching version target", "updating all instances config"}, + Target: []string{"created"}, + Refresh: waitForInstancesRefreshFunc(getRegionalManager, waitForUpdates, d, meta), + Timeout: d.Timeout(schema.TimeoutCreate), + } + _, err := conf.WaitForState() + if err != nil { + return err + } + return nil +} + +type getInstanceManagerFunc func(*schema.ResourceData, interface{}) (*compute.InstanceGroupManager, error) + +func getRegionalManager(d *schema.ResourceData, meta interface{}) (*compute.InstanceGroupManager, error) { + config := meta.(*transport_tpg.Config) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return nil, err + } + + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + name := d.Get("name").(string) + manager, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Get(project, region, name).Do() + if err != nil { + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Region Instance Manager %q", name)) + } + + return manager, nil +} + +func waitForInstancesRefreshFunc(f getInstanceManagerFunc, waitForUpdates bool, d *schema.ResourceData, meta interface{}) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + m, err := f(d, meta) + if err != nil { + log.Printf("[WARNING] Error in fetching manager while waiting for instances to come up: %s\n", err) + return nil, "error", err + } + if m == nil { + // getManager/getRegional manager call handleNotFoundError, which will return a nil error and nil object in the case + // that the original error was a 404. if m == nil here, we will assume that it was not found return an "instance manager not found" + // error so that we can parse it later on and handle it there + return nil, "error", fmt.Errorf("instance manager not found") + } + if m.Status.IsStable { + if waitForUpdates { + // waitForUpdates waits for versions to be reached and per instance configs to be updated (if present) + if m.Status.Stateful.HasStatefulConfig { + if !m.Status.Stateful.PerInstanceConfigs.AllEffective { + return false, "updating per instance configs", nil + } + } + if !m.Status.VersionTarget.IsReached { + return false, "reaching version target", nil + } + if !m.Status.VersionTarget.IsReached { + return false, "reaching version target", nil + } + if !m.Status.AllInstancesConfig.Effective { + return false, "updating all instances config", nil + } + } + return true, "created", nil + } else { + return false, "creating", nil + } + } +} + +func resourceComputeRegionInstanceGroupManagerRead(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*transport_tpg.Config) + + manager, err := getRegionalManager(d, meta) + if err != nil { + return err + } + if manager == nil { + log.Printf("[WARN] Region Instance Group Manager %q not found, removing from state.", d.Id()) + d.SetId("") + return nil + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + if err := d.Set("base_instance_name", manager.BaseInstanceName); err != nil { + return fmt.Errorf("Error setting base_instance_name: %s", err) + } + if err := d.Set("name", manager.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("region", tpgresource.GetResourceNameFromSelfLink(manager.Region)); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("creation_timestamp", manager.CreationTimestamp); err != nil { + return fmt.Errorf("Error reading creation_timestamp: %s", err) + } + if err := d.Set("description", manager.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("target_size", manager.TargetSize); err != nil { + return fmt.Errorf("Error setting target_size: %s", err) + } + if err := d.Set("list_managed_instances_results", manager.ListManagedInstancesResults); err != nil { + return fmt.Errorf("Error setting list_managed_instances_results: %s", err) + } + if err := d.Set("target_pools", tpgresource.MapStringArr(manager.TargetPools, tpgresource.ConvertSelfLinkToV1)); err != nil { + return fmt.Errorf("Error setting target_pools in state: %s", err.Error()) + } + if err := d.Set("named_port", flattenNamedPortsBeta(manager.NamedPorts)); err != nil { + return fmt.Errorf("Error setting named_port in state: %s", err.Error()) + } + if err := d.Set("fingerprint", manager.Fingerprint); err != nil { + return fmt.Errorf("Error setting fingerprint: %s", err) + } + if err := 
d.Set("instance_group", tpgresource.ConvertSelfLinkToV1(manager.InstanceGroup)); err != nil { + return fmt.Errorf("Error setting instance_group: %s", err) + } + if err := d.Set("distribution_policy_zones", flattenDistributionPolicy(manager.DistributionPolicy)); err != nil { + return err + } + if err := d.Set("distribution_policy_target_shape", manager.DistributionPolicy.TargetShape); err != nil { + return err + } + if err := d.Set("self_link", tpgresource.ConvertSelfLinkToV1(manager.SelfLink)); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + + if err := d.Set("auto_healing_policies", flattenAutoHealingPolicies(manager.AutoHealingPolicies)); err != nil { + return fmt.Errorf("Error setting auto_healing_policies in state: %s", err.Error()) + } + if err := d.Set("version", flattenVersions(manager.Versions)); err != nil { + return err + } + {{- if ne $.TargetVersionName "ga" }} + if err = d.Set("standby_policy", flattenStandbyPolicy(manager.StandbyPolicy)); err != nil { + return fmt.Errorf("Error setting standby_policy in state: %s", err.Error()) + } + if err := d.Set("target_suspended_size", manager.TargetSuspendedSize); err != nil { + return fmt.Errorf("Error setting target_suspended_size: %s", err) + } + if err := d.Set("target_stopped_size", manager.TargetStoppedSize); err != nil { + return fmt.Errorf("Error setting target_stopped_size: %s", err) + } + {{- end }} + if err := d.Set("update_policy", flattenRegionUpdatePolicy(manager.UpdatePolicy)); err != nil { + return fmt.Errorf("Error setting update_policy in state: %s", err.Error()) + } + if err = d.Set("instance_lifecycle_policy", flattenInstanceLifecyclePolicy(manager.InstanceLifecyclePolicy)); err != nil { + return fmt.Errorf("Error setting instance lifecycle policy in state: %s", err.Error()) + } + if manager.AllInstancesConfig != nil { + if err = d.Set("all_instances_config", flattenAllInstancesConfig(manager.AllInstancesConfig)); err != nil { + return fmt.Errorf("Error setting 
all_instances_config in state: %s", err.Error()) + } + } + if err = d.Set("stateful_disk", flattenStatefulPolicy(manager.StatefulPolicy)); err != nil { + return fmt.Errorf("Error setting stateful_disk in state: %s", err.Error()) + } + if err = d.Set("status", flattenStatus(manager.Status)); err != nil { + return fmt.Errorf("Error setting status in state: %s", err.Error()) + } + if err = d.Set("stateful_internal_ip", flattenStatefulPolicyStatefulInternalIps(d, manager.StatefulPolicy)); err != nil { + return fmt.Errorf("Error setting stateful_internal_ip in state: %s", err.Error()) + } + if err = d.Set("stateful_external_ip", flattenStatefulPolicyStatefulExternalIps(d, manager.StatefulPolicy)); err != nil { + return fmt.Errorf("Error setting stateful_external_ip in state: %s", err.Error()) + } + // If unset in state set to default value + if d.Get("wait_for_instances_status").(string) == "" { + if err = d.Set("wait_for_instances_status", "STABLE"); err != nil { + return fmt.Errorf("Error setting wait_for_instances_status in state: %s", err.Error()) + } + } + + return nil +} + +func resourceComputeRegionInstanceGroupManagerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + updatedManager := &compute.InstanceGroupManager{ + Fingerprint: d.Get("fingerprint").(string), + } + var change bool + + if d.HasChange("target_pools") { + updatedManager.TargetPools = tpgresource.ConvertStringSet(d.Get("target_pools").(*schema.Set)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetPools") + change = true + } + + if d.HasChange("auto_healing_policies") { + updatedManager.AutoHealingPolicies = 
expandAutoHealingPolicies(d.Get("auto_healing_policies").([]interface{})) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "AutoHealingPolicies") + change = true + } + + if d.HasChange("version") { + updatedManager.Versions = expandVersions(d.Get("version").([]interface{})) + change = true + } + + if d.HasChange("distribution_policy_target_shape") { + updatedManager.DistributionPolicy = expandDistributionPolicy(d) + change = true + } + + {{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("standby_policy") { + updatedManager.StandbyPolicy = expandStandbyPolicy(d) + change = true + } + + if d.HasChange("target_suspended_size") { + updatedManager.TargetSuspendedSize = int64(d.Get("target_suspended_size").(int)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetSuspendedSize") + change = true + } + + if d.HasChange("target_stopped_size") { + updatedManager.TargetStoppedSize = int64(d.Get("target_stopped_size").(int)) + updatedManager.ForceSendFields = append(updatedManager.ForceSendFields, "TargetStoppedSize") + change = true + } + {{- end }} + + if d.HasChange("update_policy") { + updatedManager.UpdatePolicy = expandRegionUpdatePolicy(d.Get("update_policy").([]interface{})) + change = true + } + + if d.HasChange("instance_lifecycle_policy") { + updatedManager.InstanceLifecyclePolicy = expandInstanceLifecyclePolicy(d.Get("instance_lifecycle_policy").([]interface{})) + change = true + } + + if d.HasChange("stateful_internal_ip") || d.HasChange("stateful_external_ip") || d.HasChange("stateful_disk") { + updatedManager.StatefulPolicy = expandStatefulPolicy(d) + change = true + } + + if d.HasChange("all_instances_config") { + oldAic, newAic := d.GetChange("all_instances_config") + if newAic == nil || len(newAic.([]interface{})) == 0 { + updatedManager.NullFields = append(updatedManager.NullFields, "AllInstancesConfig") + } else { + updatedManager.AllInstancesConfig = 
expandAllInstancesConfig(oldAic.([]interface{}), newAic.([]interface{})) + } + change = true + } + + if d.HasChange("list_managed_instances_results") { + updatedManager.ListManagedInstancesResults = d.Get("list_managed_instances_results").(string) + change = true + } + + if change { + op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Patch(project, region, d.Get("name").(string), updatedManager).Do() + if err != nil { + return fmt.Errorf("Error updating region managed group instances: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Updating region managed group instances", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + // named ports can't be updated through PATCH + // so we call the update method on the region instance group, instead of the rigm + if d.HasChange("named_port") { + d.Partial(true) + namedPorts := getNamedPortsBeta(d.Get("named_port").(*schema.Set).List()) + setNamedPorts := &compute.RegionInstanceGroupsSetNamedPortsRequest{ + NamedPorts: namedPorts, + } + + op, err := config.NewComputeClient(userAgent).RegionInstanceGroups.SetNamedPorts( + project, region, d.Get("name").(string), setNamedPorts).Do() + + if err != nil { + return fmt.Errorf("Error updating RegionInstanceGroupManager: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Updating RegionInstanceGroupManager", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + // target size should use resize + if d.HasChange("target_size") { + d.Partial(true) + targetSize := int64(d.Get("target_size").(int)) + op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Resize( + project, region, d.Get("name").(string), targetSize).Do() + + if err != nil { + return fmt.Errorf("Error resizing RegionInstanceGroupManager: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Resizing RegionInstanceGroupManager", userAgent, 
d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + d.Partial(false) + + if d.Get("wait_for_instances").(bool) { + err := computeRIGMWaitForInstanceStatus(d, meta) + if err != nil { + return err + } + } + + return resourceComputeRegionInstanceGroupManagerRead(d, meta) +} + +func resourceComputeRegionInstanceGroupManagerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + op, err := config.NewComputeClient(userAgent).RegionInstanceGroupManagers.Delete(project, region, name).Do() + + if err != nil { + return fmt.Errorf("Error deleting region instance group manager: %s", err) + } + + // Wait for the operation to complete + err = ComputeOperationWaitTime(config, op, project, "Deleting RegionInstanceGroupManager", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf("Error waiting for delete to complete: %s", err) + } + + d.SetId("") + return nil +} + +func expandRegionUpdatePolicy(configured []interface{}) *compute.InstanceGroupManagerUpdatePolicy { + updatePolicy := &compute.InstanceGroupManagerUpdatePolicy{} + + for _, raw := range configured { + data := raw.(map[string]interface{}) + + updatePolicy.MinimalAction = data["minimal_action"].(string) + mostDisruptiveAllowedAction := data["most_disruptive_allowed_action"].(string) + if mostDisruptiveAllowedAction != "" { + updatePolicy.MostDisruptiveAllowedAction = mostDisruptiveAllowedAction + } else { + updatePolicy.NullFields = append(updatePolicy.NullFields, "MostDisruptiveAllowedAction") + } + updatePolicy.Type = data["type"].(string) + updatePolicy.InstanceRedistributionType = 
data["instance_redistribution_type"].(string) + updatePolicy.ReplacementMethod = data["replacement_method"].(string) +{{- if ne $.TargetVersionName "ga" }} + updatePolicy.MinReadySec = int64(data["min_ready_sec"].(int)) + updatePolicy.ForceSendFields = []string{"MinReadySec"} +{{- end }} + + // percent and fixed values are conflicting + // when the percent values are set, the fixed values will be ignored + if v := data["max_surge_percent"]; v.(int) > 0 { + updatePolicy.MaxSurge = &compute.FixedOrPercent{ + Percent: int64(v.(int)), + NullFields: []string{"Fixed"}, + } + } else { + updatePolicy.MaxSurge = &compute.FixedOrPercent{ + Fixed: int64(data["max_surge_fixed"].(int)), + // allow setting this value to 0 + ForceSendFields: []string{"Fixed"}, + NullFields: []string{"Percent"}, + } + } + + if v := data["max_unavailable_percent"]; v.(int) > 0 { + updatePolicy.MaxUnavailable = &compute.FixedOrPercent{ + Percent: int64(v.(int)), + NullFields: []string{"Fixed"}, + } + } else { + updatePolicy.MaxUnavailable = &compute.FixedOrPercent{ + Fixed: int64(data["max_unavailable_fixed"].(int)), + // allow setting this value to 0 + ForceSendFields: []string{"Fixed"}, + NullFields: []string{"Percent"}, + } + } + } + return updatePolicy +} + +func flattenRegionUpdatePolicy(updatePolicy *compute.InstanceGroupManagerUpdatePolicy) []map[string]interface{} { + results := []map[string]interface{}{} + if updatePolicy != nil { + up := map[string]interface{}{} + if updatePolicy.MaxSurge != nil { + up["max_surge_fixed"] = updatePolicy.MaxSurge.Fixed + up["max_surge_percent"] = updatePolicy.MaxSurge.Percent + } else { + up["max_surge_fixed"] = 0 + up["max_surge_percent"] = 0 + } + if updatePolicy.MaxUnavailable != nil { + up["max_unavailable_fixed"] = updatePolicy.MaxUnavailable.Fixed + up["max_unavailable_percent"] = updatePolicy.MaxUnavailable.Percent + } else { + up["max_unavailable_fixed"] = 0 + up["max_unavailable_percent"] = 0 + } +{{- if ne $.TargetVersionName "ga" }} + 
up["min_ready_sec"] = updatePolicy.MinReadySec +{{- end }} + up["minimal_action"] = updatePolicy.MinimalAction + up["most_disruptive_allowed_action"] = updatePolicy.MostDisruptiveAllowedAction + up["type"] = updatePolicy.Type + up["instance_redistribution_type"] = updatePolicy.InstanceRedistributionType + up["replacement_method"] = updatePolicy.ReplacementMethod + + results = append(results, up) + } + return results +} + +func expandDistributionPolicy(d *schema.ResourceData) *compute.DistributionPolicy { + dpz := d.Get("distribution_policy_zones").(*schema.Set) + dpts := d.Get("distribution_policy_target_shape").(string) + if dpz.Len() == 0 && dpts == "" { + return nil + } + + distributionPolicyZoneConfigs := make([]*compute.DistributionPolicyZoneConfiguration, 0, dpz.Len()) + for _, raw := range dpz.List() { + data := raw.(string) + distributionPolicyZoneConfig := compute.DistributionPolicyZoneConfiguration{ + Zone: "zones/" + data, + } + + distributionPolicyZoneConfigs = append(distributionPolicyZoneConfigs, &distributionPolicyZoneConfig) + } + + return &compute.DistributionPolicy{Zones: distributionPolicyZoneConfigs, TargetShape: dpts} +} + +func flattenDistributionPolicy(distributionPolicy *compute.DistributionPolicy) []string { + zones := make([]string, 0) + + if distributionPolicy != nil { + for _, zone := range distributionPolicy.Zones { + zones = append(zones, tpgresource.GetResourceNameFromSelfLink(zone.Zone)) + } + } + + return zones +} + +func hashZoneFromSelfLinkOrResourceName(value interface{}) int { + parts := strings.Split(value.(string), "/") + resource := parts[len(parts)-1] + + return tpgresource.Hashcode(resource) +} + +func resourceRegionInstanceGroupManagerStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + if err := d.Set("wait_for_instances", false); err != nil { + return nil, fmt.Errorf("Error setting wait_for_instances: %s", err) + } + if err := d.Set("wait_for_instances_status", "STABLE"); err != nil 
{ + return nil, fmt.Errorf("Error setting wait_for_instances_status: %s", err) + } + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/instanceGroupManagers/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceGroupManagers/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_group_manager_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_group_manager_test.go.tmpl new file mode 100644 index 000000000000..e094913cec11 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_group_manager_test.go.tmpl @@ -0,0 +1,1919 @@ +package compute_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + {{- if ne $.TargetVersionName "ga" }} + "github.com/hashicorp/terraform-provider-google/google/envvar" + {{- end }} +) + +func TestAccRegionInstanceGroupManager_basic(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + target := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm1 := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm2 := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_basic(template, target, igm1, igm2), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-no-tp", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccRegionInstanceGroupManager_targetSizeZero(t *testing.T) { + t.Parallel() + + templateName := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igmName := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_targetSizeZero(templateName, igmName), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccRegionInstanceGroupManager_update(t *testing.T) { + t.Parallel() + + template1 := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + target1 := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + target2 := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + template2 := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_update(template1, target1, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_region_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "DO_NOTHING"), + ), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_update2(template1, target1, target2, template2, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_region_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "REPAIR"), + ), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_update3(template1, target1, target2, template2, igm), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_region_instance_group_manager.igm-update", "instance_lifecycle_policy.0.default_action_on_failure", "REPAIR"), + ), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccRegionInstanceGroupManager_updateLifecycle(t *testing.T) { + // Randomness in instance template + acctest.SkipIfVcr(t) + t.Parallel() + + tag1 := "tag1" + tag2 := "tag2" + igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_updateLifecycle(tag1, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_updateLifecycle(tag2, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccRegionInstanceGroupManager_rollingUpdatePolicy(t *testing.T) { + // Randomness in instance template + acctest.SkipIfVcr(t) + t.Parallel() + + igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_rollingUpdatePolicy(igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-rolling-update-policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_rollingUpdatePolicySetToDefault(igm), + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + { + Config: testAccRegionInstanceGroupManager_rollingUpdatePolicy2(igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-rolling-update-policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_rollingUpdatePolicy3(igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-rolling-update-policy", + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccRegionInstanceGroupManager_separateRegions(t *testing.T) { + // Randomness in instance template + acctest.SkipIfVcr(t) + t.Parallel() + + igm1 := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm2 := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_separateRegions(igm1, igm2), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic-2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccRegionInstanceGroupManager_versions(t *testing.T) { + t.Parallel() + + primaryTemplate := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + canaryTemplate := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_versions(primaryTemplate, canaryTemplate, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccRegionInstanceGroupManager_autoHealingPolicies(t 
*testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + target := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + hck := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_autoHealingPolicies(template, target, igm, hck), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_autoHealingPoliciesRemoved(template, target, igm, hck), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccRegionInstanceGroupManager_distributionPolicy(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + zones := []string{"us-central1-a", "us-central1-b"} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_distributionPolicy(template, igm, zones), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: 
testAccRegionInstanceGroupManager_distributionPolicyUpdate(template, igm, zones), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + +func TestAccRegionInstanceGroupManager_stateful(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_stateful(template, network, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_statefulUpdate(template, network, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_statefulRemoved(template, network, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.igm-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} + + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccRegionInstanceGroupManager_stoppedSuspendedTargetSize(t *testing.T) { + t.Parallel() + + template := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + igm := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-igm-%s", 
acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_stoppedSuspendedTargetSize(template, network, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.sr-igm", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + { + Config: testAccRegionInstanceGroupManager_stoppedSuspendedTargetSizeUpdate(template, network, igm), + }, + { + ResourceName: "google_compute_region_instance_group_manager.sr-igm", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status"}, + }, + }, + }) +} +{{- end }} + +func TestAccRegionInstanceGroupManager_APISideListRecordering(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "name": fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_statefulUnordered(context), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccRegionInstanceGroupManager_resourceManagerTags(t *testing.T) { + t.Parallel() + + tag_name := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + template_name := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + igm_name := fmt.Sprintf("tf-test-igm-%s", acctest.RandString(t, 10)) + project_id := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + 
CheckDestroy: testAccCheckRegionInstanceGroupManagerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRegionInstanceGroupManager_resourceManagerTags(template_name, tag_name, igm_name, project_id), + }, + { + ResourceName: "google_compute_region_instance_group_manager.rigm-tags", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"status", "params"}, + }, + }, + }) +} +{{- end }} + +func testAccCheckRegionInstanceGroupManagerDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_region_instance_group_manager" { + continue + } + _, err := config.NewComputeClient(config.UserAgent).RegionInstanceGroupManagers.Get( + rs.Primary.Attributes["project"], rs.Primary.Attributes["region"], rs.Primary.Attributes["name"]).Do() + if err == nil { + return fmt.Errorf("RegionInstanceGroupManager still exists") + } + } + + return nil + } +} + +func testAccRegionInstanceGroupManager_basic(template, target, igm1, igm2 string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + 
version { + name = "primary" + instance_template = google_compute_instance_template.igm-basic.self_link + } + + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + target_size = 2 + list_managed_instances_results = "PAGINATED" +} + +resource "google_compute_region_instance_group_manager" "igm-no-tp" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "primary" + instance_template = google_compute_instance_template.igm-basic.self_link + } + + base_instance_name = "tf-test-igm-no-tp" + region = "us-central1" + target_size = 2 +} +`, template, target, igm1, igm2) +} + +func testAccRegionInstanceGroupManager_targetSizeZero(template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "primary" + instance_template = google_compute_instance_template.igm-basic.self_link + } + + base_instance_name = "tf-test-igm-basic" + region = "us-central1" +} +`, template, igm) +} + +func testAccRegionInstanceGroupManager_update(template, target, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { 
+ source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_region_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + + version { + name = "primary" + instance_template = google_compute_instance_template.igm-update.self_link + } + + target_pools = [google_compute_target_pool.igm-update.self_link] + base_instance_name = "tf-test-igm-update" + region = "us-central1" + target_size = 2 + named_port { + name = "customhttp" + port = 8080 + } + + all_instances_config { + metadata = { + foo = "bar" + } + labels = { + doo = "dad" + } + } + + instance_lifecycle_policy { + force_update_on_repair = "YES" + default_action_on_failure = "DO_NOTHING" + } +} +`, template, target, igm) +} + +// Change IGM's instance template and target size +func testAccRegionInstanceGroupManager_update2(template1, target1, target2, template2, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + 
+resource "google_compute_target_pool" "igm-update2" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_template" "igm-update2" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_region_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-update2.self_link + name = "primary" + } + + target_pools = [ + google_compute_target_pool.igm-update.self_link, + google_compute_target_pool.igm-update2.self_link, + ] + base_instance_name = "tf-test-igm-update" + region = "us-central1" + target_size = 3 + list_managed_instances_results = "PAGINATED" + named_port { + name = "customhttp" + port = 8080 + } + named_port { + name = "customhttps" + port = 8443 + } + + all_instances_config { + metadata = { + doo = "dad" + } + labels = { + foo = "bar" + } + } + + instance_lifecycle_policy { + force_update_on_repair = "NO" + default_action_on_failure = "REPAIR" + } +} +`, template1, target1, target2, template2, igm) +} + +// Remove target pools +func testAccRegionInstanceGroupManager_update3(template1, target1, target2, template2, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-update" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + 
network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-update" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_target_pool" "igm-update2" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_instance_template" "igm-update2" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_region_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-update2.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-update" + region = "us-central1" + target_size = 3 + list_managed_instances_results = "PAGINATED" + named_port { + name = "customhttp" + port = 8080 + } + named_port { + name = "customhttps" + port = 8443 + } +} +`, template1, target1, target2, template2, igm) +} + +func testAccRegionInstanceGroupManager_updateLifecycle(tag, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-update" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["%s"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = 
["userinfo-email", "compute-ro", "storage-ro"] + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_region_instance_group_manager" "igm-update" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-update.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-update" + region = "us-central1" + target_size = 2 + named_port { + name = "customhttp" + port = 8080 + } +} +`, tag, igm) +} + +func testAccRegionInstanceGroupManager_separateRegions(igm1, igm2 string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-basic" + region = "us-central1" + target_size = 2 +} + +resource "google_compute_region_instance_group_manager" "igm-basic-2" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-basic-2" + region = "us-west1" + target_size = 2 +} +`, igm1, igm2) +} + +func testAccRegionInstanceGroupManager_autoHealingPolicies(template, target, igm, hck string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" 
{ + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + region = "us-central1" + target_size = 2 + auto_healing_policies { + health_check = google_compute_http_health_check.zero.self_link + initial_delay_sec = "10" + } +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, template, target, igm, hck) +} + +func testAccRegionInstanceGroupManager_autoHealingPoliciesRemoved(template, target, igm, hck string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource 
"google_compute_target_pool" "igm-basic" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + session_affinity = "CLIENT_IP_PROTO" +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + target_pools = [google_compute_target_pool.igm-basic.self_link] + base_instance_name = "tf-test-igm-basic" + region = "us-central1" + target_size = 2 +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, template, target, igm, hck) +} + +func testAccRegionInstanceGroupManager_versions(primaryTemplate string, canaryTemplate string, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-primary" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_instance_template" "igm-canary" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test region instance group manager" + name = "%s" + base_instance_name = "tf-test-igm-basic" + region = 
"us-central1" + target_size = 2 + + version { + name = "primary" + instance_template = google_compute_instance_template.igm-primary.self_link + } + + version { + name = "canary" + instance_template = google_compute_instance_template.igm-canary.self_link + target_size { + fixed = 1 + } + } +} +`, primaryTemplate, canaryTemplate, igm) +} + +func testAccRegionInstanceGroupManager_distributionPolicy(template, igm string, zones []string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-basic" + region = "us-central1" + target_size = 2 + distribution_policy_zones = ["%s"] + distribution_policy_target_shape = "ANY" + + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } +} +`, template, igm, strings.Join(zones, "\",\"")) +} + +func testAccRegionInstanceGroupManager_distributionPolicyUpdate(template, igm string, zones []string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = 
true + boot = true + } + network_interface { + network = "default" + } +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-basic" + region = "us-central1" + target_size = 2 + distribution_policy_zones = ["%s"] + distribution_policy_target_shape = "BALANCED" + + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } +} +`, template, igm, strings.Join(zones, "\",\"")) +} + +func testAccRegionInstanceGroupManager_rollingUpdatePolicy(igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-rolling-update-policy" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["terraform-testing"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_region_instance_group_manager" "igm-rolling-update-policy" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-rolling-update-policy.self_link + name = "primary" + } + base_instance_name = "tf-test-igm-rolling-update" + region = "us-central1" + target_size = 4 + distribution_policy_zones = ["us-central1-a", "us-central1-f"] + + update_policy { + type = "PROACTIVE" + minimal_action = "REPLACE" + max_surge_fixed = 2 + max_unavailable_fixed = 2 + } + + named_port { + name = "customhttp" + 
port = 8080 + } +} +`, igm) +} + +func testAccRegionInstanceGroupManager_rollingUpdatePolicySetToDefault(igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-rolling-update-policy" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["terraform-testing"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_region_instance_group_manager" "igm-rolling-update-policy" { + description = "Terraform test instance group manager" + name = "%s" + version { + instance_template = google_compute_instance_template.igm-rolling-update-policy.self_link + name = "primary" + } + base_instance_name = "tf-test-igm-rolling-update" + region = "us-central1" + target_size = 4 + distribution_policy_zones = ["us-central1-a", "us-central1-f"] + + update_policy { + type = "PROACTIVE" + instance_redistribution_type = "PROACTIVE" + minimal_action = "REPLACE" + max_surge_fixed = 2 + max_unavailable_fixed = 2 + } + + named_port { + name = "customhttp" + port = 8080 + } +} +`, igm) +} + +func testAccRegionInstanceGroupManager_rollingUpdatePolicy2(igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-rolling-update-policy" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["terraform-testing"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + lifecycle { + create_before_destroy = true + } +} + +resource 
"google_compute_region_instance_group_manager" "igm-rolling-update-policy" { + description = "Terraform test instance group manager" + name = "%s" + version { + name = "primary" + instance_template = google_compute_instance_template.igm-rolling-update-policy.self_link + } + base_instance_name = "tf-test-igm-rolling-update" + region = "us-central1" + distribution_policy_zones = ["us-central1-a", "us-central1-f"] + target_size = 3 + update_policy { + type = "PROACTIVE" + instance_redistribution_type = "NONE" + minimal_action = "REPLACE" + max_surge_fixed = 2 + max_unavailable_fixed = 0 + } + named_port { + name = "customhttp" + port = 8080 + } +} +`, igm) +} + +func testAccRegionInstanceGroupManager_rollingUpdatePolicy3(igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "igm-rolling-update-policy" { + machine_type = "e2-medium" + can_ip_forward = false + tags = ["terraform-testing"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_compute_region_instance_group_manager" "igm-rolling-update-policy" { + description = "Terraform test instance group manager" + name = "%s" + version { + name = "primary" + instance_template = google_compute_instance_template.igm-rolling-update-policy.self_link + } + base_instance_name = "tf-test-igm-rolling-update" + region = "us-central1" + distribution_policy_zones = ["us-central1-a", "us-central1-f"] + target_size = 3 + update_policy { + type = "PROACTIVE" + instance_redistribution_type = "NONE" + minimal_action = "REPLACE" + most_disruptive_allowed_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 2 +{{- if ne $.TargetVersionName "ga" }} + min_ready_sec = 10 +{{- end }} + replacement_method = "RECREATE" 
+ } + named_port { + name = "customhttp" + port = 8080 + } +} +`, igm) +} + +func testAccRegionInstanceGroupManager_stateful(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} +resource "google_compute_network" "igm-basic" { + name = "%s" +} +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "stateful-disk2" + } + network_interface { + network = "default" + } + network_interface { + network = google_compute_network.igm-basic.self_link + } +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-basic" + region = "us-central1" + target_size = 2 + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } + stateful_disk { + device_name = "stateful-disk" + delete_rule = "NEVER" + } + stateful_internal_ip { + interface_name = "nic0" + delete_rule = "ON_PERMANENT_INSTANCE_DELETION" + } + + stateful_external_ip { + interface_name = "nic0" + delete_rule = "NEVER" + } + + stateful_external_ip { + interface_name = "nic1" + delete_rule = "NEVER" + } +} +`, network, template, igm) +} + +func testAccRegionInstanceGroupManager_statefulUpdate(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = 
"debian-cloud" +} +resource "google_compute_network" "igm-basic" { + name = "%s" +} +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "stateful-disk2" + } + network_interface { + network = "default" + } + network_interface { + network = google_compute_network.igm-basic.self_link + } +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-basic" + region = "us-central1" + target_size = 2 + + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } + stateful_disk { + device_name = "stateful-disk" + delete_rule = "NEVER" + } + stateful_disk { + device_name = "stateful-disk2" + delete_rule = "ON_PERMANENT_INSTANCE_DELETION" + } + stateful_internal_ip { + interface_name = "nic0" + delete_rule = "ON_PERMANENT_INSTANCE_DELETION" + } + + stateful_external_ip { + interface_name = "nic0" + delete_rule = "NEVER" + } +} +`, network, template, igm) +} + +func testAccRegionInstanceGroupManager_statefulRemoved(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} +resource "google_compute_network" "igm-basic" { + name = "%s" +} +resource "google_compute_instance_template" "igm-basic" { + name = "%s" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + 
source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "stateful-disk2" + } + network_interface { + network = "default" + } + network_interface { + network = google_compute_network.igm-basic.self_link + } +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.igm-basic.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-basic" + region = "us-central1" + target_size = 2 + + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } +} +`, network, template, igm) +} + +func testAccRegionInstanceGroupManager_statefulUnordered(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "igm-basic" { + name = "%{name}" +} + +resource "google_compute_instance_template" "igm-basic" { + name = "%{name}" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + device_name = "stateful-disk2" + } + network_interface { + network = "default" + } + network_interface { + network = google_compute_network.igm-basic.self_link + } +} + +resource "google_compute_region_instance_group_manager" "igm-basic" { + description = "Terraform test instance group manager" + name = "%{name}" + + version { + instance_template = 
google_compute_instance_template.igm-basic.self_link + name = "primary" + } + + base_instance_name = "tf-test-igm-basic" + region = "us-central1" + target_size = 2 + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } + stateful_disk { + device_name = "stateful-disk" + delete_rule = "NEVER" + } + + // stateful_internal_ip blocks are intentionally out of lexical order (for interface_name) + + stateful_internal_ip { + interface_name = "nic1" + delete_rule = "ON_PERMANENT_INSTANCE_DELETION" + } + + stateful_internal_ip { + interface_name = "nic0" + delete_rule = "ON_PERMANENT_INSTANCE_DELETION" + } + + // stateful_external_ip blocks are intentionally out of lexical order (for interface_name) + + stateful_external_ip { + interface_name = "nic1" + delete_rule = "NEVER" + } + + stateful_external_ip { + interface_name = "nic0" + delete_rule = "NEVER" + } + +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccRegionInstanceGroupManager_stoppedSuspendedTargetSize(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "sr-igm" { + name = "%s" +} + +resource "google_compute_instance_template" "sr-igm" { + name = "%s" + machine_type = "e2-medium" + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + network_interface { + network = "default" + } +} + +resource "google_compute_region_instance_group_manager" "sr-igm" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + base_instance_name = "tf-test-sr-igm" + region = "us-central1" + target_size = 2 + distribution_policy_target_shape = 
"ANY_SINGLE_ZONE" + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } + standby_policy { + initial_delay_sec = 20 + mode = "SCALE_OUT_POOL" + } + target_suspended_size = 2 + target_stopped_size = 1 +} +`, network, template, igm) +} + +func testAccRegionInstanceGroupManager_stoppedSuspendedTargetSizeUpdate(network, template, igm string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "sr-igm" { + name = "%s" +} + +resource "google_compute_instance_template" "sr-igm" { + name = "%s" + machine_type = "e2-medium" + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "stateful-disk" + } + network_interface { + network = "default" + } +} + +resource "google_compute_region_instance_group_manager" "sr-igm" { + description = "Terraform test instance group manager" + name = "%s" + + version { + instance_template = google_compute_instance_template.sr-igm.self_link + name = "primary" + } + + base_instance_name = "tf-test-sr-igm" + region = "us-central1" + target_size = 2 + distribution_policy_target_shape = "ANY_SINGLE_ZONE" + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } + standby_policy { + initial_delay_sec = 30 + } + target_suspended_size = 1 + target_stopped_size = 2 +} +`, network, template, igm) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccRegionInstanceGroupManager_resourceManagerTags(template_name, tag_name, igm_name, project_id string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "rigm-tags" { + name = "%s" + 
description = "Terraform test instance template." + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + } +} + +resource "google_tags_tag_key" "rigm-key" { + description = "Terraform test tag key." + parent = "projects/%s" + short_name = "%s" +} + +resource "google_tags_tag_value" "rigm-value" { + description = "Terraform test tag value." + parent = "tagKeys/${google_tags_tag_key.rigm-key.name}" + short_name = "%s" +} + +resource "google_compute_region_instance_group_manager" "rigm-tags" { + description = "Terraform test instance group manager." + name = "%s" + base_instance_name = "tf-rigm-tags-test" + region = "us-central1" + target_size = 0 + + version { + name = "prod" + instance_template = google_compute_instance_template.rigm-tags.self_link + } + + params { + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.rigm-key.name}" = "tagValues/${google_tags_tag_value.rigm-value.name}" + } + } +} +`, template_name, project_id, tag_name, tag_name, igm_name) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl new file mode 100644 index 000000000000..68df06e180b7 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl @@ -0,0 +1,1466 @@ + +package compute + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + 
"github.com/hashicorp/terraform-provider-google/google/verify" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeRegionInstanceTemplate() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRegionInstanceTemplateCreate, + Read: resourceComputeRegionInstanceTemplateRead, + Update: resourceComputeRegionInstanceTemplateUpdate, + Delete: resourceComputeRegionInstanceTemplateDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRegionInstanceTemplateImportState, + }, + SchemaVersion: 1, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.DefaultProviderRegion, + resourceComputeInstanceTemplateSourceImageCustomizeDiff, + resourceComputeInstanceTemplateScratchDiskCustomizeDiff, + resourceComputeInstanceTemplateBootDiskCustomizeDiff, + tpgresource.SetLabelsDiff, + ), + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + // A compute region instance template is more or less a subset of a compute + // instance. Please attempt to maintain consistency with the + // resource_compute_instance schema when updating this one. + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The region in which the instance template is located. If it is not provided, the provider region is used.`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: []string{"name_prefix"}, + ValidateFunc: verify.ValidateGCEName, + Description: `The name of the instance template. 
If you leave this blank, Terraform will auto-generate a unique name.`, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Creates a unique name beginning with the specified prefix. Conflicts with name.`, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://cloud.google.com/compute/docs/reference/latest/instanceTemplates#resource + // uuid is 26 characters, limit the prefix to 37. + value := v.(string) + if len(value) > 37 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 37 characters, name is limited to 63", k)) + } + return + }, + }, + + "disk": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `Disks to attach to instances created from this template. This can be specified multiple times for multiple disks.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete": { + Type: schema.TypeBool, + Optional: true, + Default: true, + ForceNew: true, + Description: `Whether or not the disk should be auto-deleted. This defaults to true.`, + }, + + "boot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Computed: true, + Description: `Indicates that this is a boot disk.`, + }, + + "device_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `A unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. If not specified, the server chooses a default device name to apply to this disk.`, + }, + + "disk_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the disk. When not provided, this defaults to the name of the instance.`, + }, + + "disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The size of the image in gigabytes. 
If not specified, it will inherit the size of its base image. For SCRATCH disks, the size must be one of 375 or 3000 GB, with a default of 375 GB.`, + }, + + "disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The Google Compute Engine disk type. Such as "pd-ssd", "local-ssd", "pd-balanced" or "pd-standard".`, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `A set of key/value label pairs to assign to disks,`, + }, + + "provisioned_iops": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `Indicates how many IOPS to provision for the disk. This sets the number of I/O operations per second that the disk can handle. Values must be between 10,000 and 120,000. For more details, see the [Extreme persistent disk documentation](https://cloud.google.com/compute/docs/disks/extreme-persistent-disk).`, + }, + + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, + }, + + "source_image": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The image from which to initialize this disk. This can be one of: the image's self_link, projects/{project}/global/images/{image}, projects/{project}/global/images/family/{family}, global/images/{image}, global/images/family/{family}, family/{family}, {project}/{family}, {project}/{image}, {family}, or {image}. 
~> Note: Either source or source_image is required when creating a new instance except for when creating a local SSD.`, + }, + "source_image_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The customer-supplied encryption key of the source +image. Required if the source image is protected by a +customer-supplied encryption key. + +Instance templates do not store customer-supplied +encryption keys, so you cannot create disks for +instances in a managed instance group if the source +images are encrypted with your own keys.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account being used for the encryption +request for the given KMS key. If absent, the Compute +Engine default service account is used.`, + }, + "kms_key_self_link": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The self link of the encryption key that is stored in +Google Cloud KMS.`, + }, + }, + }, + }, + "source_snapshot": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The source snapshot to create this disk. When creating +a new instance, one of initializeParams.sourceSnapshot, +initializeParams.sourceImage, or disks.source is +required except for local SSD.`, + }, + "source_snapshot_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The customer-supplied encryption key of the source snapshot.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The service account being used for the encryption +request for the given KMS key. 
If absent, the Compute +Engine default service account is used.`, + }, + "kms_key_self_link": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The self link of the encryption key that is stored in +Google Cloud KMS.`, + }, + }, + }, + }, + + "interface": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `Specifies the disk interface to use for attaching this disk.`, + }, + + "mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The mode in which to attach this disk, either READ_WRITE or READ_ONLY. If you are attaching or creating a boot disk, this must read-write mode.`, + }, + + "source": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name (not self_link) of the disk (such as those managed by google_compute_disk) to attach. ~> Note: Either source or source_image is required when creating a new instance except for when creating a local SSD.`, + }, + + "type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The type of Google Compute Engine disk, can be either "SCRATCH" or "PERSISTENT".`, + }, + + "disk_encryption_key": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `Encrypts or decrypts a disk using a customer-supplied encryption key.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_self_link": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + Description: `The self link of the encryption key that is stored in Google Cloud KMS.`, + }, + }, + }, + }, + + "resource_policies": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `A list (short name or id) of resource policies to attach to this disk. 
Currently a max of 1 resource policy is supported.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareResourceNames, + }, + }, + }, + }, + }, + + "machine_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The machine type to create. To create a machine with a custom type (such as extended memory), format the value like custom-VCPUS-MEM_IN_MB like custom-6-20480 for 6 vCPU and 20GB of RAM.`, + }, + + "can_ip_forward": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + Description: `Whether to allow sending and receiving of packets with non-matching source or destination IPs. This defaults to false.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A brief description of this resource.`, + }, + +{{ if ne $.TargetVersionName `ga` -}} + "enable_display": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Enable Virtual Displays on this instance. 
Note: allow_stopping_for_update must be set to true in order to update this field.`, + }, +{{- end }} + + "instance_description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A description of the instance.`, + }, + + "metadata": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Metadata key/value pairs to make available from within instances created from this template.`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "partner_metadata": { + Type: schema.TypeMap, + Optional: true, + DiffSuppressFunc: ComparePartnerMetadataDiff, + DiffSuppressOnRefresh: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Partner Metadata Map made available within the instance.`, + }, + {{- end }} + + "metadata_startup_script": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `An alternative to using the startup-script metadata key, mostly to match the compute_instance resource. This replaces the startup-script metadata key on the created instance and thus the two mechanisms are not allowed to be used simultaneously.`, + }, + + "metadata_fingerprint": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The unique fingerprint of the metadata.`, + }, + + "network_performance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Configures network performance settings for the instance. If not specified, the instance will be created with its default network performance configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_egress_bandwidth_tier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"TIER_1", "DEFAULT"}, false), + Description: `The egress bandwidth tier to enable. 
Possible values:TIER_1, DEFAULT`, + }, + }, + }, + }, + + "network_interface": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Networks to attach to instances created from this template. This can be specified multiple times for multiple networks.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the network to attach this interface to. Use network attribute for Legacy or Auto subnetted networks and subnetwork for custom subnetted networks.`, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the subnetwork to attach this interface to. The subnetwork must exist in the same region this instance will be created in. Either network or subnetwork must be provided.`, + }, + + "subnetwork_project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The ID of the project in which the subnetwork belongs. If it is not provided, the provider project is used.`, + }, + + "network_ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The private IP address to assign to the instance. If empty, the address will be automatically assigned.`, + }, + + "name": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The name of the network_interface.`, + }, + "nic_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"GVNIC", "VIRTIO_NET"}, false), + Description: `The type of vNIC to be used on this interface. 
Possible values:GVNIC, VIRTIO_NET`, + }, + "access_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Access configurations, i.e. IPs via which this instance can be accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet (this means that ssh provisioners will not work unless you are running Terraform can send traffic to the instance's network (e.g. via tunnel or because it is running on another cloud instance on that network). This block can be repeated multiple times.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "nat_ip": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The IP address that will be 1:1 mapped to the instance's network ip. If not given, one will be generated.`, + }, + "network_tier": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The networking tier used for configuring this instance template. This field can take the following values: PREMIUM, STANDARD, FIXED_STANDARD. If this field is not specified, it is assumed to be PREMIUM.`, + }, + // Possibly configurable- this was added so we don't break if it's inadvertently set + "public_ptr_domain_name": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The DNS domain name for the public PTR record.The DNS domain name for the public PTR record.`, + }, + }, + }, + }, + + "alias_ip_range": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `An array of alias IP ranges for this network interface. Can only be specified for network interfaces on subnet-mode networks.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_cidr_range": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, + Description: `The IP CIDR range represented by this alias IP range. 
This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. At the time of writing only a netmask (e.g. /24) may be supplied, with a CIDR format resulting in an API error.`, + }, + "subnetwork_range_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The subnetwork secondary range name specifying the secondary range from which to allocate the IP CIDR range for this alias IP range. If left unspecified, the primary range of the subnetwork will be used.`, + }, + }, + }, + }, + + "stack_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"IPV4_ONLY", "IPV4_IPV6", ""}, false), + Description: `The stack type for this network interface to identify whether the IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be used.`, + }, + + "ipv6_access_type": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `One of EXTERNAL, INTERNAL to indicate whether the IP can be accessed from the Internet. This field is always inherited from its subnetwork.`, + }, + + "ipv6_access_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `An array of IPv6 access configurations for this interface. Currently, only one IPv6 access config, DIRECT_IPV6, is supported. If there is no ipv6AccessConfig specified, then this instance will have no external IPv6 Internet access.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_tier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The service-level to be provided for IPv6 traffic when the subnet has an external subnet. 
Only PREMIUM tier is valid for IPv6`, + }, + // Possibly configurable- this was added so we don't break if it's inadvertently set + // (assuming the same ass access config) + "public_ptr_domain_name": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The domain name to be used when creating DNSv6 records for the external IPv6 ranges.`, + }, + "external_ipv6": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The first IPv6 address of the external IPv6 range associated with this instance, prefix length is stored in externalIpv6PrefixLength in ipv6AccessConfig. The field is output only, an IPv6 address from a subnetwork associated with the instance will be allocated dynamically.`, + }, + "external_ipv6_prefix_length": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The prefix length of the external IPv6 range.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The name of this access configuration.`, + }, + }, + }, + }, + "internal_ipv6_prefix_length": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The prefix length of the primary internal IPv6 range.`, + }, + "ipv6_address": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `An IPv6 internal network address for this network interface. If not specified, Google Cloud will automatically assign an internal IPv6 address from the instance's subnetwork.`, + }, + "queue_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The networking queue count that's specified by users for the network interface. Both Rx and Tx queues will be set to this number. 
It will be empty if not specified.`, + }, + }, + }, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `A map of resource manager tags. + Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, + }, + + "scheduling": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `The scheduling strategy to use.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "preemptible": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Default: false, + ForceNew: true, + Description: `Allows instance to be preempted. This defaults to false.`, + }, + + "automatic_restart": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Default: true, + ForceNew: true, + Description: `Specifies whether the instance should be automatically restarted if it is terminated by Compute Engine (not terminated by a user). 
This defaults to true.`, + }, + + "on_host_maintenance": { + Type: schema.TypeString, + Optional: true, + Computed: true, + AtLeastOneOf: schedulingInstTemplateKeys, + ForceNew: true, + Description: `Defines the maintenance behavior for this instance.`, + }, + + "node_affinities": { + Type: schema.TypeSet, + Optional: true, + AtLeastOneOf: schedulingInstTemplateKeys, + ForceNew: true, + Elem: instanceSchedulingNodeAffinitiesElemSchema(), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), + Description: `Specifies node affinities or anti-affinities to determine which sole-tenant nodes your instances and managed instance groups will use as host systems.`, + }, + "min_node_cpus": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Description: `Minimum number of cpus for the instance.`, + }, + "provisioning_model": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Description: `Whether the instance is spot. If this is set as SPOT.`, + }, + "instance_termination_action": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: schedulingInstTemplateKeys, + Description: `Specifies the action GCE should take when SPOT VM is preempted.`, + }, +{{- if ne $.TargetVersionName "ga" }} + "max_run_duration" : { + Type: schema.TypeList, + Optional: true, + Description: `The timeout for new network connections to hosts.`, + MaxItems: 1, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "seconds": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Span of time at a resolution of a second. +Must be from 0 to 315,576,000,000 inclusive.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Span of time that's a fraction of a second at nanosecond +resolution. 
Durations less than one second are represented +with a 0 seconds field and a positive nanos field. Must +be from 0 to 999,999,999 inclusive.`, + }, + }, + }, + }, + "on_instance_stop_action": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ForceNew: true, + Description: `Defines the behaviour for instances with the instance_termination_action.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "discard_local_ssd": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, the contents of any attached Local SSD disks will be discarded.`, + Default: false, + ForceNew: true, + }, + }, + }, + }, + "maintenance_interval" : { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies the frequency of planned maintenance events. The accepted values are: PERIODIC`, + }, +{{- end }} + "local_ssd_recovery_timeout" : { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Specifies the maximum amount of time a Local Ssd Vm should wait while + recovery of the Local Ssd state is attempted. Its value should be in + between 0 and 168 hours with hour granularity and the default value being 1 + hour.`, + + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "seconds": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `Span of time at a resolution of a second. +Must be from 0 to 315,576,000,000 inclusive.`, + }, + "nanos": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `Span of time that's a fraction of a second at nanosecond +resolution. Durations less than one second are represented +with a 0 seconds field and a positive nanos field. 
Must +be from 0 to 999,999,999 inclusive.`, + }, + }, + }, + }, + }, + }, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The URI of the created resource.`, + }, + + "service_account": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Service account to attach to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "email": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The service account e-mail address. If not given, the default Google Compute Engine service account is used.`, + }, + + "scopes": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Description: `A list of service scopes. Both OAuth2 URLs and gcloud short names are supported. To allow full access to all Cloud APIs, use the cloud-platform scope.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return tpgresource.CanonicalizeServiceScope(v.(string)) + }, + }, + Set: tpgresource.StringScopeHashcode, + }, + }, + }, + }, + + "shielded_instance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Enable Shielded VM on this instance. Shielded VM provides verifiable integrity to prevent against malware and rootkits. Defaults to disabled. Note: shielded_instance_config can only be used with boot images with shielded vm support.`, + // Since this block is used by the API based on which + // image being used, the field needs to be marked as Computed. 
+ Computed: true, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress(""), + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: false, + ForceNew: true, + Description: `Verify the digital signature of all boot components, and halt the boot process if signature verification fails. Defaults to false.`, + }, + + "enable_vtpm": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: true, + ForceNew: true, + Description: `Use a virtualized trusted platform module, which is a specialized computer chip you can use to encrypt objects like keys and certificates. Defaults to true.`, + }, + + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: shieldedInstanceTemplateConfigKeys, + Default: true, + ForceNew: true, + Description: `Compare the most recent boot measurements to the integrity policy baseline and return a pair of pass/fail results depending on whether they match or not. Defaults to true.`, + }, + }, + }, + }, + "confidential_instance_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The Confidential VM config being used by the instance. on_host_maintenance has to be set to TERMINATE or this will fail to create.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + {{- if eq $.TargetVersionName "ga" }} + "enable_confidential_compute": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Defines whether the instance should have confidential compute enabled.`, + }, + {{- else }} + "enable_confidential_compute": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Defines whether the instance should have confidential compute enabled. 
Field will be deprecated in a future release.`, + AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, + }, + "confidential_instance_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: ` + Specifies which confidential computing technology to use. + This could be one of the following values: SEV, SEV_SNP. + If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, + AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, + }, + {{- end }} + }, + }, + }, + "advanced_machine_features": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Controls for advanced machine-related behavior features.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_nested_virtualization": { + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + Description: `Whether to enable nested virtualization or not.`, + }, + "threads_per_core": { + Type: schema.TypeInt, + Optional: true, + Computed: false, + ForceNew: true, + Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, + }, + "visible_core_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The number of physical cores to expose to an instance. Multiply by the number of threads per core to compute the total number of virtual CPUs to expose to the instance. 
If unset, the number of cores is inferred from the instance\'s nominal CPU count and the underlying platform\'s SMT width.`, + }, + }, + }, + }, + "guest_accelerator": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `List of the type and count of accelerator cards attached to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The number of the guest accelerator cards exposed to this instance.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The accelerator type resource to expose to this instance. E.g. nvidia-tesla-k80.`, + }, + }, + }, + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Specifies a minimum CPU platform. Applicable values are the friendly names of CPU platforms, such as Intel Haswell or Intel Skylake.`, + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `Tags to attach to the instance.`, + }, + + "tags_fingerprint": { + Type: schema.TypeString, + Computed: true, + ForceNew: true, + Description: `The unique fingerprint of the tags.`, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + Description: `A set of key/value label pairs to assign to instances created from this template, + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. 
+ Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Set: schema.HashString, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Set: schema.HashString, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "resource_policies": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `A list of self_links of resource policies to attach to the instance. Currently a max of 1 resource policy is supported.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareResourceNames, + }, + }, + + "reservation_affinity": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Specifies the reservations that this instance can consume from.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"ANY_RESERVATION", "SPECIFIC_RESERVATION", "NO_RESERVATION"}, false), + Description: `The type of reservation from which this instance can consume resources.`, + }, + + "specific_reservation": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + Description: `Specifies the label selector for the reservation to use.`, + + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Corresponds to the label key of a reservation resource. 
To target a SPECIFIC_RESERVATION by name, specify compute.googleapis.com/reservation-name as the key and specify the name of your reservation as the only value.`, + }, + "values": { + Type: schema.TypeList, + Elem: &schema.Schema{Type: schema.TypeString}, + Required: true, + ForceNew: true, + Description: `Corresponds to the label values of a reservation resource.`, + }, + }, + }, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRegionInstanceTemplateCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + disks, err := buildDisks(d, config) + if err != nil { + return err + } + + metadata, err := resourceInstanceMetadata(d) + if err != nil { + return err + } + + {{ if ne $.TargetVersionName `ga` -}} + PartnerMetadata, err := resourceInstancePartnerMetadata(d) + if err != nil { + return err + } + {{- end }} + + networks, err := expandNetworkInterfaces(d, config) + if err != nil { + return err + } + + scheduling, err := expandResourceComputeInstanceTemplateScheduling(d, config) + if err != nil { + return err + } + networkPerformanceConfig, err := expandNetworkPerformanceConfig(d, config) + if err != nil { + return nil + } + reservationAffinity, err := expandReservationAffinity(d) + if err != nil { + return err + } + resourcePolicies := expandInstanceTemplateResourcePolicies(d, "resource_policies") + + instanceProperties := &compute.InstanceProperties{ + CanIpForward: d.Get("can_ip_forward").(bool), + Description: d.Get("instance_description").(string), + GuestAccelerators: expandInstanceTemplateGuestAccelerators(d, config), + MachineType: d.Get("machine_type").(string), + MinCpuPlatform: 
d.Get("min_cpu_platform").(string), + Disks: disks, + Metadata: metadata, + {{- if ne $.TargetVersionName "ga" }} + PartnerMetadata: PartnerMetadata, + {{- end }} + NetworkInterfaces: networks, + NetworkPerformanceConfig: networkPerformanceConfig, + Scheduling: scheduling, + ServiceAccounts: expandServiceAccounts(d.Get("service_account").([]interface{})), + Tags: resourceInstanceTags(d), + ConfidentialInstanceConfig: expandConfidentialInstanceConfig(d), + ShieldedInstanceConfig: expandShieldedVmConfigs(d), + AdvancedMachineFeatures: expandAdvancedMachineFeatures(d), +{{- if ne $.TargetVersionName "ga" }} + DisplayDevice: expandDisplayDevice(d), +{{- end }} + ResourcePolicies: resourcePolicies, + ReservationAffinity: reservationAffinity, + } + + if _, ok := d.GetOk("effective_labels"); ok { + instanceProperties.Labels = tpgresource.ExpandEffectiveLabels(d) + } + + if _, ok := d.GetOk("resource_manager_tags"); ok { + instanceProperties.ResourceManagerTags = tpgresource.ExpandStringMap(d, "resource_manager_tags") + } + + var itName string + if v, ok := d.GetOk("name"); ok { + itName = v.(string) + } else if v, ok := d.GetOk("name_prefix"); ok { + itName = id.PrefixedUniqueId(v.(string)) + } else { + itName = id.UniqueId() + } + + instanceTemplate := make(map[string]interface{}) + instanceTemplate["description"] = d.Get("description").(string) + instanceTemplate["properties"] = instanceProperties + instanceTemplate["name"] = itName + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceTemplates") + if err != nil { + return err + } + + op, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: instanceTemplate, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RegionInstanceTemplate: %s", err) + } + + // 
Store the ID now + d.SetId(fmt.Sprintf("projects/%s/regions/%s/instanceTemplates/%s", project, region, instanceTemplate["name"])) + + err = ComputeOperationWaitTime(config, op, project, "Creating Region Instance Template", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + return resourceComputeRegionInstanceTemplateRead(d, meta) +} + +func resourceComputeRegionInstanceTemplateUpdate(d *schema.ResourceData, meta interface{}) error { + // Only the field "labels" and "terraform_labels" is mutable + return resourceComputeRegionInstanceTemplateRead(d, meta) +} + +func resourceComputeRegionInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + splits := strings.Split(d.Id(), "/") + name := splits[len(splits)-1] + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceTemplates/" + name) + if err != nil { + return err + } + + {{ if ne $.TargetVersionName `ga` -}} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"view": "FULL"}) + if err != nil { + return err + } + {{- end }} + + instanceTemplate, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRegionInstanceTemplate %q", d.Id())) + } + + instancePropertiesMap := instanceTemplate["properties"] + + instancePropertiesObj, err := json.Marshal(instancePropertiesMap) + if err != nil { + fmt.Println(err) + return err + } + + instanceProperties := compute.InstanceProperties{} + + if err := 
json.Unmarshal(instancePropertiesObj, &instanceProperties); err != nil { + fmt.Println(err) + return err + } + + // Set the metadata fingerprint if there is one. + if instanceProperties.Metadata != nil { + if err = d.Set("metadata_fingerprint", instanceProperties.Metadata.Fingerprint); err != nil { + return fmt.Errorf("Error setting metadata_fingerprint: %s", err) + } + + md := instanceProperties.Metadata + + _md := flattenMetadataBeta(md) + + if script, scriptExists := d.GetOk("metadata_startup_script"); scriptExists { + if err = d.Set("metadata_startup_script", script); err != nil { + return fmt.Errorf("Error setting metadata_startup_script: %s", err) + } + + delete(_md, "startup-script") + } + + if err = d.Set("metadata", _md); err != nil { + return fmt.Errorf("Error setting metadata: %s", err) + } + } + + {{ if ne $.TargetVersionName `ga` -}} + if instanceProperties.PartnerMetadata != nil { + partnerMetadata, err := flattenPartnerMetadata(instanceProperties.PartnerMetadata) + if err != nil { + return fmt.Errorf("Error parsing partner metadata: %s", err) + } + if err = d.Set("partner_metadata", partnerMetadata); err != nil { + return fmt.Errorf("Error setting partner metadata: %s", err) + } + } + {{- end }} + + // Set the tags fingerprint if there is one. 
+ if instanceProperties.Tags != nil { + if err = d.Set("tags_fingerprint", instanceProperties.Tags.Fingerprint); err != nil { + return fmt.Errorf("Error setting tags_fingerprint: %s", err) + } + } else { + if err := d.Set("tags_fingerprint", ""); err != nil { + return fmt.Errorf("Error setting tags_fingerprint: %s", err) + } + } + if instanceProperties.Labels != nil { + if err := tpgresource.SetLabels(instanceProperties.Labels, d, "labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + } + if err := tpgresource.SetLabels(instanceProperties.Labels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", instanceProperties.Labels); err != nil { + return fmt.Errorf("Error setting effective_labels: %s", err) + } + if err = d.Set("self_link", instanceTemplate["selfLink"]); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err = d.Set("name", instanceTemplate["name"]); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if instanceProperties.Disks != nil { + disks, err := flattenDisks(instanceProperties.Disks, d, project) + if err != nil { + return fmt.Errorf("error flattening disks: %s", err) + } + if err = d.Set("disk", disks); err != nil { + return fmt.Errorf("Error setting disk: %s", err) + } + } + if err = d.Set("description", instanceTemplate["description"]); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err = d.Set("machine_type", instanceProperties.MachineType); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + if err = d.Set("min_cpu_platform", instanceProperties.MinCpuPlatform); err != nil { + return fmt.Errorf("Error setting min_cpu_platform: %s", err) + } + + if err = d.Set("can_ip_forward", instanceProperties.CanIpForward); err != nil { + return fmt.Errorf("Error setting can_ip_forward: %s", err) + } + + if err = 
d.Set("instance_description", instanceProperties.Description); err != nil { + return fmt.Errorf("Error setting instance_description: %s", err) + } + if err = d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("network_performance_config", flattenNetworkPerformanceConfig(instanceProperties.NetworkPerformanceConfig)); err != nil { + return err + } + if instanceProperties.NetworkInterfaces != nil { + networkInterfaces, region, _, _, err := flattenNetworkInterfaces(d, config, instanceProperties.NetworkInterfaces) + if err != nil { + return err + } + if err = d.Set("network_interface", networkInterfaces); err != nil { + return fmt.Errorf("Error setting network_interface: %s", err) + } + // region is where to look up the subnetwork if there is one attached to the instance template + if region != "" { + if err = d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + } + } + if instanceProperties.Scheduling != nil { + scheduling := flattenScheduling(instanceProperties.Scheduling) + if err = d.Set("scheduling", scheduling); err != nil { + return fmt.Errorf("Error setting scheduling: %s", err) + } + } + if instanceProperties.Tags != nil { + if err = d.Set("tags", instanceProperties.Tags.Items); err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + } else { + if err = d.Set("tags", nil); err != nil { + return fmt.Errorf("Error setting empty tags: %s", err) + } + } + if instanceProperties.ServiceAccounts != nil { + if err = d.Set("service_account", flattenServiceAccounts(instanceProperties.ServiceAccounts)); err != nil { + return fmt.Errorf("Error setting service_account: %s", err) + } + } + if instanceProperties.GuestAccelerators != nil { + if err = d.Set("guest_accelerator", flattenGuestAccelerators(instanceProperties.GuestAccelerators)); err != nil { + return fmt.Errorf("Error setting guest_accelerator: %s", err) + } + } + if 
instanceProperties.ShieldedInstanceConfig != nil { + if err = d.Set("shielded_instance_config", flattenShieldedVmConfig(instanceProperties.ShieldedInstanceConfig)); err != nil { + return fmt.Errorf("Error setting shielded_instance_config: %s", err) + } + } + + if instanceProperties.ConfidentialInstanceConfig != nil { + if err = d.Set("confidential_instance_config", flattenConfidentialInstanceConfig(instanceProperties.ConfidentialInstanceConfig)); err != nil { + return fmt.Errorf("Error setting confidential_instance_config: %s", err) + } + } + if instanceProperties.AdvancedMachineFeatures != nil { + if err = d.Set("advanced_machine_features", flattenAdvancedMachineFeatures(instanceProperties.AdvancedMachineFeatures)); err != nil { + return fmt.Errorf("Error setting advanced_machine_features: %s", err) + } + } +{{- if ne $.TargetVersionName "ga" }} + if instanceProperties.DisplayDevice != nil { + if err = d.Set("enable_display", flattenEnableDisplay(instanceProperties.DisplayDevice)); err != nil { + return fmt.Errorf("Error setting enable_display: %s", err) + } + } +{{- end }} + + if instanceProperties.ResourcePolicies != nil { + if err = d.Set("resource_policies", instanceProperties.ResourcePolicies); err != nil { + return fmt.Errorf("Error setting resource_policies: %s", err) + } + } + + if reservationAffinity := instanceProperties.ReservationAffinity; reservationAffinity != nil { + if err = d.Set("reservation_affinity", flattenReservationAffinity(reservationAffinity)); err != nil { + return fmt.Errorf("Error setting reservation_affinity: %s", err) + } + } + + return nil +} + +func resourceComputeRegionInstanceTemplateDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, 
"{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceTemplates/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + op, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: project, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RegionInstanceTemplate") + } + + err = ComputeOperationWaitTime(config, op, project, "Deleting Region Instance Template", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + d.SetId("") + return nil +} + +func resourceComputeRegionInstanceTemplateImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/regions/(?P[^/]+)/instanceTemplates/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceTemplates/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_internal_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_internal_test.go.tmpl new file mode 100644 index 000000000000..a643e4719836 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_internal_test.go.tmpl @@ -0,0 +1,223 @@ + +package compute + +{{ if ne $.TargetVersionName `ga` -}} + +import 
( + "reflect" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func TestComputeRegionInstanceTemplate_reorderDisks(t *testing.T) { + t.Parallel() + + cBoot := map[string]interface{}{ + "source": "boot-source", + } + cFallThrough := map[string]interface{}{ + "auto_delete": true, + } + cDeviceName := map[string]interface{}{ + "device_name": "disk-1", + } + cScratch := map[string]interface{}{ + "type": "SCRATCH", + } + cSource := map[string]interface{}{ + "source": "disk-source", + } + cScratchNvme := map[string]interface{}{ + "type": "SCRATCH", + "interface": "NVME", + } + + aBoot := map[string]interface{}{ + "source": "boot-source", + "boot": true, + } + aScratchNvme := map[string]interface{}{ + "device_name": "scratch-1", + "type": "SCRATCH", + "interface": "NVME", + } + aSource := map[string]interface{}{ + "device_name": "disk-2", + "source": "disk-source", + } + aScratchScsi := map[string]interface{}{ + "device_name": "scratch-2", + "type": "SCRATCH", + "interface": "SCSI", + } + aFallThrough := map[string]interface{}{ + "device_name": "disk-3", + "auto_delete": true, + "source": "fake-source", + } + aFallThrough2 := map[string]interface{}{ + "device_name": "disk-4", + "auto_delete": true, + "source": "fake-source", + } + aDeviceName := map[string]interface{}{ + "device_name": "disk-1", + "auto_delete": true, + "source": "fake-source-2", + } + aNoMatch := map[string]interface{}{ + "device_name": "disk-2", + "source": "disk-source-doesn't-match", + } + + cases := map[string]struct { + ConfigDisks []interface{} + ApiDisks []map[string]interface{} + ExpectedResult []map[string]interface{} + }{ + "all disks represented": { + ApiDisks: []map[string]interface{}{ + aBoot, aScratchNvme, aSource, aScratchScsi, aFallThrough, aDeviceName, + }, + ConfigDisks: []interface{}{ + cBoot, cFallThrough, cDeviceName, cScratch, cSource, cScratchNvme, + }, + ExpectedResult: []map[string]interface{}{ + aBoot, aFallThrough, aDeviceName, 
aScratchScsi, aSource, aScratchNvme, + }, + }, + "one non-match": { + ApiDisks: []map[string]interface{}{ + aBoot, aNoMatch, aScratchNvme, aScratchScsi, aFallThrough, aDeviceName, + }, + ConfigDisks: []interface{}{ + cBoot, cFallThrough, cDeviceName, cScratch, cSource, cScratchNvme, + }, + ExpectedResult: []map[string]interface{}{ + aBoot, aFallThrough, aDeviceName, aScratchScsi, aScratchNvme, aNoMatch, + }, + }, + "two fallthroughs": { + ApiDisks: []map[string]interface{}{ + aBoot, aScratchNvme, aFallThrough, aSource, aScratchScsi, aFallThrough2, aDeviceName, + }, + ConfigDisks: []interface{}{ + cBoot, cFallThrough, cDeviceName, cScratch, cFallThrough, cSource, cScratchNvme, + }, + ExpectedResult: []map[string]interface{}{ + aBoot, aFallThrough, aDeviceName, aScratchScsi, aFallThrough2, aSource, aScratchNvme, + }, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + // Disks read using d.Get will always have values for all keys, so set those values + for _, disk := range tc.ConfigDisks { + d := disk.(map[string]interface{}) + for _, k := range []string{"auto_delete", "boot"} { + if _, ok := d[k]; !ok { + d[k] = false + } + } + for _, k := range []string{"device_name", "disk_name", "interface", "mode", "source", "type"} { + if _, ok := d[k]; !ok { + d[k] = "" + } + } + } + + // flattened disks always set auto_delete, boot, device_name, interface, mode, source, and type + for _, d := range tc.ApiDisks { + for _, k := range []string{"auto_delete", "boot"} { + if _, ok := d[k]; !ok { + d[k] = false + } + } + + for _, k := range []string{"device_name", "interface", "mode", "source"} { + if _, ok := d[k]; !ok { + d[k] = "" + } + } + if _, ok := d["type"]; !ok { + d["type"] = "PERSISTENT" + } + } + + result := reorderDisks(tc.ConfigDisks, tc.ApiDisks) + if !reflect.DeepEqual(tc.ExpectedResult, result) { + t.Errorf("reordering did not match\nExpected: %+v\nActual: %+v", tc.ExpectedResult, result) + } + }) + } +} + +func 
TestComputeRegionInstanceTemplate_scratchDiskSizeCustomizeDiff(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + Typee string // misspelled on purpose, type is a special symbol + DiskType string + DiskSize int + Interfacee string + ExpectError bool + }{ + "scratch disk correct size 1": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 375, + Interfacee: "NVME", + ExpectError: false, + }, + "scratch disk correct size 2": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 3000, + Interfacee: "NVME", + ExpectError: false, + }, + "scratch disk incorrect size": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 300, + Interfacee: "NVME", + ExpectError: true, + }, + "scratch disk incorrect interface": { + Typee: "SCRATCH", + DiskType: "local-ssd", + DiskSize: 3000, + Interfacee: "SCSI", + ExpectError: true, + }, + "non-scratch disk": { + Typee: "PERSISTENT", + DiskType: "", + DiskSize: 300, + Interfacee: "NVME", + ExpectError: false, + }, + } + + for tn, tc := range cases { + d := &tpgresource.ResourceDiffMock{ + After: map[string]interface{}{ + "disk.#": 1, + "disk.0.type": tc.Typee, + "disk.0.disk_type": tc.DiskType, + "disk.0.disk_size_gb": tc.DiskSize, + "disk.0.interface": tc.Interfacee, + }, + } + err := resourceComputeInstanceTemplateScratchDiskCustomizeDiffFunc(d) + if tc.ExpectError && err == nil { + t.Errorf("%s failed, expected error but was none", tn) + } + if !tc.ExpectError && err != nil { + t.Errorf("%s failed, found unexpected error: %s", tn, err) + } + } +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl new file mode 100644 index 000000000000..f66690696fcb --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl @@ -0,0 +1,3753 @@ + +package 
compute_test + +import ( + "encoding/json" + "fmt" + "reflect" + "regexp" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +{{- if ne $.TargetVersionName "ga" }} + "google.golang.org/api/googleapi" +{{- end }} +) + +func TestAccComputeRegionInstanceTemplate_basic(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_basic(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateTag(&instanceTemplate, "foo"), + testAccCheckComputeRegionInstanceTemplateMetadata(&instanceTemplate, "foo", "bar"), + testAccCheckComputeRegionInstanceTemplateContainsLabel(&instanceTemplate, "my_label", "foobar"), + testAccCheckComputeRegionInstanceTemplateLacksShieldedVmConfig(&instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func 
TestAccComputeRegionInstanceTemplate_imageShorthand(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_imageShorthand(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_preemptible(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_preemptible(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeRegionInstanceTemplatePreemptible(&instanceTemplate, true), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_IP(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_ip(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateNetwork(&instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_IPv6(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_ipv6(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_networkTier(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_networkTier(acctest.RandString(t, 10)), + }, + { + ResourceName: 
"google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_networkIP(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + networkIP := "10.128.0.2" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_networkIP(acctest.RandString(t, 10), networkIP), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateNetwork(&instanceTemplate), + testAccCheckComputeRegionInstanceTemplateNetworkIP( + "google_compute_region_instance_template.foobar", networkIP, &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_networkIPAddress(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + ipAddress := "10.128.0.2" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_networkIPAddress(acctest.RandString(t, 10), ipAddress), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateNetwork(&instanceTemplate), + 
testAccCheckComputeRegionInstanceTemplateNetworkIPAddress( + "google_compute_region_instance_template.foobar", ipAddress, &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_disksInvalid(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_disksInvalid(acctest.RandString(t, 10)), + ExpectError: regexp.MustCompile("Cannot use `source`.*"), + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_regionDisks(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_regionDisks(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_diskIops(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_diskIops(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccComputeRegionInstanceTemplate_subnet_auto(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + network := "tf-test-network-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_subnet_auto(network, acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateNetworkName(&instanceTemplate, network), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_subnet_custom(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_subnet_custom(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateSubnetwork(&instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_subnet_xpn(t *testing.T) { + // Randomness + acctest.SkipIfVcr(t) + t.Parallel() + + var instanceTemplate 
compute.InstanceTemplate + org := envvar.GetTestOrgFromEnv(t) + billingId := envvar.GetTestBillingAccountFromEnv(t) + projectName := fmt.Sprintf("tf-testxpn-%d", time.Now().Unix()) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_subnet_xpn(org, billingId, projectName, acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExistsInProject( + t, "google_compute_region_instance_template.foobar", fmt.Sprintf("%s-service", projectName), + &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateSubnetwork(&instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_metadata_startup_script(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_startup_script(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateStartupScript(&instanceTemplate, "echo 'Hello'"), + ), + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_primaryAliasIpRange(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_primaryAliasIpRange(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasAliasIpRange(&instanceTemplate, "", "/24"), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_secondaryAliasIpRange(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_secondaryAliasIpRange(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasAliasIpRange(&instanceTemplate, "inst-test-secondary", "/24"), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_guestAccelerator(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeRegionInstanceTemplate_guestAccelerator(acctest.RandString(t, 10), 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasGuestAccelerator(&instanceTemplate, "nvidia-tesla-k80", 1), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) + +} + +func TestAccComputeRegionInstanceTemplate_guestAcceleratorSkip(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_guestAccelerator(acctest.RandString(t, 10), 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateLacksGuestAccelerator(&instanceTemplate), + ), + }, + }, + }) + +} + +func TestAccComputeRegionInstanceTemplate_minCpuPlatform(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_minCpuPlatform(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), + 
testAccCheckComputeRegionInstanceTemplateHasMinCpuPlatform(&instanceTemplate, DEFAULT_MIN_CPU_TEST_VALUE), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_soleTenantNodeAffinities(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_soleTenantInstanceTemplate(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_instanceResourcePolicies(t *testing.T) { + t.Parallel() + + var template compute.InstanceTemplate + var policyName = "tf-test-policy-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_instanceResourcePolicyCollocated(acctest.RandString(t, 10), policyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &template), + testAccCheckComputeRegionInstanceTemplateHasInstanceResourcePolicies(&template, policyName), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_reservationAffinities(t *testing.T) { + t.Parallel() + + var template compute.InstanceTemplate 
+ var templateName = acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_reservationAffinityInstanceTemplate_nonSpecificReservation(templateName, "NO_RESERVATION"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &template), + testAccCheckComputeRegionInstanceTemplateHasReservationAffinity(&template, "NO_RESERVATION"), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionInstanceTemplate_reservationAffinityInstanceTemplate_nonSpecificReservation(templateName, "ANY_RESERVATION"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &template), + testAccCheckComputeRegionInstanceTemplateHasReservationAffinity(&template, "ANY_RESERVATION"), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionInstanceTemplate_reservationAffinityInstanceTemplate_specificReservation(templateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &template), + testAccCheckComputeRegionInstanceTemplateHasReservationAffinity(&template, "SPECIFIC_RESERVATION", templateName), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_shieldedVmConfig1(t *testing.T) { + 
t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_shieldedVmConfig(acctest.RandString(t, 10), true, true, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasShieldedVmConfig(&instanceTemplate, true, true, true), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_shieldedVmConfig2(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_shieldedVmConfig(acctest.RandString(t, 10), true, true, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasShieldedVmConfig(&instanceTemplate, true, true, false), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + {{- if ne $.TargetVersionName "ga" }} + var 
instanceTemplate2 compute.InstanceTemplate + {{- end }} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplateConfidentialInstanceConfigEnable(acctest.RandString(t, 10), "SEV"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, true, "SEV"), + {{- if ne $.TargetVersionName "ga" }} + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar2", &instanceTemplate2), + testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, true, ""), + {{- end }} + ), + }, + {{- if ne $.TargetVersionName "ga" }} + { + Config: testAccComputeRegionInstanceTemplateConfidentialInstanceConfigNoEnable(acctest.RandString(t, 10), "AMD Milan", "SEV_SNP"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar3", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, false, "SEV_SNP"), + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar4", &instanceTemplate2), + testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), + ), + }, + {{- end }} + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_AdvancedMachineFeatures(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplateAdvancedMachineFeatures(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeRegionInstanceTemplate_enableDisplay(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_enableDisplay(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ end }} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeRegionInstanceTemplate_maintenance_interval(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_maintenance_interval(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateMaintenanceInterval(&instanceTemplate, "PERIODIC"), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccComputeRegionInstanceTemplate_basic(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateMaintenanceInterval(&instanceTemplate, ""), + ), + }, + }, + }) +} +{{- end }} + +func TestAccComputeRegionInstanceTemplate_invalidDiskType(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_invalidDiskType(acctest.RandString(t, 10)), + ExpectError: regexp.MustCompile("SCRATCH disks must have a disk_type of local-ssd"), + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_withScratchDisk(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_with375GbScratchDisk(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_with18TbScratchDisk(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_with18TbScratchDisk(acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: 
true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_imageResourceTest(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + diskName := "tf-test-disk-" + acctest.RandString(t, 10) + computeImage := "tf-test-image-" + acctest.RandString(t, 10) + imageDesc1 := "Some description" + imageDesc2 := "Some other description" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_imageResourceTest(diskName, computeImage, imageDesc1), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + { + Config: testAccComputeRegionInstanceTemplate_imageResourceTest(diskName, computeImage, imageDesc2), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_diskResourcePolicies(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + policyName := "tf-test-policy-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_diskResourcePolicies(acctest.RandString(t, 10), policyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists(t, 
"google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateHasDiskResourcePolicy(&instanceTemplate, policyName), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_nictype_update(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var instanceTemplateName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_nictype(instanceTemplateName, instanceTemplateName, "GVNIC"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + ), + }, + { + Config: testAccComputeRegionInstanceTemplate_nictype(instanceTemplateName, instanceTemplateName, "VIRTIO_NET"), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_queueCount(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var instanceTemplateName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeRegionInstanceTemplate_queueCount(instanceTemplateName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + ), + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_managedEnvoy(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_managedEnvoy(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_spot(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_spot(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeRegionInstanceTemplatePreemptible(&instanceTemplate, true), + 
testAccCheckComputeRegionInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeRegionInstanceTemplate_spot_maxRunDuration(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // Define in testAccComputeRegionInstanceTemplate_spot_maxRunDuration + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_spot_maxRunDuration(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeRegionInstanceTemplatePreemptible(&instanceTemplate, true), + testAccCheckComputeRegionInstanceTemplateProvisioningModel(&instanceTemplate, "SPOT"), + testAccCheckComputeRegionInstanceTemplateInstanceTerminationAction(&instanceTemplate, "DELETE"), + testAccCheckComputeRegionInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedMaxRunDuration = compute.Duration{} + // Define in 
testAccComputeRegionInstanceTemplate_spot + expectedMaxRunDuration.Nanos = 123 + expectedMaxRunDuration.Seconds = 60 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionInstanceTemplateAutomaticRestart(&instanceTemplate, false), + testAccCheckComputeRegionInstanceTemplateInstanceTerminationAction(&instanceTemplate, "STOP"), + testAccCheckComputeRegionInstanceTemplateMaxRunDuration(&instanceTemplate, expectedMaxRunDuration), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var expectedLocalSsdRecoveryTimeout = compute.Duration{} + // Define in testAccComputeRegionInstanceTemplate_spot + expectedLocalSsdRecoveryTimeout.Nanos = 0 + expectedLocalSsdRecoveryTimeout.Seconds = 3600 + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), 
+ testAccCheckComputeRegionInstanceTemplateLocalSsdRecoveryTimeout(&instanceTemplate, expectedLocalSsdRecoveryTimeout), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeRegionalInstanceTemplate_partnerMetadata(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var namespace = "test.compute.googleapis.com" + expectedPartnerMetadata := make(map[string]compute.StructuredEntries) + expectedPartnerMetadata[namespace] = compute.StructuredEntries{ + Entries: googleapi.RawMessage(`{"key1": "value1", "key2": 2,"key3": {"key31":"value31"}}`), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionalInstanceTemplate_partnerMetadata(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate), + testAccCheckComputeRegionalInstanceTemplatePartnerMetadata(&instanceTemplate, expectedPartnerMetadata), + ), + }, + { + ResourceName: "google_compute_region_instance_template.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{fmt.Sprintf("partner_metadata.%s", namespace)}, + }, + }, + }) + +} +{{- end }} + +func TestAccComputeRegionInstanceTemplate_sourceSnapshotEncryptionKey(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + kmsKey := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + + context := map[string]interface{}{ + "kms_ring_name": tpgresource.GetResourceNameFromSelfLink(kmsKey.KeyRing.Name), + "kms_key_name": 
tpgresource.GetResourceNameFromSelfLink(kmsKey.CryptoKey.Name), + "random_suffix": acctest.RandString(t, 10), + } + + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_sourceSnapshotEncryptionKey(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.template", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.template", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"disk.0.source_snapshot", "disk.0.source_snapshot_encryption_key"}, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_sourceImageEncryptionKey(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + kmsKey := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + + context := map[string]interface{}{ + "kms_ring_name": tpgresource.GetResourceNameFromSelfLink(kmsKey.KeyRing.Name), + "kms_key_name": tpgresource.GetResourceNameFromSelfLink(kmsKey.CryptoKey.Name), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_sourceImageEncryptionKey(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.template", &instanceTemplate), + ), + }, + { + ResourceName: "google_compute_region_instance_template.template", + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"disk.0.source_image_encryption_key"}, + }, + }, + }) +} + +func TestAccComputeRegionInstanceTemplate_resourceManagerTags(t *testing.T) { + t.Parallel() + + var instanceTemplate compute.InstanceTemplate + var instanceTemplateName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "instance_name": instanceTemplateName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionInstanceTemplateDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionInstanceTemplate_resourceManagerTags(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionInstanceTemplateExists( + t, "google_compute_region_instance_template.foobar", &instanceTemplate)), + }, + }, + }) +} + +func testAccCheckComputeRegionInstanceTemplateDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_region_instance_template" { + continue + } + + splits := strings.Split(rs.Primary.ID, "/") + name := splits[len(splits)-1] + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceTemplates/"+name) + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + instanceTemplate, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + _ = instanceTemplate + if err == nil { 
+ return fmt.Errorf("Instance template still exists") + } + } + + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateExists(t *testing.T, n string, instanceTemplate interface{}) resource.TestCheckFunc { + if instanceTemplate == nil { + panic("Attempted to check existence of Instance template that was nil.") + } + + return testAccCheckComputeRegionInstanceTemplateExistsInProject(t, n, envvar.GetTestProjectFromEnv(), instanceTemplate.(*compute.InstanceTemplate)) +} + +func testAccCheckComputeRegionInstanceTemplateExistsInProject(t *testing.T, n, p string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + splits := strings.Split(rs.Primary.ID, "/") + templateName := splits[len(splits)-1] + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/instanceTemplates/"+templateName) + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + {{- if ne $.TargetVersionName "ga" }} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"view": "FULL"}) + if err != nil { + return err + } + {{- end }} + + found, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err != nil { + return err + } + + foundObj, err := json.Marshal(found) + if err != nil { + fmt.Println(err) + return err + } + + instanceTemplateFound := compute.InstanceTemplate{} + + if err := json.Unmarshal(foundObj, &instanceTemplateFound); err != nil { + fmt.Println(err) + return err + } + + if instanceTemplateFound.Name != templateName { + 
return fmt.Errorf("Instance template not found") + } + + *instanceTemplate = instanceTemplateFound + + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateMetadata( + instanceTemplate *compute.InstanceTemplate, + k string, v string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Metadata == nil { + return fmt.Errorf("no metadata") + } + + for _, item := range instanceTemplate.Properties.Metadata.Items { + if k != item.Key { + continue + } + + if item.Value != nil && v == *item.Value { + return nil + } + + return fmt.Errorf("bad value for %s: %s", k, *item.Value) + } + + return fmt.Errorf("metadata not found: %s", k) + } +} + +func testAccCheckComputeRegionInstanceTemplateNetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + for _, c := range i.AccessConfigs { + if c.NatIP == "" { + return fmt.Errorf("no NAT IP") + } + } + } + + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateNetworkName(instanceTemplate *compute.InstanceTemplate, network string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + if !strings.Contains(i.Network, network) { + return fmt.Errorf("Network doesn't match expected value, Expected: %s Actual: %s", network, i.Network[strings.LastIndex("/", i.Network)+1:]) + } + } + + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateSubnetwork(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instanceTemplate.Properties.NetworkInterfaces { + if i.Subnetwork == "" { + return fmt.Errorf("no subnet") + } + } + + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeRegionInstanceTemplateMaintenanceInterval(instanceTemplate 
*compute.InstanceTemplate, maintenance_interval string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Scheduling.MaintenanceInterval != maintenance_interval { + return fmt.Errorf("Expected maintenance interval value %v, got %v", maintenance_interval, instanceTemplate.Properties.Scheduling.MaintenanceInterval) + } + return nil + } +} +{{- end }} + +func testAccCheckComputeRegionInstanceTemplateTag(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Tags == nil { + return fmt.Errorf("no tags") + } + + for _, k := range instanceTemplate.Properties.Tags.Items { + if k == n { + return nil + } + } + + return fmt.Errorf("tag not found: %s", n) + } +} + +func testAccCheckComputeRegionInstanceTemplatePreemptible(instanceTemplate *compute.InstanceTemplate, preemptible bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Scheduling.Preemptible != preemptible { + return fmt.Errorf("Expected preemptible value %v, got %v", preemptible, instanceTemplate.Properties.Scheduling.Preemptible) + } + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateProvisioningModel(instanceTemplate *compute.InstanceTemplate, provisioning_model string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Scheduling.ProvisioningModel != provisioning_model { + return fmt.Errorf("Expected provisioning_model %v, got %v", provisioning_model, instanceTemplate.Properties.Scheduling.ProvisioningModel) + } + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateInstanceTerminationAction(instanceTemplate *compute.InstanceTemplate, instance_termination_action string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Scheduling.InstanceTerminationAction != instance_termination_action 
{ + return fmt.Errorf("Expected instance_termination_action %v, got %v", instance_termination_action, instanceTemplate.Properties.Scheduling.InstanceTerminationAction) + } + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeRegionInstanceTemplateMaxRunDuration(instanceTemplate *compute.InstanceTemplate, instance_max_run_duration_want compute.Duration) resource.TestCheckFunc { + return func(s *terraform.State) error { + if !reflect.DeepEqual(*instanceTemplate.Properties.Scheduling.MaxRunDuration, instance_max_run_duration_want) { + return fmt.Errorf("gExpected instance_termination_action: %#v; got %#v", instance_max_run_duration_want, instanceTemplate.Properties.Scheduling.MaxRunDuration) + } + + return nil + } +} +{{- end }} + +func testAccCheckComputeRegionInstanceTemplateLocalSsdRecoveryTimeout(instanceTemplate *compute.InstanceTemplate, instance_local_ssd_recovery_timeout_want compute.Duration) resource.TestCheckFunc { + return func(s *terraform.State) error { + if !reflect.DeepEqual(*instanceTemplate.Properties.Scheduling.LocalSsdRecoveryTimeout, instance_local_ssd_recovery_timeout_want) { + return fmt.Errorf("gExpected local_ssd_recovery_timeout: %#v; got %#v", instance_local_ssd_recovery_timeout_want, instanceTemplate.Properties.Scheduling.LocalSsdRecoveryTimeout) + } + + return nil + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccCheckComputeRegionalInstanceTemplatePartnerMetadata(instanceTemplate *compute.InstanceTemplate, expectedPartnerMetadata map[string]compute.StructuredEntries) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate == nil { + return fmt.Errorf("instance template is nil") + } + if instanceTemplate.Properties.PartnerMetadata == nil { + return fmt.Errorf("no partner metadata") + } + expectedPartnerMetadataMap := make(map[string]interface{}) + acutalPartnerMetadataMap := make(map[string]interface{}) + for key, value := range 
instanceTemplate.Properties.PartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + acutalPartnerMetadataMap[key] = jsonMap + } + for key, value := range expectedPartnerMetadata { + var jsonMap map[string]interface{} + json.Unmarshal(value.Entries, jsonMap) + expectedPartnerMetadataMap[key] = jsonMap + } + if !reflect.DeepEqual(acutalPartnerMetadataMap, expectedPartnerMetadataMap) { + return fmt.Errorf("got the wrong instance partne metadata action: have: %+v; want: %+v", acutalPartnerMetadataMap, expectedPartnerMetadataMap) + } + return nil + + } +} +{{- end }} + +func testAccCheckComputeRegionInstanceTemplateAutomaticRestart(instanceTemplate *compute.InstanceTemplate, automaticRestart bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + ar := instanceTemplate.Properties.Scheduling.AutomaticRestart + if ar == nil { + return fmt.Errorf("Expected to see a value for AutomaticRestart, but got nil") + } + if *ar != automaticRestart { + return fmt.Errorf("Expected automatic restart value %v, got %v", automaticRestart, ar) + } + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateStartupScript(instanceTemplate *compute.InstanceTemplate, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.Metadata == nil && n == "" { + return nil + } else if instanceTemplate.Properties.Metadata == nil && n != "" { + return fmt.Errorf("Expected metadata.startup-script to be '%s', metadata wasn't set at all", n) + } + for _, item := range instanceTemplate.Properties.Metadata.Items { + if item.Key != "startup-script" { + continue + } + if item.Value != nil && *item.Value == n { + return nil + } else if item.Value == nil && n == "" { + return nil + } else if item.Value == nil && n != "" { + return fmt.Errorf("Expected metadata.startup-script to be '%s', wasn't set", n) + } else if *item.Value != n { + return fmt.Errorf("Expected metadata.startup-script to be 
'%s', got '%s'", n, *item.Value) + } + } + return fmt.Errorf("This should never be reached.") + } +} + +func testAccCheckComputeRegionInstanceTemplateNetworkIP(n, networkIP string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP + err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s) + if err != nil { + return err + } + return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", networkIP)(s) + } +} + +func testAccCheckComputeRegionInstanceTemplateNetworkIPAddress(n, ipAddress string, instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + ip := instanceTemplate.Properties.NetworkInterfaces[0].NetworkIP + err := resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ip)(s) + if err != nil { + return err + } + return resource.TestCheckResourceAttr(n, "network_interface.0.network_ip", ipAddress)(s) + } +} + +func testAccCheckComputeRegionInstanceTemplateContainsLabel(instanceTemplate *compute.InstanceTemplate, key string, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + v, ok := instanceTemplate.Properties.Labels[key] + if !ok { + return fmt.Errorf("Expected label with key '%s' not found", key) + } + if v != value { + return fmt.Errorf("Incorrect label value for key '%s': expected '%s' but found '%s'", key, value, v) + } + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateHasAliasIpRange(instanceTemplate *compute.InstanceTemplate, subnetworkRangeName, iPCidrRange string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, networkInterface := range instanceTemplate.Properties.NetworkInterfaces { + for _, aliasIpRange := range networkInterface.AliasIpRanges { + if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == 
iPCidrRange || tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { + return nil + } + } + } + + return fmt.Errorf("Alias ip range with name %s and cidr %s not present", subnetworkRangeName, iPCidrRange) + } +} + +func testAccCheckComputeRegionInstanceTemplateHasGuestAccelerator(instanceTemplate *compute.InstanceTemplate, acceleratorType string, acceleratorCount int64) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(instanceTemplate.Properties.GuestAccelerators) != 1 { + return fmt.Errorf("Expected only one guest accelerator") + } + + if !strings.HasSuffix(instanceTemplate.Properties.GuestAccelerators[0].AcceleratorType, acceleratorType) { + return fmt.Errorf("Wrong accelerator type: expected %v, got %v", acceleratorType, instanceTemplate.Properties.GuestAccelerators[0].AcceleratorType) + } + + if instanceTemplate.Properties.GuestAccelerators[0].AcceleratorCount != acceleratorCount { + return fmt.Errorf("Wrong accelerator acceleratorCount: expected %d, got %d", acceleratorCount, instanceTemplate.Properties.GuestAccelerators[0].AcceleratorCount) + } + + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateLacksGuestAccelerator(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + if len(instanceTemplate.Properties.GuestAccelerators) > 0 { + return fmt.Errorf("Expected no guest accelerators") + } + + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateHasMinCpuPlatform(instanceTemplate *compute.InstanceTemplate, minCpuPlatform string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.MinCpuPlatform != minCpuPlatform { + return fmt.Errorf("Wrong minimum CPU platform: expected %s, got %s", minCpuPlatform, instanceTemplate.Properties.MinCpuPlatform) + } + + return nil + } +} + +func 
testAccCheckComputeRegionInstanceTemplateHasInstanceResourcePolicies(instanceTemplate *compute.InstanceTemplate, resourcePolicy string) resource.TestCheckFunc { + return func(s *terraform.State) error { + resourcePolicyActual := instanceTemplate.Properties.ResourcePolicies[0] + if resourcePolicyActual != resourcePolicy { + return fmt.Errorf("Wrong instance resource policy: expected %s, got %s", resourcePolicy, resourcePolicyActual) + } + + return nil + } + +} + +func testAccCheckComputeRegionInstanceTemplateHasReservationAffinity(instanceTemplate *compute.InstanceTemplate, consumeReservationType string, specificReservationNames ...string) resource.TestCheckFunc { + if len(specificReservationNames) > 1 { + panic("too many specificReservationNames in test") + } + + return func(*terraform.State) error { + if instanceTemplate.Properties.ReservationAffinity == nil { + return fmt.Errorf("expected template to have reservation affinity, but it was nil") + } + + if actualReservationType := instanceTemplate.Properties.ReservationAffinity.ConsumeReservationType; actualReservationType != consumeReservationType { + return fmt.Errorf("Wrong reservationAffinity consumeReservationType: expected %s, got, %s", consumeReservationType, actualReservationType) + } + + if len(specificReservationNames) > 0 { + const reservationNameKey = "compute.googleapis.com/reservation-name" + if actualKey := instanceTemplate.Properties.ReservationAffinity.Key; actualKey != reservationNameKey { + return fmt.Errorf("Wrong reservationAffinity key: expected %s, got, %s", reservationNameKey, actualKey) + } + + reservationAffinityValues := instanceTemplate.Properties.ReservationAffinity.Values + if len(reservationAffinityValues) != 1 || reservationAffinityValues[0] != specificReservationNames[0] { + return fmt.Errorf("Wrong reservationAffinity values: expected %s, got, %s", specificReservationNames, reservationAffinityValues) + } + } + + return nil + } +} + +func 
testAccCheckComputeRegionInstanceTemplateHasShieldedVmConfig(instanceTemplate *compute.InstanceTemplate, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) resource.TestCheckFunc { + + return func(s *terraform.State) error { + if instanceTemplate.Properties.ShieldedInstanceConfig.EnableSecureBoot != enableSecureBoot { + return fmt.Errorf("Wrong shieldedVmConfig enableSecureBoot: expected %t, got, %t", enableSecureBoot, instanceTemplate.Properties.ShieldedInstanceConfig.EnableSecureBoot) + } + + if instanceTemplate.Properties.ShieldedInstanceConfig.EnableVtpm != enableVtpm { + return fmt.Errorf("Wrong shieldedVmConfig enableVtpm: expected %t, got, %t", enableVtpm, instanceTemplate.Properties.ShieldedInstanceConfig.EnableVtpm) + } + + if instanceTemplate.Properties.ShieldedInstanceConfig.EnableIntegrityMonitoring != enableIntegrityMonitoring { + return fmt.Errorf("Wrong shieldedVmConfig enableIntegrityMonitoring: expected %t, got, %t", enableIntegrityMonitoring, instanceTemplate.Properties.ShieldedInstanceConfig.EnableIntegrityMonitoring) + } + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(instanceTemplate *compute.InstanceTemplate, EnableConfidentialCompute bool, ConfidentialInstanceType string) resource.TestCheckFunc { + + return func(s *terraform.State) error { + if instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute != EnableConfidentialCompute { + return fmt.Errorf("Wrong ConfidentialInstanceConfig EnableConfidentialCompute: expected %t, got, %t", EnableConfidentialCompute, instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute) + } + {{- if ne $.TargetVersionName "ga" }} + if instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType != ConfidentialInstanceType { + return fmt.Errorf("Wrong ConfidentialInstanceConfig ConfidentialInstanceType: expected %s, got, %s", ConfidentialInstanceType, 
instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType) + } + {{- end }} + + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateLacksShieldedVmConfig(instanceTemplate *compute.InstanceTemplate) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instanceTemplate.Properties.ShieldedInstanceConfig != nil { + return fmt.Errorf("Expected no shielded vm config") + } + + return nil + } +} + +func testAccCheckComputeRegionInstanceTemplateHasDiskResourcePolicy(instanceTemplate *compute.InstanceTemplate, resourcePolicy string) resource.TestCheckFunc { + return func(s *terraform.State) error { + resourcePolicyActual := instanceTemplate.Properties.Disks[0].InitializeParams.ResourcePolicies[0] + if resourcePolicyActual != resourcePolicy { + return fmt.Errorf("Wrong disk resource policy: expected %s, got %s", resourcePolicy, resourcePolicyActual) + } + + return nil + } +} + +func testAccComputeRegionInstanceTemplate_basic(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_imageShorthand(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_image" "foobar" { + name = "tf-test-%s" + description = "description-test" + family = "family-test" + raw_disk { + source 
= "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" + } + labels = { + my-label = "my-label-value" + } + timeouts { + create = "5m" + } +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = google_compute_image.foobar.name + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, suffix, suffix) +} + +func testAccComputeRegionInstanceTemplate_preemptible(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = true + automatic_restart = false + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_ip(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_address" "foo" { + name = "tf-test-instance-template-%s" +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + 
region = "us-central1" + machine_type = "e2-medium" + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + access_config { + nat_ip = google_compute_address.foo.address + } + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix) +} + +func testAccComputeRegionInstanceTemplate_ipv6(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_address" "foo" { + name = "tf-test-instance-template-%s" +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "foo" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork-ipv6" { + name = "tf-test-subnetwork-%s" + + ip_cidr_range = "10.0.0.0/22" + region = "us-central1" + + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + + network = google_compute_network.foo.id +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork-ipv6.name + stack_type = "IPV4_IPV6" + ipv6_access_config { + network_tier = "PREMIUM" + } + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix, suffix) +} + +func testAccComputeRegionInstanceTemplate_networkTier(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + access_config { + 
network_tier = "STANDARD" + } + } +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_networkIP(suffix, networkIP string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + network_ip = "%s" + } + + metadata = { + foo = "bar" + } +} +`, suffix, networkIP) +} + +func testAccComputeRegionInstanceTemplate_networkIPAddress(suffix, ipAddress string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + } + + network_interface { + network = "default" + network_ip = "%s" + } + + metadata = { + foo = "bar" + } +} +`, suffix, ipAddress) +} + +func testAccComputeRegionInstanceTemplate_disksInvalid(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "tf-test-instance-template-%s" + image = data.google_compute_image.my_image.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + } + + disk { + source = 
google_compute_disk.foobar.name + disk_size_gb = 50 + auto_delete = false + boot = false + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix) +} + +func testAccComputeRegionInstanceTemplate_with375GbScratchDisk(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + disk { + source_image = data.google_compute_image.my_image.name + auto_delete = true + boot = true + } + disk { + auto_delete = true + disk_size_gb = 375 + type = "SCRATCH" + disk_type = "local-ssd" + } + network_interface { + network = "default" + } +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_with18TbScratchDisk(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "n2-standard-16" + region = "us-central1" + can_ip_forward = false + disk { + source_image = data.google_compute_image.my_image.name + auto_delete = true + boot = true + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + auto_delete = true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + disk { + auto_delete = 
true + disk_size_gb = 3000 + type = "SCRATCH" + disk_type = "local-ssd" + interface = "NVME" + } + network_interface { + network = "default" + } +}`, suffix) +} + +func testAccComputeRegionInstanceTemplate_regionDisks(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_disk" "foobar" { + name = "tf-test-instance-template-%s" + size = 10 + type = "pd-ssd" + region = "us-central1" + replica_zones = ["us-central1-a", "us-central1-f"] +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + } + + disk { + source = google_compute_region_disk.foobar.self_link + auto_delete = false + boot = false + } + + network_interface { + network = "default" + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix) +} + +func testAccComputeRegionInstanceTemplate_diskIops(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 100 + boot = true + provisioned_iops = 10000 + } + + network_interface { + network = "default" + } +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_subnet_auto(network, suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_network" "auto-network" { + name = "%s" + auto_create_subnetworks = true +} + +resource 
"google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + network = google_compute_network.auto-network.name + } + + metadata = { + foo = "bar" + } +} +`, network, suffix) +} + +func testAccComputeRegionInstanceTemplate_subnet_custom(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_network" "network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "subnetwork-%s" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.network.self_link +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork.name + } + + metadata = { + foo = "bar" + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeRegionInstanceTemplate_subnet_xpn(org, billingId, projectName, suffix string) string { + return fmt.Sprintf(` +resource "google_project" "host_project" { + name = "Test Project XPN Host" + project_id = "%s-host" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "host_project" { + project = google_project.host_project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_shared_vpc_host_project" "host_project" { + project = google_project_service.host_project.project +} + +resource "google_project" "service_project" { + name = "Test 
Project XPN Service" + project_id = "%s-service" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "service_project" { + project = google_project.service_project.project_id + service = "compute.googleapis.com" +} + +resource "google_compute_shared_vpc_service_project" "service_project" { + host_project = google_compute_shared_vpc_host_project.host_project.project + service_project = google_project_service.service_project.project +} + +resource "google_compute_network" "network" { + name = "tf-test-network-%s" + auto_create_subnetworks = false + project = google_compute_shared_vpc_host_project.host_project.project +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "subnetwork-%s" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.network.self_link + project = google_compute_shared_vpc_host_project.host_project.project +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + subnetwork = google_compute_subnetwork.subnetwork.name + subnetwork_project = google_compute_subnetwork.subnetwork.project + } + + metadata = { + foo = "bar" + } + project = google_compute_shared_vpc_service_project.service_project.service_project +} +`, projectName, org, billingId, projectName, org, billingId, suffix, suffix, suffix) +} + +func testAccComputeRegionInstanceTemplate_startup_script(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = 
"e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + metadata = { + foo = "bar" + } + + network_interface { + network = "default" + } + + metadata_startup_script = "echo 'Hello'" +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_primaryAliasIpRange(i string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + metadata = { + foo = "bar" + } + + network_interface { + network = "default" + alias_ip_range { + ip_cidr_range = "/24" + } + } +} +`, i) +} + +func testAccComputeRegionInstanceTemplate_secondaryAliasIpRange(i string) string { + return fmt.Sprintf(` +resource "google_compute_network" "inst-test-network" { + name = "tf-test-network-%s" +} + +resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.inst-test-network.self_link + secondary_ip_range { + range_name = "inst-test-secondary" + ip_cidr_range = "172.16.0.0/20" + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + metadata = { + foo = "bar" + } + + network_interface { + subnetwork = google_compute_subnetwork.inst-test-subnetwork.self_link + + 
// Note that unlike compute instances, instance templates seem to be + // only able to specify the netmask here. Trying a full CIDR string + // results in: + // Invalid value for field 'resource.properties.networkInterfaces[0].aliasIpRanges[0].ipCidrRange': + // '172.16.0.0/24'. Alias IP CIDR range must be a valid netmask starting with '/' (e.g. '/24') + alias_ip_range { + subnetwork_range_name = google_compute_subnetwork.inst-test-subnetwork.secondary_ip_range[0].range_name + ip_cidr_range = "/24" + } + } +} +`, i, i, i) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRegionInstanceTemplate_maintenance_interval(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + maintenance_interval = "PERIODIC" + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, suffix) +} +{{- end }} + + +func testAccComputeRegionInstanceTemplate_guestAccelerator(i string, count uint8) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + network = "default" + } 
+ + scheduling { + # Instances with guest accelerators do not support live migration. + on_host_maintenance = "TERMINATE" + } + + guest_accelerator { + count = %d + type = "nvidia-tesla-k80" + } +} +`, i, count) +} + +func testAccComputeRegionInstanceTemplate_minCpuPlatform(i string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + # Instances with guest accelerators do not support live migration. + on_host_maintenance = "TERMINATE" + } + + min_cpu_platform = "%s" +} +`, i, DEFAULT_MIN_CPU_TEST_VALUE) +} + +func testAccComputeRegionInstanceTemplate_soleTenantInstanceTemplate(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-standard-4" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + node_affinities { + key = "tfacc" + operator = "IN" + values = ["testinstancetemplate"] + } + + min_node_cpus = 2 + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_instanceResourcePolicyCollocated(suffix string, policyName string) string { + return fmt.Sprintf(` +resource "google_compute_resource_policy" "foo" { + name = "%s" + region = 
"us-central1" + group_placement_policy { + vm_count = 2 + collocation = "COLLOCATED" + } +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-standard-4" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = false + } + + resource_policies = [google_compute_resource_policy.foo.self_link] + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, policyName, suffix) +} + +func testAccComputeRegionInstanceTemplate_reservationAffinityInstanceTemplate_nonSpecificReservation(templateName, consumeReservationType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instancet-%s" + machine_type = "e2-medium" + can_ip_forward = false + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + reservation_affinity { + type = "%s" + } +} +`, templateName, consumeReservationType) +} + +func testAccComputeRegionInstanceTemplate_reservationAffinityInstanceTemplate_specificReservation(templateName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instancet-%s" + machine_type = "e2-medium" + can_ip_forward = false + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + 
boot = true + } + + network_interface { + network = "default" + } + + reservation_affinity { + type = "SPECIFIC_RESERVATION" + + specific_reservation { + key = "compute.googleapis.com/reservation-name" + values = ["%s"] + } + } +} +`, templateName, templateName) +} + +func testAccComputeRegionInstanceTemplate_shieldedVmConfig(suffix string, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + shielded_instance_config { + enable_secure_boot = %t + enable_vtpm = %t + enable_integrity_monitoring = %t + } +} +`, suffix, enableSecureBoot, enableVtpm, enableIntegrityMonitoring) +} + +func testAccComputeRegionInstanceTemplateConfidentialInstanceConfigEnable(suffix string, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "n2d-standard-2" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + confidential_instance_config { + enable_confidential_compute = true +{{- if ne $.TargetVersionName "ga" }} + confidential_instance_type = %q +{{- end }} + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} + +{{ if ne $.TargetVersionName `ga` -}} +resource "google_compute_region_instance_template" "foobar2" { + name 
= "tf-test-instance2-template-%s" + machine_type = "n2d-standard-2" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + confidential_instance_config { + enable_confidential_compute = true + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +{{- end }} +{{- if eq $.TargetVersionName "ga" }} +`, suffix) +{{- else }} +`, suffix, confidentialInstanceType, suffix) +{{- end }} +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRegionInstanceTemplateConfidentialInstanceConfigNoEnable(suffix string, minCpuPlatform, confidentialInstanceType string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image2" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_region_instance_template" "foobar3" { + name = "tf-test-instance3-template-%s" + machine_type = "n2d-standard-2" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image2.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + min_cpu_platform = %q + + confidential_instance_config { + enable_confidential_compute = false + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +resource "google_compute_region_instance_template" "foobar4" { + name = "tf-test-instance4-template-%s" + machine_type = "n2d-standard-2" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image2.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + min_cpu_platform = %q + + confidential_instance_config { + confidential_instance_type = %q + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) +} +{{- end }} + +func 
testAccComputeRegionInstanceTemplateAdvancedMachineFeatures(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "ubuntu-2004-lts" + project = "ubuntu-os-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "n2-standard-2" // Nested Virt isn't supported on E2 and N2Ds https://cloud.google.com/compute/docs/instances/nested-virtualization/overview#restrictions and https://cloud.google.com/compute/docs/instances/disabling-smt#limitations + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + advanced_machine_features { + threads_per_core = 1 + enable_nested_virtualization = true + visible_core_count = 1 + } + + scheduling { + on_host_maintenance = "TERMINATE" + } + +} +`, suffix) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRegionInstanceTemplate_enableDisplay(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "centos-7" + project = "centos-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + network_interface { + network = "default" + } + enable_display = true +} +`, suffix) +} + +{{ end }} + +func testAccComputeRegionInstanceTemplate_invalidDiskType(suffix string) string { + return fmt.Sprintf(` +# Use this datasource insead of hardcoded values when https://github.com/hashicorp/terraform/issues/22679 +# is resolved. 
+# data "google_compute_image" "my_image" { +# family = "centos-7" +# project = "centos-cloud" +# } + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + disk { + source_image = "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20210217" + auto_delete = true + boot = true + } + disk { + auto_delete = true + disk_size_gb = 375 + type = "SCRATCH" + disk_type = "local-ssd" + } + disk { + source_image = "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20210217" + auto_delete = true + type = "SCRATCH" + } + network_interface { + network = "default" + } +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_imageResourceTest(diskName string, imageName string, imageDescription string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "my_disk" { + name = "%s" + zone = "us-central1-a" + image = data.google_compute_image.my_image.self_link +} + +resource "google_compute_image" "diskimage" { + name = "%s" + description = "%s" + source_disk = google_compute_disk.my_disk.self_link +} + +resource "google_compute_region_instance_template" "foobar" { + name_prefix = "tf-test-instance-template-" + machine_type = "e2-medium" + region = "us-central1" + disk { + source_image = google_compute_image.diskimage.self_link + } + network_interface { + network = "default" + access_config {} + } +} +`, diskName, imageName, imageDescription) +} + +func testAccComputeRegionInstanceTemplate_diskResourcePolicies(suffix string, policyName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} +resource "google_compute_region_instance_template" "foobar" { + region = "us-central1" + name = 
"tf-test-instance-template-%s" + machine_type = "e2-medium" + can_ip_forward = false + disk { + source_image = data.google_compute_image.my_image.self_link + resource_policies = [google_compute_resource_policy.foo.id] + } + network_interface { + network = "default" + } + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + labels = { + my_label = "foobar" + } +} + +resource "google_compute_resource_policy" "foo" { + name = "%s" + region = "us-central1" + snapshot_schedule_policy { + schedule { + daily_schedule { + days_in_cycle = 1 + start_time = "04:00" + } + } + } +} +`, suffix, policyName) +} + +func testAccComputeRegionInstanceTemplate_nictype(image, instance, nictype string) string { + return fmt.Sprintf(` +resource "google_compute_image" "example" { + name = "%s" + raw_disk { + source = "https://storage.googleapis.com/bosh-gce-raw-stemcells/bosh-stemcell-97.98-google-kvm-ubuntu-xenial-go_agent-raw-1557960142.tar.gz" + } + + guest_os_features { + type = "SECURE_BOOT" + } + + guest_os_features { + type = "MULTI_IP_SUBNET" + } + + guest_os_features { + type = "GVNIC" + } +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = google_compute_image.example.name + auto_delete = true + boot = true + } + + network_interface { + network = "default" + nic_type = "%s" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } + + labels = { + my_label = "foobar" + } +} +`, image, instance, nictype) +} + +func testAccComputeRegionInstanceTemplate_queueCount(instanceTemplateName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource 
"google_compute_region_instance_template" "foobar" { + name = "%s" + region = "us-central1" + machine_type = "e2-medium" + network_interface { + network = "default" + access_config {} + queue_count = 2 + } + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } +} +`, instanceTemplateName) +} + +func testAccComputeRegionInstanceTemplate_managedEnvoy(suffix string) string { + return fmt.Sprintf(` +data "google_compute_default_service_account" "default" { +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + machine_type = "e2-medium" + region = "us-central1" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = false + automatic_restart = true + } + + metadata = { + gce-software-declaration = <<-EOF + { + "softwareRecipes": [{ + "name": "install-gce-service-proxy-agent", + "desired_state": "INSTALLED", + "installSteps": [{ + "scriptRun": { + "script": "#! 
/bin/bash\nZONE=$(curl --silent http://metadata.google.internal/computeMetadata/v1/instance/zone -H Metadata-Flavor:Google | cut -d/ -f4 )\nexport SERVICE_PROXY_AGENT_DIRECTORY=$(mktemp -d)\nsudo gsutil cp gs://gce-service-proxy-"$ZONE"/service-proxy-agent/releases/service-proxy-agent-0.2.tgz "$SERVICE_PROXY_AGENT_DIRECTORY" || sudo gsutil cp gs://gce-service-proxy/service-proxy-agent/releases/service-proxy-agent-0.2.tgz "$SERVICE_PROXY_AGENT_DIRECTORY"\nsudo tar -xzf "$SERVICE_PROXY_AGENT_DIRECTORY"/service-proxy-agent-0.2.tgz -C "$SERVICE_PROXY_AGENT_DIRECTORY"\n"$SERVICE_PROXY_AGENT_DIRECTORY"/service-proxy-agent/service-proxy-agent-bootstrap.sh" + } + }] + }] + } + EOF + gce-service-proxy = <<-EOF + { + "api-version": "0.2", + "proxy-spec": { + "proxy-port": 15001, + "network": "my-network", + "tracing": "ON", + "access-log": "/var/log/envoy/access.log" + } + "service": { + "serving-ports": [80, 81] + }, + "labels": { + "app_name": "bookserver_app", + "app_version": "STABLE" + } + } + EOF + enable-guest-attributes = "true" + enable-osconfig = "true" + + } + + service_account { + email = data.google_compute_default_service_account.default.email + scopes = ["cloud-platform"] + } + + labels = { + gce-service-proxy = "on" + } +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_spot(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = true + automatic_restart = false + provisioning_model = "SPOT" + instance_termination_action = "STOP" + } + + metadata = { + foo = "bar" + } 
+ + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} + +func testAccComputeRegionInstanceTemplate_spot_maxRunDuration(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + preemptible = true + automatic_restart = false + provisioning_model = "SPOT" + instance_termination_action = "DELETE" +{{- if ne $.TargetVersionName "ga" }} + max_run_duration { + nanos = 123 + seconds = 60 + } +{{- end }} + + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + automatic_restart = false + provisioning_model = "STANDARD" + instance_termination_action = "STOP" + max_run_duration { + nanos = 123 + seconds = 60 + } + on_instance_stop_action { + discard_local_ssd = true + } + + } + + metadata = { + foo = "bar" + } + + service_account { + scopes 
= ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} +{{- end }} + +func testAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + scheduling { + local_ssd_recovery_timeout { + nanos = 0 + seconds = 3600 + } + } + + metadata = { + foo = "bar" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRegionalInstanceTemplate_partnerMetadata(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "tf-test-instance-template-%s" + region = "us-central1" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + partner_metadata = { + "test.compute.googleapis.com" = jsonencode({ + entries = { + key1 = "value1" + key2 = 2 + key3 = { + key31 = "value31" + } + } + }) + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} +`, suffix) +} +{{- end }} + +func testAccComputeRegionInstanceTemplate_sourceSnapshotEncryptionKey(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_kms_key_ring" "ring" { + name = "%{kms_ring_name}" + location = 
"us-central1" +} + +data "google_kms_crypto_key" "key" { + name = "%{kms_key_name}" + key_ring = data.google_kms_key_ring.ring.id +} + +resource "google_service_account" "test" { + account_id = "tf-test-sa-%{random_suffix}" + display_name = "KMS Ops Account" +} + +resource "google_kms_crypto_key_iam_member" "crypto_key" { + crypto_key_id = data.google_kms_crypto_key.key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:${google_service_account.test.email}" +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "persistent" { + name = "tf-test-debian-disk-%{random_suffix}" + image = data.google_compute_image.debian.self_link + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "snapshot" { + name = "tf-test-my-snapshot-%{random_suffix}" + source_disk = google_compute_disk.persistent.id + zone = "us-central1-a" + snapshot_encryption_key { + kms_key_self_link = data.google_kms_crypto_key.key.id + kms_key_service_account = google_service_account.test.email + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key + ] +} + +resource "google_compute_region_instance_template" "template" { + name = "tf-test-instance-template-%{random_suffix}" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_snapshot = google_compute_snapshot.snapshot.self_link + source_snapshot_encryption_key { + kms_key_self_link = data.google_kms_crypto_key.key.id + kms_key_service_account = google_service_account.test.email + } + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key + ] +} +`, context) +} + +func testAccComputeRegionInstanceTemplate_sourceImageEncryptionKey(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_kms_key_ring" "ring" { + name = "%{kms_ring_name}" + location = 
"us-central1" +} + +data "google_kms_crypto_key" "key" { + name = "%{kms_key_name}" + key_ring = data.google_kms_key_ring.ring.id +} + +resource "google_service_account" "test" { + account_id = "tf-test-sa-%{random_suffix}" + display_name = "KMS Ops Account" +} + +resource "google_kms_crypto_key_iam_member" "crypto_key" { + crypto_key_id = data.google_kms_crypto_key.key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:${google_service_account.test.email}" +} + +data "google_compute_image" "debian" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_image" "image" { + name = "debian-image" + source_image = data.google_compute_image.debian.self_link + image_encryption_key { + kms_key_self_link = data.google_kms_crypto_key.key.id + kms_key_service_account = google_service_account.test.email + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key + ] +} + + +resource "google_compute_region_instance_template" "template" { + name = "tf-test-instance-template-%{random_suffix}" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = google_compute_image.image.self_link + source_image_encryption_key { + kms_key_self_link = data.google_kms_crypto_key.key.id + kms_key_service_account = google_service_account.test.email + } + auto_delete = true + boot = true + } + + network_interface { + network = "default" + } + + depends_on = [ + google_kms_crypto_key_iam_member.crypto_key + ] +} +`, context) +} + +func testAccComputeRegionInstanceTemplate_resourceManagerTags(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_tags_tag_key" "key" { + parent = "projects/%{project}" + short_name = "foobarbaz%{random_suffix}" + description = "For foo/bar resources." +} + +resource "google_tags_tag_value" "value" { + parent = "tagKeys/${google_tags_tag_key.key.name}" + short_name = "foo%{random_suffix}" + description = "For foo resources." 
+} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_region_instance_template" "foobar" { + name = "%{instance_name}" + machine_type = "e2-medium" + region = "us-central1" + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + disk_size_gb = 10 + boot = true + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + } + } + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + } + + network_interface { + network = "default" + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_network_endpoint_group_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_network_endpoint_group_test.go.tmpl new file mode 100644 index 000000000000..82aa1567fe05 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_network_endpoint_group_test.go.tmpl @@ -0,0 +1,78 @@ +package compute_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeRegionNetworkEndpointGroup_negWithServerlessDeployment(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionNetworkEndpointGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionNetworkEndpointGroup_negWithServerlessDeployment(context), + }, + { + ResourceName: 
"google_compute_region_network_endpoint_group.test_neg", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRegionNetworkEndpointGroup_negWithServerlessDeployment(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_api_gateway_api" "api_gw" { + api_id = "tf-test-%{random_suffix}" +} + +resource "google_api_gateway_api_config" "api_gw" { + api = google_api_gateway_api.api_gw.api_id + api_config_id = "tf-test-config-%{random_suffix}" + + openapi_documents { + document { + path = "spec.yaml" + contents = filebase64("test-fixtures/openapi.yaml") + } + } + + lifecycle { + create_before_destroy = true + } +} + +resource "google_api_gateway_gateway" "api_gw" { + api_config = google_api_gateway_api_config.api_gw.id + gateway_id = "tf-test-%{random_suffix}" +} + +resource "google_compute_region_network_endpoint_group" "test_neg" { + name = "tf-test-neg-%{random_suffix}" + network_endpoint_type = "SERVERLESS" + region = "us-central1" + serverless_deployment { + platform = "apigateway.googleapis.com" + url_mask = format("%s/hello", trimprefix(google_api_gateway_gateway.api_gw.default_hostname, "tf-test-%{random_suffix}")) + } +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_network_endpoint_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_network_endpoint_test.go.tmpl new file mode 100644 index 000000000000..10ee3c6c45e4 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_network_endpoint_test.go.tmpl @@ -0,0 +1,319 @@ +package compute_test +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccComputeRegionNetworkEndpoint_regionNetworkEndpointBasic(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "modified_port": 100, + "add1_port": 101, + "add2_port": 102, + } + negId := fmt.Sprintf("projects/%s/regions/%s/networkEndpointGroups/tf-test-neg-%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), context["random_suffix"]) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one endpoint + Config: testAccComputeRegionNetworkEndpoint_regionNetworkEndpointBasic(context), + }, + { + ResourceName: "google_compute_region_network_endpoint.default", + ImportState: true, + ImportStateVerify: true, + }, + { + // Force-recreate old endpoint + Config: testAccComputeRegionNetworkEndpoint_regionNetworkEndpointsModified(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionNetworkEndpointWithPortsDestroyed(t, negId, "90"), + ), + }, + { + ResourceName: "google_compute_region_network_endpoint.default", + ImportState: true, + ImportStateVerify: true, + }, + { + // Add two new endpoints + Config: testAccComputeRegionNetworkEndpoint_regionNetworkEndpointsAdditional(context), + }, + { + ResourceName: "google_compute_region_network_endpoint.default", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_region_network_endpoint.add1", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_region_network_endpoint.add2", + ImportState: true, + ImportStateVerify: true, + }, + { + // Remove add1 and add2 endpoints + Config: 
testAccComputeRegionNetworkEndpoint_regionNetworkEndpointsModified(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionNetworkEndpointWithPortsDestroyed(t, negId, "90"), + ), + }, + { + ResourceName: "google_compute_region_network_endpoint.default", + ImportState: true, + ImportStateVerify: true, + }, + { + // Delete all endpoints + Config: testAccComputeRegionNetworkEndpoint_noRegionNetworkEndpoints(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionNetworkEndpointWithPortsDestroyed(t, negId, "100"), + ), + }, + }, + }) +} + +func testAccComputeRegionNetworkEndpoint_regionNetworkEndpointBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_network_endpoint" "default" { + region = "us-central1" + region_network_endpoint_group = google_compute_region_network_endpoint_group.neg.id + + ip_address = "8.8.8.8" + port = 443 +} +`, context) + testAccComputeRegionNetworkEndpoint_noRegionNetworkEndpoints(context) +} + +func testAccComputeRegionNetworkEndpoint_regionNetworkEndpointsModified(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_network_endpoint" "default" { + region = "us-central1" + region_network_endpoint_group = google_compute_region_network_endpoint_group.neg.name + + ip_address = "8.8.8.8" + port = "%{modified_port}" +} +`, context) + testAccComputeRegionNetworkEndpoint_noRegionNetworkEndpoints(context) +} + +func testAccComputeRegionNetworkEndpoint_regionNetworkEndpointsAdditional(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_network_endpoint" "default" { + region = "us-central1" + region_network_endpoint_group = google_compute_region_network_endpoint_group.neg.id + + ip_address = "8.8.8.8" + port = "%{modified_port}" +} + +resource "google_compute_region_network_endpoint" "add1" { + region = "us-central1" + region_network_endpoint_group = 
google_compute_region_network_endpoint_group.neg.id + + ip_address = "8.8.8.8" + port = "%{add1_port}" +} + +resource "google_compute_region_network_endpoint" "add2" { + region = "us-central1" + region_network_endpoint_group = google_compute_region_network_endpoint_group.neg.name + + ip_address = "8.8.8.8" + port = "%{add2_port}" +} +`, context) + testAccComputeRegionNetworkEndpoint_noRegionNetworkEndpoints(context) +} + +{{ if ne $.TargetVersionName `ga` -}} + +func TestAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + negId := fmt.Sprintf("projects/%s/regions/%s/networkEndpointGroups/tf-test-portmap-neg%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), context["random_suffix"]) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapExample(context), + }, + { + ResourceName: "google_compute_region_network_endpoint.region_network_endpoint_portmap", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance", "region", "region_network_endpoint_group"}, + }, + { + // Delete all endpoints + Config: testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapNoEndpointExample(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionNetworkEndpointWithPortsDestroyed(t, negId, "80"), + ), + }, + }, + }) +} + +func testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapNoEndpointExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "network%{random_suffix}" + auto_create_subnetworks = false + provider = google-beta +} + +resource "google_compute_subnetwork" 
"default" { + name = "subnetwork%{random_suffix}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id + provider = google-beta +} + +resource "google_compute_region_network_endpoint_group" default { + name = "tf-test-portmap-neg%{random_suffix}" + region = "us-central1" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + + network_endpoint_type = "GCE_VM_IP_PORTMAP" + provider = google-beta +} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" + provider = google-beta +} + +resource "google_compute_instance" "default" { + name = "instance%{random_suffix}" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + subnetwork = google_compute_subnetwork.default.id + access_config { + } + } + provider = google-beta +} +`, context) +} + +func testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapExample(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_network_endpoint" "region_network_endpoint_portmap" { + region_network_endpoint_group = google_compute_region_network_endpoint_group.default.name + region = "us-central1" + instance = google_compute_instance.default.self_link + port = 80 + ip_address = google_compute_instance.default.network_interface[0].network_ip + client_destination_port = 8080 + provider = google-beta +} +`, context) + testAccComputeRegionNetworkEndpoint_regionNetworkEndpointPortmapNoEndpointExample(context) +} +{{- end }} + +func testAccComputeRegionNetworkEndpoint_noRegionNetworkEndpoints(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_network_endpoint_group" "neg" { + name = "tf-test-neg-%{random_suffix}" + region = "us-central1" + network = 
google_compute_network.default.self_link + network_endpoint_type = "INTERNET_IP_PORT" +} + +resource "google_compute_network" "default" { + name = "tf-test-neg-network-%{random_suffix}" + auto_create_subnetworks = false +} +`, context) +} + +// testAccCheckComputeRegionNetworkEndpointDestroyed makes sure the endpoint with +// given Terraform resource name and previous information (obtained from Exists) +// was destroyed properly. +func testAccCheckComputeRegionNetworkEndpointWithPortsDestroyed(t *testing.T, negId string, ports ...string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundPorts, err := testAccComputeRegionNetworkEndpointListEndpointPorts(t, negId) + if err != nil { + return fmt.Errorf("unable to confirm endpoints with ports %+v was destroyed: %v", ports, err) + } + for _, p := range ports { + if _, ok := foundPorts[p]; ok { + return fmt.Errorf("region network endpoint with port %s still exists", p) + } + } + + return nil + } +} + +func testAccComputeRegionNetworkEndpointListEndpointPorts(t *testing.T, negId string) (map[string]struct{}, error) { + config := acctest.GoogleProviderConfig(t) + + {{ if eq $.TargetVersionName `ga` }} + url := fmt.Sprintf("https://www.googleapis.com/compute/v1/%s/listNetworkEndpoints", negId) + {{- else }} + url := fmt.Sprintf("https://www.googleapis.com/compute/beta/%s/listNetworkEndpoints", negId) + {{- end }} + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: url, + UserAgent: config.UserAgent, + }) + if err != nil { + return nil, err + } + + v, ok := res["items"] + if !ok || v == nil { + return nil, nil + } + items := v.([]interface{}) + ports := make(map[string]struct{}) + for _, item := range items { + endptWithHealth := item.(map[string]interface{}) + v, ok := endptWithHealth["networkEndpoint"] + if !ok || v == nil { + continue + } + endpt := v.(map[string]interface{}) + ports[fmt.Sprintf("%v", endpt["port"])] = struct{}{} + 
} + return ports, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_per_instance_config_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_per_instance_config_test.go new file mode 100644 index 000000000000..0b9e84cc5974 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_per_instance_config_test.go @@ -0,0 +1,695 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccComputeRegionPerInstanceConfig_statefulBasic(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + suffix := acctest.RandString(t, 10) + rigmName := fmt.Sprintf("tf-test-rigm-%s", suffix) + context := map[string]interface{}{ + "rigm_name": rigmName, + "random_suffix": suffix, + "config_name": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name2": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name3": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name4": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + } + rigmId := fmt.Sprintf("projects/%s/regions/%s/instanceGroupManagers/%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), rigmName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one endpoint + Config: testAccComputeRegionPerInstanceConfig_statefulBasic(context), + }, + { + ResourceName: "google_compute_region_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"remove_instance_state_on_destroy", "region"}, + }, + { + // Force-recreate old config + Config: testAccComputeRegionPerInstanceConfig_statefulModified(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionPerInstanceConfigDestroyed(t, rigmId, context["config_name"].(string)), + ), + }, + { + ResourceName: "google_compute_region_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "region"}, + }, + { + // Add two new endpoints + Config: testAccComputeRegionPerInstanceConfig_statefulAdditional(context), + }, + { + ResourceName: "google_compute_region_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "region"}, + }, + { + ResourceName: "google_compute_region_per_instance_config.with_disks", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"most_disruptive_allowed_action", "minimal_action", "remove_instance_state_on_destroy"}, + }, + { + ResourceName: "google_compute_region_per_instance_config.add2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "region"}, + }, + { + // delete all configs + Config: testAccComputeRegionPerInstanceConfig_rigm(context), + Check: resource.ComposeTestCheckFunc( + // Config with remove_instance_state_on_destroy = false won't be destroyed (config4) + testAccCheckComputeRegionPerInstanceConfigDestroyed(t, rigmId, context["config_name2"].(string)), + testAccCheckComputeRegionPerInstanceConfigDestroyed(t, rigmId, context["config_name3"].(string)), + ), + }, + }, + }) +} + +func TestAccComputeRegionPerInstanceConfig_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "rigm_name": fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)), + 
"config_name": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one config + Config: testAccComputeRegionPerInstanceConfig_statefulBasic(context), + }, + { + ResourceName: "google_compute_region_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "region"}, + }, + { + // Update an existing config + Config: testAccComputeRegionPerInstanceConfig_update(context), + }, + { + ResourceName: "google_compute_region_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "region"}, + }, + }, + }) +} + +func TestAccComputeRegionPerInstanceConfig_statefulIps(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "rigm_name": fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)), + "config_name": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "network": fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)), + "subnetwork": fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)), + "address1": fmt.Sprintf("tf-test-rigm-address%s", acctest.RandString(t, 10)), + "address2": fmt.Sprintf("tf-test-rigm-address%s", acctest.RandString(t, 10)), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Create one config + Config: testAccComputeRegionPerInstanceConfig_statefulIpsBasic(context), + }, + { + ResourceName: "google_compute_region_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"remove_instance_state_on_destroy", "region"}, + }, + { + // Update an existing config + Config: testAccComputeRegionPerInstanceConfig_statefulIpsUpdate(context), + }, + { + ResourceName: "google_compute_region_per_instance_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_state_on_destroy", "region"}, + }, + }, + }) +} + +func TestAccComputeRegionPerInstanceConfig_removeInstanceOnDestroy(t *testing.T) { + t.Parallel() + + rigmName := fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "rigm_name": rigmName, + "config_name": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "config_name2": fmt.Sprintf("instance-%s", acctest.RandString(t, 10)), + "network": fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)), + "subnetwork": fmt.Sprintf("tf-test-rigm-%s", acctest.RandString(t, 10)), + "address1": fmt.Sprintf("tf-test-rigm-address%s", acctest.RandString(t, 10)), + "address2": fmt.Sprintf("tf-test-rigm-address%s", acctest.RandString(t, 10)), + } + rigmId := fmt.Sprintf("projects/%s/regions/%s/instanceGroupManagers/%s", + envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), rigmName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionPerInstanceConfig_removeInstanceOnDestroyBefore(context), + }, + { + ResourceName: "google_compute_region_per_instance_config.config_one", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + ResourceName: "google_compute_region_per_instance_config.config_two", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + Config: 
testAccComputeRegionPerInstanceConfig_removeInstanceOnDestroyAfter(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionPerInstanceConfigDestroyed(t, rigmId, context["config_name"].(string)), + testAccCheckComputeRegionPerInstanceConfigInstanceDestroyed(t, rigmId, context["config_name"].(string)), + ), + }, + { + ResourceName: "google_compute_region_per_instance_config.config_two", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_instance_on_destroy", "zone"}, + }, + { + // delete all configs + Config: testAccComputeRegionPerInstanceConfig_rigm(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionPerInstanceConfigDestroyed(t, rigmId, context["config_name2"].(string)), + testAccCheckComputeRegionPerInstanceConfigInstanceDestroyed(t, rigmId, context["config_name2"].(string)), + ), + }, + }, + }) +} + +func testAccComputeRegionPerInstanceConfig_statefulBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_per_instance_config" "default" { + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + } + } +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + +func testAccComputeRegionPerInstanceConfig_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_per_instance_config" "default" { + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "foo" + updated = "12345" + } + } +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + +func testAccComputeRegionPerInstanceConfig_statefulModified(context map[string]interface{}) string { + return 
acctest.Nprintf(` +resource "google_compute_region_per_instance_config" "default" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name2}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + } + } +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + +func testAccComputeRegionPerInstanceConfig_statefulAdditional(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_per_instance_config" "default" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name2}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + } + } +} + +resource "google_compute_region_per_instance_config" "with_disks" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name3}" + most_disruptive_allowed_action = "REFRESH" + minimal_action = "REFRESH" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + meta = "123" + } + + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + + disk { + device_name = "my-stateful-disk3" + source = google_compute_disk.disk2.id + } + } +} + +resource "google_compute_region_per_instance_config" "add2" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name4}" + preserved_state { + metadata = { + foo = "abc" + } + } +} + +resource "google_compute_disk" "disk" { + name = 
"test-disk-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk2" { + name = "test-disk3-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "https://www.googleapis.com/compute/v1/projects/centos-cloud/global/images/centos-7-v20210217" + physical_block_size_bytes = 4096 +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + +func testAccComputeRegionPerInstanceConfig_rigm(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance_template" "rigm-basic" { + name = "tf-test-rigm-%{random_suffix}" + machine_type = "e2-medium" + can_ip_forward = false + tags = ["foo", "bar"] + + disk { + source_image = data.google_compute_image.my_image.self_link + auto_delete = true + boot = true + device_name = "my-stateful-disk" + } + + network_interface { + network = "default" + } + + service_account { + scopes = ["userinfo-email", "compute-ro", "storage-ro"] + } +} + +resource "google_compute_region_instance_group_manager" "rigm" { + description = "Terraform test instance group manager" + name = "%{rigm_name}" + + version { + name = "prod" + instance_template = google_compute_instance_template.rigm-basic.self_link + } + + base_instance_name = "tf-test-rigm-no-tp" + + update_policy { + instance_redistribution_type = "NONE" + type = "OPPORTUNISTIC" + minimal_action = "REPLACE" + max_surge_fixed = 0 + max_unavailable_fixed = 6 + } +} +`, context) +} + + +func testAccComputeRegionPerInstanceConfig_removeInstanceOnDestroyBefore(context map[string]interface{}) string { + return 
acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "%{network}" +} + +resource "google_compute_subnetwork" "default" { + name = "%{subnetwork}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id +} + +resource "google_compute_address" "static_internal_ip" { + name = "%{address1}" + address_type = "INTERNAL" +} + +resource "google_compute_address" "static_external_ip" { + name = "%{address2}" + address_type = "EXTERNAL" +} + +resource "google_compute_region_per_instance_config" "config_one" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-one" + } + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + internal_ip { + ip_address { + address = google_compute_address.static_internal_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + external_ip { + ip_address { + address = google_compute_address.static_external_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + } +} + +resource "google_compute_disk" "disk" { + name = "test-disk-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} + +resource "google_compute_region_per_instance_config" "config_two" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name2}" + 
remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-two" + } + } +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + +func testAccComputeRegionPerInstanceConfig_removeInstanceOnDestroyAfter(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_per_instance_config" "config_two" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name2}" + remove_instance_on_destroy = true + preserved_state { + metadata = { + asdf = "config-two" + } + } +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + +func testAccComputeRegionPerInstanceConfig_statefulIpsBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "%{network}" +} + +resource "google_compute_subnetwork" "default" { + name = "%{subnetwork}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id +} + +resource "google_compute_address" "static_internal_ip" { + name = "%{address1}" + address_type = "INTERNAL" +} + +resource "google_compute_address" "static_external_ip" { + name = "%{address2}" + address_type = "EXTERNAL" +} + +resource "google_compute_region_per_instance_config" "default" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + } + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + internal_ip { + ip_address { + address = google_compute_address.static_internal_ip.self_link + } + auto_delete = 
"NEVER" + interface_name = "nic0" + } + external_ip { + ip_address { + address = google_compute_address.static_external_ip.self_link + } + auto_delete = "NEVER" + interface_name = "nic0" + } + } +} + +resource "google_compute_disk" "disk" { + name = "test-disk-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + +func testAccComputeRegionPerInstanceConfig_statefulIpsUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "%{network}" +} + +resource "google_compute_subnetwork" "default" { + name = "%{subnetwork}" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.default.id +} + +resource "google_compute_address" "static_internal_ip" { + name = "%{address1}" + address_type = "INTERNAL" +} + +resource "google_compute_address" "static_external_ip" { + name = "%{address2}" + address_type = "EXTERNAL" +} + +resource "google_compute_region_per_instance_config" "default" { + region = google_compute_region_instance_group_manager.rigm.region + region_instance_group_manager = google_compute_region_instance_group_manager.rigm.name + name = "%{config_name}" + remove_instance_state_on_destroy = true + preserved_state { + metadata = { + asdf = "asdf" + } + disk { + device_name = "my-stateful-disk1" + source = google_compute_disk.disk.id + } + + disk { + device_name = "my-stateful-disk2" + source = google_compute_disk.disk1.id + } + internal_ip { + ip_address { + address = google_compute_address.static_internal_ip.self_link + } + auto_delete = "ON_PERMANENT_INSTANCE_DELETION" + interface_name = "nic0" + } + 
external_ip { + ip_address { + address = google_compute_address.static_external_ip.self_link + } + auto_delete = "ON_PERMANENT_INSTANCE_DELETION" + interface_name = "nic0" + } + } +} + +resource "google_compute_disk" "disk" { + name = "test-disk-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-8-jessie-v20170523" + physical_block_size_bytes = 4096 +} + +resource "google_compute_disk" "disk1" { + name = "test-disk2-%{random_suffix}" + type = "pd-ssd" + zone = "us-central1-c" + image = "debian-cloud/debian-11" + physical_block_size_bytes = 4096 +} +`, context) + testAccComputeRegionPerInstanceConfig_rigm(context) +} + +// Checks that the per instance config with the given name was destroyed +func testAccCheckComputeRegionPerInstanceConfigDestroyed(t *testing.T, rigmId, configName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundNames, err := testAccComputePerInstanceConfigListNames(t, rigmId) + if err != nil { + return fmt.Errorf("unable to confirm config with name %s was destroyed: %v", configName, err) + } + if _, ok := foundNames[configName]; ok { + return fmt.Errorf("config with name %s still exists", configName) + } + + return nil + } +} + +// Checks that the instance with the given name was destroyed. 
+func testAccCheckComputeRegionPerInstanceConfigInstanceDestroyed(t *testing.T, rigmId, configName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + foundNames, err := testAccComputePerInstanceConfigListInstances(t, rigmId) + if err != nil { + return fmt.Errorf("unable to confirm instance with name %s was destroyed: %v", configName, err) + } + if _, ok := foundNames[configName]; ok { + return fmt.Errorf("instance with name %s still exists", configName) + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_security_policy_rule_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_security_policy_rule_test.go.tmpl new file mode 100644 index 000000000000..4b161b5c7fe3 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_security_policy_rule_test.go.tmpl @@ -0,0 +1,1035 @@ +package compute_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRuleBasicUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRulePreUpdate(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRulePostUpdate(context), + }, + { + ResourceName: 
"google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRulePreUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "default" { + region = "us-west2" + name = "tf-test%{random_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.default.name + region = "us-west2" + description = "basic rule pre update" + action = "allow" + priority = 100 + preview = false + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["192.168.0.0/16", "10.0.0.0/8"] + } + } +} +`, context) +} + +func testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRulePostUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "default" { + region = "us-west2" + name = "tf-test%{random_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.default.name + region = "us-west2" + description = "basic rule post update" + action = "deny(403)" + priority = 100 + preview = true + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["172.16.0.0/12"] + } + } +} +`, context) +} + +func TestAccComputeRegionSecurityPolicyRule_withRuleExpr(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionSecurityPolicyRuleDestroyProducer(t), + 
Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRulePreUpdate(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_withRuleExpr(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionSecurityPolicyRule_withRuleExpr(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "default" { + region = "us-west2" + name = "tf-test%{random_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + region = "us-west2" + security_policy = google_compute_region_security_policy.default.name + description = "basic rule post update withRuleExpr" + action = "allow" + priority = "100" + match { + expr { + expression = "evaluatePreconfiguredExpr('xss-canary')" + } + } + preview = true +} +`, context) +} + +func TestAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRuleNetworkMatchUpdate(t *testing.T) { + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRuleNetworkMatchBasic(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule_network_match", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRuleNetworkMatchUpdate(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule_network_match", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRuleNetworkMatchUpdate2(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule_network_match", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRuleNetworkMatchBasic(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule_network_match", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRuleNetworkMatchBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "us-west2" + name = "tf-test-policyddos%{random_suffix}" + description = "region security policy for network match" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "us-west2" + name = "tf-test-edgesec%{random_suffix}" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_region_security_policy" "policynetworkmatch" { + region = "us-west2" + name = "tf-test-polnetmatch%{random_suffix}" + description = "region security policy for network match" + type = "CLOUD_ARMOR_NETWORK" + user_defined_fields { + name = "SIG1_AT_0" + base = "TCP" + offset = 8 + size = 2 + mask = "0x8F00" + } + user_defined_fields { + name = "SIG2_AT_8" + base = "TCP" + offset = 8 + size = 2 + mask = "0x8F00" + } + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + 
+resource "google_compute_region_security_policy_rule" "policy_rule_network_match" { + region = "us-west2" + security_policy = google_compute_region_security_policy.policynetworkmatch.name + priority = 100 + network_match { + src_ip_ranges = ["10.10.0.0/16"] + } + action = "allow" + preview = true +} +`, context) +} + +func testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRuleNetworkMatchUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "us-west2" + name = "tf-test-policyddos%{random_suffix}" + description = "region security policy for network match" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "us-west2" + name = "tf-test-edgesec%{random_suffix}" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_region_security_policy" "policynetworkmatch" { + region = "us-west2" + name = "tf-test-polnetmatch%{random_suffix}" + description = "region security policy for network match" + type = "CLOUD_ARMOR_NETWORK" + user_defined_fields { + name = "SIG1_AT_0" + base = "TCP" + offset = 8 + size = 2 + mask = "0x8F00" + } + user_defined_fields { + name = "SIG2_AT_8" + base = "TCP" + offset = 8 + size = 2 + mask = "0x8F00" + } + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_region_security_policy_rule" "policy_rule_network_match" { + region = "us-west2" + security_policy = google_compute_region_security_policy.policynetworkmatch.name + priority = 100 + network_match { + src_ip_ranges = ["10.10.0.0/16"] + src_asns = [6939] + src_ports = [443] + src_region_codes = ["US"] + ip_protocols = ["UDP"] + dest_ip_ranges = ["10.0.0.0/8"] + dest_ports = [80] + user_defined_fields { + name = "SIG1_AT_0" 
+ values = ["0x8700"] + } + } + action = "allow" + preview = true +} +`, context) +} + +func testAccComputeRegionSecurityPolicyRule_regionSecurityPolicyRuleNetworkMatchUpdate2(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "us-west2" + name = "tf-test-policyddos%{random_suffix}" + description = "region security policy for network match" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + region = "us-west2" + name = "tf-test-edgesec%{random_suffix}" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_region_security_policy" "policynetworkmatch" { + region = "us-west2" + name = "tf-test-polnetmatch%{random_suffix}" + description = "region security policy for network match" + type = "CLOUD_ARMOR_NETWORK" + user_defined_fields { + name = "SIG1_AT_0" + base = "TCP" + offset = 8 + size = 2 + mask = "0x8F00" + } + user_defined_fields { + name = "SIG2_AT_8" + base = "TCP" + offset = 8 + size = 2 + mask = "0x8F00" + } + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_region_security_policy_rule" "policy_rule_network_match" { + region = "us-west2" + security_policy = google_compute_region_security_policy.policynetworkmatch.name + priority = 100 + network_match { + src_ip_ranges = ["10.0.0.0/8"] + src_asns = [15169] + src_ports = [80] + src_region_codes = ["AU"] + ip_protocols = ["TCP"] + dest_ip_ranges = ["10.10.0.0/16"] + dest_ports = [443] + user_defined_fields { + name = "SIG2_AT_8" + values = ["0x8700","0x8F00"] + } + } + action = "allow" + preview = true +} +`, context) +} + +func TestAccComputeRegionSecurityPolicyRule_withPreconfiguredWafConfig(t *testing.T) { + t.Parallel() + + context := 
map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicyRule_withPreconfiguredWafConfig_create(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_withPreconfiguredWafConfig_update(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_withPreconfiguredWafConfig_clear(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_compute_region_security_policy_rule.policy_rule", "preconfigured_waf_config.0"), + ), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionSecurityPolicyRule_withPreconfiguredWafConfig_create(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "policy" { + name = "tf-test%{random_suffix}" + region = "us-west2" + type = "CLOUD_ARMOR" + description = "Regional security policy - create" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.policy.name + region = "us-west2" + description = "Rule with preconfiguredWafConfig - create" + action = "deny" + priority = "1000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["192.168.0.0/16", "10.0.0.0/8"] + } + } + preconfigured_waf_config { + exclusion { + 
request_cookie { + operator = "EQUALS_ANY" + } + request_header { + operator = "EQUALS" + value = "Referer" + } + request_uri { + operator = "STARTS_WITH" + value = "/admin" + } + request_query_param { + operator = "EQUALS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + target_rule_set = "sqli-stable" + } + exclusion { + request_query_param { + operator = "CONTAINS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + target_rule_set = "xss-stable" + } + } + preview = false +} +`, context) +} + +func testAccComputeRegionSecurityPolicyRule_withPreconfiguredWafConfig_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "policy" { + name = "tf-test%{random_suffix}" + region = "us-west2" + type = "CLOUD_ARMOR" + description = "Regional security policy - update" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.policy.name + region = "us-west2" + description = "Rule with preconfiguredWafConfig - update" + action = "deny" + priority = "1000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["192.168.0.0/16", "10.0.0.0/8"] + } + } + preconfigured_waf_config { + exclusion { + request_uri { + operator = "STARTS_WITH" + value = "/admin" + } + target_rule_set = "rce-stable" + } + exclusion { + request_query_param { + operator = "CONTAINS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + request_query_param { + operator = "EQUALS" + value = "description" + } + request_cookie { + operator = "CONTAINS" + value = "TokenExpired" + } + target_rule_set = "xss-stable" + target_rule_ids = [ + "owasp-crs-v030001-id941330-xss", + "owasp-crs-v030001-id941340-xss", + ] + } + } + preview = false +} +`, context) +} + +func 
testAccComputeRegionSecurityPolicyRule_withPreconfiguredWafConfig_clear(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "policy" { + name = "tf-test%{random_suffix}" + region = "us-west2" + type = "CLOUD_ARMOR" + description = "Regional security policy - clear" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.policy.name + region = "us-west2" + description = "Rule with preconfiguredWafConfig - clear" + action = "deny" + priority = "1000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["192.168.0.0/16", "10.0.0.0/8"] + } + } + preview = false +} +`, context) +} + +func TestAccComputeRegionSecurityPolicyRule_withRateLimitOptions(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOptionsCreate(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOptionsUpdate(context), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionSecurityPolicyRule_withRateLimitOptionsCreate(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_region_security_policy" "default" { + region = "us-west2" + name = "tf-test%{random_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR" + 
} + + resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.default.name + region = "us-west2" + description = "rule create with rate limit" + priority = 101 + action = "rate_based_ban" + rate_limit_options { + rate_limit_threshold { + count = 500 + interval_sec = 10 + } + conform_action = "allow" + exceed_action = "deny(404)" + enforce_on_key = "ALL" + ban_threshold { + count = 750 + interval_sec = 180 + } + ban_duration_sec = 180 + } + match { + config { + src_ip_ranges = [ + "*" + ] + } + versioned_expr = "SRC_IPS_V1" + } + } +`, context) +} + +func testAccComputeRegionSecurityPolicyRule_withRateLimitOptionsUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_region_security_policy" "default" { + region = "us-west2" + name = "tf-test%{random_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR" + } + + resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.default.name + region = "us-west2" + description = "rule update with rate limit" + priority = 101 + action = "rate_based_ban" + rate_limit_options { + rate_limit_threshold { + count = 1000 + interval_sec = 30 + } + conform_action = "allow" + exceed_action = "deny(404)" + enforce_on_key = "ALL" + ban_threshold { + count = 2000 + interval_sec = 180 + } + ban_duration_sec = 300 + } + match { + config { + src_ip_ranges = [ + "*" + ] + } + versioned_expr = "SRC_IPS_V1" + } + } +`, context) +} + +func TestAccComputeRegionSecurityPolicyRule_withRateLimit_withEnforceOnKeyConfigs(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeRegionSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyConfigs(spName), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs(spName), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs2(spName), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + +func TestAccComputeRegionSecurityPolicyRule_EnforceOnKeyUpdates(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withoutRateLimitOptions(spName), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { 
+ Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyName(spName), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withEnforceOnKey(spName), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyConfigs(spName), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withEnforceOnKey(spName), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyName(spName), + }, + { + ResourceName: "google_compute_region_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withEnforceOnKey(spName string) string { + return fmt.Sprintf(` +resource "google_compute_region_security_policy" "policy" { + region = "us-west2" + name = "%s" + description = "basic regional policy base" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.policy.name + region = "us-west2" + description = "throttle rule withEnforceOnKey" + action = "throttle" + priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "deny(403)" + + enforce_on_key = "IP" + + rate_limit_threshold { + count = 10 + 
interval_sec = 60 + } + } +} +`, spName) +} + +func testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyConfigs(spName string) string { + return fmt.Sprintf(` +resource "google_compute_region_security_policy" "policy" { + region = "us-west2" + name = "%s" + description = "basic regional policy base" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.policy.name + region = "us-west2" + description = "throttle rule withEnforceOnKeyConfigs" + action = "throttle" + priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "deny(403)" + + enforce_on_key_configs { + enforce_on_key_type = "IP" + } + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + } +} +`, spName) +} + +func testAccComputeRegionSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs(spName string) string { + return fmt.Sprintf(` +resource "google_compute_region_security_policy" "policy" { + region = "us-west2" + name = "%s" + description = "basic regional policy base" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.policy.name + region = "us-west2" + description = "throttle rule with withMultipleEnforceOnKeyConfigs" + action = "throttle" + priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "deny(429)" + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + + enforce_on_key_configs { + enforce_on_key_type = "HTTP_PATH" + } + + enforce_on_key_configs { + enforce_on_key_type = "HTTP_HEADER" + enforce_on_key_name = "user-agent" + } + + enforce_on_key_configs { + enforce_on_key_type = "REGION_CODE" + } 
+ } +} +`, spName) +} + +func testAccComputeRegionSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs2(spName string) string { + return fmt.Sprintf(` +resource "google_compute_region_security_policy" "policy" { + region = "us-west2" + name = "%s" + description = "basic regional policy base" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.policy.name + region = "us-west2" + description = "throttle rule withMultipleEnforceOnKeyConfigs2" + action = "throttle" + priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "deny(429)" + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + + enforce_on_key_configs { + enforce_on_key_type = "REGION_CODE" + } + + enforce_on_key_configs { + enforce_on_key_type = "TLS_JA3_FINGERPRINT" + } + + enforce_on_key_configs { + enforce_on_key_type = "USER_IP" + } + } +} + +`, spName) +} + +func testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withoutRateLimitOptions(spName string) string { + return fmt.Sprintf(` +resource "google_compute_region_security_policy" "policy" { + region = "us-west2" + name = "%s" + description = "basic regional policy base" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.policy.name + region = "us-west2" + description = "basic policy rule withoutRateLimitOptions" + action = "deny(403)" + priority = "100" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } +} + +`, spName) +} + +func testAccComputeRegionSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyName(spName string) string { + return fmt.Sprintf(` +resource "google_compute_region_security_policy" "policy" { + region = "us-west2" + name = "%s" + 
description = "basic regional policy base" + type = "CLOUD_ARMOR" +} + +resource "google_compute_region_security_policy_rule" "policy_rule" { + security_policy = google_compute_region_security_policy.policy.name + region = "us-west2" + description = "throttle rule withEnforceOnKeyName" + action = "throttle" + priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "deny(403)" + + enforce_on_key = "HTTP_HEADER" + enforce_on_key_name = "user-agent" + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + } +} +`, spName) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_security_policy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_security_policy_test.go.tmpl new file mode 100644 index 000000000000..591de2371e5b --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_security_policy_test.go.tmpl @@ -0,0 +1,169 @@ +package compute_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeRegionSecurityPolicy_regionSecurityPolicyBasicUpdateExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicy_basic(context), + }, + { + ResourceName: "google_compute_region_security_policy.regionSecPolicy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccComputeRegionSecurityPolicy_update(context), + }, + { + ResourceName: "google_compute_region_security_policy.regionSecPolicy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionSecurityPolicy_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "regionSecPolicy" { + name = "tf-test%{random_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR_NETWORK" + + ddos_protection_config { + ddos_protection = "STANDARD" + } +} +`, context) +} + +func testAccComputeRegionSecurityPolicy_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "regionSecPolicy" { + name = "tf-test%{random_suffix}" + description = "basic update region security policy" + type = "CLOUD_ARMOR_NETWORK" + + ddos_protection_config { + ddos_protection = "ADVANCED" + } +} +`, context) +} + +func TestAccComputeRegionSecurityPolicy_regionSecurityPolicyUserDefinedFieldsUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSecurityPolicy_withoutUserDefinedFields(context), + }, + { + ResourceName: "google_compute_region_security_policy.regionSecPolicy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicy_withUserDefinedFields(context), + }, + { + ResourceName: "google_compute_region_security_policy.regionSecPolicy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicy_withUserDefinedFieldsUpdate(context), + }, + { + ResourceName: 
"google_compute_region_security_policy.regionSecPolicy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSecurityPolicy_withoutUserDefinedFields(context), + }, + { + ResourceName: "google_compute_region_security_policy.regionSecPolicy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionSecurityPolicy_withoutUserDefinedFields(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "regionSecPolicy" { + name = "tf-test%{random_suffix}" + description = "basic region security policy" + type = "CLOUD_ARMOR_NETWORK" +} +`, context) +} + +func testAccComputeRegionSecurityPolicy_withUserDefinedFields(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "regionSecPolicy" { + name = "tf-test%{random_suffix}" + description = "basic update region security policy" + type = "CLOUD_ARMOR_NETWORK" + user_defined_fields { + name = "SIG1_AT_0" + base = "TCP" + offset = 8 + size = 2 + mask = "0x8F00" + } +} +`, context) +} + +func testAccComputeRegionSecurityPolicy_withUserDefinedFieldsUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_region_security_policy" "regionSecPolicy" { + name = "tf-test%{random_suffix}" + description = "basic update region security policy" + type = "CLOUD_ARMOR_NETWORK" + user_defined_fields { + name = "SIG1_AT_0" + base = "UDP" + offset = 4 + size = 4 + mask = "0xFFFF" + } + user_defined_fields { + name = "SIG2_AT_8" + base = "TCP" + offset = 8 + size = 2 + mask = "0x8700" + } +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_ssl_policy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_ssl_policy_test.go.tmpl new file mode 100644 index 000000000000..0b9a504bc192 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_ssl_policy_test.go.tmpl @@ -0,0 +1,282 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestAccComputeRegionSslPolicy_regionInherit(t *testing.T) { + t.Parallel() + + sslPolicyName := fmt.Sprintf("test-ssl-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSslPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSslRegionDefaultFromEnv(sslPolicyName), + }, + { + ResourceName: "google_compute_region_ssl_policy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSslUpdateRegionDefaultFromEnv(sslPolicyName), + }, + { + ResourceName: "google_compute_region_ssl_policy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionSslPolicy_update(t *testing.T) { + t.Parallel() + + var sslPolicy compute.SslPolicy + sslPolicyName := fmt.Sprintf("test-ssl-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSslPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSslUpdate1(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionSslPolicyExists( + t, "google_compute_region_ssl_policy.update", &sslPolicy), + 
resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "profile", "MODERN"), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "min_tls_version", "TLS_1_0"), + ), + }, + { + ResourceName: "google_compute_region_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSslUpdate2(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionSslPolicyExists( + t, "google_compute_region_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "profile", "RESTRICTED"), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "min_tls_version", "TLS_1_2"), + ), + }, + { + ResourceName: "google_compute_region_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionSslPolicy_update_to_custom(t *testing.T) { + t.Parallel() + + var sslPolicy compute.SslPolicy + sslPolicyName := fmt.Sprintf("test-ssl-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSslPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSslUpdate1(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionSslPolicyExists( + t, "google_compute_region_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "profile", "MODERN"), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "min_tls_version", "TLS_1_0"), + ), + }, + { + ResourceName: "google_compute_region_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSslUpdate3(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeRegionSslPolicyExists( + t, "google_compute_region_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "profile", "CUSTOM"), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "min_tls_version", "TLS_1_1"), + ), + }, + { + ResourceName: "google_compute_region_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionSslPolicy_update_from_custom(t *testing.T) { + t.Parallel() + + var sslPolicy compute.SslPolicy + sslPolicyName := fmt.Sprintf("test-ssl-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSslPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionSslUpdate3(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionSslPolicyExists( + t, "google_compute_region_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "profile", "CUSTOM"), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "min_tls_version", "TLS_1_1"), + ), + }, + { + ResourceName: "google_compute_region_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionSslUpdate1(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionSslPolicyExists( + t, "google_compute_region_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "profile", "MODERN"), + resource.TestCheckResourceAttr( + "google_compute_region_ssl_policy.update", "min_tls_version", "TLS_1_0"), + ), + }, + { + ResourceName: "google_compute_region_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + }, 
+ }) +} + +func testAccCheckComputeRegionSslPolicyExists(t *testing.T, n string, sslPolicy *compute.SslPolicy) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + + found, err := config.NewComputeClient(config.UserAgent).RegionSslPolicies.Get( + project, "us-central1" , name).Do() + if err != nil { + return fmt.Errorf("Error Reading SSL Policy %s: %s", name, err) + } + + if found.Name != name { + return fmt.Errorf("SSL Policy not found") + } + + *sslPolicy = *found + + return nil + } +} + +func testAccComputeRegionSslUpdate1(resourceName string) string { + return fmt.Sprintf(` +resource "google_compute_region_ssl_policy" "update" { + name = "%s" + description = "Generated by TF provider acceptance test" + min_tls_version = "TLS_1_0" + profile = "MODERN" + region = "us-central1" +} +`, resourceName) +} + +func testAccComputeRegionSslUpdate2(resourceName string) string { + return fmt.Sprintf(` +resource "google_compute_region_ssl_policy" "update" { + name = "%s" + description = "Generated by TF provider acceptance test" + min_tls_version = "TLS_1_2" + profile = "RESTRICTED" + region = "us-central1" +} +`, resourceName) +} + +func testAccComputeRegionSslUpdate3(resourceName string) string { + return fmt.Sprintf(` +resource "google_compute_region_ssl_policy" "update" { + name = "%s" + description = "Generated by TF provider acceptance test" + min_tls_version = "TLS_1_1" + profile = "CUSTOM" + region = "us-central1" + custom_features = ["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"] +} +`, resourceName) +} + +func testAccComputeRegionSslRegionDefaultFromEnv(resourceName 
string) string { + return fmt.Sprintf(` +resource "google_compute_region_ssl_policy" "foobar" { + name = "%s" + description = "Generated by TF provider acceptance test" + min_tls_version = "TLS_1_0" + profile = "MODERN" +} +`, resourceName) +} + +func testAccComputeRegionSslUpdateRegionDefaultFromEnv(resourceName string) string { + return fmt.Sprintf(` +resource "google_compute_region_ssl_policy" "foobar" { + name = "%s" + description = "Generated by TF provider acceptance test - updated" + min_tls_version = "TLS_1_0" + profile = "MODERN" +} +`, resourceName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_http_proxy_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_http_proxy_test.go new file mode 100644 index 000000000000..bb480cf9f3b9 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_http_proxy_test.go @@ -0,0 +1,179 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeRegionTargetHttpProxy_update(t *testing.T) { + t.Parallel() + + target := fmt.Sprintf("thttp-test-%s", acctest.RandString(t, 10)) + backend := fmt.Sprintf("thttp-test-%s", acctest.RandString(t, 10)) + hc := fmt.Sprintf("thttp-test-%s", acctest.RandString(t, 10)) + urlmap1 := fmt.Sprintf("thttp-test-%s", acctest.RandString(t, 10)) + urlmap2 := fmt.Sprintf("thttp-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2), + }, + { + ResourceName: 
"google_compute_region_target_http_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2), + }, + { + ResourceName: "google_compute_region_target_http_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpProxy_basic1(target, backend, hc, urlmap1, urlmap2 string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_http_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = google_compute_region_url_map.foobar1.self_link +} + +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = "%s" + default_service = google_compute_region_backend_service.foobar.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "%s" + default_service = google_compute_region_backend_service.foobar.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = 
google_compute_region_backend_service.foobar.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar.self_link + } +} +`, target, backend, hc, urlmap1, urlmap2) +} + +func testAccComputeRegionTargetHttpProxy_basic2(target, backend, hc, urlmap1, urlmap2 string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_http_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + url_map = google_compute_region_url_map.foobar2.self_link +} + +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = "%s" + default_service = google_compute_region_backend_service.foobar.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "%s" + default_service = google_compute_region_backend_service.foobar.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = 
google_compute_region_backend_service.foobar.self_link + } +} +`, target, backend, hc, urlmap1, urlmap2) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go new file mode 100644 index 000000000000..f83a6d4cc6dc --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_https_proxy_test.go @@ -0,0 +1,630 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeRegionTargetHttpsProxy_update(t *testing.T) { + t.Parallel() + + resourceSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_basic1(resourceSuffix), + }, + { + ResourceName: "google_compute_region_target_https_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_basic2(resourceSuffix), + }, + { + ResourceName: "google_compute_region_target_https_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_basic3(resourceSuffix), + }, + { + ResourceName: "google_compute_region_target_https_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_basic1(id string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance 
testing" + name = "httpsproxy-test-%s" + url_map = google_compute_region_url_map.foobar1.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar1.self_link] +} + +resource "google_compute_region_backend_service" "foobar1" { + name = "httpsproxy-test-backend1-%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "httpsproxy-test-backend2-%s" + health_checks = [google_compute_region_health_check.one.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "httpsproxy-test-health-check1-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_health_check" "one" { + name = "httpsproxy-test-health-check2-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = "httpsproxy-test-url-map1-%s" + default_service = google_compute_region_backend_service.foobar1.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar1.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar1.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar1.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "httpsproxy-test-url-map2-%s" + default_service = google_compute_region_backend_service.foobar2.self_link + host_rule { + hosts = ["mysite2.com", "myothersite2.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar2.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = 
google_compute_region_backend_service.foobar2.self_link + } + } + test { + host = "mysite2.com" + path = "/*" + service = google_compute_region_backend_service.foobar2.self_link + } +} + +resource "google_compute_region_ssl_certificate" "foobar1" { + name = "httpsproxy-test-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_region_ssl_certificate" "foobar2" { + name = "httpsproxy-test-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id, id, id) +} + +func testAccComputeRegionTargetHttpsProxy_basic2(id string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "httpsproxy-test-%s" + url_map = google_compute_region_url_map.foobar2.self_link + ssl_certificates = [ + google_compute_region_ssl_certificate.foobar1.self_link, + google_compute_region_ssl_certificate.foobar2.self_link, + ] +} + +resource "google_compute_region_backend_service" "foobar1" { + name = "httpsproxy-test-backend1-%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "httpsproxy-test-backend2-%s" + health_checks = [google_compute_region_health_check.one.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "httpsproxy-test-health-check1-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_health_check" "one" { + name = "httpsproxy-test-health-check2-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = 
"httpsproxy-test-url-map1-%s" + default_service = google_compute_region_backend_service.foobar1.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar1.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar1.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar1.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "httpsproxy-test-url-map2-%s" + default_service = google_compute_region_backend_service.foobar2.self_link + host_rule { + hosts = ["mysite2.com", "myothersite2.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar2.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar2.self_link + } + } + test { + host = "mysite2.com" + path = "/*" + service = google_compute_region_backend_service.foobar2.self_link + } +} + +resource "google_compute_region_ssl_certificate" "foobar1" { + name = "httpsproxy-test-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_region_ssl_certificate" "foobar2" { + name = "httpsproxy-test-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id, id, id) +} + +func testAccComputeRegionTargetHttpsProxy_basic3(id string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "httpsproxy-test-%s" + url_map = google_compute_region_url_map.foobar2.self_link + ssl_certificates = 
[google_compute_region_ssl_certificate.foobar2.self_link] + ssl_policy = google_compute_region_ssl_policy.foobar.self_link +} + +resource "google_compute_region_backend_service" "foobar1" { + name = "httpsproxy-test-backend1-%s" + health_checks = [google_compute_region_health_check.zero.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "httpsproxy-test-backend2-%s" + health_checks = [google_compute_region_health_check.one.self_link] + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "httpsproxy-test-health-check1-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_health_check" "one" { + name = "httpsproxy-test-health-check2-%s" + http_health_check { + port = 443 + } +} + +resource "google_compute_region_url_map" "foobar1" { + name = "httpsproxy-test-url-map1-%s" + default_service = google_compute_region_backend_service.foobar1.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar1.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar1.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar1.self_link + } +} + +resource "google_compute_region_url_map" "foobar2" { + name = "httpsproxy-test-url-map2-%s" + default_service = google_compute_region_backend_service.foobar2.self_link + host_rule { + hosts = ["mysite2.com", "myothersite2.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_region_backend_service.foobar2.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar2.self_link + } + } + test { + host = "mysite2.com" + 
path = "/*" + service = google_compute_region_backend_service.foobar2.self_link + } +} + +resource "google_compute_region_ssl_policy" "foobar" { + name = "sslproxy-test-%s" + description = "my-description" + min_tls_version = "TLS_1_2" + profile = "MODERN" + region = "us-central1" +} + +resource "google_compute_region_ssl_certificate" "foobar1" { + name = "httpsproxy-test-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_region_ssl_certificate" "foobar2" { + name = "httpsproxy-test-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id, id, id, id) +} + +func TestAccComputeRegionTargetHttpsProxy_addSslPolicy_withForwardingRule(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "resource_suffix": acctest.RandString(t, 10), + "project_id": envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetHttpsProxy_withForwardingRule_withSslPolicy(context), + }, + { + ResourceName: "google_compute_region_target_https_proxy.default-https", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + project = "%{project_id}" + region = 
"us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + +resource "google_compute_region_backend_service" "default" { + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # webscoket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] +} + +resource "google_compute_region_url_map" "default-https" { + project = 
"%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_certificate" "foobar0" { + name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + name = "tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} + +func testAccComputeRegionTargetHttpsProxy_withForwardingRule_withSslPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_forwarding_rule" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-frwd-rule-%{resource_suffix}" + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_https_proxy.default-https.self_link + network = google_compute_network.ilb_network.name + subnetwork = google_compute_subnetwork.ilb_subnet.name + ip_address = google_compute_address.consumer_address.id + ip_protocol = "TCP" + port_range = "443" + allow_global_access = "true" + depends_on = [google_compute_subnetwork.ilb_subnet2] +} + 
+resource "google_compute_region_backend_service" "default" { + project = "%{project_id}" + region = "us-central1" + name = "backend-service-%{resource_suffix}" + protocol = "HTTPS" + port_name = "https-server" + load_balancing_scheme = "INTERNAL_MANAGED" + session_affinity = "HTTP_COOKIE" + health_checks = [google_compute_region_health_check.default.self_link] + locality_lb_policy = "RING_HASH" + + # webscoket handling: https://stackoverflow.com/questions/63822612/websocket-connection-being-closed-on-google-compute-engine + timeout_sec = 600 + + consistent_hash { + http_cookie { + ttl { + # 24hr cookie ttl + seconds = 86400 + nanos = null + } + name = "X-CLIENT-SESSION" + path = null + } + http_header_name = null + minimum_ring_size = 1024 + } + + log_config { + enable = true + sample_rate = 1.0 + } +} + +resource "google_compute_region_health_check" "default" { + project = "%{project_id}" + region = "us-central1" + name = "hc-%{resource_suffix}" + timeout_sec = 5 + check_interval_sec = 30 + healthy_threshold = 3 + unhealthy_threshold = 3 + + https_health_check { + port = 443 + request_path = "/health" + } +} + +resource "google_compute_region_target_https_proxy" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "https-proxy-%{resource_suffix}" + url_map = google_compute_region_url_map.default-https.self_link + ssl_certificates = [google_compute_region_ssl_certificate.foobar0.self_link] + ssl_policy = google_compute_region_ssl_policy.default.id +} + +resource "google_compute_region_url_map" "default-https" { + project = "%{project_id}" + region = "us-central1" + name = "lb-%{resource_suffix}" + default_service = google_compute_region_backend_service.default.id +} + +resource "google_compute_region_ssl_policy" "default" { + project = "%{project_id}" + region = "us-central1" + name = "ssl-policy-%{resource_suffix}" + + profile = "RESTRICTED" + min_tls_version = "TLS_1_2" +} + +resource "google_compute_region_ssl_certificate" "foobar0" { 
+ name = "httpsproxy-test-cert0-%{resource_suffix}" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_network" "ilb_network" { + name = "tf-test-l4-ilb-network-%{resource_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "ilb_subnet" { + name = "tf-test-l4-ilb-subnet-%{resource_suffix}" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_subnetwork" "ilb_subnet2" { + name = "tf-test-l4-ilb-subnet2-%{resource_suffix}" + ip_cidr_range = "10.142.0.0/20" + region = "us-central1" + purpose = "REGIONAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.ilb_network.id +} + +resource "google_compute_address" "consumer_address" { + name = "tf-test-website-ip-%{resource_suffix}-1" + region = "us-central1" + subnetwork = google_compute_subnetwork.ilb_subnet.id + address_type = "INTERNAL" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_tcp_proxy_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_tcp_proxy_test.go new file mode 100644 index 000000000000..e9bd70a3b702 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_target_tcp_proxy_test.go @@ -0,0 +1,155 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccComputeRegionTargetTcpProxy_update(t *testing.T) { + t.Parallel() + + target := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + backend := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + hc := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRegionTargetTcpProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionTargetTcpProxy_basic1(target, backend, hc), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionTargetTcpProxyExists( + t, "google_compute_region_target_tcp_proxy.foobar"), + ), + }, + { + ResourceName: "google_compute_region_target_tcp_proxy.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_region_backend_service.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_region_health_check.zero", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionTargetTcpProxy_update2(target, backend, hc), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRegionTargetTcpProxyExists( + t, "google_compute_region_target_tcp_proxy.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeRegionTargetTcpProxyExists(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + name := rs.Primary.Attributes["name"] + region := rs.Primary.Attributes["region"] + + found, err := config.NewComputeClient(config.UserAgent).RegionTargetTcpProxies.Get( + config.Project, region, name).Do() + if err != nil { + return err + } + + if found.Name != name { + return fmt.Errorf("RegionTargetTcpProxy not found") + } + + return nil + } +} + +func testAccComputeRegionTargetTcpProxy_basic1(target, backend, hc string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_tcp_proxy" "foobar" { + description = "Resource created for 
Terraform acceptance testing" + name = "%s" + backend_service = google_compute_region_backend_service.foobar.self_link + proxy_header = "NONE" + region = "us-central1" +} + +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + protocol = "TCP" + health_checks = [google_compute_region_health_check.zero.self_link] + region = "us-central1" + + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "443" + } + region = "us-central1" +} +`, target, backend, hc) +} + +func testAccComputeRegionTargetTcpProxy_update2(target, backend, hc string) string { + return fmt.Sprintf(` +resource "google_compute_region_target_tcp_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + backend_service = google_compute_region_backend_service.foobar2.self_link + proxy_header = "PROXY_V1" + region = "us-central1" +} + +resource "google_compute_region_backend_service" "foobar" { + name = "%s" + protocol = "TCP" + health_checks = [google_compute_region_health_check.zero.self_link] + region = "us-central1" + + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "foobar2" { + name = "%s-2" + protocol = "TCP" + health_checks = [google_compute_region_health_check.zero.self_link] + region = "us-central1" + + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "443" + } + region = "us-central1" +} +`, target, backend, backend, hc) +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_url_map_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_url_map_test.go new file mode 100644 index 000000000000..d2021019146a --- /dev/null 
+++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_url_map_test.go @@ -0,0 +1,1207 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeRegionUrlMap_update_path_matcher(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionUrlMap_basic1(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionUrlMap_basic2(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionUrlMap_advanced(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionUrlMap_advanced1(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionUrlMap_advanced2(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionUrlMap_noPathRulesWithUpdate(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: 
func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionUrlMap_noPathRules(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionUrlMap_basic1(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionUrlMap_ilbPathUpdate(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionUrlMap_ilbPath(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionUrlMap_ilbPathUpdate(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionUrlMap_ilbRouteUpdate(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionUrlMap_ilbRoute(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRegionUrlMap_ilbRouteUpdate(randomSuffix), + }, + { + ResourceName: 
"google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionUrlMap_defaultUrlRedirect(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionUrlMap_defaultUrlRedirectConfig(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRegionUrlMap_defaultUrlRedirectWithinPathMatcher(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionUrlMap_defaultUrlRedirectWithinPathMatcherConfig(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// Set all fields nested within `defaultRouteAction`, test import, then test updating all fields +func TestAccComputeRegionUrlMap_defaultRouteAction_full_update(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRegionUrlMap_defaultRouteAction_full(randomSuffix), + }, + { + ResourceName: "google_compute_region_url_map.foobar", + ImportState: true, + 
ImportStateVerify: true, + }, + { + Config: testAccComputeRegionUrlMap_defaultRouteAction_full_update(randomSuffix), + }, + }, + }) +} + +func testAccComputeRegionUrlMap_basic1(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + health_checks = [google_compute_region_health_check.zero.self_link] +} + +resource "google_compute_region_health_check" "zero" { + region = "us-central1" + name = "regionurlmap-test-%s" + http_health_check { + port = 80 + } +} + +resource "google_compute_region_url_map" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + default_service = google_compute_region_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "boop" + + path_rule { + paths = ["/*"] + service = google_compute_region_backend_service.foobar.self_link + } + } + + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar.self_link + } +} +`, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_basic2(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + health_checks = [google_compute_region_health_check.zero.self_link] +} + +resource "google_compute_region_health_check" "zero" { + region = "us-central1" + name = "regionurlmap-test-%s" + http_health_check { + port = 80 + } +} + +resource "google_compute_region_url_map" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + default_service = google_compute_region_backend_service.foobar.self_link 
+ + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "blip" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_region_backend_service.foobar.self_link + } + } + + test { + host = "mysite.com" + path = "/test" + service = google_compute_region_backend_service.foobar.self_link + } +} +`, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_advanced1(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + health_checks = [google_compute_region_health_check.zero.self_link] +} + +resource "google_compute_region_health_check" "zero" { + region = "us-central1" + name = "regionurlmap-test-%s" + http_health_check { + port = 80 + } +} + +resource "google_compute_region_url_map" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + default_service = google_compute_region_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blop" + } + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "blop" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_region_backend_service.foobar.self_link + } + } + + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "blip" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_region_backend_service.foobar.self_link + } + } +} +`, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_advanced2(randomSuffix string) string { + return fmt.Sprintf(` +resource 
"google_compute_region_backend_service" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + health_checks = [google_compute_region_health_check.zero.self_link] +} + +resource "google_compute_region_health_check" "zero" { + region = "us-central1" + name = "regionurlmap-test-%s" + http_health_check { + port = 80 + } +} + +resource "google_compute_region_url_map" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + default_service = google_compute_region_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blep" + } + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + host_rule { + hosts = ["myleastfavoritesite.com"] + path_matcher = "blub" + } + + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "blep" + + path_rule { + paths = ["/home"] + service = google_compute_region_backend_service.foobar.self_link + } + + path_rule { + paths = ["/login"] + service = google_compute_region_backend_service.foobar.self_link + } + } + + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "blub" + + path_rule { + paths = ["/*", "/blub"] + service = google_compute_region_backend_service.foobar.self_link + } + } + + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "blip" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_region_backend_service.foobar.self_link + } + } +} +`, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_noPathRules(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_backend_service" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + health_checks = 
[google_compute_region_health_check.zero.self_link] +} + +resource "google_compute_region_health_check" "zero" { + region = "us-central1" + name = "regionurlmap-test-%s" + http_health_check { + port = 80 + } +} + +resource "google_compute_region_url_map" "foobar" { + region = "us-central1" + name = "regionurlmap-test-%s" + default_service = google_compute_region_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + + path_matcher { + default_service = google_compute_region_backend_service.foobar.self_link + name = "boop" + } + + test { + host = "mysite.com" + path = "/*" + service = google_compute_region_backend_service.foobar.self_link + } +} +`, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_ilbPath(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_url_map" "foobar" { + name = "regionurlmap-test-%s" + description = "a description" + default_service = google_compute_region_backend_service.home.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_service = google_compute_region_backend_service.home.self_link + + path_rule { + paths = ["/home"] + route_action { + cors_policy { + allow_credentials = true + allow_headers = ["Allowed content"] + allow_methods = ["GET"] + allow_origins = ["Allowed origin"] + expose_headers = ["Exposed header"] + max_age = 30 + disabled = false + } + fault_injection_policy { + abort { + http_status = 234 + percentage = 5.6 + } + delay { + fixed_delay { + seconds = 0 + nanos = 50000 + } + percentage = 7.8 + } + } + request_mirror_policy { + backend_service = google_compute_region_backend_service.home.self_link + } + retry_policy { + num_retries = 4 + per_try_timeout { + seconds = 30 + } + retry_conditions = ["5xx", "deadline-exceeded"] + } + timeout { + seconds = 20 + nanos = 750000000 + } + url_rewrite { + host_rewrite = 
"dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + weighted_backend_services { + backend_service = google_compute_region_backend_service.home.self_link + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMe"] + request_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = true + } + response_headers_to_remove = ["RemoveMe"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + } + } + } + } + + test { + service = google_compute_region_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_region_backend_service" "home" { + name = "regionurlmap-test-%s" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_region_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "default" { + name = "regionurlmap-test-%s" + http_health_check { + port = 80 + } +} +`, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_ilbPathUpdate(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_url_map" "foobar" { + name = "regionurlmap-test-%s" + description = "a description" + default_service = google_compute_region_backend_service.home2.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths2" + } + + path_matcher { + name = "allpaths2" + default_service = google_compute_region_backend_service.home.self_link + + path_rule { + paths = ["/home2"] + route_action { + cors_policy { + allow_credentials = true + allow_headers = ["Allowed content again"] + allow_methods = ["PUT"] + allow_origins = ["Allowed origin again"] + expose_headers = ["Exposed header again"] + max_age = 31 + disabled = true + } + fault_injection_policy { + abort { + http_status = 345 + percentage = 6.7 + } + delay { + fixed_delay { + seconds = 1 + nanos = 51000 + } + percentage = 8.9 + } + } + 
request_mirror_policy { + backend_service = google_compute_region_backend_service.home.self_link + } + retry_policy { + num_retries = 6 + per_try_timeout { + seconds = 31 + } + retry_conditions = ["5xx"] + } + timeout { + seconds = 21 + nanos = 760000000 + } + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + weighted_backend_services { + backend_service = google_compute_region_backend_service.home.self_link + weight = 401 + header_action { + request_headers_to_remove = ["RemoveMe2"] + request_headers_to_add { + header_name = "AddMe2" + header_value = "MyValue2" + replace = false + } + response_headers_to_remove = ["RemoveMe2"] + response_headers_to_add { + header_name = "AddMe2" + header_value = "MyValue2" + replace = true + } + } + } + } + } + } + + test { + service = google_compute_region_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_region_backend_service" "home" { + name = "regionurlmap-test-%s" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_region_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "home2" { + name = "regionurlmap-test-%s-2" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_region_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "default" { + name = "regionurlmap-test-%s" + http_health_check { + port = 80 + } +} +`, randomSuffix, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_ilbRoute(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_url_map" "foobar" { + name = "regionurlmap-test-%s" + description = "a description" + default_service = google_compute_region_backend_service.home.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = 
"allpaths" + } + + path_matcher { + name = "allpaths" + default_service = google_compute_region_backend_service.home.self_link + + route_rules { + priority = 1 + header_action { + request_headers_to_remove = ["RemoveMe2"] + request_headers_to_add { + header_name = "AddSomethingElse" + header_value = "MyOtherValue" + replace = true + } + response_headers_to_remove = ["RemoveMe3"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + match_rules { + full_path_match = "a full path" + header_matches { + header_name = "someheader" + exact_match = "match this exactly" + invert_match = true + } + ignore_case = true + metadata_filters { + filter_match_criteria = "MATCH_ANY" + filter_labels { + name = "PLANET" + value = "MARS" + } + } + query_parameter_matches { + name = "a query parameter" + present_match = true + } + } + url_redirect { + host_redirect = "A host" + https_redirect = false + path_redirect = "some/path" + redirect_response_code = "TEMPORARY_REDIRECT" + strip_query = true + } + } + } + + test { + service = google_compute_region_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_region_backend_service" "home" { + name = "regionurlmap-test-%s" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_region_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "default" { + name = "regionurlmap-test-%s" + http_health_check { + port = 80 + } +} +`, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_ilbRouteUpdate(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_url_map" "foobar" { + name = "regionurlmap-test-%s" + description = "a description" + default_service = google_compute_region_backend_service.home.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths2" + } + + path_matcher { 
+ name = "allpaths2" + default_service = google_compute_region_backend_service.home2.self_link + + route_rules { + priority = 2 + header_action { + request_headers_to_remove = ["RemoveMe2Again"] + request_headers_to_add { + header_name = "AddSomethingElseAgain" + header_value = "MyOtherValueAgain" + replace = false + } + response_headers_to_remove = ["RemoveMe3Again"] + response_headers_to_add { + header_name = "AddMeAgain" + header_value = "MyValueAgain" + replace = true + } + } + match_rules { + full_path_match = "a full path again" + header_matches { + header_name = "someheaderagain" + exact_match = "match this exactly again" + invert_match = false + } + ignore_case = false + metadata_filters { + filter_match_criteria = "MATCH_ALL" + filter_labels { + name = "PLANET" + value = "JUPITER" + } + } + } + url_redirect { + host_redirect = "A hosti again" + https_redirect = true + path_redirect = "some/path/again" + redirect_response_code = "TEMPORARY_REDIRECT" + strip_query = false + } + } + } + + test { + service = google_compute_region_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_region_backend_service" "home" { + name = "regionurlmap-test-%s" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_region_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_backend_service" "home2" { + name = "regionurlmap-test-%s-2" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_region_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_MANAGED" +} + +resource "google_compute_region_health_check" "default" { + name = "regionurlmap-test-%s" + http_health_check { + port = 80 + } +} +`, randomSuffix, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_defaultUrlRedirectConfig(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_url_map" "foobar" { 
+ name = "urlmap-test-%s" + default_url_redirect { + https_redirect = true + strip_query = false + } +} +`, randomSuffix) +} + +func testAccComputeRegionUrlMap_defaultUrlRedirectWithinPathMatcherConfig(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_url_map" "foobar" { + name = "urlmap-test-%s" + default_url_redirect { + https_redirect = true + strip_query = false + } + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_url_redirect { + https_redirect = true + strip_query = false + } + } +} +`, randomSuffix) +} + +func testAccComputeRegionUrlMap_defaultRouteAction_full(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_url_map" "foobar" { + region = "us-central1" + + name = "regionurlmap%s" + description = "a description" + + default_route_action { + + retry_policy { + num_retries = 3 + per_try_timeout { + seconds = 0 + nanos = 500 + } + } + + timeout { + seconds = 3 + nanos = 0 + } + + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + + request_mirror_policy { + backend_service = google_compute_region_backend_service.login.id + } + + cors_policy { + allow_origins = [ "https://www.example.com" ] + allow_methods = [ "GET" ] + allow_headers = [ "Content-Type" ] + expose_headers = [ "Authorization" ] + max_age = 600 + allow_credentials = true + disabled = false + } + + weighted_backend_services { + backend_service = google_compute_region_backend_service.login.id + weight = 200 + header_action { + request_headers_to_add { + header_name = "foo-request-2" + header_value = "bar" + replace = true + } + request_headers_to_add { + header_name = "foo-request-1" + header_value = "bar" + replace = true + } + request_headers_to_remove = [ + "fizz", + "buzz" + ] + response_headers_to_add { + header_name = "foo-response-2" + header_value = "bar" + replace = true + } + response_headers_to_add { + 
header_name = "foo-response-1" + header_value = "bar" + replace = true + } + response_headers_to_remove = [ + "fizz", + "buzz" + ] + } + } + weighted_backend_services { + backend_service = google_compute_region_backend_service.home.id + weight = 100 + header_action { + request_headers_to_add { + header_name = "foo-request-2" + header_value = "bar" + replace = true + } + request_headers_to_add { + header_name = "foo-request-1" + header_value = "bar" + replace = true + } + request_headers_to_remove = [ + "fizz", + "buzz" + ] + response_headers_to_add { + header_name = "foo-response-2" + header_value = "bar" + replace = true + } + response_headers_to_add { + header_name = "foo-response-1" + header_value = "bar" + replace = true + } + response_headers_to_remove = [ + "fizz", + "buzz" + ] + } + } + } +} + +resource "google_compute_region_backend_service" "login" { + region = "us-central1" + + name = "login%s" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 +} + +resource "google_compute_region_backend_service" "home" { + region = "us-central1" + + name = "home%s" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 +} +`, randomSuffix, randomSuffix, randomSuffix) +} + +func testAccComputeRegionUrlMap_defaultRouteAction_full_update(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_region_url_map" "foobar" { + region = "us-central1" + + name = "regionurlmap%s" + description = "a description" + + default_route_action { + + # update all fields in retry_policy block + retry_policy { + num_retries = 4 + per_try_timeout { + seconds = 1 + nanos = 0 + } + } + + # update to be <1 second + timeout { + seconds = 0 + nanos = 10000000 # 0.01 seconds + } + + # update both values + url_rewrite { + host_rewrite = "stage.example.com" + path_prefix_rewrite = "/v2/api/" + } + + # update backend_service field from 'login' to 'home' + request_mirror_policy { + backend_service = 
google_compute_region_backend_service.home.id + } + + # update policy and disable it + cors_policy { + allow_origins = [ "https://xylophone.example.com", "https://www.example.com" ] + allow_methods = [ "PUT", "GET" ] + allow_headers = [ "Content-Type" ] + expose_headers = [ "Authorization" ] + max_age = 600 + allow_credentials = true + disabled = true + } + + # Change various fields - marked with comments + weighted_backend_services { + backend_service = google_compute_region_backend_service.login.id + weight = 150 # updated + header_action { + request_headers_to_add { + header_name = "fizz-request-2" # updated + header_value = "buzz" # updated + replace = true + } + request_headers_to_add { + header_name = "foo-request-1" + header_value = "bar" + replace = false # updated + } + request_headers_to_remove = [ + "fizz" # updated to remove element + ] + response_headers_to_add { + header_name = "foo-response-2" + header_value = "bar" + replace = true + } + response_headers_to_add { + header_name = "foo-response-1" + header_value = "bar" + replace = true + } + response_headers_to_remove = [ + "fizz", + "buzz", + "quack" # updated to add element + ] + } + } + weighted_backend_services { + backend_service = google_compute_region_backend_service.home.id + weight = 300 # updated + header_action { + request_headers_to_add { + header_name = "foo-request-2" + header_value = "bar" + replace = true + } + # updated to remove a 'request_headers_to_add' block + request_headers_to_remove = [ + "fizz", + "buzz" + ] + response_headers_to_add { + header_name = "foo-response-2" + header_value = "bar" + replace = true + } + response_headers_to_add { + header_name = "foo-response-1" + header_value = "bar" + replace = true + } + # updated to add 'response_headers_to_add' block below + response_headers_to_add { + header_name = "foo-response-3" + header_value = "bar" + replace = true + } + response_headers_to_remove = [ + "fizz", + "buzz" + ] + } + } + } +} + +resource 
"google_compute_region_backend_service" "login" { + region = "us-central1" + + name = "login%s" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 +} + +resource "google_compute_region_backend_service" "home" { + region = "us-central1" + + name = "home%s" + protocol = "HTTP" + load_balancing_scheme = "INTERNAL_MANAGED" + timeout_sec = 10 +} +`, randomSuffix, randomSuffix, randomSuffix) +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_router_bgp_peer_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_bgp_peer_test.go new file mode 100644 index 000000000000..f02de5e34a0c --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_bgp_peer_test.go @@ -0,0 +1,1701 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeRouterPeer_basic(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerBasic(routerName), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: "google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerKeepRouter(routerName), + Check: testAccCheckComputeRouterPeerDelete( + t, "google_compute_router_peer.foobar"), + }, + }, + }) +} + +func TestAccComputeRouterPeer_advertiseMode(t *testing.T) { + t.Parallel() + + routerName := 
fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerAdvertiseMode(routerName), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: "google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerAdvertiseModeUpdate(routerName), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: "google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterPeer_enable(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerBasic(routerName), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: "google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerEnable(routerName, false), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: "google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerEnable(routerName, true), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: 
"google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterPeer_bfd(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerBasic(routerName), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: "google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerBfd(routerName, "DISABLED"), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: "google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerBasic(routerName), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: "google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterPeer_routerApplianceInstance(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerRouterApplianceInstance(routerName), + Check: testAccCheckComputeRouterPeerExists( + t, "google_compute_router_peer.foobar"), + }, + { + ResourceName: "google_compute_router_peer.foobar", + ImportState: true, + ImportStateVerify: true, + 
}, + }, + }) +} + +func TestAccComputeRouterPeer_Ipv6Basic(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_router_peer.foobar" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerIpv6(routerName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterPeerExists( + t, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_ipv6", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterPeer_Ipv4BasicCreateUpdate(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_router_peer.foobar" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerIpv4(routerName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterPeerExists( + t, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_ipv4", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerUpdateIpv4Address(routerName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterPeerExists( + t, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_ipv4", "true"), + resource.TestCheckResourceAttr(resourceName, "ipv4_nexthop_address", "169.254.1.2"), + resource.TestCheckResourceAttr(resourceName, 
"peer_ipv4_nexthop_address", "169.254.1.1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterPeer_UpdateIpv6Address(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_router_peer.foobar" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerIpv6(routerName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterPeerExists( + t, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_ipv6", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerUpdateIpv6Address(routerName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterPeerExists( + t, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_ipv6", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterPeer_EnableDisableIpv6(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_router_peer.foobar" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerNoIpv6(routerName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterPeerExists( + t, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_ipv6", 
"false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerIpv6(routerName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterPeerExists( + t, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_ipv6", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterPeerIpv6(routerName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeRouterPeerExists( + t, resourceName), + resource.TestCheckResourceAttr(resourceName, "enable_ipv6", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterPeer_AddMd5AuthenticationKey(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + resourceName1 := "google_compute_router_peer.foobar" + resourceName2 := "google_compute_router_peer.foobar1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerWithMd5AuthKey(routerName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName1, "md5_authentication_key.#", "1"), // Check for one element in the list + resource.TestCheckResourceAttr(resourceName1, "md5_authentication_key.0.name", fmt.Sprintf("%s-peer-key", routerName)), + resource.TestCheckResourceAttr(resourceName1, "md5_authentication_key.0.key", fmt.Sprintf("%s-peer-key-value", routerName)), + resource.TestCheckResourceAttr(resourceName2, "md5_authentication_key.#", "1"), // Check for one element in the list + resource.TestCheckResourceAttr(resourceName2, "md5_authentication_key.0.name", 
fmt.Sprintf("%s-peer1-key", routerName)), + resource.TestCheckResourceAttr(resourceName2, "md5_authentication_key.0.key", fmt.Sprintf("%s-peer1-key-value", routerName)), + ), + }, + }, + }) +} + +func TestAccComputeRouterPeer_UpdateMd5AuthenticationKey(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + resourceName := "google_compute_router_peer.foobar1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterPeerWithMd5AuthKey(routerName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "md5_authentication_key.#", "1"), // Check for one element in the list + resource.TestCheckResourceAttr(resourceName, "md5_authentication_key.0.name", fmt.Sprintf("%s-peer1-key", routerName)), + resource.TestCheckResourceAttr(resourceName, "md5_authentication_key.0.key", fmt.Sprintf("%s-peer1-key-value", routerName)), + ), + }, + { + Config: testAccComputeRouterPeerWithMd5AuthKeyUpdate(routerName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "md5_authentication_key.#", "1"), // Check for one element in the list + resource.TestCheckResourceAttr(resourceName, "md5_authentication_key.0.name", fmt.Sprintf("%s-peer1-key", routerName)), + resource.TestCheckResourceAttr(resourceName, "md5_authentication_key.0.key", fmt.Sprintf("%s-peer1-key-value-changed", routerName)), + ), + }, + }, + }) +} + +func testAccCheckComputeRouterPeerDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + routersService := config.NewComputeClient(config.UserAgent).Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != 
"google_compute_router" { + continue + } + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := acctest.GetTestRegion(rs.Primary, config) + if err != nil { + return err + } + + routerName := rs.Primary.Attributes["router"] + + _, err = routersService.Get(project, region, routerName).Do() + + if err == nil { + return fmt.Errorf("Error, Router %s in region %s still exists", + routerName, region) + } + } + + return nil + } +} + +func testAccCheckComputeRouterPeerDelete(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + routersService := config.NewComputeClient(config.UserAgent).Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router_peer" { + continue + } + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := acctest.GetTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + peers := router.BgpPeers + for _, peer := range peers { + + if peer.Name == name { + return fmt.Errorf("Peer %s still exists on router %s/%s", name, region, router.Name) + } + } + } + + return nil + } +} + +func testAccCheckComputeRouterPeerExists(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := 
acctest.GetTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + routersService := config.NewComputeClient(config.UserAgent).Routers + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + for _, peer := range router.BgpPeers { + + if peer.Name == name { + return nil + } + } + + return fmt.Errorf("Peer %s not found for router %s", name, router.Name) + } +} + +func testAccComputeRouterPeerBasic(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + 
shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} + +resource "google_compute_router_peer" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterPeerWithMd5AuthKey(routerName string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + + resource "google_compute_address" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + } + + resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region + } + + resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } + } + + resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } + } + + resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = 
google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 + } + + resource "google_compute_vpn_tunnel" "foobar1" { + name = "%s1" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 1 + } + + resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar.name + ip_range = "169.254.3.1/30" + } + + resource "google_compute_router_interface" "foobar1" { + name = "%s1" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar1.name + ip_range = "169.254.4.1/30" + depends_on = [ + google_compute_router_interface.foobar + ] + } + + resource "google_compute_router_peer" "foobar" { + name = "%s-peer" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name + peer_ip_address = "169.254.3.2" + md5_authentication_key { + name = "%s-peer-key" + key = "%s-peer-key-value" + } + } + + resource "google_compute_router_peer" "foobar1" { + name = "%s-peer1" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_asn = 65516 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar1.name + peer_ip_address = "169.254.4.2" + md5_authentication_key { + name = "%s-peer1-key" + 
key = "%s-peer1-key-value" + } + depends_on = [ + google_compute_router_peer.foobar + ] + } +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, + routerName, routerName) +} + +func testAccComputeRouterPeerWithMd5AuthKeyUpdate(routerName string) string { + return fmt.Sprintf(` + resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + } + + resource "google_compute_address" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + } + + resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region + } + + resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } + } + + resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } + } + + resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 + } + + resource "google_compute_vpn_tunnel" "foobar1" { + name = "%s1" + region = google_compute_subnetwork.foobar.region + vpn_gateway = 
google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 1 + } + + resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar.name + ip_range = "169.254.3.1/30" + } + + resource "google_compute_router_interface" "foobar1" { + name = "%s1" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar1.name + ip_range = "169.254.4.1/30" + depends_on = [ + google_compute_router_interface.foobar + ] + } + + resource "google_compute_router_peer" "foobar" { + name = "%s-peer" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name + peer_ip_address = "169.254.3.2" + md5_authentication_key { + name = "%s-peer-key" + key = "%s-peer-key-value" + } + } + + resource "google_compute_router_peer" "foobar1" { + name = "%s-peer1" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_asn = 65516 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar1.name + peer_ip_address = "169.254.4.2" + md5_authentication_key { + name = "%s-peer1-key" + key = "%s-peer1-key-value-changed" + } + depends_on = [ + google_compute_router_peer.foobar + ] + } +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, + routerName, routerName) +} + +func testAccComputeRouterPeerKeepRouter(routerName string) string { + return fmt.Sprintf(` 
+resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterPeerAdvertiseMode(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" 
"foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.3.1/30" + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} + +resource "google_compute_router_peer" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_ip_address = "169.254.3.2" + peer_asn = 65515 + advertise_mode = "DEFAULT" + interface = google_compute_router_interface.foobar.name +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} + +func 
testAccComputeRouterPeerRouterApplianceInstance(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-sub" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "addr_intf" { + name = "%s-addr-intf" + region = google_compute_subnetwork.foobar.region + subnetwork = google_compute_subnetwork.foobar.id + address_type = "INTERNAL" +} + +resource "google_compute_address" "addr_intf_red" { + name = "%s-addr-intf-red" + region = google_compute_subnetwork.foobar.region + subnetwork = google_compute_subnetwork.foobar.id + address_type = "INTERNAL" +} + +resource "google_compute_address" "addr_peer" { + name = "%s-addr-peer" + region = google_compute_subnetwork.foobar.region + subnetwork = google_compute_subnetwork.foobar.id + address_type = "INTERNAL" +} + +resource "google_compute_instance" "foobar" { + name = "%s-vm" + machine_type = "e2-medium" + zone = "us-central1-a" + can_ip_forward = true + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + network_interface { + network_ip = google_compute_address.addr_peer.address + subnetwork = google_compute_subnetwork.foobar.self_link + } +} + +resource "google_network_connectivity_hub" "foobar" { + name = "%s-hub" +} + +resource "google_network_connectivity_spoke" "foobar" { + name = "%s-spoke" + location = google_compute_subnetwork.foobar.region + hub = google_network_connectivity_hub.foobar.id + + linked_router_appliance_instances { + instances { + virtual_machine = google_compute_instance.foobar.self_link + ip_address = google_compute_address.addr_peer.address + } + site_to_site_data_transfer = false + } +} + +resource "google_compute_router" "foobar" { + name = "%s-ra" + region = google_compute_subnetwork.foobar.region + network = 
google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_interface" "foobar_redundant" { + name = "%s-intf-red" + region = google_compute_router.foobar.region + router = google_compute_router.foobar.name + subnetwork = google_compute_subnetwork.foobar.self_link + private_ip_address = google_compute_address.addr_intf_red.address +} + +resource "google_compute_router_interface" "foobar" { + name = "%s-intf" + region = google_compute_router.foobar.region + router = google_compute_router.foobar.name + subnetwork = google_compute_subnetwork.foobar.self_link + private_ip_address = google_compute_address.addr_intf.address + redundant_interface = google_compute_router_interface.foobar_redundant.name +} + +resource "google_compute_router_peer" "foobar" { + name = "%s-peer" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_ip_address = google_compute_address.addr_peer.address + peer_asn = 65515 + interface = google_compute_router_interface.foobar.name + router_appliance_instance = google_compute_instance.foobar.self_link +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterPeerAdvertiseModeUpdate(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region +} + +resource 
"google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.3.1/30" + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} + +resource "google_compute_router_peer" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_ip_address = "169.254.3.3" + peer_asn = 65516 + advertised_route_priority = 0 + advertise_mode = "CUSTOM" + advertised_groups = ["ALL_SUBNETS"] + advertised_ip_ranges { + range = "10.1.0.0/32" + } + interface = google_compute_router_interface.foobar.name +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterPeerEnable(routerName string, enable bool) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + 
region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} + +resource "google_compute_router_peer" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name + enable = %v +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, enable) +} + +func testAccComputeRouterPeerBfd(routerName, bfdMode string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks 
= false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.self_link + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} + +resource "google_compute_router_peer" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name + + bfd { + min_receive_interval = 2000 + min_transmit_interval = 2000 + multiplier = 6 + session_initialization_mode = "%s" + } +} +`, routerName, 
routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, bfdMode) +} + +func testAccComputeRouterPeerUpdateIpv6Address(routerName string, enableIpv6 bool) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.id + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.id + region = google_compute_subnetwork.foobar.region + stack_type = "IPV4_IPV6" +} + +resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.id + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.3.1/30" + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} + +resource "google_compute_router_peer" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = 
google_compute_router.foobar.region + ip_address = "169.254.3.1" + peer_ip_address = "169.254.3.2" + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name + + enable_ipv6 = %v + ipv6_nexthop_address = "2600:2d00:0000:0002:0000:0000:0000:0002" + peer_ipv6_nexthop_address = "2600:2d00:0:2::1" +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, enableIpv6) +} + +func testAccComputeRouterPeerNoIpv6(routerName string, enableIpv6 bool) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.id + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.id + region = google_compute_subnetwork.foobar.region + stack_type = "IPV4_IPV6" +} + +resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.id + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + +resource 
"google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.3.1/30" + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} + +resource "google_compute_router_peer" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_address = "169.254.3.1" + peer_ip_address = "169.254.3.2" + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name + enable_ipv6 = %v + +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, enableIpv6) +} + +func testAccComputeRouterPeerIpv6(routerName string, enableIpv6 bool) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.id + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.id + region = google_compute_subnetwork.foobar.region + stack_type = "IPV4_IPV6" +} + +resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.id + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = 
google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.3.1/30" + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} + +resource "google_compute_router_peer" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_address = "169.254.3.1" + peer_ip_address = "169.254.3.2" + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name + enable_ipv6 = %v + ipv6_nexthop_address = "2600:2d00:0000:0002:0000:0000:0000:0001" + peer_ipv6_nexthop_address = "2600:2d00:0:2::2" +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName, enableIpv6) +} + +func testAccComputeRouterPeerIpv4(routerName string) string { + return fmt.Sprintf(`resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + } + + resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region + stack_type = "IPV4_IPV6" + } + + resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } + } + + resource "google_compute_router" "foobar" { + name 
= "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } + } + + resource "google_compute_vpn_tunnel" "foobar" { + name = "%s-tunnel" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 + } + + resource "google_compute_router_interface" "foobar" { + name = "%s-interface" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar.name + ip_range = "fdff:1::1:1/126" + } + + resource "google_compute_router_peer" "foobar" { + name = "%s-peer" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name + ip_address = "fdff:1::1:1" + peer_ip_address = "fdff:1::1:2" + + enable_ipv4 = true + enable_ipv6 = true + ipv4_nexthop_address = "169.254.1.1" + peer_ipv4_nexthop_address = "169.254.1.2" + } + `, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterPeerUpdateIpv4Address(routerName string) string { + return fmt.Sprintf(`resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" + } + + resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = 
google_compute_subnetwork.foobar.region + stack_type = "IPV4_IPV6" + } + + resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } + } + + resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } + } + + resource "google_compute_vpn_tunnel" "foobar" { + name = "%s-tunnel" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.id + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 + } + + resource "google_compute_router_interface" "foobar" { + name = "%s-interface" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + vpn_tunnel = google_compute_vpn_tunnel.foobar.name + ip_range = "fdff:1::1:1/126" + } + + resource "google_compute_router_peer" "foobar" { + name = "%s-peer" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + peer_asn = 65515 + advertised_route_priority = 100 + interface = google_compute_router_interface.foobar.name + ip_address = "fdff:1::1:1" + peer_ip_address = "fdff:1::1:2" + + enable_ipv4 = true + enable_ipv6 = true + ipv4_nexthop_address = "169.254.1.2" + peer_ipv4_nexthop_address = "169.254.1.1" + } + `, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_router_interface.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_interface.go.tmpl new file mode 100644 index 
000000000000..0ea91c647ef6 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_interface.go.tmpl @@ -0,0 +1,429 @@ +package compute + +import ( + "fmt" + "log" + "time" + + "strings" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/verify" + "google.golang.org/api/googleapi" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeRouterInterface() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterInterfaceCreate, + Read: resourceComputeRouterInterfaceRead, + Delete: resourceComputeRouterInterfaceDelete, + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterInterfaceImportState, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.DefaultProviderRegion, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A unique name for the interface, required by GCE. Changing this forces a new interface to be created.`, + }, + "router": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the router this interface will be attached to. 
Changing this forces a new interface to be created.`, + }, + "vpn_tunnel": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + AtLeastOneOf: []string{"ip_range", "interconnect_attachment", "subnetwork", "vpn_tunnel"}, + ConflictsWith: []string{"interconnect_attachment", "subnetwork"}, + Description: `The name or resource link to the VPN tunnel this interface will be linked to. Changing this forces a new interface to be created. Only one of vpn_tunnel, interconnect_attachment or subnetwork can be specified.`, + }, + "interconnect_attachment": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + AtLeastOneOf: []string{"ip_range", "interconnect_attachment", "subnetwork", "vpn_tunnel"}, + ConflictsWith: []string{"subnetwork", "vpn_tunnel"}, + Description: `The name or resource link to the VLAN interconnect for this interface. Changing this forces a new interface to be created. Only one of interconnect_attachment, subnetwork or vpn_tunnel can be specified.`, + }, + "ip_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + AtLeastOneOf: []string{"ip_range", "interconnect_attachment", "subnetwork", "vpn_tunnel"}, + Description: `The IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. Changing this forces a new interface to be created.`, + }, + "ip_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: verify.ValidateEnum([]string{"IPV4", "IPV6"}), + Description: `IP version of this interface.`, + }, + "private_ip_address": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The regional private internal IP address that is used to establish BGP sessions to a VM instance acting as a third-party Router Appliance. 
Changing this forces a new interface to be created.`, + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + AtLeastOneOf: []string{"ip_range", "interconnect_attachment", "subnetwork", "vpn_tunnel"}, + ConflictsWith: []string{"interconnect_attachment", "vpn_tunnel"}, + Description: `The URI of the subnetwork resource that this interface belongs to, which must be in the same region as the Cloud Router. Changing this forces a new interface to be created. Only one of subnetwork, interconnect_attachment or vpn_tunnel can be specified.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which this interface's router belongs. If it is not provided, the provider project is used. Changing this forces a new interface to be created.`, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The region this interface's router sits in. If not specified, the project region will be used. Changing this forces a new interface to be created.`, + }, + + "redundant_interface": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The name of the interface that is redundant to this interface. 
Changing this forces a new interface to be created.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRouterInterfaceCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routerLock := tpgresource.GetRouterLockName(region, routerName) + transport_tpg.MutexStore.Lock(routerLock) + defer transport_tpg.MutexStore.Unlock(routerLock) + + routersService := config.NewComputeClient(userAgent).Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + ifaces := router.Interfaces + + for _, iface := range ifaces { + if iface.Name == ifaceName { + d.SetId("") + return fmt.Errorf("Router %s has interface %s already", routerName, ifaceName) + } + } + + iface := &compute.RouterInterface{Name: ifaceName} + + if riVal, ok := d.GetOk("redundant_interface"); ok { + iface.RedundantInterface = riVal.(string) + } + + if ipRangeVal, ok := d.GetOk("ip_range"); ok { + iface.IpRange = ipRangeVal.(string) + } + + if ipVersionVal, ok := d.GetOk("ip_version"); ok { + iface.IpVersion = ipVersionVal.(string) + } + + if privateIpVal, ok := d.GetOk("private_ip_address"); ok { + iface.PrivateIpAddress = privateIpVal.(string) + } + + if vpnVal, ok := d.GetOk("vpn_tunnel"); ok { + vpnTunnel, err := getVpnTunnelLink(config, project, region, 
vpnVal.(string), userAgent) + if err != nil { + return err + } + iface.LinkedVpnTunnel = vpnTunnel + } + + if icVal, ok := d.GetOk("interconnect_attachment"); ok { + interconnectAttachment, err := tpgresource.GetInterconnectAttachmentLink(config, project, region, icVal.(string), userAgent) + if err != nil { + return err + } + iface.LinkedInterconnectAttachment = interconnectAttachment + } + + if subVal, ok := d.GetOk("subnetwork"); ok { + iface.Subnetwork = subVal.(string) + } + + log.Printf("[INFO] Adding interface %s", ifaceName) + ifaces = append(ifaces, iface) + patchRouter := &compute.Router{ + Interfaces: ifaces, + } + + log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, ifaces) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) + err = ComputeOperationWaitTime(config, op, project, "Patching router", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + d.SetId("") + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + return resourceComputeRouterInterfaceRead(d, meta) +} + +func resourceComputeRouterInterfaceRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routersService := config.NewComputeClient(userAgent).Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { 
+ log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error Reading router %s/%s: %s", region, routerName, err) + } + + for _, iface := range router.Interfaces { + + if iface.Name == ifaceName { + d.SetId(fmt.Sprintf("%s/%s/%s", region, routerName, ifaceName)) + if err := d.Set("vpn_tunnel", iface.LinkedVpnTunnel); err != nil { + return fmt.Errorf("Error setting vpn_tunnel: %s", err) + } + if err := d.Set("interconnect_attachment", iface.LinkedInterconnectAttachment); err != nil { + return fmt.Errorf("Error setting interconnect_attachment: %s", err) + } + if err := d.Set("ip_range", iface.IpRange); err != nil { + return fmt.Errorf("Error setting ip_range: %s", err) + } + if err := d.Set("ip_version", iface.IpVersion); err != nil { + return fmt.Errorf("Error setting ip_version: %s", err) + } + if err := d.Set("private_ip_address", iface.PrivateIpAddress); err != nil { + return fmt.Errorf("Error setting private_ip_address: %s", err) + } + if err := d.Set("subnetwork", iface.Subnetwork); err != nil { + return fmt.Errorf("Error setting subnetwork: %s", err) + } + if err := d.Set("region", region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("redundant_interface", iface.RedundantInterface); err != nil { + return fmt.Errorf("Error setting redundant interface: %s", err) + } + return nil + } + } + + log.Printf("[WARN] Removing router interface %s/%s/%s because it is gone", region, routerName, ifaceName) + d.SetId("") + return nil +} + +func resourceComputeRouterInterfaceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region, err := 
tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + routerName := d.Get("router").(string) + ifaceName := d.Get("name").(string) + + routerLock := tpgresource.GetRouterLockName(region, routerName) + transport_tpg.MutexStore.Lock(routerLock) + defer transport_tpg.MutexStore.Unlock(routerLock) + + routersService := config.NewComputeClient(userAgent).Routers + router, err := routersService.Get(project, region, routerName).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + log.Printf("[WARN] Removing router interface %s because its router %s/%s is gone", ifaceName, region, routerName) + + return nil + } + + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + var ifaceFound bool + + newIfaces := make([]*compute.RouterInterface, 0, len(router.Interfaces)) + for _, iface := range router.Interfaces { + + if iface.Name == ifaceName { + ifaceFound = true + continue + } else { + // If this is a redundant interface, + // remove its reference from other interfaces as well + if iface.RedundantInterface == ifaceName { + iface.RedundantInterface = "" + } + newIfaces = append(newIfaces, iface) + } + } + + if !ifaceFound { + log.Printf("[DEBUG] Router %s/%s had no interface %s already", region, routerName, ifaceName) + d.SetId("") + return nil + } + + log.Printf( + "[INFO] Removing interface %s from router %s/%s", ifaceName, region, routerName) + patchRouter := &compute.Router{ + Interfaces: newIfaces, + } + + if len(newIfaces) == 0 { + patchRouter.ForceSendFields = append(patchRouter.ForceSendFields, "Interfaces") + } + + log.Printf("[DEBUG] Updating router %s/%s with interfaces: %+v", region, routerName, newIfaces) + op, err := routersService.Patch(project, region, router.Name, patchRouter).Do() + if err != nil { + return fmt.Errorf("Error patching router %s/%s: %s", region, routerName, err) + } + + err = 
ComputeOperationWaitTime(config, op, project, "Patching router", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return fmt.Errorf("Error waiting to patch router %s/%s: %s", region, routerName, err) + } + + d.SetId("") + return nil +} + +func resourceComputeRouterInterfaceImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := strings.Split(d.Id(), "/") + switch len(parts) { + case 3: + // {{"{{"}}region{{"}}"}}/{{"{{"}}router{{"}}"}}/{{"{{"}}name{{"}}"}} import id + if err := d.Set("region", parts[0]); err != nil { + return nil, fmt.Errorf("error setting region: %s", err) + } + if err := d.Set("router", parts[1]); err != nil { + return nil, fmt.Errorf("error setting router: %s", err) + } + if err := d.Set("name", parts[2]); err != nil { + return nil, fmt.Errorf("error setting name: %s", err) + } + return []*schema.ResourceData{d}, nil + case 4: + // {{"{{"}}project{{"}}"}}/{{"{{"}}region{{"}}"}}/{{"{{"}}router{{"}}"}}/{{"{{"}}name{{"}}"}} import id + if err := d.Set("project", parts[0]); err != nil { + return nil, fmt.Errorf("error setting project: %s", err) + } + if err := d.Set("region", parts[1]); err != nil { + return nil, fmt.Errorf("error setting region: %s", err) + } + if err := d.Set("router", parts[2]); err != nil { + return nil, fmt.Errorf("error setting router: %s", err) + } + if err := d.Set("name", parts[3]); err != nil { + return nil, fmt.Errorf("error setting name: %s", err) + } + return []*schema.ResourceData{d}, nil + } + + return nil, fmt.Errorf("invalid router interface specifier. 
Expecting either {region}/{router}/{interface} or {project}/{region}/{router}/{interface} import id format") +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_router_interface_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_interface_test.go.tmpl new file mode 100644 index 000000000000..d0e7a44189fc --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_interface_test.go.tmpl @@ -0,0 +1,619 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccComputeRouterInterface_basic(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + context := map[string]interface{}{ + "name": name, + "region": "us-central1", + } + importIdFourPart := fmt.Sprintf("%s/%s/%s/%s", envvar.GetTestProjectFromEnv(), context["region"], context["name"], context["name"]) // name reused in config + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterInterfaceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterInterfaceBasic(context), + Check: testAccCheckComputeRouterInterfaceExists( + t, "google_compute_router_interface.foobar"), + }, + { + ResourceName: "google_compute_router_interface.foobar", + ImportState: true, // Will use the 3 part {{"{{"}}region{{"}}"}}/{{"{{"}}router{{"}}"}}/{{"{{"}}name{{"}}"}} import id by default as it's the id in state + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_router_interface.foobar", + ImportState: true, + 
ImportStateId: importIdFourPart, // Make test step use 4 part {{"{{"}}project{{"}}"}}/{{"{{"}}region{{"}}"}}/{{"{{"}}router{{"}}"}}/{{"{{"}}name{{"}}"}} import id + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterInterfaceKeepRouter(name), + Check: testAccCheckComputeRouterInterfaceDelete( + t, "google_compute_router_interface.foobar"), + }, + }, + }) +} + +func TestAccComputeRouterInterface_redundant(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterInterfaceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterInterfaceRedundant(routerName), + Check: testAccCheckComputeRouterInterfaceExists( + t, "google_compute_router_interface.foobar_int2"), + }, + { + ResourceName: "google_compute_router_interface.foobar_int2", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterInterface_withTunnel(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterInterfaceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterInterfaceWithTunnel(routerName), + Check: testAccCheckComputeRouterInterfaceExists( + t, "google_compute_router_interface.foobar"), + }, + { + ResourceName: "google_compute_router_interface.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterInterface_withPrivateIpAddress(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterInterfaceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterInterfaceWithPrivateIpAddress(routerName), + Check: testAccCheckComputeRouterInterfaceExists( + t, "google_compute_router_interface.foobar"), + }, + { + ResourceName: "google_compute_router_interface.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterInterface_withIPVersionV4(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterInterfaceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterInterfaceWithIpVersionIPV4(routerName), + Check: testAccCheckComputeRouterInterfaceExists( + t, "google_compute_router_interface.foobar"), + }, + { + ResourceName: "google_compute_router_interface.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterInterface_withIPVersionV6(t *testing.T) { + t.Parallel() + + routerName := fmt.Sprintf("tf-test-router-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterInterfaceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterInterfaceWithIpVersionIPV6(routerName), + Check: testAccCheckComputeRouterInterfaceExists( + t, "google_compute_router_interface.foobar"), + }, + { + ResourceName: "google_compute_router_interface.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + 
+func testAccCheckComputeRouterInterfaceDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + routersService := config.NewComputeClient(config.UserAgent).Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router" { + continue + } + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := acctest.GetTestRegion(rs.Primary, config) + if err != nil { + return err + } + + routerName := rs.Primary.Attributes["router"] + + _, err = routersService.Get(project, region, routerName).Do() + + if err == nil { + return fmt.Errorf("Error, Router %s in region %s still exists", + routerName, region) + } + } + + return nil + } +} + +func testAccCheckComputeRouterInterfaceDelete(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + routersService := config.NewComputeClient(config.UserAgent).Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router_interface" { + continue + } + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := acctest.GetTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + ifaces := router.Interfaces + for _, iface := range ifaces { + + if iface.Name == name { + return fmt.Errorf("Interface %s still exists on router %s/%s", name, region, router.Name) + } + } + } + + return nil + } +} + +func testAccCheckComputeRouterInterfaceExists(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + 
rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := acctest.GetTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + routersService := config.NewComputeClient(config.UserAgent).Routers + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + for _, iface := range router.Interfaces { + + if iface.Name == name { + return nil + } + } + + return fmt.Errorf("Interface %s not found for router %s", name, router.Name) + } +} + +func testAccComputeRouterInterfaceBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "foobar" { + name = "%{name}-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%{name}-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "%{region}" +} + +resource "google_compute_address" "foobar" { + name = "%{name}-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_vpn_gateway" "foobar" { + name = "%{name}-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_forwarding_rule" "foobar_esp" { + name = "%{name}-fr1" + region = google_compute_vpn_gateway.foobar.region + ip_protocol = "ESP" + ip_address = google_compute_address.foobar.address + target = google_compute_vpn_gateway.foobar.self_link +} + +resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "%{name}-fr2" + region = 
google_compute_forwarding_rule.foobar_esp.region + ip_protocol = "UDP" + port_range = "500-500" + ip_address = google_compute_address.foobar.address + target = google_compute_vpn_gateway.foobar.self_link +} + +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "%{name}-fr3" + region = google_compute_forwarding_rule.foobar_udp500.region + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = google_compute_address.foobar.address + target = google_compute_vpn_gateway.foobar.self_link +} + +resource "google_compute_router" "foobar" { + name = "%{name}" + region = google_compute_forwarding_rule.foobar_udp500.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_interface" "foobar" { + name = "%{name}" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.3.1/30" +} +`, context) +} + +func testAccComputeRouterInterfaceRedundant(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_interface" "foobar_int1" { + name = "%s-int1" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.3.1/30" +} + +resource "google_compute_router_interface" "foobar_int2" { + name = "%s-int2" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.4.1/30" + redundant_interface = google_compute_router_interface.foobar_int1.name +} +`, routerName, routerName, 
routerName, routerName, routerName) +} + +func testAccComputeRouterInterfaceKeepRouter(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "tf-test-%s" +} + +resource "google_compute_subnetwork" "foobar" { + name = "tf-test-router-interface-subnetwork-%s" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_vpn_gateway" "foobar" { + name = "%s" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_forwarding_rule" "foobar_esp" { + name = "%s-1" + region = google_compute_vpn_gateway.foobar.region + ip_protocol = "ESP" + ip_address = google_compute_address.foobar.address + target = google_compute_vpn_gateway.foobar.self_link +} + +resource "google_compute_forwarding_rule" "foobar_udp500" { + name = "%s-2" + region = google_compute_forwarding_rule.foobar_esp.region + ip_protocol = "UDP" + port_range = "500-500" + ip_address = google_compute_address.foobar.address + target = google_compute_vpn_gateway.foobar.self_link +} + +resource "google_compute_forwarding_rule" "foobar_udp4500" { + name = "%s-3" + region = google_compute_forwarding_rule.foobar_udp500.region + ip_protocol = "UDP" + port_range = "4500-4500" + ip_address = google_compute_address.foobar.address + target = google_compute_vpn_gateway.foobar.self_link +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_forwarding_rule.foobar_udp500.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterInterfaceWithTunnel(routerName string) string { + return fmt.Sprintf(` +resource 
"google_compute_network" "foobar" { + name = "tf-test-%s" +} + +resource "google_compute_subnetwork" "foobar" { + name = "tf-test-router-interface-subnetwork-%s" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_ha_vpn_gateway" "foobar" { + name = "%s-gateway" + network = google_compute_network.foobar.self_link + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_external_vpn_gateway" "external_gateway" { + name = "%s-external-gateway" + redundancy_type = "SINGLE_IP_INTERNALLY_REDUNDANT" + description = "An externally managed VPN gateway" + interface { + id = 0 + ip_address = "8.8.8.8" + } +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_vpn_tunnel" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + vpn_gateway = google_compute_ha_vpn_gateway.foobar.self_link + peer_external_gateway = google_compute_external_vpn_gateway.external_gateway.id + peer_external_gateway_interface = 0 + shared_secret = "unguessable" + router = google_compute_router.foobar.name + vpn_gateway_interface = 0 +} + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.3.1/30" + vpn_tunnel = google_compute_vpn_tunnel.foobar.name +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterInterfaceWithPrivateIpAddress(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "tf-test-%s" +} + +resource 
"google_compute_subnetwork" "foobar" { + name = "tf-test-router-interface-subnetwork-%s" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-addr" + region = google_compute_subnetwork.foobar.region + subnetwork = google_compute_subnetwork.foobar.id + address_type = "INTERNAL" +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_interface" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + subnetwork = google_compute_subnetwork.foobar.self_link + private_ip_address = google_compute_address.foobar.address +} +`, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterInterfaceWithIpVersionIPV6(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_compute_router" "foobar" { + name = "%s" + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_interface" "foobar" { + name = "%s-interface" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "fdff:1::1:1/126" + ip_version = "IPV6" +} +`, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterInterfaceWithIpVersionIPV4(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = 
google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_compute_router" "foobar" { + name = "%s" + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_interface" "foobar" { + name = "%s-interface" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + ip_range = "169.254.3.1/30" + ip_version = "IPV4" +} +`, routerName, routerName, routerName, routerName) +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_router_nat_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_nat_test.go.tmpl new file mode 100644 index 000000000000..c1f4639732bd --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_nat_test.go.tmpl @@ -0,0 +1,1933 @@ +package compute_test + +import ( + "fmt" +{{- if ne $.TargetVersionName "ga" }} + "regexp" +{{- end }} + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccComputeRouterNat_basic(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + region := envvar.GetTestRegionFromEnv() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-nat-%s", testId) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNatBasic(routerName), + }, + { + // implicitly full ImportStateId + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + 
ResourceName: "google_compute_router_nat.foobar", + ImportStateId: fmt.Sprintf("%s/%s/%s/%s", project, region, routerName, routerName), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportStateId: fmt.Sprintf("%s/%s/%s", region, routerName, routerName), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportStateId: fmt.Sprintf("%s/%s", routerName, routerName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatKeepRouter(routerName), + Check: testAccCheckComputeRouterNatDelete( + t, "google_compute_router_nat.foobar"), + }, + }, + }) +} + +func TestAccComputeRouterNat_update(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-nat-%s", testId) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNatBasicBeforeUpdate(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatUpdated(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatUpdateToNatIPsId(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatUpdateToNatIPsName(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatBasicBeforeUpdate(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: 
true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterNat_removeLogConfig(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-nat-%s", testId) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNatLogConfig(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatLogConfigRemoved(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterNat_withManualIpAndSubnetConfiguration(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-nat-%s", testId) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNatWithManualIpAndSubnetConfiguration(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterNat_withPortAllocationMethods(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-nat-%s", testId) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeRouterNatWithAllocationMethod(routerName, false, true), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatWithAllocationMethod(routerName, true, false), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatWithAllocationMethod(routerName, false, false), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatWithAllocationMethod(routerName, true, false), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatWithAllocationMethod(routerName, false, true), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatWithAllocationMethodWithParameters(routerName, false, true, 256, 8192), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeRouterNat_withNatIpsAndDrainNatIps(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-nat-%s", testId) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + // (ERROR): Creation with drain nat IPs should fail + { + Config: testAccComputeRouterNatWithOneDrainOneRemovedNatIps(routerName), + ExpectError: regexp.MustCompile("New RouterNat cannot have drain_nat_ips"), + }, + // Create NAT with three nat IPs + { + Config: 
testAccComputeRouterNatWithNatIps(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + // (ERROR) - Should not allow draining IPs still in natIps + { + Config: testAccComputeRouterNatWithInvalidDrainNatIpsStillInNatIps(routerName), + ExpectError: regexp.MustCompile("cannot be drained if still set in nat_ips"), + }, + // natIps #1, #2, #3--> natIp #2, drainNatIp #3 + { + Config: testAccComputeRouterNatWithOneDrainOneRemovedNatIps(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + // (ERROR): Should not be able to drain previously removed natIps (#1) + { + Config: testAccComputeRouterNatWithInvalidDrainMissingNatIp(routerName), + ExpectError: regexp.MustCompile("was not previously set in nat_ips"), + }, + }, + }) +} + +{{ end }} + +func TestAccComputeRouterNat_withNatRules(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-nat-%s", testId) + ruleDescription := acctest.RandString(t, 10) + ruleDescriptionUpdate := acctest.RandString(t, 10) + match := "inIpRange(destination.ip, '1.1.0.0/16') || inIpRange(destination.ip, '2.2.0.0/16')" + matchUpdate := "destination.ip == '1.1.0.1' || destination.ip == '8.8.8.8'" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNatRulesBasic_omitRules(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic(routerName, 0, ruleDescription, match), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccComputeRouterNatRulesBasic(routerName, 65000, ruleDescription, match), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic(routerName, 100, ruleDescription, match), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic(routerName, 100, ruleDescriptionUpdate, match), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic(routerName, 100, ruleDescriptionUpdate, matchUpdate), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesWithSourceActiveAndDrainIps(routerName, 100, ruleDescriptionUpdate, matchUpdate), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesWithDrainIps(routerName, 100, ruleDescriptionUpdate, matchUpdate), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatMultiRules(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic_omitAction(routerName, 100, ruleDescriptionUpdate, matchUpdate), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic_omitDescription(routerName, 100, matchUpdate), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatMultiRulesWithIpId(routerName), + }, + { + ResourceName: 
"google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic_omitRules(routerName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterNat_withEndpointTypes(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-nat-%s", testId) + testResourceName := "google_compute_router_nat.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNatBasic(routerName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(testResourceName, "endpoint_types.0", "ENDPOINT_TYPE_VM"), + ), + }, + { + ResourceName: testResourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatUpdateEndpointType(routerName, "ENDPOINT_TYPE_SWG"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(testResourceName, "endpoint_types.0", "ENDPOINT_TYPE_SWG"), + ), + }, + { + ResourceName: testResourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatUpdateEndpointType(routerName, "ENDPOINT_TYPE_VM"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(testResourceName, "endpoint_types.0", "ENDPOINT_TYPE_VM"), + ), + }, + { + ResourceName: testResourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatUpdateEndpointType(routerName, "ENDPOINT_TYPE_MANAGED_PROXY_LB"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(testResourceName, "endpoint_types.0", "ENDPOINT_TYPE_MANAGED_PROXY_LB"), + ), + }, + { + ResourceName: 
testResourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterNat_AutoNetworkTier(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-private-nat-%s", testId) + hubName := fmt.Sprintf("%s-hub", routerName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNatWitAutoNetworkTier(routerName, hubName), + }, + { + // implicitly full ImportStateId + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeRouterNat_withPrivateNat(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + region := envvar.GetTestRegionFromEnv() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-nat-%s", testId) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNatPrivateType(routerName), + }, + { + // implicitly full ImportStateId + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportStateId: fmt.Sprintf("%s/%s/%s/%s", project, region, routerName, routerName), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportStateId: fmt.Sprintf("%s/%s/%s", region, routerName, routerName), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: 
"google_compute_router_nat.foobar", + ImportStateId: fmt.Sprintf("%s/%s", routerName, routerName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatKeepRouter(routerName), + Check: testAccCheckComputeRouterNatDelete( + t, "google_compute_router_nat.foobar"), + }, + }, + }) +} + +func TestAccComputeRouterNat_withPrivateNatAndRules(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-private-nat-%s", testId) + hubName := fmt.Sprintf("%s-hub", routerName) + pEnv := envvar.GetTestProjectFromEnv() + ruleDescription := acctest.RandString(t, 10) + match := fmt.Sprintf("nexthop.hub == '//networkconnectivity.googleapis.com/projects/%s/locations/global/hubs/%s'", pEnv, hubName) + activeRangesNetworkOne := "google_compute_subnetwork.subnet1.self_link" + drainRangesEmpty := "" + activeRangesNetworkTwoAndThree := "google_compute_subnetwork.subnet2.self_link,google_compute_subnetwork.subnet3.self_link" + activeRangesNetworkThreeAndFour := "google_compute_subnetwork.subnet3.self_link,google_compute_subnetwork.subnet4.self_link" + drainRangesNetworkOne := "google_compute_subnetwork.subnet1.self_link" + drainRangesNetworkOneAndTwo := "google_compute_subnetwork.subnet1.self_link,google_compute_subnetwork.subnet2.self_link" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNatRulesBasic_privateNatOmitRules(routerName, hubName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic_privateNatWithRuleAndActiveDrainRange(routerName, hubName, 100, ruleDescription, match, activeRangesNetworkOne, drainRangesEmpty), + }, + { + ResourceName: 
"google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic_privateNatWithRuleAndActiveDrainRange(routerName, hubName, 100, ruleDescription, match, activeRangesNetworkTwoAndThree, drainRangesNetworkOne), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic_privateNatWithRuleAndActiveDrainRange(routerName, hubName, 100, ruleDescription, match, activeRangesNetworkThreeAndFour, drainRangesNetworkOneAndTwo), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic_privateNatWithRuleAndActiveDrainRange(routerName, hubName, 100, ruleDescription, match, activeRangesNetworkOne, drainRangesEmpty), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterNatRulesBasic_privateNatOmitRules(routerName, hubName), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouterNat_withPrivateNatAndEmptyAction(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-private-nat-%s", testId) + hubName := fmt.Sprintf("%s-hub", routerName) + pEnv := envvar.GetTestProjectFromEnv() + ruleDescription := acctest.RandString(t, 10) + match := fmt.Sprintf("nexthop.hub == '//networkconnectivity.googleapis.com/projects/%s/locations/global/hubs/%s'", pEnv, hubName) + activeRangesNetworkOne := "google_compute_subnetwork.subnet1.self_link" + drainRangesEmpty := "" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + // (ERROR): Creation with empty action should fail + { + Config: testAccComputeRouterNatRulesBasic_privateNatWithRuleAndEmptyAction(routerName, hubName, 100, ruleDescription, match), + ExpectError: regexp.MustCompile("The rule for PRIVATE nat type must contain an action with source_nat_active_ranges set"), + }, + // Create NAT with action and active ranges set + { + Config: testAccComputeRouterNatRulesBasic_privateNatWithRuleAndActiveDrainRange(routerName, hubName, 100, ruleDescription, match, activeRangesNetworkOne, drainRangesEmpty), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + // (ERROR) - Updating the rule by removing the action should fail + { + Config: testAccComputeRouterNatRulesBasic_privateNatWithRuleAndEmptyAction(routerName, hubName, 100, ruleDescription, match), + ExpectError: regexp.MustCompile("The rule for PRIVATE nat type must contain an action with source_nat_active_ranges set"), + }, + }, + }) +} + +func TestAccComputeRouterNat_withPrivateNatAndEmptyActionActiveRanges(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-private-nat-%s", testId) + hubName := fmt.Sprintf("%s-hub", routerName) + pEnv := envvar.GetTestProjectFromEnv() + ruleDescription := acctest.RandString(t, 10) + match := fmt.Sprintf("nexthop.hub == '//networkconnectivity.googleapis.com/projects/%s/locations/global/hubs/%s'", pEnv, hubName) + activeRangesNetworkOne := "google_compute_subnetwork.subnet1.self_link" + drainRangesEmpty := "" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterNatDestroyProducer(t), + Steps: []resource.TestStep{ + // (ERROR): Creation with empty action active ranges should fail + { + Config: 
testAccComputeRouterNatRulesBasic_privateNatWithRuleAndEmptyActionActiveRanges(routerName, hubName, 100, ruleDescription, match), + ExpectError: regexp.MustCompile("The rule for PRIVATE nat type must contain an action with source_nat_active_ranges set"), + }, + // Create NAT with action and active ranges set + { + Config: testAccComputeRouterNatRulesBasic_privateNatWithRuleAndActiveDrainRange(routerName, hubName, 100, ruleDescription, match, activeRangesNetworkOne, drainRangesEmpty), + }, + { + ResourceName: "google_compute_router_nat.foobar", + ImportState: true, + ImportStateVerify: true, + }, + // (ERROR) - Updating the rule by erasing the action active ranges should fail + { + Config: testAccComputeRouterNatRulesBasic_privateNatWithRuleAndEmptyActionActiveRanges(routerName, hubName, 100, ruleDescription, match), + ExpectError: regexp.MustCompile("The rule for PRIVATE nat type must contain an action with source_nat_active_ranges set"), + }, + }, + }) +} +{{- end }} + +func testAccCheckComputeRouterNatDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + routersService := config.NewComputeClient(config.UserAgent).Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router" { + continue + } + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := acctest.GetTestRegion(rs.Primary, config) + if err != nil { + return err + } + + routerName := rs.Primary.Attributes["router"] + + _, err = routersService.Get(project, region, routerName).Do() + + if err == nil { + return fmt.Errorf("Error, Router %s in region %s still exists", routerName, region) + } + } + + return nil + } +} + +func testAccCheckComputeRouterNatDelete(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + routersService := 
config.NewComputeClient(config.UserAgent).Routers + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router_nat" { + continue + } + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + region, err := acctest.GetTestRegion(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + routerName := rs.Primary.Attributes["router"] + + router, err := routersService.Get(project, region, routerName).Do() + + if err != nil { + return fmt.Errorf("Error Reading Router %s: %s", routerName, err) + } + + nats := router.Nats + for _, nat := range nats { + if nat.Name == name { + return fmt.Errorf("Nat %s still exists on router %s/%s", name, region, router.Name) + } + } + } + + return nil + } +} + +func testAccComputeRouterNatBasic(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + nat_ip_allocate_option = "AUTO_ONLY" + source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" + log_config { + enable = true + filter = "ERRORS_ONLY" + } +} +`, routerName, routerName, routerName, routerName) +} + +// Like basic but with extra resources +func testAccComputeRouterNatBasicBeforeUpdate(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link 
+} + +resource "google_compute_network" "foobar" { + name = "%s-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + nat_ip_allocate_option = "AUTO_ONLY" + source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" + + log_config { + enable = true + filter = "ERRORS_ONLY" + } +} +`, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterNatUpdated(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link +} + +resource "google_compute_network" "foobar" { + name = "%s-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.foobar.self_link] + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + + subnetwork { + name = google_compute_subnetwork.foobar.self_link + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + udp_idle_timeout_sec = 60 + icmp_idle_timeout_sec = 60 + tcp_established_idle_timeout_sec = 1600 + tcp_transitory_idle_timeout_sec = 60 + 
tcp_time_wait_timeout_sec = 60 + + log_config { + enable = true + filter = "TRANSLATIONS_ONLY" + } +} +`, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterNatUpdateEndpointType(routerName string, endpointType string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%[1]s-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%[1]s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_router" "foobar" { + name = "%[1]s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link +} + +resource "google_compute_router_nat" "foobar" { + name = "%[1]s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + nat_ip_allocate_option = "AUTO_ONLY" + source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" + endpoint_types = [ "%[2]s" ] + log_config { + enable = true + filter = "ERRORS_ONLY" + } +} +`, routerName, endpointType) +} + +func testAccComputeRouterNatUpdateToNatIPsId(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_router" "foobar" { +name = "%s" +region = google_compute_subnetwork.foobar.region +network = google_compute_network.foobar.self_link +} + +resource "google_compute_network" "foobar" { +name = "%s-net" +} +resource "google_compute_subnetwork" "foobar" { +name = "%s-subnet" +network = google_compute_network.foobar.self_link +ip_cidr_range = "10.0.0.0/16" +region = "us-central1" +} + +resource "google_compute_address" "foobar" { +name = "%s-addr" +region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.foobar.id] + + 
source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + + subnetwork { + name = google_compute_subnetwork.foobar.self_link + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + udp_idle_timeout_sec = 60 + icmp_idle_timeout_sec = 60 + tcp_established_idle_timeout_sec = 1600 + tcp_transitory_idle_timeout_sec = 60 + tcp_time_wait_timeout_sec = 60 + + log_config { + enable = true + filter = "TRANSLATIONS_ONLY" + } +} +`, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterNatUpdateToNatIPsName(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_router" "foobar" { +name = "%s" +region = google_compute_subnetwork.foobar.region +network = google_compute_network.foobar.self_link +} + +resource "google_compute_network" "foobar" { +name = "%s-net" +} +resource "google_compute_subnetwork" "foobar" { +name = "%s-subnet" +network = google_compute_network.foobar.self_link +ip_cidr_range = "10.0.0.0/16" +region = "us-central1" +} + +resource "google_compute_address" "foobar" { +name = "%s-addr" +region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.foobar.name] + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + + subnetwork { + name = google_compute_subnetwork.foobar.self_link + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + udp_idle_timeout_sec = 60 + icmp_idle_timeout_sec = 60 + tcp_established_idle_timeout_sec = 1600 + tcp_transitory_idle_timeout_sec = 60 + tcp_time_wait_timeout_sec = 60 + + log_config { + enable = true + filter = "TRANSLATIONS_ONLY" + } +} +`, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterNatWithManualIpAndSubnetConfiguration(routerName string) string { + return fmt.Sprintf(` +resource 
"google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-router-nat-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.foobar.self_link] + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.name + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } +} +`, routerName, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterNatWithAllocationMethod(routerName string, enableEndpointIndependentMapping, enableDynamicPortAllocation bool) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-router-nat-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region 
= google_compute_router.foobar.region + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.foobar.self_link] + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.name + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + enable_endpoint_independent_mapping = %t + enable_dynamic_port_allocation = %t +} +`, routerName, routerName, routerName, routerName, routerName, enableEndpointIndependentMapping, enableDynamicPortAllocation) +} + +func testAccComputeRouterNatWithAllocationMethodWithParameters(routerName string, enableEndpointIndependentMapping, enableDynamicPortAllocation bool, minPortsPerVm, maxPortsPerVm uint32) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "foobar" { + name = "%s-router-nat-addr" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.foobar.self_link] + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.name + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + enable_endpoint_independent_mapping = %t + enable_dynamic_port_allocation = %t + min_ports_per_vm = %d + max_ports_per_vm = %d +} +`, routerName, routerName, routerName, routerName, routerName, 
enableEndpointIndependentMapping, enableDynamicPortAllocation, minPortsPerVm, maxPortsPerVm) +} + +func testAccComputeRouterNatBaseResourcesWithNatIps(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "addr1" { + name = "%s-addr1" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_address" "addr2" { + name = "%s-addr2" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_address" "addr3" { + name = "%s-addr3" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_address" "addr4" { + name = "%s-addr4" + region = google_compute_subnetwork.foobar.region +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link +} +`, routerName, routerName, routerName, routerName, routerName, routerName, routerName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRouterNatWithNatIps(routerName string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [ + google_compute_address.addr1.self_link, + google_compute_address.addr2.self_link, + google_compute_address.addr3.self_link, + ] + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.self_link + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName) +} + +func 
testAccComputeRouterNatWithOneDrainOneRemovedNatIps(routerName string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.self_link + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [ + google_compute_address.addr2.self_link, + ] + + drain_nat_ips = [ + google_compute_address.addr3.self_link, + ] +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName) +} + +func testAccComputeRouterNatWithInvalidDrainMissingNatIp(routerName string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.self_link + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [ + google_compute_address.addr2.self_link, + ] + + drain_nat_ips = [ + google_compute_address.addr1.self_link, + google_compute_address.addr3.self_link, + ] +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName) +} + +func testAccComputeRouterNatWithInvalidDrainNatIpsStillInNatIps(routerName string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.self_link + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [ + 
google_compute_address.addr1.self_link, + google_compute_address.addr2.self_link, + google_compute_address.addr3.self_link, + ] + + drain_nat_ips = [ + google_compute_address.addr3.self_link, + ] +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName) +} +{{- end }} + +func testAccComputeRouterNatRulesBasic_omitRules(routerName string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.addr1.self_link] + + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + enable_endpoint_independent_mapping = false +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName) +} + +func testAccComputeRouterNatRulesBasic_omitAction(routerName string, ruleNumber int, ruleDescription string, ruleMatch string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.addr1.self_link] + + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = %d + description = "%s" + match = "%s" + } + + enable_endpoint_independent_mapping = false +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName, ruleNumber, ruleDescription, ruleMatch) +} + +func testAccComputeRouterNatRulesBasic_omitDescription(routerName string, ruleNumber int, ruleMatch string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = 
"%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.addr1.self_link] + + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = %d + match = "%s" + action { + source_nat_active_ips = [google_compute_address.addr2.self_link, google_compute_address.addr3.self_link] + } + } + + enable_endpoint_independent_mapping = false +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName, ruleNumber, ruleMatch) +} + +func testAccComputeRouterNatRulesBasic(routerName string, ruleNumber int, ruleDescription string, ruleMatch string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.addr1.self_link] + + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = %d + description = "%s" + match = "%s" + action { + source_nat_active_ips = [google_compute_address.addr2.self_link, google_compute_address.addr3.self_link] + } + } + + enable_endpoint_independent_mapping = false +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName, ruleNumber, ruleDescription, ruleMatch) +} + +func testAccComputeRouterNatRulesWithSourceActiveAndDrainIps(routerName string, ruleNumber int, ruleDescription string, ruleMatch string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option 
= "MANUAL_ONLY" + nat_ips = [google_compute_address.addr1.self_link] + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = %d + description = "%s" + match = "%s" + action { + source_nat_active_ips = [google_compute_address.addr2.self_link] + source_nat_drain_ips = [google_compute_address.addr3.self_link] + } + } + + enable_endpoint_independent_mapping = false +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName, ruleNumber, ruleDescription, ruleMatch) +} + +func testAccComputeRouterNatRulesWithDrainIps(routerName string, ruleNumber int, ruleDescription string, ruleMatch string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.addr1.self_link] + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = %d + description = "%s" + match = "%s" + action { + source_nat_drain_ips = [google_compute_address.addr2.self_link] + } + } + + enable_endpoint_independent_mapping = false +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName, ruleNumber, ruleDescription, ruleMatch) +} + +func testAccComputeRouterNatMultiRules(routerName string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.addr1.self_link] + + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = 
google_compute_subnetwork.foobar.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = 100 + description = "a" + match = "destination.ip == '1.1.1.1' || destination.ip == '2.2.2.2'" + action { + source_nat_active_ips = [google_compute_address.addr2.self_link] + } + } + + rules { + rule_number = 5000 + description = "b" + match = "destination.ip == '3.3.3.3' || destination.ip == '4.4.4.4'" + action { + source_nat_active_ips = [google_compute_address.addr3.self_link] + } + } + + rules { + rule_number = 300 + description = "c" + match = "destination.ip == '5.5.5.5' || destination.ip == '8.8.8.8'" + action { + source_nat_active_ips = [google_compute_address.addr4.self_link] + } + } + + enable_endpoint_independent_mapping = false +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName) +} + +func testAccComputeRouterNatMultiRulesWithIpId(routerName string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + nat_ip_allocate_option = "MANUAL_ONLY" + nat_ips = [google_compute_address.addr1.id] + + + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + subnetwork { + name = google_compute_subnetwork.foobar.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = 100 + description = "a" + match = "destination.ip == '1.1.1.1' || destination.ip == '2.2.2.2'" + action { + source_nat_active_ips = [google_compute_address.addr2.id] + } + } + + rules { + rule_number = 5000 + description = "b" + match = "destination.ip == '3.3.3.3' || destination.ip == '4.4.4.4'" + action { + source_nat_active_ips = [google_compute_address.addr3.id] + } + } + + rules { + rule_number = 300 + description = "c" + match = "destination.ip == '5.5.5.5' || destination.ip == '8.8.8.8'" + action { + source_nat_active_ips = [google_compute_address.addr4.id] + } + } + + 
enable_endpoint_independent_mapping = false +} +`, testAccComputeRouterNatBaseResourcesWithNatIps(routerName), routerName) +} + +func testAccComputeRouterNatKeepRouter(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link +} +`, routerName, routerName, routerName) +} + +func testAccComputeRouterNatLogConfig(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + nat_ip_allocate_option = "AUTO_ONLY" + source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" + log_config { + enable = false + filter = "ALL" + } +} +`, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterNatLogConfigRemoved(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_router" "foobar" { + name = 
"%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + nat_ip_allocate_option = "AUTO_ONLY" + source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" +} +`, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterNatBaseResourcesWithPrivateNatSubnetworks(routerName, hubName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = "false" +} + +resource "google_compute_subnetwork" "subnet1" { + name = "%s-subnet1" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + purpose = "PRIVATE_NAT" +} + +resource "google_compute_subnetwork" "subnet2" { + name = "%s-subnet2" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.10.1.0/24" + region = "us-central1" + purpose = "PRIVATE_NAT" +} + +resource "google_compute_subnetwork" "subnet3" { + name = "%s-subnet3" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.158.1.0/24" + region = "us-central1" + purpose = "PRIVATE_NAT" +} + +resource "google_compute_subnetwork" "subnet4" { + name = "%s-subnet4" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.168.1.0/24" + region = "us-central1" + purpose = "PRIVATE_NAT" +} + +resource "google_network_connectivity_hub" "foobar" { + name = "%s" + description = "vpc hub for inter vpc nat" +} + +resource "google_network_connectivity_spoke" "primary" { + name = "%s-spoke" + location = "global" + description = "vpc spoke for inter vpc nat" + hub = google_network_connectivity_hub.foobar.id + linked_vpc_network { + exclude_export_ranges = [ + "10.10.0.0/16" + ] + uri = google_compute_network.foobar.self_link + } +} + +resource 
"google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.subnet1.region + network = google_compute_network.foobar.self_link + depends_on = [ + google_network_connectivity_spoke.primary + ] +} +`, routerName, routerName, routerName, routerName, routerName, hubName, routerName, routerName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeRouterNatPrivateType(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + purpose = "PRIVATE_NAT" +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.self_link +} + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + type = "PRIVATE" + enable_dynamic_port_allocation = false + enable_endpoint_independent_mapping = false + min_ports_per_vm = 32 + subnetwork { + name = google_compute_subnetwork.foobar.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } +} +`, routerName, routerName, routerName, routerName) +} + +func testAccComputeRouterNatRulesBasic_privateNatOmitRules(routerName, hubName string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + type = "PRIVATE" + enable_dynamic_port_allocation = false + enable_endpoint_independent_mapping = false + min_ports_per_vm = 32 + subnetwork { + name = google_compute_subnetwork.subnet1.id + 
source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } +} +`, testAccComputeRouterNatBaseResourcesWithPrivateNatSubnetworks(routerName, hubName), routerName) +} + +func testAccComputeRouterNatRulesBasic_privateNatWithRuleAndActiveDrainRange(routerName, hubName string, ruleNumber int, ruleDescription, match, activeRanges, drainRanges string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + type = "PRIVATE" + enable_dynamic_port_allocation = false + enable_endpoint_independent_mapping = false + min_ports_per_vm = 32 + subnetwork { + name = google_compute_subnetwork.subnet1.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = %d + description = "%s" + match = "%s" + action { + source_nat_active_ranges = [%s] + source_nat_drain_ranges = [%s] + } + } +} +`, testAccComputeRouterNatBaseResourcesWithPrivateNatSubnetworks(routerName, hubName), routerName, ruleNumber, ruleDescription, match, activeRanges, drainRanges) +} + +func testAccComputeRouterNatRulesBasic_privateNatWithRuleAndEmptyAction(routerName, hubName string, ruleNumber int, ruleDescription, match string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + type = "PRIVATE" + enable_dynamic_port_allocation = false + enable_endpoint_independent_mapping = false + min_ports_per_vm = 32 + subnetwork { + name = google_compute_subnetwork.subnet1.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = %d + description = "%s" + match = "%s" + # action is missing + } +} +`, testAccComputeRouterNatBaseResourcesWithPrivateNatSubnetworks(routerName, hubName), routerName, 
ruleNumber, ruleDescription, match) +} + +func testAccComputeRouterNatRulesBasic_privateNatWithRuleAndEmptyActionActiveRanges(routerName, hubName string, ruleNumber int, ruleDescription, match string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS" + type = "PRIVATE" + enable_dynamic_port_allocation = false + enable_endpoint_independent_mapping = false + min_ports_per_vm = 32 + subnetwork { + name = google_compute_subnetwork.subnet1.id + source_ip_ranges_to_nat = ["ALL_IP_RANGES"] + } + + rules { + rule_number = %d + description = "%s" + match = "%s" + action { + source_nat_active_ranges = [] + } + } +} +`, testAccComputeRouterNatBaseResourcesWithPrivateNatSubnetworks(routerName, hubName), routerName, ruleNumber, ruleDescription, match) +} + +{{ end }} + +func testAccComputeRouterNatWitAutoNetworkTier(routerName, hubName string) string { + return fmt.Sprintf(` +%s + +resource "google_compute_router_nat" "foobar" { + name = "%s" + router = google_compute_router.foobar.name + region = google_compute_router.foobar.region + + source_subnetwork_ip_ranges_to_nat = "ALL_SUBNETWORKS_ALL_IP_RANGES" + nat_ip_allocate_option = "AUTO_ONLY" + auto_network_tier = "PREMIUM" +} +`, testAccComputeRouterNatBaseResourcesWithPrivateNatSubnetworks(routerName, hubName), routerName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer.go.tmpl new file mode 100644 index 000000000000..2d9efd38c9d7 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer.go.tmpl @@ -0,0 +1,1600 @@ +package compute + +import ( + "fmt" + "log" + "net" + "reflect" + "strconv" + "strings" + "time" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ipv6RepresentationDiffSuppress(_, old, new string, d *schema.ResourceData) bool { + //Diff suppress any equal IPV6 address in different representations + //An IPV6 address can have long or short representations + //E.g 2001:0cb0:0000:0000:0fc0:0000:0000:0abc, after compression: + //A) 2001:0cb0::0fc0:0000:0000:0abc (Omit groups of all zeros) + //B) 2001:cb0:0:0:fc0::abc (Omit leading zeros) + //C) 2001:cb0::fc0:0:0:abc (Combining A and B) + //The GCP API follows rule B) for normalzation + + oldIp := net.ParseIP(old) + newIp := net.ParseIP(new) + return oldIp.Equal(newIp) +} + +func ResourceComputeRouterBgpPeer() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeRouterBgpPeerCreate, + Read: resourceComputeRouterBgpPeerRead, + Update: resourceComputeRouterBgpPeerUpdate, + Delete: resourceComputeRouterBgpPeerDelete, + + Importer: &schema.ResourceImporter{ + State: resourceComputeRouterBgpPeerImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "interface": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Name of the interface the BGP peer is associated with.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRFC1035Name(2, 63), + Description: `Name of this BGP peer. The name must be 1-63 characters long, +and comply with RFC1035. 
Specifically, the name must be 1-63 characters +long and match the regular expression '[a-z]([-a-z0-9]*[a-z0-9])?' which +means the first character must be a lowercase letter, and all +following characters must be a dash, lowercase letter, or digit, +except the last character, which cannot be a dash.`, + }, + "peer_asn": { + Type: schema.TypeInt, + Required: true, + Description: `Peer BGP Autonomous System Number (ASN). +Each BGP interface may use a different value.`, + }, + "router": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name of the Cloud Router in which this BgpPeer will be configured.`, + }, + "advertise_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.ValidateEnum([]string{"DEFAULT", "CUSTOM", ""}), + Description: `User-specified flag to indicate which mode to use for advertisement. +Valid values of this enum field are: 'DEFAULT', 'CUSTOM' Default value: "DEFAULT" Possible values: ["DEFAULT", "CUSTOM"]`, + Default: "DEFAULT", + }, + "advertised_groups": { + Type: schema.TypeList, + Optional: true, + Description: `User-specified list of prefix groups to advertise in custom +mode, which currently supports the following option: + +* 'ALL_SUBNETS': Advertises all of the router's own VPC subnets. +This excludes any routes learned for subnets that use VPC Network +Peering. + + +Note that this field can only be populated if advertiseMode is 'CUSTOM' +and overrides the list defined for the router (in the "bgp" message). +These groups are advertised in addition to any specified prefixes. +Leave this field blank to advertise no custom groups.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "advertised_ip_ranges": { + Type: schema.TypeList, + Optional: true, + Description: `User-specified list of individual IP ranges to advertise in +custom mode. 
This field can only be populated if advertiseMode +is 'CUSTOM' and is advertised to all peers of the router. These IP +ranges will be advertised in addition to any specified groups. +Leave this field blank to advertise no custom IP ranges.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "range": { + Type: schema.TypeString, + Required: true, + Description: `The IP range to advertise. The value must be a +CIDR-formatted string.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `User-specified description for the IP range.`, + }, + }, + }, + }, + "advertised_route_priority": { + Type: schema.TypeInt, + Optional: true, + Description: `The priority of routes advertised to this BGP peer. +Where there is more than one matching route of maximum +length, the routes with the lowest priority value win.`, + }, + "bfd": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `BFD configuration for the BGP peering.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "session_initialization_mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateEnum([]string{"ACTIVE", "DISABLED", "PASSIVE"}), + Description: `The BFD session initialization mode for this BGP peer. +If set to 'ACTIVE', the Cloud Router will initiate the BFD session +for this BGP peer. If set to 'PASSIVE', the Cloud Router will wait +for the peer router to initiate the BFD session for this BGP peer. +If set to 'DISABLED', BFD is disabled for this BGP peer. Possible values: ["ACTIVE", "DISABLED", "PASSIVE"]`, + }, + "min_receive_interval": { + Type: schema.TypeInt, + Optional: true, + Description: `The minimum interval, in milliseconds, between BFD control packets +received from the peer router. The actual value is negotiated +between the two routers and is equal to the greater of this value +and the transmit interval of the other router. 
If set, this value +must be between 1000 and 30000.`, + Default: 1000, + }, + "min_transmit_interval": { + Type: schema.TypeInt, + Optional: true, + Description: `The minimum interval, in milliseconds, between BFD control packets +transmitted to the peer router. The actual value is negotiated +between the two routers and is equal to the greater of this value +and the corresponding receive interval of the other router. If set, +this value must be between 1000 and 30000.`, + Default: 1000, + }, + "multiplier": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of consecutive BFD packets that must be missed before +BFD declares that a peer is unavailable. If set, the value must +be a value between 5 and 16.`, + Default: 5, + }, + }, + }, + }, + "enable": { + Type: schema.TypeBool, + Optional: true, + Description: `The status of the BGP peer connection. If set to false, any active session +with the peer is terminated and all associated routing information is removed. +If set to true, the peer connection can be established with routing information. +The default is true.`, + Default: true, + }, + "enable_ipv6": { + Type: schema.TypeBool, + Optional: true, + Description: `Enable IPv6 traffic over BGP Peer. If not specified, it is disabled by default.`, + Default: false, + }, + "enable_ipv4": { + Type: schema.TypeBool, + Optional: true, + Description: `Enable IPv4 traffic over BGP Peer. It is enabled by default if the peerIpAddress is version 4.`, + Computed: true, + }, + "ip_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `IP address of the interface inside Google Cloud Platform. +Only IPv4 is supported.`, + }, + "ipv6_nexthop_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpAddress, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `IPv6 address of the interface inside Google Cloud Platform. 
+The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. +If you do not specify the next hop addresses, Google Cloud automatically +assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.`, + }, + "ipv4_nexthop_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpAddress, + Description: `IPv4 address of the interface inside Google Cloud Platform.`, + }, + "peer_ip_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `IP address of the BGP interface outside Google Cloud Platform. +Only IPv4 is supported. Required if 'ip_address' is set.`, + }, + "peer_ipv6_nexthop_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpAddress, + DiffSuppressFunc: ipv6RepresentationDiffSuppress, + Description: `IPv6 address of the BGP interface outside Google Cloud Platform. +The address must be in the range 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64. +If you do not specify the next hop addresses, Google Cloud automatically +assigns unused addresses from the 2600:2d00:0:2::/64 or 2600:2d00:0:3::/64 range for you.`, + }, + "peer_ipv4_nexthop_address": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: verify.ValidateIpAddress, + Description: `IPv4 address of the BGP interface outside Google Cloud Platform.`, + }, + "region": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Region where the router and BgpPeer reside. 
+If it is not provided, the provider region is used.`, + }, + "router_appliance_instance": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The URI of the VM instance that is used as third-party router appliances +such as Next Gen Firewalls, Virtual Routers, or Router Appliances. +The VM instance must be located in zones contained in the same region as +this Cloud Router. The VM instance is the peer side of the BGP session.`, + }, + "management_type": { + Type: schema.TypeString, + Computed: true, + Description: `The resource that configures and manages this BGP peer. + +* 'MANAGED_BY_USER' is the default value and can be managed by +you or other users +* 'MANAGED_BY_ATTACHMENT' is a BGP peer that is configured and +managed by Cloud Interconnect, specifically by an +InterconnectAttachment of type PARTNER. Google automatically +creates, updates, and deletes this type of BGP peer when the +PARTNER InterconnectAttachment is created, updated, +or deleted.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "md5_authentication_key": { + Type: schema.TypeList, + Optional: true, + Description: `Present if MD5 authentication is enabled for the peering. Must be the name +of one of the entries in the Router.md5_authentication_keys. The field must comply with RFC1035.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `[REQUIRED] Name used to identify the key. +Must be unique within a router. Must be referenced by exactly one bgpPeer. 
Must comply with RFC1035.`, + }, + "key": { + Type: schema.TypeString, + Required: true, + Description: `Value of the key.`, + Sensitive: true, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeRouterBgpPeerCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + nameProp, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + interfaceNameProp, err := expandNestedComputeRouterBgpPeerInterface(d.Get("interface"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("interface"); !tpgresource.IsEmptyValue(reflect.ValueOf(interfaceNameProp)) && (ok || !reflect.DeepEqual(v, interfaceNameProp)) { + obj["interfaceName"] = interfaceNameProp + } + ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipAddressProp)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { + obj["ipAddress"] = ipAddressProp + } + peerIpAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerIpAddressProp)) && (ok || !reflect.DeepEqual(v, peerIpAddressProp)) { + obj["peerIpAddress"] = peerIpAddressProp + } + peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_asn"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(peerAsnProp)) && (ok || !reflect.DeepEqual(v, peerAsnProp)) { + obj["peerAsn"] = peerAsnProp + } + advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + obj["advertisedRoutePriority"] = advertisedRoutePriorityProp + } + advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertise_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(advertiseModeProp)) && (ok || !reflect.DeepEqual(v, advertiseModeProp)) { + obj["advertiseMode"] = advertiseModeProp + } + advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_groups"); ok || !reflect.DeepEqual(v, advertisedGroupsProp) { + obj["advertisedGroups"] = advertisedGroupsProp + } + advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { + obj["advertisedIpRanges"] = advertisedIpRangesProp + } + bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bfd"); !tpgresource.IsEmptyValue(reflect.ValueOf(bfdProp)) && (ok || !reflect.DeepEqual(v, bfdProp)) { + obj["bfd"] = bfdProp + } + enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable"); ok || !reflect.DeepEqual(v, enableProp) { + obj["enable"] 
= enableProp + } + routerApplianceInstanceProp, err := expandNestedComputeRouterBgpPeerRouterApplianceInstance(d.Get("router_appliance_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("router_appliance_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(routerApplianceInstanceProp)) && (ok || !reflect.DeepEqual(v, routerApplianceInstanceProp)) { + obj["routerApplianceInstance"] = routerApplianceInstanceProp + } + enableIpv6Prop, err := expandNestedComputeRouterBgpPeerEnableIpv6(d.Get("enable_ipv6"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_ipv6"); ok || !reflect.DeepEqual(v, enableIpv6Prop) { + obj["enableIpv6"] = enableIpv6Prop + } + enableIpv4Prop, err := expandNestedComputeRouterBgpPeerEnableIpv4(d.Get("enable_ipv4"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_ipv4"); ok || !reflect.DeepEqual(v, enableIpv4Prop) { + obj["enableIpv4"] = enableIpv4Prop + } + ipv4NexthopAddressProp, err := expandNestedComputeRouterBgpPeerIpv4NexthopAddress(d.Get("ipv4_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipv4_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipv4NexthopAddressProp)) && (ok || !reflect.DeepEqual(v, ipv4NexthopAddressProp)) { + obj["ipv4NexthopAddress"] = ipv4NexthopAddressProp + } + peerIpv4NexthopAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpv4NexthopAddress(d.Get("peer_ipv4_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerIpv4NexthopAddressProp)) && (ok || !reflect.DeepEqual(v, peerIpv4NexthopAddressProp)) { + obj["peerIpv4NexthopAddress"] = peerIpv4NexthopAddressProp + } + ipv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerIpv6NexthopAddress(d.Get("ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if 
v, ok := d.GetOkExists("ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipv6NexthopAddressProp)) && (ok || !reflect.DeepEqual(v, ipv6NexthopAddressProp)) { + obj["ipv6NexthopAddress"] = ipv6NexthopAddressProp + } + peerIpv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(d.Get("peer_ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(peerIpv6NexthopAddressProp)) && (ok || !reflect.DeepEqual(v, peerIpv6NexthopAddressProp)) { + obj["peerIpv6NexthopAddress"] = peerIpv6NexthopAddressProp + } + md5AuthenticationKeyProp, err := expandNestedComputeRouterBgpPeerMd5AuthenticationKey(d.Get("md5_authentication_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("md5_authentication_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(md5AuthenticationKeyProp)) && (ok || !reflect.DeepEqual(v, md5AuthenticationKeyProp)) { + /*some manual handling is required here as the parent cloud router object has a different layout for keyName and keyValue. + bgpPeer blocks in cloud router only specify the keyName to be used and the cloudRouter object has another block called + md5AuthenticationKeys which is an array which specify all the keys (name and value). The constraint here is that a key must + be used by exactly one bgpPeer to be considered valid. 
+ */ + md5AuthenticationKeyName := md5AuthenticationKeyProp.(map[string]interface{})["name"] + obj["md5AuthenticationKeyName"] = md5AuthenticationKeyName + obj["md5AuthenticationKey"] = md5AuthenticationKeyProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{"{{"}}region{{"}}"}}/{{"{{"}}router{{"}}"}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/routers/{{"{{"}}router{{"}}"}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new RouterBgpPeer: %#v", obj) + + obj, err = resourceComputeRouterBgpPeerPatchCreateEncoder(d, meta, obj) + if err != nil { + return err + } + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating RouterBgpPeer: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/routers/{{"{{"}}router{{"}}"}}/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime( + config, res, project, "Creating RouterBgpPeer", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + // The resource didn't actually 
create + d.SetId("") + return fmt.Errorf("Error waiting to create RouterBgpPeer: %s", err) + } + + log.Printf("[DEBUG] Finished creating RouterBgpPeer %q: %#v", d.Id(), res) + + err = d.Set("md5_authentication_key", []interface{}{md5AuthenticationKeyProp}) + if err != nil { + return err + } + + return resourceComputeRouterBgpPeerRead(d, meta) +} + +func resourceComputeRouterBgpPeerRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/routers/{{"{{"}}router{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("ComputeRouterBgpPeer %q", d.Id())) + } + + res, err = flattenNestedComputeRouterBgpPeer(d, meta, res) + if err != nil { + return err + } + + if res == nil { + // Object isn't there any more - remove it from the state. 
+ log.Printf("[DEBUG] Removing ComputeRouterBgpPeer because it couldn't be matched.") + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + + if err := d.Set("name", flattenNestedComputeRouterBgpPeerName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("interface", flattenNestedComputeRouterBgpPeerInterface(res["interfaceName"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("ip_address", flattenNestedComputeRouterBgpPeerIpAddress(res["ipAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_ip_address", flattenNestedComputeRouterBgpPeerPeerIpAddress(res["peerIpAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_asn", flattenNestedComputeRouterBgpPeerPeerAsn(res["peerAsn"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertised_route_priority", flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(res["advertisedRoutePriority"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertise_mode", flattenNestedComputeRouterBgpPeerAdvertiseMode(res["advertiseMode"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertised_groups", flattenNestedComputeRouterBgpPeerAdvertisedGroups(res["advertisedGroups"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("advertised_ip_ranges", flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(res["advertisedIpRanges"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := 
d.Set("management_type", flattenNestedComputeRouterBgpPeerManagementType(res["managementType"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("bfd", flattenNestedComputeRouterBgpPeerBfd(res["bfd"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("enable", flattenNestedComputeRouterBgpPeerEnable(res["enable"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("router_appliance_instance", flattenNestedComputeRouterBgpPeerRouterApplianceInstance(res["routerApplianceInstance"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("enable_ipv6", flattenNestedComputeRouterBgpPeerEnableIpv6(res["enableIpv6"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("enable_ipv4", flattenNestedComputeRouterBgpPeerEnableIpv4(res["enableIpv4"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("ipv4_nexthop_address", flattenNestedComputeRouterBgpPeerIpv4NexthopAddress(res["ipv4NexthopAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_ipv4_nexthop_address", flattenNestedComputeRouterBgpPeerPeerIpv4NexthopAddress(res["peerIpv4NexthopAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("ipv6_nexthop_address", flattenNestedComputeRouterBgpPeerIpv6NexthopAddress(res["ipv6NexthopAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err := d.Set("peer_ipv6_nexthop_address", flattenNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(res["peerIpv6NexthopAddress"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + if err 
:= d.Set("md5_authentication_key", flattenNestedComputeRouterBgpPeerMd5AuthenticationKey(res["md5AuthenticationKeyName"], d, config)); err != nil { + return fmt.Errorf("Error reading RouterBgpPeer: %s", err) + } + + return nil +} + +func resourceComputeRouterBgpPeerUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + ipAddressProp, err := expandNestedComputeRouterBgpPeerIpAddress(d.Get("ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipAddressProp)) { + obj["ipAddress"] = ipAddressProp + } + peerIpAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpAddress(d.Get("peer_ip_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ip_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerIpAddressProp)) { + obj["peerIpAddress"] = peerIpAddressProp + } + peerAsnProp, err := expandNestedComputeRouterBgpPeerPeerAsn(d.Get("peer_asn"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_asn"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerAsnProp)) { + obj["peerAsn"] = peerAsnProp + } + advertisedRoutePriorityProp, err := expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(d.Get("advertised_route_priority"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_route_priority"); ok || !reflect.DeepEqual(v, advertisedRoutePriorityProp) { + 
obj["advertisedRoutePriority"] = advertisedRoutePriorityProp + } + advertiseModeProp, err := expandNestedComputeRouterBgpPeerAdvertiseMode(d.Get("advertise_mode"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertise_mode"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, advertiseModeProp)) { + obj["advertiseMode"] = advertiseModeProp + } + advertisedGroupsProp, err := expandNestedComputeRouterBgpPeerAdvertisedGroups(d.Get("advertised_groups"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_groups"); ok || !reflect.DeepEqual(v, advertisedGroupsProp) { + obj["advertisedGroups"] = advertisedGroupsProp + } + advertisedIpRangesProp, err := expandNestedComputeRouterBgpPeerAdvertisedIpRanges(d.Get("advertised_ip_ranges"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("advertised_ip_ranges"); ok || !reflect.DeepEqual(v, advertisedIpRangesProp) { + obj["advertisedIpRanges"] = advertisedIpRangesProp + } + bfdProp, err := expandNestedComputeRouterBgpPeerBfd(d.Get("bfd"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("bfd"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, bfdProp)) { + obj["bfd"] = bfdProp + } + enableProp, err := expandNestedComputeRouterBgpPeerEnable(d.Get("enable"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable"); ok || !reflect.DeepEqual(v, enableProp) { + obj["enable"] = enableProp + } + routerApplianceInstanceProp, err := expandNestedComputeRouterBgpPeerRouterApplianceInstance(d.Get("router_appliance_instance"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("router_appliance_instance"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, routerApplianceInstanceProp)) { + obj["routerApplianceInstance"] = routerApplianceInstanceProp + } + enableIpv6Prop, err := 
expandNestedComputeRouterBgpPeerEnableIpv6(d.Get("enable_ipv6"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_ipv6"); ok || !reflect.DeepEqual(v, enableIpv6Prop) { + obj["enableIpv6"] = enableIpv6Prop + } + enableIpv4Prop, err := expandNestedComputeRouterBgpPeerEnableIpv4(d.Get("enable_ipv4"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("enable_ipv4"); ok || !reflect.DeepEqual(v, enableIpv4Prop) { + obj["enableIpv4"] = enableIpv4Prop + } + ipv4NexthopAddressProp, err := expandNestedComputeRouterBgpPeerIpv4NexthopAddress(d.Get("ipv4_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipv4_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(ipv4NexthopAddressProp)) && (ok || !reflect.DeepEqual(v, ipv4NexthopAddressProp)) { + obj["ipv4NexthopAddress"] = ipv4NexthopAddressProp + } + peerIpv4NexthopAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpv4NexthopAddress(d.Get("peer_ipv4_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ipv4_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, peerIpv4NexthopAddressProp)) { + obj["peerIpv4NexthopAddress"] = peerIpv4NexthopAddressProp + } + ipv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerIpv6NexthopAddress(d.Get("ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, ipv6NexthopAddressProp)) { + obj["ipv6NexthopAddress"] = ipv6NexthopAddressProp + } + peerIpv6NexthopAddressProp, err := expandNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(d.Get("peer_ipv6_nexthop_address"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("peer_ipv6_nexthop_address"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, peerIpv6NexthopAddressProp)) { + obj["peerIpv6NexthopAddress"] = peerIpv6NexthopAddressProp + } + md5AuthenticationKeyProp, err := expandNestedComputeRouterBgpPeerMd5AuthenticationKey(d.Get("md5_authentication_key"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("md5_authentication_key"); !tpgresource.IsEmptyValue(reflect.ValueOf(md5AuthenticationKeyProp)) && (ok || !reflect.DeepEqual(v, md5AuthenticationKeyProp)) { + /*some manual handling is required here as the parent cloud router object has a different layout for keyName and keyValue. + bgpPeer blocks in cloud router only specify the keyName to be used and the cloudRouter object has another block called + md5AuthenticationKeys which is an array which specify all the keys (name and value). The constraint here is that a key must + be used by exactly one bgpPeer to be considered valid. + */ + md5AuthenticationKeyName := md5AuthenticationKeyProp.(map[string]interface{})["name"] + obj["md5AuthenticationKeyName"] = md5AuthenticationKeyName + obj["md5AuthenticationKey"] = md5AuthenticationKeyProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{"{{"}}region{{"}}"}}/{{"{{"}}router{{"}}"}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/routers/{{"{{"}}router{{"}}"}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating RouterBgpPeer %q: %#v", d.Id(), obj) + + obj, err = resourceComputeRouterBgpPeerPatchUpdateEncoder(d, meta, obj) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + 
Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error updating RouterBgpPeer %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating RouterBgpPeer %q: %#v", d.Id(), res) + } + + err = ComputeOperationWaitTime( + config, res, project, "Updating RouterBgpPeer", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + err = d.Set("md5_authentication_key", []interface{}{md5AuthenticationKeyProp}) + if err != nil { + return err + } + + return resourceComputeRouterBgpPeerRead(d, meta) +} + +func resourceComputeRouterBgpPeerDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for RouterBgpPeer: %s", err) + } + billingProject = project + + lockName, err := tpgresource.ReplaceVars(d, config, "router/{{"{{"}}region{{"}}"}}/{{"{{"}}router{{"}}"}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/routers/{{"{{"}}router{{"}}"}}") + if err != nil { + return err + } + + var obj map[string]interface{} + + obj, err = resourceComputeRouterBgpPeerPatchDeleteEncoder(d, meta, obj) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RouterBgpPeer") + } + log.Printf("[DEBUG] Deleting RouterBgpPeer %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + 
billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "RouterBgpPeer") + } + + err = ComputeOperationWaitTime( + config, res, project, "Deleting RouterBgpPeer", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting RouterBgpPeer %q: %#v", d.Id(), res) + return nil +} + +func resourceComputeRouterBgpPeerImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "^projects/(?P[^/]+)/regions/(?P[^/]+)/routers/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)/(?P[^/]+)$", + "^(?P[^/]+)/(?P[^/]+)$", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/routers/{{"{{"}}router{{"}}"}}/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedComputeRouterBgpPeerName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerInterface(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return 
v +} + +func flattenNestedComputeRouterBgpPeerPeerAsn(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "DEFAULT" + } + + return v +} + +func flattenNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "range": flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config), + "description": 
flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config), + }) + } + return transformed +} +func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerManagementType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerMd5AuthenticationKey(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + originalKeyValue := d.Get("md5_authentication_key").([]interface{}) + transformed := make(map[string]interface{}) + transformed["name"] = v + //key value is not returned as it is a sensitive field + if len(originalKeyValue) != 0 { + transformed["key"] = originalKeyValue[0].(map[string]interface{})["key"] + } + return []interface{}{transformed} +} + +func flattenNestedComputeRouterBgpPeerBfd(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["session_initialization_mode"] = + flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["sessionInitializationMode"], d, config) + transformed["min_transmit_interval"] = + flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["minTransmitInterval"], d, config) + transformed["min_receive_interval"] = + flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["minReceiveInterval"], d, config) + transformed["multiplier"] = + flattenNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) + 
return []interface{}{transformed} +} + +func flattenNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenNestedComputeRouterBgpPeerEnable(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return true + } + b, err := strconv.ParseBool(v.(string)) + if err != nil { + // If we can't convert it into a bool return value as is 
and let caller handle it + return v + } + return b +} + +func flattenNestedComputeRouterBgpPeerRouterApplianceInstance(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return tpgresource.ConvertSelfLinkToV1(v.(string)) +} + +func flattenNestedComputeRouterBgpPeerEnableIpv6(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerEnableIpv4(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerIpv4NexthopAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerPeerIpv4NexthopAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerIpv6NexthopAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedComputeRouterBgpPeerName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerInterface(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerIpAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerAsn(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedRoutePriority(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertiseMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedGroups(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedIpRanges(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedRange, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(original["range"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRange); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["range"] = transformedRange + } + + transformedDescription, err := expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(original["description"], d, config) + if err != nil { + return nil, err + } else { + transformed["description"] = transformedDescription + } + + req = append(req, transformed) + } + return req, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedIpRangesRange(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerAdvertisedIpRangesDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandNestedComputeRouterBgpPeerBfd(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSessionInitializationMode, err := expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(original["session_initialization_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSessionInitializationMode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sessionInitializationMode"] = transformedSessionInitializationMode + } + + transformedMinTransmitInterval, err := expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(original["min_transmit_interval"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinTransmitInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minTransmitInterval"] = transformedMinTransmitInterval + } + + transformedMinReceiveInterval, err := expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(original["min_receive_interval"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinReceiveInterval); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minReceiveInterval"] = transformedMinReceiveInterval + } + + transformedMultiplier, err := expandNestedComputeRouterBgpPeerBfdMultiplier(original["multiplier"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMultiplier); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["multiplier"] = transformedMultiplier + } + + return transformed, nil +} + +func expandNestedComputeRouterBgpPeerMd5AuthenticationKey(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, 
error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMd5AuthenticationKeyName, err := expandNestedComputeRouterBgpPeerMd5AuthenticationKeyMd5AuthenticationKeyName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMd5AuthenticationKeyName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedMd5AuthenticationKeyName + } + + transformedMd5AuthenticationKeyValue, err := expandNestedComputeRouterBgpPeerMd5AuthenticationKeyMd5AuthenticationKeyValue(original["key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMd5AuthenticationKeyValue); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["key"] = transformedMd5AuthenticationKeyValue + } + + return transformed, nil +} + +func expandNestedComputeRouterBgpPeerMd5AuthenticationKeyMd5AuthenticationKeyName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerMd5AuthenticationKeyMd5AuthenticationKeyValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfdSessionInitializationMode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfdMinTransmitInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerBfdMinReceiveInterval(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandNestedComputeRouterBgpPeerBfdMultiplier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerEnable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + return strings.ToUpper(strconv.FormatBool(v.(bool))), nil +} + +func expandNestedComputeRouterBgpPeerRouterApplianceInstance(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + f, err := tpgresource.ParseZonalFieldValue("instances", v.(string), "project", "zone", d, config, true) + if err != nil { + return nil, fmt.Errorf("Invalid value for router_appliance_instance: %s", err) + } + return f.RelativeLink(), nil +} + +func expandNestedComputeRouterBgpPeerEnableIpv6(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerEnableIpv4(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerIpv4NexthopAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerIpv4NexthopAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerIpv6NexthopAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedComputeRouterBgpPeerPeerIpv6NexthopAddress(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedComputeRouterBgpPeer(d *schema.ResourceData, meta 
interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["bgpPeers"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value bgpPeers. Actual value: %v", v) + } + + _, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceComputeRouterBgpPeerFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName, err := expandNestedComputeRouterBgpPeerName(d.Get("name"), d, meta.(*transport_tpg.Config)) + if err != nil { + return -1, nil, err + } + expectedFlattenedName := flattenNestedComputeRouterBgpPeerName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. + for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + item := itemRaw.(map[string]interface{}) + + itemName := flattenNestedComputeRouterBgpPeerName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + log.Printf("[DEBUG] Found item for resource %q: %#v)", d.Id(), item) + return idx, item, nil + } + return -1, nil, nil +} + +// PatchCreateEncoder handles creating request data to PATCH parent resource +// with list including new object. 
+func resourceComputeRouterBgpPeerPatchCreateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currBgpPeerItems, currMd5AuthenticationKeys, err := resourceComputeRouterBgpPeerListForPatch(d, meta) + if err != nil { + return nil, err + } + + _, found, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currBgpPeerItems) + if err != nil { + return nil, err + } + + // Return error if item already created. + if found != nil { + return nil, fmt.Errorf("Unable to create RouterBgpPeer, existing object already found: %+v", found) + } + + var res map[string]interface{} + + // Return list with the resource to create appended + val, ok := obj["md5AuthenticationKey"] + + if ok { + kvp := val.(map[string]interface{}) + res = map[string]interface{}{ + "bgpPeers": append(currBgpPeerItems, obj), + "md5AuthenticationKeys": append(currMd5AuthenticationKeys, kvp), + } + + //we need to remove this key from the object as it not a part of bgpRouterPeer + delete(obj, "md5AuthenticationKey") + } else { + res = map[string]interface{}{ + "bgpPeers": append(currBgpPeerItems, obj), + } + } + + return res, nil +} + +// PatchUpdateEncoder handles creating request data to PATCH parent resource +// with list including updated object. +func resourceComputeRouterBgpPeerPatchUpdateEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + bgpPeerItems, md5AuthenticationKeys, err := resourceComputeRouterBgpPeerListForPatch(d, meta) + if err != nil { + return nil, err + } + + log.Printf("[DEBUG] inside UpdateEncoder - bgpPeerItems: %+v, md5AuthenticationKeys - %+v", bgpPeerItems, md5AuthenticationKeys) + log.Printf("[DEBUG] inside UpdateEncoder - obj: %+v", obj) + + idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, bgpPeerItems) + if err != nil { + return nil, err + } + + // Return error if item to update does not exist. 
+ if item == nil { + return nil, fmt.Errorf("Unable to update RouterBgpPeer %q - not found in list", d.Id()) + } + + var md5AuthenticationKey map[string]interface{} + var deletedKeyName interface{} + var wasPresent bool + val, ok := obj["md5AuthenticationKey"] + if ok { + md5AuthenticationKey = val.(map[string]interface{}) + //remove key from this map as it not needed here + delete(obj, "md5AuthenticationKey") + } else { + //check if key used to be present + deletedKeyName, wasPresent = item["md5AuthenticationKeyName"] + if wasPresent { + delete(item, "md5AuthenticationKeyName") + } + } + + //merging the bgpRouterPeer objects + for k, v := range obj { + item[k] = v + } + log.Printf("[DEBUG] UpdateEncoder - sending new object to be updated %#v", item) + + //merging the md5AuthenticationKeys objects + isKeyNew := true + log.Printf("[DEBUG] UpdateEncoder - currentMd5AuthenticationKeys %#v", md5AuthenticationKeys) + for i, val := range md5AuthenticationKeys { + key := val.(map[string]interface{}) + if key["name"] == md5AuthenticationKey["name"] { + key = md5AuthenticationKey + md5AuthenticationKeys[i] = key + isKeyNew = false + } + + if key["name"] == deletedKeyName { + //if the key was deleted, then remove it from the parent router object as well + md5AuthenticationKeys = append(md5AuthenticationKeys[:i], md5AuthenticationKeys[i+1:]...) + log.Printf("[DEBUG] deleting unused key from parent object ,md5AuthenticationKeys - %+v", md5AuthenticationKeys) + } + + } + bgpPeerItems[idx] = item + if isKeyNew { + md5AuthenticationKeys = append(md5AuthenticationKeys, md5AuthenticationKey) + } + + res := map[string]interface{}{ + "bgpPeers": bgpPeerItems, + "md5AuthenticationKeys": md5AuthenticationKeys, + } + + return res, nil +} + +// PatchDeleteEncoder handles creating request data to PATCH parent resource +// with list excluding object to delete. 
+func resourceComputeRouterBgpPeerPatchDeleteEncoder(d *schema.ResourceData, meta interface{}, obj map[string]interface{}) (map[string]interface{}, error) { + currItems, md5AuthenticationKeys, err := resourceComputeRouterBgpPeerListForPatch(d, meta) + if err != nil { + return nil, err + } + + idx, item, err := resourceComputeRouterBgpPeerFindNestedObjectInList(d, meta, currItems) + if err != nil { + return nil, err + } + if item == nil { + // Spoof 404 error for proper handling by Delete (i.e. no-op) + return nil, tpgresource.Fake404("nested", "ComputeRouterBgpPeer") + } + + //if the removed bgp peer has some md5AuthKey associated with it, then remove the key from the router parent object as well + keyName := item["md5AuthenticationKeyName"] + for i, val := range md5AuthenticationKeys { + key := val.(map[string]interface{}) + if key["name"] == keyName { + md5AuthenticationKeys = append(md5AuthenticationKeys[:i], md5AuthenticationKeys[i+1:]...) + } + } + + updatedItems := append(currItems[:idx], currItems[idx+1:]...) + res := map[string]interface{}{ + "bgpPeers": updatedItems, + "md5AuthenticationKeys": md5AuthenticationKeys, + } + + return res, nil +} + +// ListForPatch handles making API request to get parent resource and +// extracting list of objects. 
+func resourceComputeRouterBgpPeerListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, []interface{}, error) { + config := meta.(*transport_tpg.Config) + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/routers/{{"{{"}}router{{"}}"}}") + if err != nil { + return nil, nil, err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: project, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return nil, nil, err + } + + var v interface{} + var ok bool + var ls, keys []interface{} + var lsOk, keysOk bool + + v, ok = res["bgpPeers"] + if ok && v != nil { + ls, lsOk = v.([]interface{}) + if !lsOk { + return nil, nil, fmt.Errorf(`expected list for nested field "bgpPeers"`) + } + } + v, ok = res["md5AuthenticationKeys"] + if ok && v != nil { + keys, keysOk = v.([]interface{}) + if !keysOk { + return nil, nil, fmt.Errorf(`expected list for nested field "md5AuthenticationKeys"`) + } + } + + if lsOk && keysOk { + return ls, keys, nil + } else if !lsOk && keysOk { + return nil, keys, nil + } else if lsOk && !keysOk { + return ls, nil, nil + } else { + return nil, nil, nil + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer_test.go.tmpl new file mode 100644 index 000000000000..b61ffbcca595 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_peer_test.go.tmpl @@ -0,0 +1,187 @@ +package compute_test + +import ( + "fmt" + "strings" + "testing" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccComputeRouterBgpPeer_routerPeerRouterAppliance(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterBgpPeerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterBgpPeer_routerPeerRouterAppliance(context), + }, + { + ResourceName: "google_compute_router_peer.peer", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"router_appliance_instance", "router", "region"}, + }, + }, + }) +} + +func testAccComputeRouterBgpPeer_routerPeerRouterAppliance(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "network" { + name = "tf-test-my-router%{random_suffix}-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "tf-test-my-router%{random_suffix}-sub" + network = google_compute_network.network.self_link + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" +} + +resource "google_compute_address" "addr_intf" { + name = "tf-test-my-router%{random_suffix}-addr-intf" + region = google_compute_subnetwork.subnetwork.region + subnetwork = google_compute_subnetwork.subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_address" "addr_intf_redundant" { + name = "tf-test-my-router%{random_suffix}-addr-intf-red" + region = google_compute_subnetwork.subnetwork.region + subnetwork = 
google_compute_subnetwork.subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_address" "addr_peer" { + name = "tf-test-my-router%{random_suffix}-addr-peer" + region = google_compute_subnetwork.subnetwork.region + subnetwork = google_compute_subnetwork.subnetwork.id + address_type = "INTERNAL" +} + +resource "google_compute_instance" "instance" { + name = "router-appliance" + zone = "us-central1-a" + machine_type = "e2-medium" + can_ip_forward = true + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + network_interface { + network_ip = google_compute_address.addr_peer.address + subnetwork = google_compute_subnetwork.subnetwork.self_link + } +} + +resource "google_network_connectivity_hub" "hub" { + name = "tf-test-my-router%{random_suffix}-hub" +} + +resource "google_network_connectivity_spoke" "spoke" { + name = "tf-test-my-router%{random_suffix}-spoke" + location = google_compute_subnetwork.subnetwork.region + hub = google_network_connectivity_hub.hub.id + + linked_router_appliance_instances { + instances { + virtual_machine = google_compute_instance.instance.self_link + ip_address = google_compute_address.addr_peer.address + } + site_to_site_data_transfer = false + } +} + +resource "google_compute_router" "router" { + name = "tf-test-my-router%{random_suffix}-router" + region = google_compute_subnetwork.subnetwork.region + network = google_compute_network.network.self_link + bgp { + asn = 64514 + } +} + +resource "google_compute_router_interface" "interface_redundant" { + name = "tf-test-my-router%{random_suffix}-intf-red" + region = google_compute_router.router.region + router = google_compute_router.router.name + subnetwork = google_compute_subnetwork.subnetwork.self_link + private_ip_address = google_compute_address.addr_intf_redundant.address +} + +resource "google_compute_router_interface" "interface" { + name = "tf-test-my-router%{random_suffix}-intf" + region = google_compute_router.router.region + 
router = google_compute_router.router.name + subnetwork = google_compute_subnetwork.subnetwork.self_link + private_ip_address = google_compute_address.addr_intf.address + redundant_interface = google_compute_router_interface.interface_redundant.name +} + +resource "google_compute_router_peer" "peer" { + name = "tf-test-my-router-peer%{random_suffix}" + router = google_compute_router.router.name + region = google_compute_router.router.region + interface = google_compute_router_interface.interface.name + router_appliance_instance = google_compute_instance.instance.self_link + peer_asn = 65513 + peer_ip_address = google_compute_address.addr_peer.address +} +`, context) +} + +func testAccCheckComputeRouterBgpPeerDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_router_peer" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/routers/{{"{{"}}router{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("ComputeRouterBgpPeer still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_router_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_test.go new file mode 100644 index 000000000000..9bb8070b14b7 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_router_test.go @@ 
-0,0 +1,340 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccComputeRouter_basic(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-%s", testId) + resourceRegion := "europe-west1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterBasic(routerName, resourceRegion), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouter_noRegion(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-%s", testId) + providerRegion := "us-central1" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterNoRegion(routerName, providerRegion), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouter_full(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-%s", testId) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeRouterFull(routerName), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouter_update(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-%s", testId) + region := envvar.GetTestRegionFromEnv() + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterBasic(routerName, region), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterFull(routerName), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterBasic(routerName, region), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouter_updateAddRemoveBGP(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-%s", testId) + region := envvar.GetTestRegionFromEnv() + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouterBasic(routerName, region), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouter_noBGP(routerName, region), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouterBasic(routerName, region), 
+ }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeRouter_addAndUpdateIdentifierRangeBgp(t *testing.T) { + t.Parallel() + + testId := acctest.RandString(t, 10) + routerName := fmt.Sprintf("tf-test-router-%s", testId) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeRouterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeRouter_addIdentifierRangeBgp(routerName), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeRouter_updateIdentifierRangeBgp(routerName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_router.foobar", "bgp.0.identifier_range", "169.254.8.8/30"), + ), + }, + { + ResourceName: "google_compute_router.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeRouterBasic(routerName, resourceRegion string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "%s" +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.name + bgp { + asn = 4294967294 + } +} +`, routerName, routerName, resourceRegion, routerName) +} + +func testAccComputeRouterNoRegion(routerName, providerRegion string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + 
network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "%s" +} + +resource "google_compute_router" "foobar" { + name = "%s" + network = google_compute_network.foobar.name + bgp { + asn = 64514 + } +} +`, routerName, routerName, providerRegion, routerName) +} + +func testAccComputeRouterFull(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_router" "foobar" { + name = "%s" + network = google_compute_network.foobar.name + bgp { + asn = 64514 + advertise_mode = "CUSTOM" + advertised_groups = ["ALL_SUBNETS"] + advertised_ip_ranges { + range = "1.2.3.4" + } + advertised_ip_ranges { + range = "6.7.0.0/16" + } + keepalive_interval = 25 + } +} +`, routerName, routerName) +} + +func testAccComputeRouter_noBGP(routerName, resourceRegion string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "foobar" { + name = "%s-subnet" + network = google_compute_network.foobar.self_link + ip_cidr_range = "10.0.0.0/16" + region = "%s" +} + +resource "google_compute_router" "foobar" { + name = "%s" + region = google_compute_subnetwork.foobar.region + network = google_compute_network.foobar.name +} +`, routerName, routerName, resourceRegion, routerName) +} + +func testAccComputeRouter_addIdentifierRangeBgp(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_router" "foobar" { + name = "%s" + network = google_compute_network.foobar.name + bgp { + asn = 64514 + advertise_mode = "CUSTOM" + advertised_groups = ["ALL_SUBNETS"] + advertised_ip_ranges { + range = "1.2.3.4" + } + advertised_ip_ranges { + range = "6.7.0.0/16" + } + identifier_range = "169.254.8.8/29" + 
keepalive_interval = 25 + } +} +`, routerName, routerName) +} + +func testAccComputeRouter_updateIdentifierRangeBgp(routerName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s-net" + auto_create_subnetworks = false +} + +resource "google_compute_router" "foobar" { + name = "%s" + network = google_compute_network.foobar.name + bgp { + asn = 64514 + advertise_mode = "CUSTOM" + advertised_groups = ["ALL_SUBNETS"] + advertised_ip_ranges { + range = "1.2.3.4" + } + advertised_ip_ranges { + range = "6.7.0.0/16" + } + identifier_range = "169.254.8.8/30" + keepalive_interval = 25 + } +} +`, routerName, routerName) +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy.go.tmpl new file mode 100644 index 000000000000..9ca599e2b542 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy.go.tmpl @@ -0,0 +1,1705 @@ +package compute + +import ( + "context" + "fmt" + "log" + "strings" + + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeSecurityPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSecurityPolicyCreate, + Read: resourceComputeSecurityPolicyRead, + Update: resourceComputeSecurityPolicyUpdate, + Delete: 
resourceComputeSecurityPolicyDelete, + Importer: &schema.ResourceImporter{ + State: resourceSecurityPolicyStateImporter, + }, + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + rulesCustomizeDiff, + ), + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateGCEName, + Description: `The name of the security policy.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: `An optional description of this security policy. Max size is 2048.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The type indicates the intended use of the security policy. CLOUD_ARMOR - Cloud Armor backend security policies can be configured to filter incoming HTTP requests targeting backend services. They filter requests before they hit the origin servers. CLOUD_ARMOR_EDGE - Cloud Armor edge security policies can be configured to filter incoming HTTP requests targeting backend services (including Cloud CDN-enabled) as well as backend buckets (Cloud Storage). 
They filter requests before the request is served from Google's cache.`, + ValidateFunc: validation.StringInSlice([]string{"CLOUD_ARMOR", "CLOUD_ARMOR_EDGE", "CLOUD_ARMOR_INTERNAL_SERVICE"}, false), + }, + + "rule": { + Type: schema.TypeSet, + Optional: true, + Computed: true, // If no rules are set, a default rule is added + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeString, + Required: true, + Description: `Action to take when match matches the request.`, + }, + + "priority": { + Type: schema.TypeInt, + Required: true, + Description: `An unique positive integer indicating the priority of evaluation for a rule. Rules are evaluated from highest priority (lowest numerically) to lowest priority (highest numerically) in order.`, + }, + + "match": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "src_ip_ranges": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 10, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Set of IP addresses or ranges (IPV4 or IPV6) in CIDR notation to match against inbound traffic. There is a limit of 10 IP ranges per rule. A value of '*' matches all IPs (can be used to override the default behavior).`, + }, + }, + }, + Description: `The configuration options available when specifying versioned_expr. This field must be specified if versioned_expr is specified and cannot be specified if versioned_expr is not specified.`, + }, + + "versioned_expr": { + Type: schema.TypeString, + Optional: true, + Default: "", + ValidateFunc: validation.StringInSlice([]string{"SRC_IPS_V1"}, false), + Description: `Predefined rule expression. If this field is specified, config must also be specified. 
Available options: SRC_IPS_V1: Must specify the corresponding src_ip_ranges field in config.`, + }, + + "expr": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + Description: `Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported.`, + }, + // These fields are not yet supported (Issue hashicorp/terraform-provider-google#4497: mbang) + // "title": { + // Type: schema.TypeString, + // Optional: true, + // }, + // "description": { + // Type: schema.TypeString, + // Optional: true, + // }, + // "location": { + // Type: schema.TypeString, + // Optional: true, + // }, + }, + }, + Description: `User defined CEVAL expression. A CEVAL expression is used to specify match criteria such as origin.ip, source.region_code and contents in the request header.`, + }, + + "expr_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr').`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "recaptcha_options": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: `reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field has no effect.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action_token_site_keys": { + Type: schema.TypeList, + Optional: true, + Description: `A list of site keys to be used during the validation of reCAPTCHA action-tokens. 
The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created`, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "session_token_site_keys": { + Type: schema.TypeList, + Optional: true, + Description: `A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created.`, + MinItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Description: `A match condition that incoming traffic is evaluated against. If it evaluates to true, the corresponding action is enforced.`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "preconfigured_waf_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "exclusion": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "request_header": resourceComputeSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParamsSchema( + `Request header whose value will be excluded from inspection during preconfigured WAF evaluation.`, + ), + + "request_cookie": resourceComputeSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParamsSchema( + `Request cookie whose value will be excluded from inspection during preconfigured WAF evaluation.`, + ), + + "request_uri": resourceComputeSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParamsSchema( + `Request URI from the request line to be excluded from inspection during preconfigured WAF evaluation. 
When specifying this field, the query or fragment part should be excluded.`, + ), + + "request_query_param": resourceComputeSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParamsSchema( + `Request query parameter whose value will be excluded from inspection during preconfigured WAF evaluation. Note that the parameter can be in the query string or in the POST body.`, + ), + + "target_rule_set": { + Type: schema.TypeString, + Required: true, + Description: `Target WAF rule set to apply the preconfigured WAF exclusion.`, + }, + + "target_rule_ids": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A list of target rule IDs under the WAF rule set to apply the preconfigured WAF exclusion. If omitted, it refers to all the rule IDs under the WAF rule set.`, + }, + }, + }, + Description: `An exclusion to apply during preconfigured WAF evaluation.`, + }, + }, + }, + Description: `Preconfigured WAF configuration to be applied for the rule. If the rule does not evaluate preconfigured WAF rules, i.e., if evaluatePreconfiguredWaf() is not used, this field will have no effect.`, + }, + {{- end }} + + "description": { + Type: schema.TypeString, + Default: "", + Optional: true, + Description: `An optional description of this rule. Max size is 64.`, + }, + + "preview": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `When set to true, the action specified above is not enforced. Stackdriver logs for requests that trigger a preview action are annotated as such.`, + }, + + "rate_limit_options": { + Type: schema.TypeList, + Optional: true, + Description: `Rate limit threshold for this security policy. Must be specified if the action is "rate_based_ban" or "throttle". 
Cannot be specified for any other actions.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rate_limit_threshold": { + Type: schema.TypeList, + Required: true, + Description: `Threshold at which to begin ratelimiting.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Required: true, + Description: `Number of HTTP(S) requests for calculating the threshold.`, + }, + + "interval_sec": { + Type: schema.TypeInt, + Required: true, + Description: `Interval over which the threshold is computed.`, + }, + }, + }, + }, + + "conform_action": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"allow"}, false), + Description: `Action to take for requests that are under the configured rate limit threshold. Valid option is "allow" only.`, + }, + + "exceed_action": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"redirect", "deny(403)", "deny(404)", "deny(429)", "deny(502)"}, false), + Description: `Action to take for requests that are above the configured rate limit threshold, to either deny with a specified HTTP response code, or redirect to a different endpoint. 
Valid options are "deny()" where valid values for status are 403, 404, 429, and 502, and "redirect" where the redirect parameters come from exceedRedirectOptions below.`, + }, + + "enforce_on_key": { + Type: schema.TypeString, + Optional: true, + Description: `Determines the key to enforce the rateLimitThreshold on`, + ValidateFunc: validation.StringInSlice([]string{"ALL", "IP", "HTTP_HEADER", "XFF_IP", "HTTP_COOKIE", "HTTP_PATH", "SNI", "REGION_CODE", "TLS_JA3_FINGERPRINT", "USER_IP", ""}, false), + }, + + "enforce_on_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "enforce_on_key_configs": { + Type: schema.TypeList, + Description: `Enforce On Key Config of this security policy`, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enforce_on_key_type": { + Type: schema.TypeString, + Optional: true, + Description: `Determines the key to enforce the rate_limit_threshold on`, + ValidateFunc: validation.StringInSlice([]string{"ALL", "IP", "HTTP_HEADER", "XFF_IP", "HTTP_COOKIE", "HTTP_PATH", "SNI", "REGION_CODE", "TLS_JA3_FINGERPRINT", "USER_IP"}, false), + }, + "enforce_on_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `Rate limit key name applicable only for the following key types: HTTP_HEADER -- Name of the HTTP header whose value is taken as the key value. HTTP_COOKIE -- Name of the HTTP cookie whose value is taken as the key value.`, + }, + }, + }, + }, + {{- end }} + + "ban_threshold": { + Type: schema.TypeList, + Optional: true, + Description: `Can only be specified if the action for the rule is "rate_based_ban". 
If specified, the key will be banned for the configured 'banDurationSec' when the number of requests that exceed the 'rateLimitThreshold' also exceed this 'banThreshold'.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Required: true, + Description: `Number of HTTP(S) requests for calculating the threshold.`, + }, + + "interval_sec": { + Type: schema.TypeInt, + Required: true, + Description: `Interval over which the threshold is computed.`, + }, + }, + }, + }, + + "ban_duration_sec": { + Type: schema.TypeInt, + Optional: true, + Description: `Can only be specified if the action for the rule is "rate_based_ban". If specified, determines the time (in seconds) the traffic will continue to be banned by the rate limit after the rate falls below the threshold.`, + }, + + "exceed_redirect_options": { + Type: schema.TypeList, + Optional: true, + Description: `Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + Description: `Type of the redirect action.`, + ValidateFunc: validation.StringInSlice([]string{"EXTERNAL_302", "GOOGLE_RECAPTCHA"}, false), + }, + + "target": { + Type: schema.TypeString, + Optional: true, + Description: `Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA.`, + }, + }, + }, + }, + }, + }, + }, + + "redirect_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"EXTERNAL_302", "GOOGLE_RECAPTCHA"}, false), + Description: `Type of the redirect action. 
Available options: EXTERNAL_302: Must specify the corresponding target field in config. GOOGLE_RECAPTCHA: Cannot specify target field in config.`, + }, + + "target": { + Type: schema.TypeString, + Optional: true, + Description: `Target for the redirect action. This is required if the type is EXTERNAL_302 and cannot be specified for GOOGLE_RECAPTCHA.`, + }, + }, + }, + Description: `Parameters defining the redirect action. Cannot be specified for any other actions.`, + }, + "header_action": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Additional actions that are performed on headers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "request_headers_to_adds": { + Type: schema.TypeList, + Required: true, + Description: `The list of request headers to add or overwrite if they're already present.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the header to set.`, + }, + "header_value": { + Type: schema.TypeString, + Optional: true, + Description: `The value to set the named header to.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Description: `The set of rules that belong to this policy. There must always be a default rule (rule with priority 2147483647 and match "*"). 
If no rules are provided when creating a security policy, a default rule with action "allow" will be added.`, + }, + + "fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `Fingerprint of this resource.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + + "advanced_options_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `Advanced Options Config of this security policy.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "json_parsing": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "STANDARD", "STANDARD_WITH_GRAPHQL"}, false), + Description: `JSON body parsing. Supported values include: "DISABLED", "STANDARD".`, + }, + "json_custom_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `Custom configuration to apply the JSON parsing. Only applicable when JSON parsing is set to STANDARD.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "content_types": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A list of custom Content-Type header values to apply the JSON parsing.`, + }, + }, + }, + }, + "log_level": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"NORMAL", "VERBOSE"}, false), + Description: `Logging level. 
Supported values include: "NORMAL", "VERBOSE".`, + }, + "user_ip_request_headers": { + Type: schema.TypeSet, + Optional: true, + Description: `An optional list of case-insensitive request header names to use for resolving the callers client IP address.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + + "adaptive_protection_config": { + Type: schema.TypeList, + Optional: true, + Description: `Adaptive Protection Config of this security policy.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "layer_7_ddos_defense_config": { + Type: schema.TypeList, + Description: `Layer 7 DDoS Defense Config of this security policy`, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable": { + Type: schema.TypeBool, + Optional: true, + Description: `If set to true, enables CAAP for L7 DDoS detection.`, + }, + "rule_visibility": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"STANDARD", "PREMIUM"}, false), + Description: `Rule visibility. 
Supported values include: "STANDARD", "PREMIUM".`, + }, + }, + }, + }, + {{- if ne $.TargetVersionName "ga" }} + "auto_deploy_config": { + Type: schema.TypeList, + Description: `Auto Deploy Config of this security policy`, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "load_threshold": { + Type: schema.TypeFloat, + Optional: true, + Description: `Identifies new attackers only when the load to the backend service that is under attack exceeds this threshold.`, + }, + "confidence_threshold": { + Type: schema.TypeFloat, + Optional: true, + Description: `Rules are only automatically deployed for alerts on potential attacks with confidence scores greater than this threshold.`, + }, + "impacted_baseline_threshold": { + Type: schema.TypeFloat, + Optional: true, + Description: `Rules are only automatically deployed when the estimated impact to baseline traffic from the suggested mitigation is below this threshold.`, + }, + "expiration_sec": { + Type: schema.TypeInt, + Optional: true, + Description: `Google Cloud Armor stops applying the action in the automatically deployed rule to an identified attacker after this duration. The rule continues to operate against new requests.`, + }, + }, + }, + }, + {{- end }} + }, + }, + }, + "recaptcha_options_config": { + Type: schema.TypeList, + Optional: true, + Description: `reCAPTCHA configuration options to be applied for the security policy.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "redirect_site_key": { + Type: schema.TypeString, + Required: true, + Description: `A field to supply a reCAPTCHA site key to be used for all the rules using the redirect action with the type of GOOGLE_RECAPTCHA under the security policy. The specified site key needs to be created from the reCAPTCHA API. The user is responsible for the validity of the specified site key. 
If not specified, a Google-managed site key is used.`, + }, + }, + }, + }, + }, + + UseJSONNumber: true, + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func resourceComputeSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParamsSchema(description string) *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "operator": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"EQUALS", "STARTS_WITH", "ENDS_WITH", "CONTAINS", "EQUALS_ANY"}, false), + Description: `You can specify an exact match or a partial match by using a field operator and a field value. Available options: EQUALS: The operator matches if the field value equals the specified value. STARTS_WITH: The operator matches if the field value starts with the specified value. ENDS_WITH: The operator matches if the field value ends with the specified value. CONTAINS: The operator matches if the field value contains the specified value. EQUALS_ANY: The operator matches if the field value is any value.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `A request field matching the specified value will be excluded from inspection during preconfigured WAF evaluation. 
The field value must be given if the field operator is not EQUALS_ANY, and cannot be given if the field operator is EQUALS_ANY.`, + }, + }, + }, + Description: description, + } +} +{{- end }} + +func rulesCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error { + _, n := diff.GetChange("rule") + nSet := n.(*schema.Set) + + nPriorities := map[int64]bool{} + for _, rule := range nSet.List() { + priority := int64(rule.(map[string]interface{})["priority"].(int)) + if nPriorities[priority] { + return fmt.Errorf("Two rules have the same priority, please update one of the priorities to be different.") + } + nPriorities[priority] = true + } + + return nil +} + +func resourceComputeSecurityPolicyCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + sp := d.Get("name").(string) + securityPolicy := &compute.SecurityPolicy{ + Name: sp, + Description: d.Get("description").(string), + } + + if v, ok := d.GetOk("type"); ok { + securityPolicy.Type = v.(string) + } + + if v, ok := d.GetOk("rule"); ok { + securityPolicy.Rules = expandSecurityPolicyRules(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("advanced_options_config"); ok{ + securityPolicy.AdvancedOptionsConfig = expandSecurityPolicyAdvancedOptionsConfig(v.([]interface{})) + } + + if v, ok := d.GetOk("adaptive_protection_config"); ok{ + securityPolicy.AdaptiveProtectionConfig = expandSecurityPolicyAdaptiveProtectionConfig(v.([]interface{})) + } + + log.Printf("[DEBUG] SecurityPolicy insert request: %#v", securityPolicy) + + if v, ok := d.GetOk("recaptcha_options_config"); ok{ + securityPolicy.RecaptchaOptionsConfig = expandSecurityPolicyRecaptchaOptionsConfig(v.([]interface{}), d) + } + + client := config.NewComputeClient(userAgent) + + op, err := 
client.SecurityPolicies.Insert(project, securityPolicy).Do() + + if err != nil { + return errwrap.Wrapf("Error creating SecurityPolicy: {{"{{"}}err{{"}}"}}", err) + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/global/securityPolicies/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = ComputeOperationWaitTime(config, op, project, fmt.Sprintf("Creating SecurityPolicy %q", sp), userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + return resourceComputeSecurityPolicyRead(d, meta) +} + +func resourceComputeSecurityPolicyRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + sp := d.Get("name").(string) + + client := config.NewComputeClient(userAgent) + + securityPolicy, err := client.SecurityPolicies.Get(project, sp).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SecurityPolicy %q", d.Id())) + } + + if err := d.Set("name", securityPolicy.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("description", securityPolicy.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("type", securityPolicy.Type); err != nil { + return fmt.Errorf("Error setting type: %s", err) + } + if err := d.Set("rule", flattenSecurityPolicyRules(securityPolicy.Rules)); err != nil { + return err + } + if err := d.Set("fingerprint", securityPolicy.Fingerprint); err != nil { + return fmt.Errorf("Error setting fingerprint: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("self_link", 
tpgresource.ConvertSelfLinkToV1(securityPolicy.SelfLink)); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("advanced_options_config", flattenSecurityPolicyAdvancedOptionsConfig(securityPolicy.AdvancedOptionsConfig)); err != nil { + return fmt.Errorf("Error setting advanced_options_config: %s", err) + } + + if err := d.Set("adaptive_protection_config", flattenSecurityPolicyAdaptiveProtectionConfig(securityPolicy.AdaptiveProtectionConfig)); err != nil { + return fmt.Errorf("Error setting adaptive_protection_config: %s", err) + } + + if err := d.Set("recaptcha_options_config", flattenSecurityPolicyRecaptchaOptionConfig(securityPolicy.RecaptchaOptionsConfig)); err != nil { + return fmt.Errorf("Error setting recaptcha_options_config: %s", err) + } + + return nil +} + +func resourceComputeSecurityPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + sp := d.Get("name").(string) + + + securityPolicy := &compute.SecurityPolicy{ + Fingerprint: d.Get("fingerprint").(string), + } + + updateMask := []string{} + + if d.HasChange("type") { + securityPolicy.Type = d.Get("type").(string) + securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "Type") + } + + if d.HasChange("description") { + securityPolicy.Description = d.Get("description").(string) + securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "Description") + } + + if d.HasChange("advanced_options_config") { + securityPolicy.AdvancedOptionsConfig = expandSecurityPolicyAdvancedOptionsConfig(d.Get("advanced_options_config").([]interface{})) + securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "AdvancedOptionsConfig", "advancedOptionsConfig.jsonParsing", 
"advancedOptionsConfig.jsonCustomConfig", "advancedOptionsConfig.logLevel") + securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "advanceOptionConfig.userIpRequestHeaders") + if len(securityPolicy.AdvancedOptionsConfig.UserIpRequestHeaders) == 0 { + // to clean this list we must send the updateMask of this field on the request. + updateMask = append(updateMask, "advanced_options_config.user_ip_request_headers") + } + } + + if d.HasChange("adaptive_protection_config") { + securityPolicy.AdaptiveProtectionConfig = expandSecurityPolicyAdaptiveProtectionConfig(d.Get("adaptive_protection_config").([]interface{})) + securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "AdaptiveProtectionConfig", "adaptiveProtectionConfig.layer7DdosDefenseConfig.enable", "adaptiveProtectionConfig.layer7DdosDefenseConfig.ruleVisibility") + {{- if ne $.TargetVersionName "ga" }} + securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "adaptiveProtectionConfig.autoDeployConfig.loadThreshold", "adaptiveProtectionConfig.autoDeployConfig.confidenceThreshold", "adaptiveProtectionConfig.autoDeployConfig.impactedBaselineThreshold", "adaptiveProtectionConfig.autoDeployConfig.expirationSec") + {{- end }} + } + + if d.HasChange("recaptcha_options_config") { + securityPolicy.RecaptchaOptionsConfig = expandSecurityPolicyRecaptchaOptionsConfig(d.Get("recaptcha_options_config").([]interface{}), d) + securityPolicy.ForceSendFields = append(securityPolicy.ForceSendFields, "RecaptchaOptionsConfig") + } + + if len(securityPolicy.ForceSendFields) > 0 { + client := config.NewComputeClient(userAgent) + + op, err := client.SecurityPolicies.Patch(project, sp, securityPolicy).UpdateMask(strings.Join(updateMask, ",")).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{"{{"}}err{{"}}"}}", sp), err) + } + + err = ComputeOperationWaitTime(config, op, project, fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, 
d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + {{ if eq $.TargetVersionName `ga` }} + if d.HasChange("rule") { + o, n := d.GetChange("rule") + oSet := o.(*schema.Set) + nSet := n.(*schema.Set) + + oPriorities := map[int64]bool{} + nPriorities := map[int64]bool{} + for _, rule := range oSet.List() { + oPriorities[int64(rule.(map[string]interface{})["priority"].(int))] = true + } + + for _, rule := range nSet.List() { + priority := int64(rule.(map[string]interface{})["priority"].(int)) + nPriorities[priority] = true + if !oPriorities[priority] { + client := config.NewComputeClient(userAgent) + // If the rule is in new and its priority does not exist in old, then add it. + op, err := client.SecurityPolicies.AddRule(project, sp, expandSecurityPolicyRule(rule)).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{"{{"}}err{{"}}"}}", sp), err) + } + + err = ComputeOperationWaitTime(config, op, project, fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } else if !oSet.Contains(rule) { + client := config.NewComputeClient(userAgent) + + // If the rule is in new, and its priority is in old, but its hash is different than the one in old, update it. + op, err := client.SecurityPolicies.PatchRule(project, sp, expandSecurityPolicyRule(rule)).Priority(priority).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{"{{"}}err{{"}}"}}", sp), err) + } + + err = ComputeOperationWaitTime(config, op, project, fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } + + for _, rule := range oSet.List() { + priority := int64(rule.(map[string]interface{})["priority"].(int)) + if !nPriorities[priority] { + client := config.NewComputeClient(userAgent) + + // If the rule's priority is in old but not new, remove it. 
+ op, err := client.SecurityPolicies.RemoveRule(project, sp).Priority(priority).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{"{{"}}err{{"}}"}}", sp), err) + } + + err = ComputeOperationWaitTime(config, op, project, fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } + } + + {{- else }} + if d.HasChange("rule") { + o, n := d.GetChange("rule") + oSet := o.(*schema.Set) + nSet := n.(*schema.Set) + + oPriorities := map[int64]bool{} + nPriorities := map[int64]bool{} + oRules := make(map[int64]map[string]interface{}) + nRules := make(map[int64]map[string]interface{}) + + for _, rule := range oSet.List() { + oPriorities[int64(rule.(map[string]interface{})["priority"].(int))] = true + oRules[int64(rule.(map[string]interface{})["priority"].(int))] = rule.(map[string]interface{}) + } + + for _, rule := range nSet.List() { + nRules[int64(rule.(map[string]interface{})["priority"].(int))] = rule.(map[string]interface{}) + priority := int64(rule.(map[string]interface{})["priority"].(int)) + nPriorities[priority] = true + + if !oPriorities[priority] { + client := config.NewComputeClient(userAgent) + + // If the rule is in new and its priority does not exist in old, then add it. 
+ op, err := client.SecurityPolicies.AddRule(project, sp, expandSecurityPolicyRule(rule)).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{"{{"}}err{{"}}"}}", sp), err) + } + + err = ComputeOperationWaitTime(config, op, project, fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } else if !oSet.Contains(rule) { + + oMap := make(map[string]interface{}) + nMap := make(map[string]interface{}) + + updateMask := []string{} + + if oRules[priority]["rate_limit_options"] != nil { + for _, oValue := range oRules[priority]["rate_limit_options"].([]interface{}) { + oMap = oValue.(map[string]interface{}) + } + } + + if nRules[priority]["rate_limit_options"] != nil { + for _, nValue := range nRules[priority]["rate_limit_options"].([]interface{}) { + nMap = nValue.(map[string]interface{}) + } + } + + if fmt.Sprintf("%v", oMap["enforce_on_key"]) != fmt.Sprintf("%v", nMap["enforce_on_key"]) { + updateMask = append(updateMask, "rate_limit_options.enforce_on_key") + } + + if fmt.Sprintf("%v", oMap["enforce_on_key_configs"]) != fmt.Sprintf("%v", nMap["enforce_on_key_configs"]) { + updateMask = append(updateMask, "rate_limit_options.enforce_on_key_configs") + } + + if fmt.Sprintf("%v", oMap["enforce_on_key_name"]) != fmt.Sprintf("%v", nMap["enforce_on_key_name"]) { + updateMask = append(updateMask, "rate_limit_options.enforce_on_key_name") + } + + client := config.NewComputeClient(userAgent) + + // If the rule is in new, and its priority is in old, but its hash is different than the one in old, update it. 
+ op, err := client.SecurityPolicies.PatchRule(project, sp, expandSecurityPolicyRule(rule)).Priority(priority).UpdateMask(strings.Join(updateMask, ",")).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{"{{"}}err{{"}}"}}", sp), err) + } + + err = ComputeOperationWaitTime(config, op, project, fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } + + for _, rule := range oSet.List() { + priority := int64(rule.(map[string]interface{})["priority"].(int)) + if !nPriorities[priority] { + client := config.NewComputeClient(userAgent) + + // If the rule's priority is in old but not new, remove it. + op, err := client.SecurityPolicies.RemoveRule(project, sp).Priority(priority).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating SecurityPolicy %q: {{"{{"}}err{{"}}"}}", sp), err) + } + + err = ComputeOperationWaitTime(config, op, project, fmt.Sprintf("Updating SecurityPolicy %q", sp), userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + } + } + {{- end }} + + return resourceComputeSecurityPolicyRead(d, meta) +} + +func resourceComputeSecurityPolicyDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + client := config.NewComputeClient(userAgent) + + // Delete the SecurityPolicy + op, err := client.SecurityPolicies.Delete(project, d.Get("name").(string)).Do() + if err != nil { + return errwrap.Wrapf("Error deleting SecurityPolicy: {{"{{"}}err{{"}}"}}", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Deleting SecurityPolicy", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + + d.SetId("") + return nil +} + 
+// expandSecurityPolicyRules converts the configured Terraform "rule" set into
+// the list of API SecurityPolicyRule objects sent to the Compute API.
+func expandSecurityPolicyRules(configured []interface{}) []*compute.SecurityPolicyRule {
+	rules := make([]*compute.SecurityPolicyRule, 0, len(configured))
+	for _, raw := range configured {
+		rules = append(rules, expandSecurityPolicyRule(raw))
+	}
+	return rules
+}
+
+// expandSecurityPolicyRule converts a single configured rule (a
+// map[string]interface{} from the schema set) into an API SecurityPolicyRule.
+// ForceSendFields keeps Description and Preview in the request even when they
+// hold their zero values, so clearing them actually propagates to the API.
+func expandSecurityPolicyRule(raw interface{}) *compute.SecurityPolicyRule {
+	data := raw.(map[string]interface{})
+	return &compute.SecurityPolicyRule{
+		Description: data["description"].(string),
+		Priority:    int64(data["priority"].(int)),
+		Action:      data["action"].(string),
+		Preview:     data["preview"].(bool),
+		Match:       expandSecurityPolicyMatch(data["match"].([]interface{})),
+		{{- if ne $.TargetVersionName "ga" }}
+		// preconfigured_waf_config is only present in non-GA provider builds.
+		PreconfiguredWafConfig: expandSecurityPolicyPreconfiguredWafConfig(data["preconfigured_waf_config"].([]interface{})),
+		{{- end }}
+		RateLimitOptions: expandSecurityPolicyRuleRateLimitOptions(data["rate_limit_options"].([]interface{})),
+		RedirectOptions:  expandSecurityPolicyRuleRedirectOptions(data["redirect_options"].([]interface{})),
+		HeaderAction:     expandSecurityPolicyRuleHeaderAction(data["header_action"].([]interface{})),
+		ForceSendFields:  []string{"Description", "Preview"},
+	}
+}
+
+// expandSecurityPolicyMatch converts the single-element "match" block into an
+// API matcher. Returns nil when the block is absent or empty.
+func expandSecurityPolicyMatch(configured []interface{}) *compute.SecurityPolicyRuleMatcher {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	data := configured[0].(map[string]interface{})
+	return &compute.SecurityPolicyRuleMatcher{
+		VersionedExpr: data["versioned_expr"].(string),
+		Config:        expandSecurityPolicyMatchConfig(data["config"].([]interface{})),
+		Expr:          expandSecurityPolicyMatchExpr(data["expr"].([]interface{})),
+		ExprOptions:   expandSecurityPolicyMatchExprOptions(data["expr_options"].([]interface{})),
+	}
+}
+
+// expandSecurityPolicyMatchConfig converts the "match.config" block (currently
+// only src_ip_ranges). Returns nil when the block is absent or empty.
+func expandSecurityPolicyMatchConfig(configured []interface{}) *compute.SecurityPolicyRuleMatcherConfig {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	data := configured[0].(map[string]interface{})
+	return &compute.SecurityPolicyRuleMatcherConfig{
+		SrcIpRanges: tpgresource.ConvertStringArr(data["src_ip_ranges"].(*schema.Set).List()),
+	}
+}
+
+// expandSecurityPolicyMatchExpr converts the "match.expr" block into a CEL
+// expression. Only Expression is forwarded today; the commented fields are
+// intentionally unsupported (see the linked issue). Returns nil when absent.
+func expandSecurityPolicyMatchExpr(expr []interface{}) *compute.Expr {
+	if len(expr) == 0 || expr[0] == nil {
+		return nil
+	}
+
+	data := expr[0].(map[string]interface{})
+	return &compute.Expr{
+		Expression: data["expression"].(string),
+		// These fields are not yet supported (Issue hashicorp/terraform-provider-google#4497: mbang)
+		// Title: data["title"].(string),
+		// Description: data["description"].(string),
+		// Location: data["location"].(string),
+	}
+}
+
+// expandSecurityPolicyMatchExprOptions converts the "match.expr_options"
+// block, which currently only wraps reCAPTCHA options. Returns nil when absent.
+func expandSecurityPolicyMatchExprOptions(exprOptions []interface{}) *compute.SecurityPolicyRuleMatcherExprOptions {
+	if len(exprOptions) == 0 || exprOptions[0] == nil {
+		return nil
+	}
+
+	data := exprOptions[0].(map[string]interface{})
+	return &compute.SecurityPolicyRuleMatcherExprOptions{
+		RecaptchaOptions: expandSecurityPolicyMatchExprOptionsRecaptchaOptions(data["recaptcha_options"].([]interface{})),
+	}
+}
+
+// expandSecurityPolicyMatchExprOptionsRecaptchaOptions converts the
+// action/session token site-key lists for reCAPTCHA evaluation. The schema
+// declares these as TypeList, so the values arrive as []interface{} and are
+// copied element-by-element into []string. Returns nil when absent.
+func expandSecurityPolicyMatchExprOptionsRecaptchaOptions(recaptchaOptions []interface{}) *compute.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions {
+	if len(recaptchaOptions) == 0 || recaptchaOptions[0] == nil {
+		return nil
+	}
+
+	data := recaptchaOptions[0].(map[string]interface{})
+
+	actionTokenKeysInterface := data["action_token_site_keys"].([]interface{})
+	actionTokenKeys := make([]string, len(actionTokenKeysInterface))
+	for i, v := range actionTokenKeysInterface {
+		actionTokenKeys[i] = v.(string)
+	}
+
+	sessionTokenKeysInterface := data["session_token_site_keys"].([]interface{})
+	sessionTokenKeys := make([]string, len(sessionTokenKeysInterface))
+	for i, v := range sessionTokenKeysInterface {
+		sessionTokenKeys[i] = v.(string)
+	}
+
+	return &compute.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions{
+		ActionTokenSiteKeys:  actionTokenKeys,
+		SessionTokenSiteKeys: sessionTokenKeys,
+	}
+}
+
+{{ if ne $.TargetVersionName `ga` -}}
+// expandSecurityPolicyPreconfiguredWafConfig converts the single-element
+// "preconfigured_waf_config" block into its API form. Returns nil when the
+// block is absent or empty.
+func expandSecurityPolicyPreconfiguredWafConfig(configured []interface{}) *compute.SecurityPolicyRulePreconfiguredWafConfig {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	data := configured[0].(map[string]interface{})
+	return &compute.SecurityPolicyRulePreconfiguredWafConfig{
+		Exclusions: expandSecurityPolicyRulePreconfiguredWafConfigExclusions(data["exclusion"].([]interface{})),
+	}
+}
+
+// expandSecurityPolicyRulePreconfiguredWafConfigExclusions converts each
+// configured "exclusion" entry into an API exclusion object.
+func expandSecurityPolicyRulePreconfiguredWafConfigExclusions(configured []interface{}) []*compute.SecurityPolicyRulePreconfiguredWafConfigExclusion {
+	exclusions := make([]*compute.SecurityPolicyRulePreconfiguredWafConfigExclusion, 0, len(configured))
+	for _, raw := range configured {
+		exclusions = append(exclusions, expandSecurityPolicyRulePreconfiguredWafConfigExclusion(raw))
+	}
+	return exclusions
+}
+
+// expandSecurityPolicyRulePreconfiguredWafConfigExclusion converts one
+// exclusion entry: the four request-field exclusion lists plus the target
+// rule set and its optional rule-ID subset.
+func expandSecurityPolicyRulePreconfiguredWafConfigExclusion(raw interface{}) *compute.SecurityPolicyRulePreconfiguredWafConfigExclusion {
+	data := raw.(map[string]interface{})
+	return &compute.SecurityPolicyRulePreconfiguredWafConfigExclusion{
+		RequestHeadersToExclude:     expandSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams(data["request_header"].([]interface{})),
+		RequestCookiesToExclude:     expandSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams(data["request_cookie"].([]interface{})),
+		RequestUrisToExclude:        expandSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams(data["request_uri"].([]interface{})),
+		RequestQueryParamsToExclude: expandSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams(data["request_query_param"].([]interface{})),
+		TargetRuleSet:               data["target_rule_set"].(string),
+		TargetRuleIds:               tpgresource.ConvertStringArr(data["target_rule_ids"].(*schema.Set).List()),
+	}
+}
+
+// expandSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams converts
+// a list of operator/value field-param entries into their API form.
+func expandSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams(configured []interface{}) []*compute.SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams {
+	params := make([]*compute.SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams, 0, len(configured))
+	for _, raw := range configured {
+		params = append(params, expandSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParam(raw))
+	}
+	return params
+}
+
+// expandSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParam maps one
+// schema entry ("operator"/"value") onto the API's Op/Val field pair.
+func expandSecurityPolicyRulePreconfiguredWafConfigExclusionFieldParam(raw interface{}) *compute.SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams {
+	data := raw.(map[string]interface{})
+	return &compute.SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams{
+		Op:  data["operator"].(string),
+		Val: data["value"].(string),
+	}
+}
+{{- end }}
+
+// flattenSecurityPolicyRules converts API SecurityPolicyRule objects back into
+// the schema representation stored in Terraform state.
+func flattenSecurityPolicyRules(rules []*compute.SecurityPolicyRule) []map[string]interface{} {
+	rulesSchema := make([]map[string]interface{}, 0, len(rules))
+	for _, rule := range rules {
+		data := map[string]interface{}{
+			"description": rule.Description,
+			"priority":    rule.Priority,
+			"action":      rule.Action,
+			"preview":     rule.Preview,
+			"match":       flattenMatch(rule.Match),
+			{{- if ne $.TargetVersionName "ga" }}
+			// preconfigured_waf_config is only present in non-GA provider builds.
+			"preconfigured_waf_config": flattenPreconfiguredWafConfig(rule.PreconfiguredWafConfig),
+			{{- end }}
+			"rate_limit_options": flattenSecurityPolicyRuleRateLimitOptions(rule.RateLimitOptions),
+			"redirect_options":   flattenSecurityPolicyRedirectOptions(rule.RedirectOptions),
+			"header_action":      flattenSecurityPolicyRuleHeaderAction(rule.HeaderAction),
+		}
+		rulesSchema = append(rulesSchema, data)
+	}
+	return rulesSchema
+}
+
+// flattenMatch converts an API matcher into the single-element "match" block.
+// Returns nil when the matcher is unset.
+// NOTE(review): "expr" is flattened from the whole matcher (flattenMatchExpr
+// receives match, not match.Expr) — presumably deliberate; confirm against
+// flattenMatchExpr's signature before changing.
+func flattenMatch(match *compute.SecurityPolicyRuleMatcher) []map[string]interface{} {
+	if match == nil {
+		return nil
+	}
+
+	data := map[string]interface{}{
+		"versioned_expr": match.VersionedExpr,
+		"config":         flattenMatchConfig(match.Config),
+		"expr":           flattenMatchExpr(match),
+		"expr_options":   flattenMatchExprOptions(match.ExprOptions),
+	}
+
+	return []map[string]interface{}{data}
+}
+
+func flattenMatchConfig(conf *compute.SecurityPolicyRuleMatcherConfig) []map[string]interface{} {
+	if conf == nil {
+		return nil
+	}
+
+ data := map[string]interface{}{ + "src_ip_ranges": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(conf.SrcIpRanges)), + } + + return []map[string]interface{}{data} +} + +func flattenMatchExprOptions(exprOptions *compute.SecurityPolicyRuleMatcherExprOptions) []map[string]interface{} { + if exprOptions == nil { + return nil + } + + data := map[string]interface{}{ + "recaptcha_options": flattenMatchExprOptionsRecaptchaOptions(exprOptions.RecaptchaOptions), + } + + return []map[string]interface{}{data} +} + +func flattenMatchExprOptionsRecaptchaOptions(recaptchaOptions *compute.SecurityPolicyRuleMatcherExprOptionsRecaptchaOptions) []map[string]interface{} { + if recaptchaOptions == nil { + return nil + } + + data := map[string]interface{}{ + "action_token_site_keys": recaptchaOptions.ActionTokenSiteKeys, + "session_token_site_keys": recaptchaOptions.SessionTokenSiteKeys, + } + + return []map[string]interface{}{data} +} + +func flattenMatchExpr(match *compute.SecurityPolicyRuleMatcher) []map[string]interface{} { + if match.Expr == nil { + return nil + } + + data := map[string]interface{}{ + "expression": match.Expr.Expression, + // These fields are not yet supported (Issue hashicorp/terraform-provider-google#4497: mbang) + // "title": match.Expr.Title, + // "description": match.Expr.Description, + // "location": match.Expr.Location, + } + + return []map[string]interface{}{data} +} + +{{ if ne $.TargetVersionName `ga` -}} +func flattenPreconfiguredWafConfig(config *compute.SecurityPolicyRulePreconfiguredWafConfig) []map[string]interface{} { + if config == nil { + return nil + } + + data := map[string]interface{}{ + "exclusion": flattenPreconfiguredWafConfigExclusions(config.Exclusions), + } + + return []map[string]interface{}{data} +} + +func flattenPreconfiguredWafConfigExclusions(exclusions []*compute.SecurityPolicyRulePreconfiguredWafConfigExclusion) []map[string]interface{} { + exclusionsSchema := make([]map[string]interface{}, 0, 
len(exclusions)) + for _, exclusion := range exclusions { + data := map[string]interface{}{ + "request_header": flattenPreconfiguredWafConfigExclusionField(exclusion.RequestHeadersToExclude), + "request_cookie": flattenPreconfiguredWafConfigExclusionField(exclusion.RequestCookiesToExclude), + "request_uri": flattenPreconfiguredWafConfigExclusionField(exclusion.RequestUrisToExclude), + "request_query_param": flattenPreconfiguredWafConfigExclusionField(exclusion.RequestQueryParamsToExclude), + "target_rule_set": exclusion.TargetRuleSet, + "target_rule_ids": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(exclusion.TargetRuleIds)), + } + + exclusionsSchema = append(exclusionsSchema, data) + } + return exclusionsSchema +} + +func flattenPreconfiguredWafConfigExclusionField(fieldParams []*compute.SecurityPolicyRulePreconfiguredWafConfigExclusionFieldParams) []map[string]interface{} { + fieldSchema := make([]map[string]interface{}, 0, len(fieldParams)) + for _, field := range fieldParams { + data := map[string]interface{}{ + "operator": &field.Op, + "value": &field.Val, + } + fieldSchema = append(fieldSchema, data) + } + return fieldSchema +} +{{- end }} + +func expandSecurityPolicyAdvancedOptionsConfig(configured []interface{}) *compute.SecurityPolicyAdvancedOptionsConfig { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyAdvancedOptionsConfig{ + JsonParsing: data["json_parsing"].(string), + JsonCustomConfig: expandSecurityPolicyAdvancedOptionsConfigJsonCustomConfig(data["json_custom_config"].([]interface{})), + LogLevel: data["log_level"].(string), + UserIpRequestHeaders: tpgresource.ConvertStringArr(data["user_ip_request_headers"].(*schema.Set).List()), + } +} + +func flattenSecurityPolicyAdvancedOptionsConfig(conf *compute.SecurityPolicyAdvancedOptionsConfig) []map[string]interface{} { + if conf == nil { + return nil + } + + data := 
map[string]interface{}{ + "json_parsing": conf.JsonParsing, + "json_custom_config": flattenSecurityPolicyAdvancedOptionsConfigJsonCustomConfig(conf.JsonCustomConfig), + "log_level": conf.LogLevel, + "user_ip_request_headers": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(conf.UserIpRequestHeaders)), + } + + return []map[string]interface{}{data} +} + +func expandSecurityPolicyAdvancedOptionsConfigJsonCustomConfig(configured []interface{}) *compute.SecurityPolicyAdvancedOptionsConfigJsonCustomConfig { + if len(configured) == 0 || configured[0] == nil { + // If configuration is unset, return an empty JsonCustomConfig; this ensures the ContentTypes list can be cleared + return &compute.SecurityPolicyAdvancedOptionsConfigJsonCustomConfig{} + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyAdvancedOptionsConfigJsonCustomConfig{ + ContentTypes: tpgresource.ConvertStringArr(data["content_types"].(*schema.Set).List()), + } +} + +func flattenSecurityPolicyAdvancedOptionsConfigJsonCustomConfig(conf *compute.SecurityPolicyAdvancedOptionsConfigJsonCustomConfig) []map[string]interface{} { + if conf == nil { + return nil + } + + data := map[string]interface{}{ + "content_types": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(conf.ContentTypes)), + } + + return []map[string]interface{}{data} +} + +func expandSecurityPolicyAdaptiveProtectionConfig(configured []interface{}) *compute.SecurityPolicyAdaptiveProtectionConfig { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyAdaptiveProtectionConfig{ + Layer7DdosDefenseConfig: expandLayer7DdosDefenseConfig(data["layer_7_ddos_defense_config"].([]interface{})), + {{- if ne $.TargetVersionName "ga" }} + AutoDeployConfig: expandAutoDeployConfig(data["auto_deploy_config"].([]interface{})), + {{- end }} + + } +} + +func 
expandLayer7DdosDefenseConfig(configured []interface{}) *compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig{ + Enable: data["enable"].(bool), + RuleVisibility: data["rule_visibility"].(string), + ForceSendFields: []string{"Enable"}, + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandAutoDeployConfig(configured []interface{}) *compute.SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig{ + LoadThreshold: data["load_threshold"].(float64), + ConfidenceThreshold: data["confidence_threshold"].(float64), + ImpactedBaselineThreshold: data["impacted_baseline_threshold"].(float64), + ExpirationSec: int64(data["expiration_sec"].(int)), + } +} +{{- end }} + +func flattenSecurityPolicyAdaptiveProtectionConfig(conf *compute.SecurityPolicyAdaptiveProtectionConfig) []map[string]interface{} { + if conf == nil { + return nil + } + + data := map[string]interface{}{ + "layer_7_ddos_defense_config": flattenLayer7DdosDefenseConfig(conf.Layer7DdosDefenseConfig), + {{- if ne $.TargetVersionName "ga" }} + "auto_deploy_config": flattenAutoDeployConfig(conf.AutoDeployConfig), + {{- end }} + } + + return []map[string]interface{}{data} +} + +func flattenLayer7DdosDefenseConfig(conf *compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig) []map[string]interface{} { + if conf == nil { + return nil + } + + data := map[string]interface{}{ + "enable": conf.Enable, + "rule_visibility": conf.RuleVisibility, + } + + return []map[string]interface{}{data} +} + +{{ if ne $.TargetVersionName `ga` -}} +func flattenAutoDeployConfig(conf 
*compute.SecurityPolicyAdaptiveProtectionConfigAutoDeployConfig) []map[string]interface{} { + if conf == nil { + return nil + } + + data := map[string]interface{}{ + "load_threshold": conf.LoadThreshold, + "confidence_threshold": conf.ConfidenceThreshold, + "impacted_baseline_threshold": conf.ImpactedBaselineThreshold, + "expiration_sec": conf.ExpirationSec, + } + + return []map[string]interface{}{data} +} +{{- end }} + +func expandSecurityPolicyRuleRateLimitOptions(configured []interface{}) *compute.SecurityPolicyRuleRateLimitOptions { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyRuleRateLimitOptions{ + BanThreshold: expandThreshold(data["ban_threshold"].([]interface{})), + RateLimitThreshold: expandThreshold(data["rate_limit_threshold"].([]interface{})), + ExceedAction: data["exceed_action"].(string), + ConformAction: data["conform_action"].(string), + EnforceOnKey: data["enforce_on_key"].(string), + EnforceOnKeyName: data["enforce_on_key_name"].(string), + {{- if ne $.TargetVersionName "ga" }} + EnforceOnKeyConfigs: expandSecurityPolicyEnforceOnKeyConfigs(data["enforce_on_key_configs"].([]interface{})), + {{- end }} + BanDurationSec: int64(data["ban_duration_sec"].(int)), + ExceedRedirectOptions: expandSecurityPolicyRuleRedirectOptions(data["exceed_redirect_options"].([]interface{})), + {{- if eq $.TargetVersionName "ga" }} + ForceSendFields: []string{"EnforceOnKey", "EnforceOnKeyName"}, + {{- else }} + ForceSendFields: []string{"EnforceOnKey", "EnforceOnKeyName", "EnforceOnKeyConfigs"}, + {{- end }} + } +} + +func expandThreshold(configured []interface{}) *compute.SecurityPolicyRuleRateLimitOptionsThreshold { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyRuleRateLimitOptionsThreshold{ + Count: int64(data["count"].(int)), + IntervalSec: 
int64(data["interval_sec"].(int)), + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandSecurityPolicyEnforceOnKeyConfigs(configured []interface{}) []*compute.SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig { + params := make([]*compute.SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig, 0, len(configured)) + + for _, raw := range configured { + params = append(params, expandSecurityPolicyEnforceOnKeyConfigsFields(raw)) + } + + return params +} + +func expandSecurityPolicyEnforceOnKeyConfigsFields(raw interface{}) *compute.SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig { + data := raw.(map[string]interface{}) + + return &compute.SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig{ + EnforceOnKeyType: data["enforce_on_key_type"].(string), + EnforceOnKeyName: data["enforce_on_key_name"].(string), + } +} +{{- end }} + +func flattenSecurityPolicyRuleRateLimitOptions(conf *compute.SecurityPolicyRuleRateLimitOptions) []map[string]interface{} { + if conf == nil { + return nil + } + + data := map[string]interface{}{ + "ban_threshold": flattenThreshold(conf.BanThreshold), + "rate_limit_threshold": flattenThreshold(conf.RateLimitThreshold), + "exceed_action": conf.ExceedAction, + "conform_action": conf.ConformAction, + "enforce_on_key": conf.EnforceOnKey, + "enforce_on_key_name": conf.EnforceOnKeyName, + {{- if ne $.TargetVersionName "ga" }} + "enforce_on_key_configs": flattenSecurityPolicyEnforceOnKeyConfigs(conf.EnforceOnKeyConfigs), + {{- end }} + "ban_duration_sec": conf.BanDurationSec, + "exceed_redirect_options": flattenSecurityPolicyRedirectOptions(conf.ExceedRedirectOptions), + } + + return []map[string]interface{}{data} +} + +func flattenThreshold(conf *compute.SecurityPolicyRuleRateLimitOptionsThreshold) []map[string]interface{} { + if conf == nil { + return nil + } + + data := map[string]interface{}{ + "count": conf.Count, + "interval_sec": conf.IntervalSec, + } + + return []map[string]interface{}{data} +} + +func 
expandSecurityPolicyRuleRedirectOptions(configured []interface{}) *compute.SecurityPolicyRuleRedirectOptions { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + return &compute.SecurityPolicyRuleRedirectOptions{ + Type: data["type"].(string), + Target: data["target"].(string), + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func flattenSecurityPolicyEnforceOnKeyConfigs(conf []*compute.SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig) []map[string]interface{} { + if conf == nil || len(conf) == 0 { + return nil + } + + transformed := make([]map[string]interface{}, 0, len(conf)) + for _, raw := range conf { + transformed = append(transformed, flattenSecurityPolicyEnforceOnKeyConfigsFields(raw)) + } + return transformed +} + +func flattenSecurityPolicyEnforceOnKeyConfigsFields(conf *compute.SecurityPolicyRuleRateLimitOptionsEnforceOnKeyConfig) map[string]interface{} { + if conf == nil { + return nil + } + + return map[string]interface{}{ + "enforce_on_key_name": conf.EnforceOnKeyName, + "enforce_on_key_type": conf.EnforceOnKeyType, + } +} +{{- end }} + +func flattenSecurityPolicyRedirectOptions(conf *compute.SecurityPolicyRuleRedirectOptions) []map[string]interface{} { + if conf == nil { + return nil + } + + data := map[string]interface{}{ + "type": conf.Type, + "target": conf.Target, + } + + return []map[string]interface{}{data} +} + +func expandSecurityPolicyRecaptchaOptionsConfig(configured []interface{}, d *schema.ResourceData) *compute.SecurityPolicyRecaptchaOptionsConfig { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + data := configured[0].(map[string]interface{}) + + return &compute.SecurityPolicyRecaptchaOptionsConfig{ + RedirectSiteKey: data["redirect_site_key"].(string), + ForceSendFields: []string{"RedirectSiteKey"}, + } +} + +func flattenSecurityPolicyRecaptchaOptionConfig(conf *compute.SecurityPolicyRecaptchaOptionsConfig) []map[string]interface{} { + if 
conf == nil { + return nil + } + + data := map[string]interface{}{ + "redirect_site_key": conf.RedirectSiteKey, + } + + return []map[string]interface{}{data} +} + +func expandSecurityPolicyRuleHeaderAction(configured []interface{}) *compute.SecurityPolicyRuleHttpHeaderAction { + if len(configured) == 0 || configured[0] == nil { + // If header action is unset, return an empty object; this ensures the header action can be cleared + return &compute.SecurityPolicyRuleHttpHeaderAction{} + } + + data := configured[0].(map[string]interface{}) + + return &compute.SecurityPolicyRuleHttpHeaderAction{ + RequestHeadersToAdds: expandSecurityPolicyRequestHeadersToAdds(data["request_headers_to_adds"].([]interface{})), + } +} + +func expandSecurityPolicyRequestHeadersToAdds(configured []interface{}) []*compute.SecurityPolicyRuleHttpHeaderActionHttpHeaderOption { + transformed := make([]*compute.SecurityPolicyRuleHttpHeaderActionHttpHeaderOption, 0, len(configured)) + + for _, raw := range configured { + transformed = append(transformed, expandSecurityPolicyRequestHeader(raw)) + } + + return transformed +} + +func expandSecurityPolicyRequestHeader(configured interface{}) *compute.SecurityPolicyRuleHttpHeaderActionHttpHeaderOption { + data := configured.(map[string]interface{}) + + return &compute.SecurityPolicyRuleHttpHeaderActionHttpHeaderOption{ + HeaderName: data["header_name"].(string), + HeaderValue: data["header_value"].(string), + } +} + +func flattenSecurityPolicyRuleHeaderAction(conf *compute.SecurityPolicyRuleHttpHeaderAction) []map[string]interface{} { + if conf == nil || conf.RequestHeadersToAdds == nil { + return nil + } + + transformed := map[string]interface{}{ + "request_headers_to_adds": flattenSecurityPolicyRequestHeadersToAdds(conf.RequestHeadersToAdds), + } + + return []map[string]interface{}{transformed} +} + +func flattenSecurityPolicyRequestHeadersToAdds(conf []*compute.SecurityPolicyRuleHttpHeaderActionHttpHeaderOption) []map[string]interface{} { + if conf 
== nil || len(conf) == 0 { + return nil + } + + transformed := make([]map[string]interface{}, 0, len(conf)) + for _, raw := range conf { + transformed = append(transformed, flattenSecurityPolicyRequestHeader(raw)) + } + + return transformed +} + +func flattenSecurityPolicyRequestHeader(conf *compute.SecurityPolicyRuleHttpHeaderActionHttpHeaderOption) map[string]interface{} { + if conf == nil { + return nil + } + + return map[string]interface{}{ + "header_name": conf.HeaderName, + "header_value": conf.HeaderValue, + } +} + +func resourceSecurityPolicyStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/global/securityPolicies/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/global/securityPolicies/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy_rule_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy_rule_test.go new file mode 100644 index 000000000000..ea4386d897bb --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy_rule_test.go @@ -0,0 +1,1052 @@ +package compute_test + +import ( + "fmt" + "regexp" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccComputeSecurityPolicyRule_basicUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_preBasicUpdate(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_postBasicUpdate(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicyRule_withRuleExpr(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withRuleExpr(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicyRule_extendedUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_extPreUpdate(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_extPosUpdateSamePriority(context), + 
ExpectError: regexp.MustCompile("Cannot have rules with the same priorities."), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_extPosUpdate(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicyRule_withPreconfiguredWafConfig(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withPreconfiguredWafConfig_create(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_withPreconfiguredWafConfig_update(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_withPreconfiguredWafConfig_clear(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_compute_security_policy_rule.policy_rule", "preconfigured_waf_config.0"), + ), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicyRule_withRateLimitOptions(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOptionsCreate(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOptionsUpdate(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicyRule_withRateLimit_withEnforceOnKeyConfigs(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyConfigs(spName), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs(spName), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccComputeSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs2(spName), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + +func TestAccComputeSecurityPolicyRule_EnforceOnKeyUpdates(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOptions_withoutRateLimitOptions(spName), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyName(spName), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOptions_withEnforceOnKey(spName), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyConfigs(spName), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOptions_withEnforceOnKey(spName), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyName(spName), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicyRule_withExprOptions(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withExprOptions(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicyRule_modifyExprOptions(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicyRule_withRuleExpr(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_withExprOptions(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicyRule_modifyExprOptions(context), + }, + { + ResourceName: "google_compute_security_policy_rule.policy_rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeSecurityPolicyRule_preBasicUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global 
security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "basic rule pre update" + action = "allow" + priority = 100 + preview = false + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["192.168.0.0/16", "10.0.0.0/8"] + } + } +} +`, context) +} + +func testAccComputeSecurityPolicyRule_postBasicUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" + type = "CLOUD_ARMOR" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "basic rule post update" + action = "deny(403)" + priority = 100 + preview = true + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["172.16.0.0/12"] + } + } +} +`, context) +} + +func testAccComputeSecurityPolicyRule_withRuleExpr(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "basic description" + action = "allow" + priority = "2000" + match { + expr { + expression = "evaluatePreconfiguredExpr('xss-canary')" + } + } + preview = true +} +`, context) +} + +func testAccComputeSecurityPolicyRule_extPreUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = 
google_compute_security_policy.default.name + description = "basic description" + action = "allow" + priority = "2000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.0.0/24"] + } + } + preview = true +} +`, context) +} + +func testAccComputeSecurityPolicyRule_extPosUpdateSamePriority(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" +} + +//add this +resource "google_compute_security_policy_rule" "policy_rule2" { + security_policy = google_compute_security_policy.default.name + description = "basic description" + action = "deny(403)" + priority = "2000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.0.0/24"] + } + } + preview = true +} + +//keep this +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "basic description" + action = "allow" + priority = "2000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.0.0/24"] + } + } + preview = true +} +`, context) +} + +func testAccComputeSecurityPolicyRule_extPosUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" +} + +//add this +resource "google_compute_security_policy_rule" "policy_rule2" { + security_policy = google_compute_security_policy.default.name + description = "basic description" + action = "deny(403)" + priority = "1000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.0.0/24"] + } + } + preview = true +} + +//update this +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "updated description" + 
action = "allow" + priority = "2000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.0.0/24"] + } + } + preview = true +} +`, context) +} + +func testAccComputeSecurityPolicyRule_withPreconfiguredWafConfig_create(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "policy" { + name = "tf-test%{random_suffix}" + description = "Global security policy - create" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.policy.name + description = "Rule with preconfiguredWafConfig - create" + action = "deny" + priority = "1000" + match { + expr { + expression = "evaluatePreconfiguredWaf('sqli-stable')" + } + } + preconfigured_waf_config { + exclusion { + request_cookie { + operator = "EQUALS_ANY" + } + request_header { + operator = "EQUALS" + value = "Referer" + } + request_uri { + operator = "STARTS_WITH" + value = "/admin" + } + request_query_param { + operator = "EQUALS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + target_rule_set = "sqli-stable" + } + exclusion { + request_query_param { + operator = "CONTAINS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + target_rule_set = "xss-stable" + } + } + preview = false +} +`, context) +} + +func testAccComputeSecurityPolicyRule_withPreconfiguredWafConfig_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "policy" { + name = "tf-test%{random_suffix}" + description = "Global security policy - update" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.policy.name + description = "Rule with preconfiguredWafConfig - update" + action = "deny" + priority = "1000" + match { + expr { + expression = "evaluatePreconfiguredWaf('rce-stable') 
|| evaluatePreconfiguredWaf('xss-stable')" + } + } + preconfigured_waf_config { + exclusion { + request_uri { + operator = "STARTS_WITH" + value = "/admin" + } + target_rule_set = "rce-stable" + } + exclusion { + request_query_param { + operator = "CONTAINS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + request_query_param { + operator = "EQUALS" + value = "description" + } + request_cookie { + operator = "CONTAINS" + value = "TokenExpired" + } + target_rule_set = "xss-stable" + target_rule_ids = [ + "owasp-crs-v030001-id941330-xss", + "owasp-crs-v030001-id941340-xss", + ] + } + } + preview = false +} +`, context) +} + +func testAccComputeSecurityPolicyRule_withPreconfiguredWafConfig_clear(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "policy" { + name = "tf-test%{random_suffix}" + description = "Global security policy - clear" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.policy.name + description = "Rule with preconfiguredWafConfig - clear" + action = "deny" + priority = "1000" + match { + expr { + expression = "evaluatePreconfiguredWaf('rce-stable') || evaluatePreconfiguredWaf('xss-stable')" + } + } + preview = false +} +`, context) +} + +func testAccComputeSecurityPolicyRule_withRateLimitOptionsCreate(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" + } + + resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "rule create with rate limit" + priority = 101 + action = "rate_based_ban" + rate_limit_options { + rate_limit_threshold { + count = 500 + interval_sec = 10 + } + conform_action = "allow" + exceed_action = "deny(404)" + 
enforce_on_key = "ALL" + ban_threshold { + count = 750 + interval_sec = 180 + } + ban_duration_sec = 180 + } + match { + config { + src_ip_ranges = [ + "*" + ] + } + versioned_expr = "SRC_IPS_V1" + } + } +`, context) +} + +func testAccComputeSecurityPolicyRule_withRateLimitOptionsUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" + } + + resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "rule update with rate limit update" + priority = 101 + action = "rate_based_ban" + rate_limit_options { + rate_limit_threshold { + count = 1000 + interval_sec = 30 + } + conform_action = "allow" + exceed_action = "deny(404)" + enforce_on_key = "ALL" + ban_threshold { + count = 2000 + interval_sec = 180 + } + ban_duration_sec = 300 + } + match { + config { + src_ip_ranges = [ + "*" + ] + } + versioned_expr = "SRC_IPS_V1" + } + } +`, context) +} + +func testAccComputeSecurityPolicyRule_withRateLimitOptions_withEnforceOnKey(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic policy base" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.policy.name + description = "throttle rule withEnforceOnKey" + action = "throttle" + priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "redirect" + + enforce_on_key = "IP" + + exceed_redirect_options { + type = "EXTERNAL_302" + target = "https://www.example.com" + } + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + } +} +`, spName) +} + +func 
testAccComputeSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyConfigs(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic policy base" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.policy.name + description = "throttle rule withEnforceOnKeyConfigs" + action = "throttle" + priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "redirect" + + enforce_on_key = "" + + enforce_on_key_configs { + enforce_on_key_type = "IP" + } + exceed_redirect_options { + type = "EXTERNAL_302" + target = "https://www.example.com" + } + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic policy base" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.policy.name + description = "throttle rule with withMultipleEnforceOnKeyConfigs" + action = "throttle" + priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "deny(429)" + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + + enforce_on_key = "" + + enforce_on_key_configs { + enforce_on_key_type = "HTTP_PATH" + } + + enforce_on_key_configs { + enforce_on_key_type = "HTTP_HEADER" + enforce_on_key_name = "user-agent" + } + + enforce_on_key_configs { + enforce_on_key_type = "REGION_CODE" + } + } +} +`, spName) +} + +func 
testAccComputeSecurityPolicyRule_withRateLimitOption_withMultipleEnforceOnKeyConfigs2(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic policy base" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.policy.name + description = "throttle rule withMultipleEnforceOnKeyConfigs2" + action = "throttle" + priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "deny(429)" + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + + enforce_on_key = "" + + enforce_on_key_configs { + enforce_on_key_type = "REGION_CODE" + } + + enforce_on_key_configs { + enforce_on_key_type = "TLS_JA3_FINGERPRINT" + } + + enforce_on_key_configs { + enforce_on_key_type = "USER_IP" + } + } +} + +`, spName) +} + +func testAccComputeSecurityPolicyRule_withRateLimitOptions_withoutRateLimitOptions(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic policy base" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.policy.name + description = "basic policy rule withoutRateLimitOptions" + action = "deny(403)" + priority = "100" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } +} + +`, spName) +} + +func testAccComputeSecurityPolicyRule_withRateLimitOptions_withEnforceOnKeyName(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic policy base" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.policy.name + description = "throttle rule withEnforceOnKeyName" + action = "throttle" + 
priority = "100" + + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + + rate_limit_options { + conform_action = "allow" + exceed_action = "redirect" + + enforce_on_key = "HTTP_HEADER" + enforce_on_key_name = "user-agent" + + exceed_redirect_options { + type = "EXTERNAL_302" + target = "https://www.example.com" + } + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicyRule_withExprOptions(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "reCAPTCHA rule" + action = "deny(403)" + priority = "2000" + preview = true + match { + expr { + expression = "request.path.endsWith('RegisterWithEmail') && token.recaptcha_action.score >= 0.8 && (token.recaptcha_action.valid)" + } + expr_options { + recaptcha_options { + action_token_site_keys = [ + "placeholder-recaptcha-action-site-key-01", + "placeholder-recaptcha-action-site-key-02" + ] + session_token_site_keys = [ + "placeholder-recaptcha-session-site-key-1", + "placeholder-recaptcha-session-site-key-2" + ] + } + } + } +} +`, context) +} + +func testAccComputeSecurityPolicyRule_modifyExprOptions(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_security_policy" "default" { + name = "tf-test%{random_suffix}" + description = "basic global security policy" +} + +resource "google_compute_security_policy_rule" "policy_rule" { + security_policy = google_compute_security_policy.default.name + description = "modified reCAPTCHA rule" + action = "deny(403)" + priority = "2000" + preview = true + match { + expr { + expression = "request.path.endsWith('RegisterWithEmail') && 
token.recaptcha_action.score >= 0.8 && (token.recaptcha_action.valid)" + } + expr_options { + recaptcha_options { + action_token_site_keys = [ + "placeholder-recaptcha-action-site-key-09", + "placeholder-recaptcha-action-site-key-08", + "placeholder-recaptcha-action-site-key-07" + ] + session_token_site_keys = [ + "placeholder-recaptcha-session-site-key-1" + ] + } + } + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy_test.go.tmpl new file mode 100644 index 000000000000..10a003631fc8 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_security_policy_test.go.tmpl @@ -0,0 +1,1907 @@ +package compute_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccComputeSecurityPolicy_basic(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_basic(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withRule(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + 
CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRule(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withRuleExpr(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRuleExpr(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeSecurityPolicy_withPreconfiguredWafConfig(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withPreconfiguredWafConfig(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withPreconfiguredWafConfig_update(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withPreconfiguredWafConfig_clear(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func 
TestAccComputeSecurityPolicy_update(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRule(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + + { + Config: testAccComputeSecurityPolicy_updateSamePriority(spName), + ExpectError: regexp.MustCompile("Two rules have the same priority, please update one of the priorities to be different."), + }, + + { + Config: testAccComputeSecurityPolicy_update(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + + { + Config: testAccComputeSecurityPolicy_withRule(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withAdvancedOptionsConfig(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_basic(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withAdvancedOptionsConfig(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccComputeSecurityPolicy_withAdvancedOptionsConfig_update(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + // change all AdvancedOptionConfig values. + { + Config: testAccComputeSecurityPolicy_withAdvancedOptionsConfig_update2(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + // Swap to json_parsing = STANDARD_WITH_GRAPHQL + { + Config: testAccComputeSecurityPolicy_withAdvancedOptionsConfig_update3(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_basic(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + + }, + }) +} + +func TestAccComputeSecurityPolicy_withAdaptiveProtection(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_enabled(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_update(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withoutAdaptiveProtection(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Can create with layer 7 protection disabled + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_disabled(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + // Can update to layer 7 protection enabled + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_enabled(spName), + }, + { + // Can update to layer 7 protection disabled again + Config: testAccComputeSecurityPolicy_withAdaptiveProtection_disabled(spName), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeSecurityPolicy_withAdaptiveProtectionAutoDeployConfig(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withAdaptiveProtectionAutoDeployConfig(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withAdaptiveProtectionAutoDeployConfig_update(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccComputeSecurityPolicy_withRateLimitOptions(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRateLimitOptions(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withRateLimitWithRedirectOptions(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRateLimitWithRedirectOptions(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeSecurityPolicy_withRateLimit_withEnforceOnKeyConfigs(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRateLimitOptions_withEnforceOnKeyConfigs(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withRateLimitOption_withMultipleEnforceOnKeyConfigs(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRateLimitOption_withMultipleEnforceOnKeyConfigs(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withRateLimitOption_withMultipleEnforceOnKeyConfigs2(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + +func TestAccComputeSecurityPolicy_EnforceOnKeyUpdates(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRateLimitOptions_withoutRateLimitOptions(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withRateLimitOptions_withEnforceOnKeyName(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withRateLimitOptions_withEnforceOnKey(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withRateLimitOptions_withEnforceOnKeyConfigs(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withRateLimitOptions_withEnforceOnKey(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccComputeSecurityPolicy_withRateLimitOptions_withEnforceOnKeyName(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + +{{ end }} + +func TestAccComputeSecurityPolicy_withRecaptchaOptionsConfig(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_basic(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withRecaptchaOptionsConfig(project, spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withRedirectSiteKeyUpdate(project, spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withEmptyRedirectSiteKey(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withHeadAction(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + headerName := fmt.Sprintf("tf-test-header-name-%s", acctest.RandString(t, 10)) + headerNameUpdate := fmt.Sprintf("tf-test-header-name-update-%s", acctest.RandString(t, 10)) + headerValue := fmt.Sprintf("tf-test-header-value-%s", acctest.RandString(t, 10)) + headerValueUpdate := fmt.Sprintf("tf-test-header-value-update-%s", acctest.RandString(t, 10)) + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withoutHeadAction(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withHeadAction(spName, headerName, headerValue), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withHeadAction(spName, headerNameUpdate, headerValueUpdate), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withMultipleHeaders(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withoutHeadAction(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withExprOptions(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withExprOptions(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_modifyExprOptions(t *testing.T) { + t.Parallel() + + spName := 
fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRule(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withExprOptions(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_modifyExprOptions(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeSecurityPolicy_withRecaptchaOptionsConfig(project, spName string) string { + return fmt.Sprintf(` +resource "google_recaptcha_enterprise_key" "primary" { + display_name = "test" + + labels = { + label-one = "value-one" + } + + project = "%s" + + web_settings { + integration_type = "INVISIBLE" + allow_all_domains = true + allowed_domains = ["localhost"] + } +} + +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic security policy" + type = "CLOUD_ARMOR" + + recaptcha_options_config { + redirect_site_key = google_recaptcha_enterprise_key.primary.name + } +} +`, project, spName) +} + +func testAccComputeSecurityPolicy_withRedirectSiteKeyUpdate(project, spName string) string { + return fmt.Sprintf(` +resource "google_recaptcha_enterprise_key" "primary1" { + display_name = "test" + + labels = { + label-one = "value-one" + } + + project = "%s" + + web_settings { + integration_type = "INVISIBLE" + allow_all_domains = true + allowed_domains = ["localhost"] + } +} + +resource "google_compute_security_policy" "policy" { + name = "%s" + description = 
"basic security policy" + type = "CLOUD_ARMOR" + + recaptcha_options_config { + redirect_site_key = google_recaptcha_enterprise_key.primary1.name + } +} +`, project, spName) +} + +func testAccComputeSecurityPolicy_withEmptyRedirectSiteKey(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic security policy" + type = "CLOUD_ARMOR" + + recaptcha_options_config { + redirect_site_key = "" + } +} +`, spName) +} + +func testAccCheckComputeSecurityPolicyDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_security_policy" { + continue + } + + pol := rs.Primary.Attributes["name"] + + _, err := config.NewComputeClient(config.UserAgent).SecurityPolicies.Get(config.Project, pol).Do() + if err == nil { + return fmt.Errorf("Security policy %q still exists", pol) + } + } + + return nil + } +} + +func testAccComputeSecurityPolicy_basic(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "basic security policy" + type = "CLOUD_ARMOR" +} +`, spName) +} + +func testAccComputeSecurityPolicy_withRule(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "allow" + priority = "2000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.0.0/24"] + } + } + preview = true + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_updateSamePriority(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = 
"updated description" + + // keep this + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + // add this + rule { + action = "deny(403)" + priority = "2000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.1.0/24"] + } + } + } + + rule { + action = "allow" + priority = "2000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.0.0/24"] + } + } + preview = true + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_update(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" + + // keep this + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + // add this + rule { + action = "deny(403)" + priority = "1000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.1.0/24"] + } + } + } + + // update this + rule { + action = "allow" + priority = "2000" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["10.0.0.0/24"] + } + } + description = "updated description" + preview = false + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withRuleExpr(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "allow" + priority = "2000" + match { + expr { + // These fields are not yet supported (Issue hashicorp/terraform-provider-google#4497: mbang) + // title = "Has User" + // description = "Determines whether the request has a user account" + expression = 
"evaluatePreconfiguredExpr('xss-canary')" + } + } + preview = true + } +} +`, spName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeSecurityPolicy_withPreconfiguredWafConfig(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "deny" + priority = "1000" + match { + expr { + expression = "evaluatePreconfiguredWaf('sqli-stable')" + } + } + preconfigured_waf_config { + exclusion { + request_cookie { + operator = "EQUALS_ANY" + } + request_header { + operator = "EQUALS" + value = "Referer" + } + request_uri { + operator = "STARTS_WITH" + value = "/admin" + } + request_query_param { + operator = "EQUALS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + target_rule_set = "sqli-stable" + } + exclusion { + request_query_param { + operator = "CONTAINS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + target_rule_set = "xss-stable" + } + } + preview = false + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withPreconfiguredWafConfig_update(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "deny" + priority = "1000" + match { + expr { + expression = "evaluatePreconfiguredWaf('rce-stable') || evaluatePreconfiguredWaf('xss-stable')" + } + } + preconfigured_waf_config { + exclusion { + request_uri { + operator = "STARTS_WITH" + value = "/admin" + } + target_rule_set = "rce-stable" + } + exclusion { + request_query_param { + operator 
= "CONTAINS" + value = "password" + } + request_query_param { + operator = "STARTS_WITH" + value = "freeform" + } + request_query_param { + operator = "EQUALS" + value = "description" + } + target_rule_set = "xss-stable" + target_rule_ids = [ + "owasp-crs-v030001-id941330-xss", + "owasp-crs-v030001-id941340-xss", + ] + } + } + preview = false + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withPreconfiguredWafConfig_clear(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "deny" + priority = "1000" + match { + expr { + expression = "evaluatePreconfiguredWaf('rce-stable') || evaluatePreconfiguredWaf('xss-stable')" + } + } + preconfigured_waf_config { + // ensure empty waf config // + } + preview = false + } +} +`, spName) +} +{{- end }} + +func testAccComputeSecurityPolicy_withoutHeadAction(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "allow" + priority = "1000" + match { + expr { + expression = "request.path.matches(\"/login.html\") && token.recaptcha_session.score < 0.2" + } + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withHeadAction(spName, headerName, headerValue string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "allow" + priority = "1000" + match { + expr { + 
expression = "request.path.matches(\"/login.html\") && token.recaptcha_session.score < 0.2" + } + } + + header_action { + request_headers_to_adds { + header_name = "%s" + header_value = "%s" + } + } + } +} +`, spName, headerName, headerValue) +} + +func testAccComputeSecurityPolicy_withMultipleHeaders(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "allow" + priority = "1000" + match { + expr { + expression = "request.path.matches(\"/login.html\") && token.recaptcha_session.score < 0.2" + } + } + + header_action { + request_headers_to_adds { + header_name = "reCAPTCHA-Warning" + header_value = "high" + } + + request_headers_to_adds { + header_name = "X-Hello" + header_value = "World" + } + + request_headers_to_adds { + header_name = "X-Resource" + header_value = "test" + } + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdvancedOptionsConfig(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" + + advanced_options_config { + json_parsing = "STANDARD" + json_custom_config { + content_types = [ + "application/json", + "application/vnd.api+json", + "application/vnd.collection+json", + "application/vnd.hyper+json" + ] + } + log_level = "VERBOSE" + user_ip_request_headers = [ + "True-Client-IP", + "x-custom-ip" + ] + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdvancedOptionsConfig_update(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description changing the user_ip" + + advanced_options_config { + json_parsing = "STANDARD" + json_custom_config { + content_types = [ + 
"application/json", + "application/vnd.api+json", + "application/vnd.collection+json", + "application/vnd.hyper+json" + ] + } + log_level = "VERBOSE" + user_ip_request_headers = [ + "x-custom-ip", + ] + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdvancedOptionsConfig_update2(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description changing all advancedOptionsConfig values" + + advanced_options_config { + json_parsing = "DISABLED" + json_custom_config { + content_types = [ + "application/json", + "application/vnd.hyper+json" + ] + } + log_level = "NORMAL" + user_ip_request_headers = [ + ] + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdvancedOptionsConfig_update3(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description changing json_parsing to STANDARD_WITH_GRAPHQL" + + advanced_options_config { + json_parsing = "STANDARD_WITH_GRAPHQL" + json_custom_config { + content_types = [ + "application/json", + "application/vnd.hyper+json" + ] + } + log_level = "NORMAL" + user_ip_request_headers = [ + ] + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withoutAdaptiveProtection(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdaptiveProtection_disabled(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" + + adaptive_protection_config { + layer_7_ddos_defense_config { + enable = false + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdaptiveProtection_enabled(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = 
"%s" + description = "updated description" + + adaptive_protection_config { + layer_7_ddos_defense_config { + enable = true + rule_visibility = "STANDARD" + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdaptiveProtection_update(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" + + adaptive_protection_config { + layer_7_ddos_defense_config { + enable = false + rule_visibility = "STANDARD" + } + } +} +`, spName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeSecurityPolicy_withAdaptiveProtectionAutoDeployConfig(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" + + adaptive_protection_config { + auto_deploy_config { + load_threshold = 0.8 + confidence_threshold = 0.5 + impacted_baseline_threshold = 0.01 + expiration_sec = 7200 + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withAdaptiveProtectionAutoDeployConfig_update(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" + + adaptive_protection_config { + auto_deploy_config { + load_threshold = 0.9 + confidence_threshold = 0.6 + impacted_baseline_threshold = 0.03 + expiration_sec = 8000 + } + } +} +`, spName) +} +{{- end }} + +func testAccComputeSecurityPolicy_withRateLimitOptions(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "throttle" + priority = 100 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = [ + "0.0.0.0/32", + ] + } + } + 
rate_limit_options { + conform_action = "allow" + exceed_action = "deny(403)" + enforce_on_key = "IP" + rate_limit_threshold { + count = 100 + interval_sec = 60 + } + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withRateLimitWithRedirectOptions(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "updated description" + + rule { + action = "allow" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + } + + rule { + action = "throttle" + priority = 100 + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = [ + "0.0.0.0/32", + ] + } + } + rate_limit_options { + conform_action = "allow" + exceed_action = "redirect" + enforce_on_key = "IP" + exceed_redirect_options { + type = "EXTERNAL_302" + target = "https://www.example.com" + } + rate_limit_threshold { + count = 100 + interval_sec = 60 + } + } + } +} +`, spName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeSecurityPolicy_withRateLimitOptions_withEnforceOnKey(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "throttle rule with enforce_on_key_configs" + + rule { + action = "throttle" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule withEnforceOnKey" + + rate_limit_options { + conform_action = "allow" + exceed_action = "redirect" + + enforce_on_key = "IP" + + exceed_redirect_options { + type = "EXTERNAL_302" + target = "https://www.example.com" + } + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withRateLimitOptions_withoutRateLimitOptions(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + 
description = "throttle rule with enforce_on_key_configs" + + rule { + action = "deny(403)" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule withoutRateLimitOptions" + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withRateLimitOptions_withEnforceOnKeyName(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "throttle rule with enforce_on_key_configs" + + rule { + action = "throttle" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule withEnforceOnKeyName" + + rate_limit_options { + conform_action = "allow" + exceed_action = "redirect" + + enforce_on_key = "HTTP_HEADER" + enforce_on_key_name = "user-agent" + + exceed_redirect_options { + type = "EXTERNAL_302" + target = "https://www.example.com" + } + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withRateLimitOptions_withEnforceOnKeyConfigs(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "throttle rule with enforce_on_key_configs" + + rule { + action = "throttle" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule withEnforceOnKeyConfigs" + + rate_limit_options { + conform_action = "allow" + exceed_action = "redirect" + + enforce_on_key = "" + + enforce_on_key_configs { + enforce_on_key_type = "IP" + } + exceed_redirect_options { + type = "EXTERNAL_302" + target = "https://www.example.com" + } + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withRateLimitOption_withMultipleEnforceOnKeyConfigs(spName string) string { + return 
fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "throttle rule with enforce_on_key_configs" + + rule { + action = "throttle" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule withMultipleEnforceOnKeyConfigs" + + rate_limit_options { + conform_action = "allow" + exceed_action = "deny(429)" + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + + enforce_on_key = "" + + enforce_on_key_configs { + enforce_on_key_type = "HTTP_PATH" + } + + enforce_on_key_configs { + enforce_on_key_type = "HTTP_HEADER" + enforce_on_key_name = "user-agent" + } + + enforce_on_key_configs { + enforce_on_key_type = "REGION_CODE" + } + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withRateLimitOption_withMultipleEnforceOnKeyConfigs2(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + description = "throttle rule with enforce_on_key_configs" + + rule { + action = "throttle" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule withMultipleEnforceOnKeyConfigs2" + + rate_limit_options { + conform_action = "allow" + exceed_action = "deny(429)" + + rate_limit_threshold { + count = 10 + interval_sec = 60 + } + + enforce_on_key = "" + + enforce_on_key_configs { + enforce_on_key_type = "REGION_CODE" + } + + enforce_on_key_configs { + enforce_on_key_type = "TLS_JA3_FINGERPRINT" + } + + enforce_on_key_configs { + enforce_on_key_type = "USER_IP" + } + } + } +} +`, spName) +} +{{- end }} + +func TestAccComputeSecurityPolicy_withRedirectOptionsRecaptcha(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRedirectOptionsRecaptcha(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withRedirectOptionsUpdate(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRedirectOptionsRecaptcha(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSecurityPolicy_withRedirectOptionsExternal(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSecurityPolicy_withRedirectOptionsExternal(t *testing.T) { + t.Parallel() + + spName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSecurityPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSecurityPolicy_withRedirectOptionsExternal(spName), + }, + { + ResourceName: "google_compute_security_policy.policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeSecurityPolicy_withRedirectOptionsRecaptcha(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + 
rule { + action = "redirect" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + redirect_options { + type = "GOOGLE_RECAPTCHA" + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withRedirectOptionsExternal(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "redirect" + priority = "2147483647" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + description = "default rule" + redirect_options { + type = "EXTERNAL_302" + target = "https://example.com" + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_withExprOptions(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + description = "default rule" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + } + + rule { + action = "deny(403)" + priority = "2000" + description = "reCAPTCHA rule" + match { + expr { + expression = "request.path.endsWith('RegisterWithEmail') && token.recaptcha_action.score >= 0.8 && (token.recaptcha_action.valid)" + } + expr_options { + recaptcha_options { + action_token_site_keys = [ + "placeholder-recaptcha-action-site-key-01", + "placeholder-recaptcha-action-site-key-02" + ] + session_token_site_keys = [ + "placeholder-recaptcha-session-site-key-1", + "placeholder-recaptcha-session-site-key-2" + ] + } + } + } + } +} +`, spName) +} + +func testAccComputeSecurityPolicy_modifyExprOptions(spName string) string { + return fmt.Sprintf(` +resource "google_compute_security_policy" "policy" { + name = "%s" + + rule { + action = "allow" + priority = "2147483647" + description = "default rule" + match { + versioned_expr = "SRC_IPS_V1" + config { + src_ip_ranges = ["*"] + } + } + } + + rule { + action = "deny(403)" + 
priority = "2000" + description = "reCAPTCHA rule" + match { + expr { + expression = "request.path.endsWith('RegisterWithEmail') && token.recaptcha_action.score >= 0.8 && (token.recaptcha_action.valid)" + } + expr_options { + recaptcha_options { + action_token_site_keys = [ + "placeholder-recaptcha-action-site-key-09", + "placeholder-recaptcha-action-site-key-08", + "placeholder-recaptcha-action-site-key-07" + ] + session_token_site_keys = [ + "placeholder-recaptcha-session-site-key-1" + ] + } + } + } + } +} +`, spName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_shared_vpc_service_project.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_shared_vpc_service_project.go.tmpl new file mode 100644 index 000000000000..3cda6bb02ef0 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_shared_vpc_service_project.go.tmpl @@ -0,0 +1,194 @@ +package compute + +import ( + "fmt" + "strings" + "time" + "log" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "google.golang.org/api/googleapi" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceComputeSharedVpcServiceProject() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeSharedVpcServiceProjectCreate, + Read: resourceComputeSharedVpcServiceProjectRead, + Delete: resourceComputeSharedVpcServiceProjectDelete, + Update: resourceComputeSharedVpcServiceProjectUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: 
schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "host_project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of a host project to associate.`, + }, + "service_project": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The ID of the project that will serve as a Shared VPC service project.`, + }, + "deletion_policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Description: `The deletion policy for the shared VPC service. Setting ABANDON allows the resource + to be abandoned rather than deleted. Possible values are: "ABANDON".`, + ValidateFunc: validation.StringInSlice([]string{"ABANDON", ""}, false), + }, + }, + UseJSONNumber: true, + } +} + +func resourceComputeSharedVpcServiceProjectCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + hostProject := d.Get("host_project").(string) + serviceProject := d.Get("service_project").(string) + + req := &compute.ProjectsEnableXpnResourceRequest{ + XpnResource: &compute.XpnResourceId{ + Id: serviceProject, + Type: "PROJECT", + }, + } + op, err := config.NewComputeClient(userAgent).Projects.EnableXpnResource(hostProject, req).Do() + if err != nil { + return err + } + err = ComputeOperationWaitTime(config, op, hostProject, "Enabling Shared VPC Resource", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + d.SetId(fmt.Sprintf("%s/%s", hostProject, serviceProject)) + + return nil +} + +func resourceComputeSharedVpcServiceProjectRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, 
config.UserAgent) + if err != nil { + return err + } + + split := strings.Split(d.Id(), "/") + if len(split) != 2 { + return fmt.Errorf("Error parsing resource ID %s", d.Id()) + } + hostProject := split[0] + serviceProject := split[1] + + associatedHostProject, err := config.NewComputeClient(userAgent).Projects.GetXpnHost(serviceProject).Do() + if err != nil { + log.Printf("[WARN] Removing shared VPC service. The service project is not associated with any host") + + d.SetId("") + return nil + } + + if hostProject != associatedHostProject.Name { + log.Printf("[WARN] Removing shared VPC service. Expected associated host project to be '%s', got '%s'", hostProject, associatedHostProject.Name) + d.SetId("") + return nil + } + + if err := d.Set("host_project", hostProject); err != nil { + return fmt.Errorf("Error setting host_project: %s", err) + } + if err := d.Set("service_project", serviceProject); err != nil { + return fmt.Errorf("Error setting service_project: %s", err) + } + + return nil +} + +func resourceComputeSharedVpcServiceProjectDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + hostProject := d.Get("host_project").(string) + serviceProject := d.Get("service_project").(string) + + if deletionPolicy := d.Get("deletion_policy"); deletionPolicy == "ABANDON" { + log.Printf("[WARN] Shared VPC service project %q deletion_policy is set to 'ABANDON', skip disabling shared VPC service project", d.Id()) + d.SetId("") + return nil + } + + if err := disableXpnResource(d, config, hostProject, serviceProject); err != nil { + // Don't fail if the service project is already disabled. 
+ if !isDisabledXpnResourceError(err) { + return fmt.Errorf("Error disabling Shared VPC Resource %q: %s", serviceProject, err) + } + } + + return nil +} + +func disableXpnResource(d *schema.ResourceData, config *transport_tpg.Config, hostProject, project string) error { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + req := &compute.ProjectsDisableXpnResourceRequest{ + XpnResource: &compute.XpnResourceId{ + Id: project, + Type: "PROJECT", + }, + } + op, err := config.NewComputeClient(userAgent).Projects.DisableXpnResource(hostProject, req).Do() + if err != nil { + return err + } + err = ComputeOperationWaitTime(config, op, hostProject, "Disabling Shared VPC Resource", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + return nil +} + +func isDisabledXpnResourceError(err error) bool { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 400 && len(gerr.Errors) > 0 && gerr.Errors[0].Reason == "invalidResourceUsage" { + return true + } + } + return false +} + +func resourceComputeSharedVpcServiceProjectUpdate(d *schema.ResourceData, meta interface{}) error{ + // This update method is no-op because the only updatable fields + // are state/config-only, i.e. they aren't sent in requests to the API. 
+ return nil +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_ssl_policy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_ssl_policy_test.go.tmpl new file mode 100644 index 000000000000..37bf28f24afd --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_ssl_policy_test.go.tmpl @@ -0,0 +1,227 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestAccComputeSslPolicy_update(t *testing.T) { + t.Parallel() + + var sslPolicy compute.SslPolicy + sslPolicyName := fmt.Sprintf("test-ssl-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSslPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSslUpdate1(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslPolicyExists( + t, "google_compute_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "profile", "MODERN"), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "min_tls_version", "TLS_1_0"), + ), + }, + { + ResourceName: "google_compute_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSslUpdate2(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslPolicyExists( + t, "google_compute_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + 
"google_compute_ssl_policy.update", "profile", "RESTRICTED"), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "min_tls_version", "TLS_1_2"), + ), + }, + { + ResourceName: "google_compute_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSslPolicy_update_to_custom(t *testing.T) { + t.Parallel() + + var sslPolicy compute.SslPolicy + sslPolicyName := fmt.Sprintf("test-ssl-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSslPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSslUpdate1(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslPolicyExists( + t, "google_compute_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "profile", "MODERN"), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "min_tls_version", "TLS_1_0"), + ), + }, + { + ResourceName: "google_compute_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSslUpdate3(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslPolicyExists( + t, "google_compute_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "profile", "CUSTOM"), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "min_tls_version", "TLS_1_1"), + ), + }, + { + ResourceName: "google_compute_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSslPolicy_update_from_custom(t *testing.T) { + t.Parallel() + + var sslPolicy compute.SslPolicy + sslPolicyName := fmt.Sprintf("test-ssl-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSslPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSslUpdate3(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslPolicyExists( + t, "google_compute_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "profile", "CUSTOM"), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "min_tls_version", "TLS_1_1"), + ), + }, + { + ResourceName: "google_compute_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSslUpdate1(sslPolicyName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSslPolicyExists( + t, "google_compute_ssl_policy.update", &sslPolicy), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "profile", "MODERN"), + resource.TestCheckResourceAttr( + "google_compute_ssl_policy.update", "min_tls_version", "TLS_1_0"), + ), + }, + { + ResourceName: "google_compute_ssl_policy.update", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckComputeSslPolicyExists(t *testing.T, n string, sslPolicy *compute.SslPolicy) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + name := rs.Primary.Attributes["name"] + + found, err := config.NewComputeClient(config.UserAgent).SslPolicies.Get( + project, name).Do() + if err != nil { + return fmt.Errorf("Error Reading SSL Policy %s: %s", name, err) + } + + if found.Name != name { + return 
fmt.Errorf("SSL Policy not found") + } + + *sslPolicy = *found + + return nil + } +} + +func testAccComputeSslUpdate1(resourceName string) string { + return fmt.Sprintf(` +resource "google_compute_ssl_policy" "update" { + name = "%s" + description = "Generated by TF provider acceptance test" + min_tls_version = "TLS_1_0" + profile = "MODERN" +} +`, resourceName) +} + +func testAccComputeSslUpdate2(resourceName string) string { + return fmt.Sprintf(` +resource "google_compute_ssl_policy" "update" { + name = "%s" + description = "Generated by TF provider acceptance test" + min_tls_version = "TLS_1_2" + profile = "RESTRICTED" +} +`, resourceName) +} + +func testAccComputeSslUpdate3(resourceName string) string { + return fmt.Sprintf(` +resource "google_compute_ssl_policy" "update" { + name = "%s" + description = "Generated by TF provider acceptance test" + min_tls_version = "TLS_1_1" + profile = "CUSTOM" + custom_features = ["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"] +} +`, resourceName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl new file mode 100644 index 000000000000..964898c6b158 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_subnetwork_test.go.tmpl @@ -0,0 +1,843 @@ +package compute_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +// Unit tests + +func TestIsShrinkageIpCidr(t *testing.T) { + cases := 
map[string]struct { + Old, New string + Shrinkage bool + }{ + "Expansion same network ip": { + Old: "10.0.0.0/24", + New: "10.0.0.0/16", + Shrinkage: false, + }, + "Expansion different network ip": { + Old: "10.0.1.0/24", + New: "10.0.0.0/16", + Shrinkage: false, + }, + "Shrinkage same network ip": { + Old: "10.0.0.0/16", + New: "10.0.0.0/24", + Shrinkage: true, + }, + "Shrinkage different network ip": { + Old: "10.0.0.0/16", + New: "10.1.0.0/16", + Shrinkage: true, + }, + } + + for tn, tc := range cases { + if tpgcompute.IsShrinkageIpCidr(context.Background(), tc.Old, tc.New, nil) != tc.Shrinkage { + t.Errorf("%s failed: Shrinkage should be %t", tn, tc.Shrinkage) + } + } +} + +// Acceptance tests + +func TestAccComputeSubnetwork_basic(t *testing.T) { + t.Parallel() + + var subnetwork1 compute.Subnetwork + var subnetwork2 compute.Subnetwork + + cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetwork1Name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetwork2Name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetwork3Name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSubnetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSubnetwork_basic(cnName, subnetwork1Name, subnetwork2Name, subnetwork3Name), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-ref-by-url", &subnetwork1), + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-ref-by-name", &subnetwork2), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-ref-by-url", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_compute_subnetwork.network-with-private-google-access", + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSubnetwork_update(t *testing.T) { + t.Parallel() + + var subnetwork compute.Subnetwork + + cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetworkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSubnetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSubnetwork_update1(cnName, "10.2.0.0/24", subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-private-google-access", &subnetwork), + ), + }, + { + // Expand IP CIDR range and update private_ip_google_access + Config: testAccComputeSubnetwork_update2(cnName, "10.2.0.0/16", subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-private-google-access", &subnetwork), + ), + }, + { + // Shrink IP CIDR range and update private_ip_google_access + Config: testAccComputeSubnetwork_update2(cnName, "10.2.0.0/24", subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-private-google-access", &subnetwork), + ), + }, + { + // Add a secondary range and enable flow logs at once + Config: testAccComputeSubnetwork_update3(cnName, "10.2.0.0/24", subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-private-google-access", &subnetwork), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-with-private-google-access", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) + + if subnetwork.PrivateIpGoogleAccess { + t.Errorf("Expected PrivateIpGoogleAccess to be 
false, got %v", subnetwork.PrivateIpGoogleAccess) + } +} + +func TestAccComputeSubnetwork_secondaryIpRanges(t *testing.T) { + t.Parallel() + + var subnetwork compute.Subnetwork + + cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetworkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSubnetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSubnetwork_secondaryIpRanges_update1(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + ), + }, + { + Config: testAccComputeSubnetwork_secondaryIpRanges_update2(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), + ), + }, + { + Config: testAccComputeSubnetwork_secondaryIpRanges_update3(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), + ), + }, + { + Config: 
testAccComputeSubnetwork_secondaryIpRanges_update4(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), + ), + }, + { + Config: testAccComputeSubnetwork_secondaryIpRanges_update1(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists(t, "google_compute_subnetwork.network-with-private-secondary-ip-ranges", &subnetwork), + testAccCheckComputeSubnetworkHasSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update1", "192.168.10.0/24"), + testAccCheckComputeSubnetworkHasNotSecondaryIpRange(&subnetwork, "tf-test-secondary-range-update2", "192.168.11.0/24"), + ), + }, + }, + }) +} + +func TestAccComputeSubnetwork_flowLogs(t *testing.T) { + t.Parallel() + + var subnetwork compute.Subnetwork + + cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetworkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSubnetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSubnetwork_flowLogs(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-flow-logs", &subnetwork), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-with-flow-logs", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSubnetwork_flowLogsUpdate1(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-flow-logs", &subnetwork), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-with-flow-logs", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSubnetwork_flowLogsUpdate2(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-flow-logs", &subnetwork), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-with-flow-logs", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSubnetwork_flowLogsUpdate3(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-flow-logs", &subnetwork), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-with-flow-logs", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSubnetwork_flowLogsDelete(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-flow-logs", &subnetwork), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-with-flow-logs", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSubnetwork_flowLogsMigrate(t *testing.T) { + t.Parallel() + + var subnetwork compute.Subnetwork + + cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetworkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSubnetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSubnetwork_flowLogsMigrate(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-flow-logs", &subnetwork), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-with-flow-logs", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSubnetwork_flowLogsMigrate2(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-flow-logs", &subnetwork), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-with-flow-logs", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSubnetwork_flowLogsMigrate3(cnName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeSubnetworkExists( + t, "google_compute_subnetwork.network-with-flow-logs", &subnetwork), + ), + }, + { + ResourceName: "google_compute_subnetwork.network-with-flow-logs", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSubnetwork_ipv6(t *testing.T) { + t.Parallel() + + cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetworkName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSubnetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSubnetwork_ipv4(cnName, subnetworkName), + }, + { + ResourceName: "google_compute_subnetwork.subnetwork", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeSubnetwork_ipv6(cnName, subnetworkName), + }, + { + ResourceName: "google_compute_subnetwork.subnetwork", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeSubnetwork_internal_ipv6(t *testing.T) { + t.Parallel() + + cnName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + subnetworkName 
:= fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeSubnetworkDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeSubnetwork_internal_ipv6(cnName, subnetworkName), + }, + { + ResourceName: "google_compute_subnetwork.subnetwork", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckComputeSubnetworkExists(t *testing.T, n string, subnetwork *compute.Subnetwork) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + region := rs.Primary.Attributes["region"] + subnet_name := rs.Primary.Attributes["name"] + + found, err := config.NewComputeClient(config.UserAgent).Subnetworks.Get( + config.Project, region, subnet_name).Do() + if err != nil { + return err + } + + if found.Name != subnet_name { + return fmt.Errorf("Subnetwork not found") + } + + *subnetwork = *found + + return nil + } +} + +func testAccCheckComputeSubnetworkHasSecondaryIpRange(subnetwork *compute.Subnetwork, rangeName, ipCidrRange string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, secondaryRange := range subnetwork.SecondaryIpRanges { + if secondaryRange.RangeName == rangeName { + if secondaryRange.IpCidrRange == ipCidrRange { + return nil + } + return fmt.Errorf("Secondary range %s has the wrong ip_cidr_range. 
Expected %s, got %s", rangeName, ipCidrRange, secondaryRange.IpCidrRange) + } + } + + return fmt.Errorf("Secondary range %s not found", rangeName) + } +} + +func testAccCheckComputeSubnetworkHasNotSecondaryIpRange(subnetwork *compute.Subnetwork, rangeName, ipCidrRange string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, secondaryRange := range subnetwork.SecondaryIpRanges { + if secondaryRange.RangeName == rangeName { + if secondaryRange.IpCidrRange == ipCidrRange { + return fmt.Errorf("Secondary range %s has the wrong ip_cidr_range. Expected %s, got %s", rangeName, ipCidrRange, secondaryRange.IpCidrRange) + } + } + } + + return nil + } +} + +func testAccComputeSubnetwork_basic(cnName, subnetwork1Name, subnetwork2Name, subnetwork3Name string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-ref-by-url" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link +} + +resource "google_compute_subnetwork" "network-ref-by-name" { + name = "%s" + ip_cidr_range = "10.1.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.name +} + +resource "google_compute_subnetwork" "network-with-private-google-access" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + private_ip_google_access = true +} +`, cnName, subnetwork1Name, subnetwork2Name, subnetwork3Name) +} + +func testAccComputeSubnetwork_update1(cnName, cidrRange, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-private-google-access" { + name = "%s" + ip_cidr_range = "%s" + region = "us-central1" + network = 
google_compute_network.custom-test.self_link + private_ip_google_access = true +} +`, cnName, subnetworkName, cidrRange) +} + +func testAccComputeSubnetwork_update2(cnName, cidrRange, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-private-google-access" { + name = "%s" + ip_cidr_range = "%s" + region = "us-central1" + network = google_compute_network.custom-test.self_link +} +`, cnName, subnetworkName, cidrRange) +} + +func testAccComputeSubnetwork_update3(cnName, cidrRange, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-private-google-access" { + name = "%s" + ip_cidr_range = "%s" + region = "us-central1" + network = google_compute_network.custom-test.self_link + + secondary_ip_range { + range_name = "tf-test-secondary-range-update" + ip_cidr_range = "192.168.10.0/24" + } +} +`, cnName, subnetworkName, cidrRange) +} + +func testAccComputeSubnetwork_secondaryIpRanges_update1(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_secondaryIpRanges_update2(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource 
"google_compute_subnetwork" "network-with-private-secondary-ip-ranges" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } + secondary_ip_range { + range_name = "tf-test-secondary-range-update2" + ip_cidr_range = "192.168.11.0/24" + } +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_secondaryIpRanges_update3(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + secondary_ip_range { + range_name = "tf-test-secondary-range-update2" + ip_cidr_range = "192.168.11.0/24" + } + secondary_ip_range { + range_name = "tf-test-secondary-range-update1" + ip_cidr_range = "192.168.10.0/24" + } +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_secondaryIpRanges_update4(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-private-secondary-ip-ranges" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + secondary_ip_range = [] +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_flowLogs(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-flow-logs" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + 
network = google_compute_network.custom-test.self_link + log_config { + aggregation_interval = "INTERVAL_5_SEC" + flow_sampling = 0.5 + metadata = "INCLUDE_ALL_METADATA" + } +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_flowLogsUpdate1(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-flow-logs" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + log_config { + aggregation_interval = "INTERVAL_30_SEC" + flow_sampling = 0.8 + metadata = "EXCLUDE_ALL_METADATA" + } +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_flowLogsUpdate2(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-flow-logs" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + log_config { + aggregation_interval = "INTERVAL_30_SEC" + flow_sampling = 0.8 + metadata = "CUSTOM_METADATA" + metadata_fields = [ + "src_gke_details", + "dest_gke_details", + ] + filter_expr = "inIpRange(connection.src_ip, '10.0.0.0/8')" + } +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_flowLogsUpdate3(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-flow-logs" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + log_config { + aggregation_interval = "INTERVAL_30_SEC" + flow_sampling = 0.8 + metadata = "INCLUDE_ALL_METADATA" + } +} +`, 
cnName, subnetworkName) +} + +func testAccComputeSubnetwork_flowLogsDelete(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-flow-logs" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_flowLogsMigrate(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-flow-logs" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + log_config { + aggregation_interval = "INTERVAL_30_SEC" + flow_sampling = 0.6 + metadata = "INCLUDE_ALL_METADATA" + } +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_flowLogsMigrate2(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-flow-logs" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + log_config { + aggregation_interval = "INTERVAL_30_SEC" + flow_sampling = 0.7 + metadata = "INCLUDE_ALL_METADATA" + } +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_flowLogsMigrate3(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "network-with-flow-logs" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = 
google_compute_network.custom-test.self_link + log_config { + aggregation_interval = "INTERVAL_30_SEC" + flow_sampling = 0.8 + metadata = "INCLUDE_ALL_METADATA" + } +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_ipv4(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_ipv6(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" +} +`, cnName, subnetworkName) +} + +func testAccComputeSubnetwork_internal_ipv6(cnName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "custom-test" { + name = "%s" + auto_create_subnetworks = false + enable_ula_internal_ipv6 = true +} + +resource "google_compute_subnetwork" "subnetwork" { + name = "%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.custom-test.self_link + stack_type = "IPV4_IPV6" + ipv6_access_type = "INTERNAL" +} +`, cnName, subnetworkName) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl new file mode 100644 index 000000000000..b66d344c9d48 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl @@ 
-0,0 +1,343 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +{{- if eq $.TargetVersionName "ga" }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +const ( + canonicalSslCertificateTemplate = "https://www.googleapis.com/compute/v1/projects/%s/global/sslCertificates/%s" + canonicalCertificateMapTemplate = "//certificatemanager.googleapis.com/projects/%s/locations/global/certificateMaps/%s" +) + +func TestAccComputeTargetHttpsProxy_update(t *testing.T) { + t.Parallel() + + var proxy compute.TargetHttpsProxy + resourceSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeTargetHttpsProxy_basic1(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyDescription("Resource created for Terraform acceptance testing", &proxy), + testAccComputeTargetHttpsProxyHasSslCertificate(t, "tf-test-httpsproxy-cert1-"+resourceSuffix, &proxy), + ), + }, + + { + Config: testAccComputeTargetHttpsProxy_basic2(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyDescription("Resource created for Terraform acceptance testing", &proxy), + testAccComputeTargetHttpsProxyHasSslCertificate(t, 
"tf-test-httpsproxy-cert1-"+resourceSuffix, &proxy), + testAccComputeTargetHttpsProxyHasSslCertificate(t, "tf-test-httpsproxy-cert2-"+resourceSuffix, &proxy), + ), + }, + }, + }) +} + +func TestAccComputeTargetHttpsProxy_certificateMap(t *testing.T) { + t.Parallel() + + var proxy compute.TargetHttpsProxy + resourceSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetHttpsProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeTargetHttpsProxy_certificateMap(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetHttpsProxyExists( + t, "google_compute_target_https_proxy.foobar", &proxy), + testAccComputeTargetHttpsProxyDescription("Resource created for Terraform acceptance testing", &proxy), + testAccComputeTargetHttpsProxyHasCertificateMap(t, "tf-test-certmap-"+resourceSuffix, &proxy), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetHttpsProxyExists(t *testing.T, n string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + name := rs.Primary.Attributes["name"] + + found, err := config.NewComputeClient(config.UserAgent).TargetHttpsProxies.Get( + config.Project, name).Do() + if err != nil { + return err + } + + if found.Name != name { + return fmt.Errorf("TargetHttpsProxy not found") + } + + *proxy = *found + + return nil + } +} + +func testAccComputeTargetHttpsProxyDescription(description string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + if proxy.Description != description { + return fmt.Errorf("Wrong 
description: expected '%s' got '%s'", description, proxy.Description) + } + return nil + } +} + +func testAccComputeTargetHttpsProxyHasSslCertificate(t *testing.T, cert string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + certUrl := fmt.Sprintf(canonicalSslCertificateTemplate, config.Project, cert) + + for _, sslCertificate := range proxy.SslCertificates { + if tpgresource.ConvertSelfLinkToV1(sslCertificate) == certUrl { + return nil + } + } + + return fmt.Errorf("Ssl certificate not found: expected'%s'", certUrl) + } +} + +func testAccComputeTargetHttpsProxyHasCertificateMap(t *testing.T, certificateMap string, proxy *compute.TargetHttpsProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + certificateMapUrl := fmt.Sprintf(canonicalCertificateMapTemplate, config.Project, certificateMap) + + if tpgresource.ConvertSelfLinkToV1(proxy.CertificateMap) == certificateMapUrl { + return nil + } + + return fmt.Errorf("certificate map not found: expected'%s'", certificateMapUrl) + } +} + +func testAccComputeTargetHttpsProxy_basic1(id string) string { + return fmt.Sprintf(` +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link + ssl_certificates = [google_compute_ssl_certificate.foobar1.self_link] + ssl_policy = google_compute_ssl_policy.foobar.self_link +} + +resource "google_compute_backend_service" "foobar" { + name = "tf-test-httpsproxy-backend-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-httpsproxy-check-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = 
"tf-test-httpsproxy-urlmap-%s" + default_service = google_compute_backend_service.foobar.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_backend_service.foobar.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.foobar.self_link + } +} + +resource "google_compute_ssl_policy" "foobar" { + name = "tf-test-sslproxy-%s" + description = "my-description" + min_tls_version = "TLS_1_2" + profile = "MODERN" +} + +resource "google_compute_ssl_certificate" "foobar1" { + name = "tf-test-httpsproxy-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_ssl_certificate" "foobar2" { + name = "tf-test-httpsproxy-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id) +} + +func testAccComputeTargetHttpsProxy_basic2(id string) string { + return fmt.Sprintf(` +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link + ssl_certificates = [ + google_compute_ssl_certificate.foobar1.self_link, + google_compute_ssl_certificate.foobar2.self_link, + ] + quic_override = "ENABLE" +} + +resource "google_compute_backend_service" "foobar" { + name = "tf-test-httpsproxy-backend-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-httpsproxy-check-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + 
name = "tf-test-httpsproxy-urlmap-%s" + default_service = google_compute_backend_service.foobar.self_link + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "boop" + path_rule { + paths = ["/*"] + service = google_compute_backend_service.foobar.self_link + } + } + test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.foobar.self_link + } +} + +resource "google_compute_ssl_policy" "foobar" { + name = "tf-test-sslproxy-%s" + description = "my-description" + min_tls_version = "TLS_1_2" + profile = "MODERN" +} + +resource "google_compute_ssl_certificate" "foobar1" { + name = "tf-test-httpsproxy-cert1-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_ssl_certificate" "foobar2" { + name = "tf-test-httpsproxy-cert2-%s" + description = "very descriptive" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} +`, id, id, id, id, id, id, id) +} + +func testAccComputeTargetHttpsProxy_certificateMap(id string) string { + return fmt.Sprintf(` +resource "google_compute_target_https_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "tf-test-httpsproxy-%s" + url_map = google_compute_url_map.foobar.self_link + certificate_map = "//certificatemanager.googleapis.com/${google_certificate_manager_certificate_map.map.id}" +} + +resource "google_compute_backend_service" "foobar" { + name = "tf-test-httpsproxy-backend-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "tf-test-httpsproxy-check-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = 
"tf-test-httpsproxy-urlmap-%s" + default_service = google_compute_backend_service.foobar.self_link +} + +resource "google_certificate_manager_certificate_map" "map" { + name = "tf-test-certmap-%s" +} + +resource "google_certificate_manager_certificate_map_entry" "map_entry" { + name = "tf-test-certmapentry-%s" + map = google_certificate_manager_certificate_map.map.name + certificates = [google_certificate_manager_certificate.certificate.id] + matcher = "PRIMARY" +} + +resource "google_certificate_manager_certificate" "certificate" { + name = "tf-test-cert-%s" + scope = "DEFAULT" + managed { + domains = [ + google_certificate_manager_dns_authorization.instance.domain, + ] + dns_authorizations = [ + google_certificate_manager_dns_authorization.instance.id, + ] + } +} + +resource "google_certificate_manager_dns_authorization" "instance" { + name = "tf-test-dnsauthz-%s" + domain = "mysite.com" +} + +`, id, id, id, id, id, id, id, id) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_instance_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_instance_test.go.tmpl new file mode 100644 index 000000000000..644aef35d759 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_instance_test.go.tmpl @@ -0,0 +1,157 @@ +package compute_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeTargetInstance_withSecurityPolicy(t *testing.T) { + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeTargetInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccComputeTargetInstance_withSecurityPolicy(context, "google_compute_region_security_policy.regionsecuritypolicy1.self_link", true), + }, + { + ResourceName: "google_compute_target_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance", "zone"}, + }, + { + Config: testAccComputeTargetInstance_withSecurityPolicy(context, "google_compute_region_security_policy.regionsecuritypolicy2.self_link", true), + }, + { + ResourceName: "google_compute_target_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance", "zone"}, + }, + { + Config: testAccComputeTargetInstance_withSecurityPolicy(context, "\"\"", true), + }, + { + ResourceName: "google_compute_target_instance.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance", "zone"}, + }, + { + Config: testAccComputeTargetInstance_withSecurityPolicy(context, "\"\"", false), + }, + }, + }) +} + +func testAccComputeTargetInstance_withSecurityPolicy(context map[string]interface{}, policySet string, preventDestroy bool) string { + context["policy_set"] = policySet + context["lifecycle_block"] = "" + if preventDestroy { + context["lifecycle_block"] = ` + lifecycle { + prevent_destroy = true + }` + } + + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-up-pol-net%{random_suffix}" + auto_create_subnetworks = false + routing_mode = "REGIONAL" +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-up-pol-subnet%{random_suffix}" + ip_cidr_range = "10.1.2.0/24" + network = google_compute_network.default.id + private_ipv6_google_access = "DISABLE_GOOGLE_ACCESS" + purpose = "PRIVATE" + region = "southamerica-east1" + stack_type = "IPV4_ONLY" +} + +data "google_compute_image" "vmimage" { + provider = google-beta + family = "debian-11" + project = "debian-cloud" +} + 
+resource "google_compute_instance" "target-vm" { + provider = google-beta + name = "tf-test-up-pol-target-vm%{random_suffix}" + machine_type = "e2-medium" + zone = "southamerica-east1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.vmimage.self_link + } + } + + network_interface { + network = google_compute_network.default.self_link + subnetwork = google_compute_subnetwork.default.self_link + access_config { + } + } +} + +resource "google_compute_region_security_policy" "policyddosprotection" { + provider = google-beta + region = "southamerica-east1" + name = "tf-test-up-pol-policyddos%{random_suffix}" + description = "ddos protection security policy to set target instance" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + provider = google-beta + region = "southamerica-east1" + name = "tf-test-up-pol-edgesec%{random_suffix}" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_region_security_policy" "regionsecuritypolicy1" { + provider = google-beta + name = "tf-test-up-pol-region-secpolicy1%{random_suffix}" + region = "southamerica-east1" + description = "basic security policy one for target instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_region_security_policy" "regionsecuritypolicy2" { + provider = google-beta + name = "tf-test-up-pol-region-secpolicy2%{random_suffix}" + region = "southamerica-east1" + description = "basic security policy two for target instance" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_target_instance" "default" { + provider = google-beta + name = "tf-test-up-pol-target-instance%{random_suffix}" + zone = 
"southamerica-east1-a" + instance = google_compute_instance.target-vm.id + security_policy = %{policy_set} + %{lifecycle_block} +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_pool.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_pool.go.tmpl new file mode 100644 index 000000000000..d5617b8e3e96 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_pool.go.tmpl @@ -0,0 +1,590 @@ +package compute + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/googleapi" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +var instancesSelfLinkPattern = regexp.MustCompile(fmt.Sprintf(tpgresource.ZonalLinkBasePattern, "instances")) + +func ResourceComputeTargetPool() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeTargetPoolCreate, + Read: resourceComputeTargetPoolRead, + Delete: resourceComputeTargetPoolDelete, + Update: resourceComputeTargetPoolUpdate, + Importer: &schema.ResourceImporter{ + State: resourceTargetPoolStateImporter, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.DefaultProviderRegion, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `A unique name for the resource, required 
by GCE. Changing this forces a new resource to be created.`, + }, + + "backup_pool": { + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Description: `URL to the backup target pool. Must also set failover_ratio.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Textual description field.`, + }, + + "failover_ratio": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + Description: `Ratio (0 to 1) of failed nodes before using the backup pool (which must also be set).`, + }, + + "health_checks": { + Type: schema.TypeList, + Optional: true, + ForceNew: false, + MaxItems: 1, + Elem: &schema.Schema{ + Type: schema.TypeString, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + }, + Description: `List of zero or one health check name or self_link. Only legacy google_compute_http_health_check is supported.`, + }, + + "instances": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: false, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return canonicalizeInstanceRef(v.(string)) + }, + }, + Set: func(v interface{}) int { + return schema.HashString(canonicalizeInstanceRef(v.(string))) + }, + Description: `List of instances in the pool. They can be given as URLs, or in the form of "zone/name". Note that the instances need not exist at the time of target pool creation, so there is no need to use the Terraform interpolators to create a dependency on the instances from the target pool.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `Where the target pool resides. 
Defaults to project region.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + + "session_affinity": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "NONE", + Description: `How to distribute load. Options are "NONE" (no affinity). "CLIENT_IP" (hash of the source/dest addresses / ports), and "CLIENT_IP_PROTO" also includes the protocol (default "NONE").`, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "security_policy": { + Type: schema.TypeString, + Optional: true, + Description: `The resource URL for the security policy associated with this target pool.`, + }, + {{- end }} + }, + UseJSONNumber: true, + } +} + +func canonicalizeInstanceRef(instanceRef string) string { + // instances can also be specified in the config as a URL or / + parts := instancesSelfLinkPattern.FindStringSubmatch(instanceRef) + // parts[0] = full match + // parts[1] = project + // parts[2] = zone + // parts[3] = instance name + + if len(parts) < 4 { + return instanceRef + } + + return fmt.Sprintf("%s/%s", parts[2], parts[3]) + // return fmt.Sprintf("%s/%s/%s", parts[1], parts[2], parts[3]) +} + +// Healthchecks need to exist before being referred to from the target pool. +func convertHealthChecks(healthChecks []interface{}, d *schema.ResourceData, config *transport_tpg.Config) ([]string, error) { + if len(healthChecks) == 0 { + return []string{}, nil + } + + hc, err := tpgresource.ParseHttpHealthCheckFieldValue(healthChecks[0].(string), d, config) + if err != nil { + return nil, err + } + + return []string{hc.RelativeLink()}, nil +} + +// Instances do not need to exist yet, so we simply generate URLs. 
+// Instances can be full URLS or zone/name +func convertInstancesToUrls(d *schema.ResourceData, config *transport_tpg.Config, project string, names *schema.Set) ([]string, error) { + urls := make([]string, len(names.List())) + for i, nameI := range names.List() { + name := nameI.(string) + // assume that any URI will start with https:// + if strings.HasPrefix(name, "https://") { + urls[i] = name + } else { + splitName := strings.Split(name, "/") + if len(splitName) != 2 { + return nil, fmt.Errorf("Invalid instance name, require URL or zone/name: %s", name) + } else { + url, err := tpgresource.ReplaceVars(d, config, fmt.Sprintf( + "{{"{{"}}ComputeBasePath{{"}}"}}projects/%s/zones/%s/instances/%s", + project, splitName[0], splitName[1])) + if err != nil { + return nil, err + } + urls[i] = url + } + } + } + return urls, nil +} + +func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + hchkUrls, err := convertHealthChecks(d.Get("health_checks").([]interface{}), d, config) + if err != nil { + return err + } + + instanceUrls, err := convertInstancesToUrls(d, config, project, d.Get("instances").(*schema.Set)) + if err != nil { + return err + } + + // Build the parameter + tpool := &compute.TargetPool{ + BackupPool: d.Get("backup_pool").(string), + Description: d.Get("description").(string), + HealthChecks: hchkUrls, + Instances: instanceUrls, + Name: d.Get("name").(string), + SessionAffinity: d.Get("session_affinity").(string), + } + if d.Get("failover_ratio") != nil { + tpool.FailoverRatio = d.Get("failover_ratio").(float64) + } + log.Printf("[DEBUG] TargetPool insert request: %#v", tpool) + op, err := 
config.NewComputeClient(userAgent).TargetPools.Insert(
+ project, region, tpool).Do()
+ if err != nil {
+ if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 && strings.Contains(gerr.Message, "httpHealthChecks") {
+ return fmt.Errorf("Health check %s is not a valid HTTP health check", d.Get("health_checks").([]interface{})[0])
+ }
+ return fmt.Errorf("Error creating TargetPool: %s", err)
+ }
+
+ // The insert request was accepted, so store the ID now; partially-created resources stay tracked in state.
+ id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/targetPools/{{"{{"}}name{{"}}"}}")
+ if err != nil {
+ return fmt.Errorf("Error constructing id: %s", err)
+ }
+ d.SetId(id)
+
+ err = ComputeOperationWaitTime(config, op, project, "Creating Target Pool", userAgent, d.Timeout(schema.TimeoutCreate))
+ if err != nil {
+ return err
+ }
+
+ {{ if ne $.TargetVersionName `ga` -}}
+ // security_policy isn't set by Create
+ if o, n := d.GetChange("security_policy"); o.(string) != n.(string) {
+ pol, err := tpgresource.ParseSecurityPolicyRegionalFieldValue(n.(string), d, config)
+ if err != nil {
+ return fmt.Errorf("Error parsing TargetPool security policy: %s", err)
+ }
+
+ region, err := tpgresource.GetRegion(d, config)
+ if err != nil {
+ return err
+ }
+
+ spr := emptySecurityPolicyReference()
+ spr.SecurityPolicy = pol.RelativeLink()
+
+ op, err := config.NewComputeClient(userAgent).TargetPools.SetSecurityPolicy(project, region, d.Get("name").(string), spr).Do()
+ if err != nil {
+ return fmt.Errorf("Error setting TargetPool security policy: %s", err)
+ }
+
+ waitErr := ComputeOperationWaitTime(config, op, project, "Setting TargetPool Security Policy", userAgent, d.Timeout(schema.TimeoutCreate))
+ if waitErr != nil {
+ return waitErr
+ }
+ }
+ {{- end }}
+
+ return resourceComputeTargetPoolRead(d, meta)
+}
+
+func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*transport_tpg.Config)
+ userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + + d.Partial(true) + + if d.HasChange("health_checks") { + + from_, to_ := d.GetChange("health_checks") + fromUrls, err := convertHealthChecks(from_.([]interface{}), d, config) + if err != nil { + return err + } + toUrls, err := convertHealthChecks(to_.([]interface{}), d, config) + if err != nil { + return err + } + add, remove := tpgresource.CalcAddRemove(fromUrls, toUrls) + + removeReq := &compute.TargetPoolsRemoveHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(remove)), + } + for i, v := range remove { + removeReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err := config.NewComputeClient(userAgent).TargetPools.RemoveHealthCheck( + project, region, name, removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + addReq := &compute.TargetPoolsAddHealthCheckRequest{ + HealthChecks: make([]*compute.HealthCheckReference, len(add)), + } + for i, v := range add { + addReq.HealthChecks[i] = &compute.HealthCheckReference{HealthCheck: v} + } + op, err = config.NewComputeClient(userAgent).TargetPools.AddHealthCheck( + project, region, name, addReq).Do() + if err != nil { + return fmt.Errorf("Error updating health_check: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + if d.HasChange("instances") { + + old_, new_ := d.GetChange("instances") + old := old_.(*schema.Set) + new := 
new_.(*schema.Set) + + addUrls, err := convertInstancesToUrls(d, config, project, new.Difference(old)) + if err != nil { + return err + } + removeUrls, err := convertInstancesToUrls(d, config, project, old.Difference(new)) + if err != nil { + return err + } + + addReq := &compute.TargetPoolsAddInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(addUrls)), + } + for i, v := range addUrls { + addReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err := config.NewComputeClient(userAgent).TargetPools.AddInstance( + project, region, name, addReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + removeReq := &compute.TargetPoolsRemoveInstanceRequest{ + Instances: make([]*compute.InstanceReference, len(removeUrls)), + } + for i, v := range removeUrls { + removeReq.Instances[i] = &compute.InstanceReference{Instance: v} + } + op, err = config.NewComputeClient(userAgent).TargetPools.RemoveInstance( + project, region, name, removeReq).Do() + if err != nil { + return fmt.Errorf("Error updating instances: %s", err) + } + err = ComputeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + if d.HasChange("backup_pool") { + bpool_name := d.Get("backup_pool").(string) + tref := &compute.TargetReference{ + Target: bpool_name, + } + op, err := config.NewComputeClient(userAgent).TargetPools.SetBackup( + project, region, name, tref).Do() + if err != nil { + return fmt.Errorf("Error updating backup_pool: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Updating Target Pool", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + } + + {{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("security_policy") { + sp := 
d.Get("security_policy").(string)
+ pol, err := tpgresource.ParseSecurityPolicyRegionalFieldValue(sp, d, config)
+ if err != nil {
+ return fmt.Errorf("Error parsing TargetPool security policy: %s", err)
+ }
+
+ region, err := tpgresource.GetRegion(d, config)
+ if err != nil {
+ return err
+ }
+
+ spr := emptySecurityPolicyReference()
+ spr.SecurityPolicy = pol.RelativeLink()
+
+ op, err := config.NewComputeClient(userAgent).TargetPools.SetSecurityPolicy(project, region, d.Get("name").(string), spr).Do()
+ if err != nil {
+ return fmt.Errorf("Error updating TargetPool security policy: %s", err)
+ }
+
+ waitErr := ComputeOperationWaitTime(config, op, project, "Updating TargetPool Security Policy", userAgent, d.Timeout(schema.TimeoutUpdate))
+ if waitErr != nil {
+ return waitErr
+ }
+ }
+ {{- end }}
+
+ d.Partial(false)
+
+ return resourceComputeTargetPoolRead(d, meta)
+}
+
+func convertInstancesFromUrls(urls []string) []string {
+ result := make([]string, 0, len(urls))
+ for _, url := range urls {
+ urlArray := strings.Split(url, "/")
+ instance := fmt.Sprintf("%s/%s", urlArray[len(urlArray)-3], urlArray[len(urlArray)-1])
+ result = append(result, instance)
+ }
+ return result
+}
+
+func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error {
+ config := meta.(*transport_tpg.Config)
+ userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+ if err != nil {
+ return err
+ }
+
+ region, err := tpgresource.GetRegion(d, config)
+ if err != nil {
+ return err
+ }
+
+ project, err := tpgresource.GetProject(d, config)
+ if err != nil {
+ return err
+ }
+
+ tpool, err := config.NewComputeClient(userAgent).TargetPools.Get(
+ project, region, d.Get("name").(string)).Do()
+ if err != nil {
+ return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Target Pool %q", d.Get("name").(string)))
+ }
+
+ if err := d.Set("self_link", tpool.SelfLink); err != nil {
+ return fmt.Errorf("Error setting self_link: %s", err)
+ }
+ if err := 
d.Set("backup_pool", tpool.BackupPool); err != nil { + return fmt.Errorf("Error setting backup_pool: %s", err) + } + if err := d.Set("description", tpool.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("failover_ratio", tpool.FailoverRatio); err != nil { + return fmt.Errorf("Error setting failover_ratio: %s", err) + } + if err := d.Set("health_checks", tpool.HealthChecks); err != nil { + return fmt.Errorf("Error setting health_checks: %s", err) + } + if tpool.Instances != nil { + if err := d.Set("instances", convertInstancesFromUrls(tpool.Instances)); err != nil { + return fmt.Errorf("Error setting instances: %s", err) + } + } else { + if err := d.Set("instances", nil); err != nil { + return fmt.Errorf("Error setting instances: %s", err) + } + } + if err := d.Set("name", tpool.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("region", tpgresource.GetResourceNameFromSelfLink(tpool.Region)); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("session_affinity", tpool.SessionAffinity); err != nil { + return fmt.Errorf("Error setting session_affinity: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + {{- if ne $.TargetVersionName "ga" }} + if err := d.Set("security_policy", tpool.SecurityPolicy); err != nil { + return fmt.Errorf("Error setting security_policy: %s", err) + } + {{- end }} + return nil +} + +func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + // Delete the TargetPool + op, err := 
config.NewComputeClient(userAgent).TargetPools.Delete( + project, region, d.Get("name").(string)).Do() + if err != nil { + return fmt.Errorf("Error deleting TargetPool: %s", err) + } + + err = ComputeOperationWaitTime(config, op, project, "Deleting Target Pool", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + return nil +} + +func resourceTargetPoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/regions/(?P[^/]+)/targetPools/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/targetPools/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_pool_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_pool_test.go.tmpl new file mode 100644 index 000000000000..d1eaf50da0d4 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_pool_test.go.tmpl @@ -0,0 +1,344 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccComputeTargetPool_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetPoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeTargetPool_basic(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetPoolExists( + t, "google_compute_target_pool.foo"), + testAccCheckComputeTargetPoolHealthCheck("google_compute_target_pool.foo", "google_compute_http_health_check.foobar"), + testAccCheckComputeTargetPoolExists( + t, "google_compute_target_pool.bar"), + testAccCheckComputeTargetPoolHealthCheck("google_compute_target_pool.bar", "google_compute_http_health_check.foobar"), + ), + }, + { + ResourceName: "google_compute_target_pool.foo", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeTargetPool_update(t *testing.T) { + t.Parallel() + + tpname := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + name1 := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + name2 := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetPoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Create target pool with no instances attached + Config: testAccComputeTargetPool_update(tpname, "", name1, name2), + }, + { + ResourceName: "google_compute_target_pool.foo", + ImportState: true, + ImportStateVerify: true, + }, + { + // Add the two instances to the pool + Config: testAccComputeTargetPool_update(tpname, + `google_compute_instance.foo.self_link, google_compute_instance.bar.self_link`, + name1, name2), + }, + { + ResourceName: "google_compute_target_pool.foo", + ImportState: true, + ImportStateVerify: true, + }, + { + // Reversing the order of instances or changing import format shouldn't matter + Config: testAccComputeTargetPool_update(tpname, + 
fmt.Sprintf(`google_compute_instance.bar.self_link, "us-central1-a/%s"`, name1), + name1, name2), + PlanOnly: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeTargetPool_withSecurityPolicy(t *testing.T) { + tpname := fmt.Sprintf("tf-tp-test-%s", acctest.RandString(t, 10)) + ddosPolicy := fmt.Sprintf("tf-tp-ddos-pol-test-%s", acctest.RandString(t, 10)) + edgeSecService := fmt.Sprintf("tf-tp-edge-sec-test-%s", acctest.RandString(t, 10)) + pol1 := fmt.Sprintf("tf-tp-pol1-test-%s", acctest.RandString(t, 10)) + pol2 := fmt.Sprintf("tf-tp-pol2-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetPoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Create target pool with no security policy attached + Config: testAccComputeTargetPool_withSecurityPolicy(ddosPolicy, edgeSecService, pol1, pol2, tpname, "\"\""), + }, + { + ResourceName: "google_compute_target_pool.foo", + ImportState: true, + ImportStateVerify: true, + }, + { + // Add the first security policy to the pool + Config: testAccComputeTargetPool_withSecurityPolicy(ddosPolicy, edgeSecService, pol1, pol2, tpname, + `google_compute_region_security_policy.policytargetpool1.self_link`), + }, + { + ResourceName: "google_compute_target_pool.foo", + ImportState: true, + ImportStateVerify: true, + }, + { + // Change to the second security policy in the pool + Config: testAccComputeTargetPool_withSecurityPolicy(ddosPolicy, edgeSecService, pol1, pol2, tpname, + `google_compute_region_security_policy.policytargetpool2.self_link`), + }, + { + ResourceName: "google_compute_target_pool.foo", + ImportState: true, + ImportStateVerify: true, + }, + { + // Clean the security policy from the pool + Config: testAccComputeTargetPool_withSecurityPolicy(ddosPolicy, edgeSecService, pol1, pol2, tpname, "\"\""), + }, 
+ { + ResourceName: "google_compute_target_pool.foo", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ end }} +func testAccCheckComputeTargetPoolDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_target_pool" { + continue + } + + _, err := config.NewComputeClient(config.UserAgent).TargetPools.Get( + config.Project, config.Region, rs.Primary.Attributes["name"]).Do() + if err == nil { + return fmt.Errorf("TargetPool still exists") + } + } + + return nil + } +} + +func testAccCheckComputeTargetPoolExists(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewComputeClient(config.UserAgent).TargetPools.Get( + config.Project, config.Region, rs.Primary.Attributes["name"]).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.Attributes["name"] { + return fmt.Errorf("TargetPool not found") + } + + return nil + } +} + +func testAccCheckComputeTargetPoolHealthCheck(targetPool, healthCheck string) resource.TestCheckFunc { + return func(s *terraform.State) error { + targetPoolRes, ok := s.RootModule().Resources[targetPool] + if !ok { + return fmt.Errorf("Not found: %s", targetPool) + } + + healthCheckRes, ok := s.RootModule().Resources[healthCheck] + if !ok { + return fmt.Errorf("Not found: %s", healthCheck) + } + + hcLink := healthCheckRes.Primary.Attributes["self_link"] + if tpgresource.ConvertSelfLinkToV1(targetPoolRes.Primary.Attributes["health_checks.0"]) != hcLink { + return fmt.Errorf("Health check not set up. 
Expected %q to equal %q", tpgresource.ConvertSelfLinkToV1(targetPoolRes.Primary.Attributes["health_checks.0"]), hcLink) + } + + return nil + } +} + +func testAccComputeTargetPool_basic(suffix string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_http_health_check" "foobar" { + name = "healthcheck-test-%s" + host = "example.com" +} + +resource "google_compute_instance" "foobar" { + name = "tf-test-%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_target_pool" "foo" { + description = "Resource created for Terraform acceptance testing" + instances = [google_compute_instance.foobar.self_link, "us-central1-b/bar"] + name = "tpool-test-%s" + session_affinity = "CLIENT_IP_PROTO" + health_checks = [ + google_compute_http_health_check.foobar.name, + ] +} + +resource "google_compute_target_pool" "bar" { + description = "Resource created for Terraform acceptance testing" + name = "tpool-test-2-%s" + health_checks = [ + google_compute_http_health_check.foobar.self_link, + ] +} +`, suffix, suffix, suffix, suffix) +} + +func testAccComputeTargetPool_update(tpname, instances, name1, name2 string) string { + return fmt.Sprintf(` +resource "google_compute_target_pool" "foo" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + instances = [%s] +} + +resource "google_compute_instance" "foo" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = "debian-cloud/debian-11" + } + } + + network_interface { + network = "default" + } +} + +resource "google_compute_instance" "bar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = 
"debian-cloud/debian-11" + } + } + + network_interface { + network = "default" + } +} +`, tpname, instances, name1, name2) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeTargetPool_withSecurityPolicy(ddosPolicy, edgeSecService, pol1, pol2, tpname, polToSet string) string { + return fmt.Sprintf(` +resource "google_compute_region_security_policy" "policyddosprotection" { + region = "us-south1" + name = "%s" + description = "region security policy for load balancers target pool" + type = "CLOUD_ARMOR_NETWORK" + ddos_protection_config { + ddos_protection = "ADVANCED_PREVIEW" + } +} + +resource "google_compute_network_edge_security_service" "edge_sec_service" { + name = "%s" + region = "us-south1" + description = "edge security service with security policy" + security_policy = google_compute_region_security_policy.policyddosprotection.self_link +} + +resource "google_compute_region_security_policy" "policytargetpool1" { + region = "us-south1" + name = "%s" + description = "region security policy one" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_region_security_policy" "policytargetpool2" { + region = "us-south1" + name = "%s" + description = "region security policy two" + type = "CLOUD_ARMOR_NETWORK" + depends_on = [google_compute_network_edge_security_service.edge_sec_service] +} + +resource "google_compute_target_pool" "foo" { + region = "us-south1" + description = "Setting SecurityPolicy to targetPool" + name = "%s" + security_policy = %s +} +`, ddosPolicy, edgeSecService, pol1, pol2, tpname, polToSet) +} + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_ssl_proxy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_ssl_proxy_test.go.tmpl new file mode 100644 index 000000000000..246af61a1fd0 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_ssl_proxy_test.go.tmpl @@ -0,0 +1,345 @@ +package compute_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func TestAccComputeTargetSslProxy_update(t *testing.T) { + target := fmt.Sprintf("tf-test-tssl-%s", acctest.RandString(t, 10)) + sslPolicy := fmt.Sprintf("tf-test-tssl-%s", acctest.RandString(t, 10)) + cert1 := fmt.Sprintf("tf-test-tssl-%s", acctest.RandString(t, 10)) + cert2 := fmt.Sprintf("tf-test-tssl-%s", acctest.RandString(t, 10)) + backend1 := fmt.Sprintf("tf-test-tssl-%s", acctest.RandString(t, 10)) + backend2 := fmt.Sprintf("tf-test-tssl-%s", acctest.RandString(t, 10)) + hc := fmt.Sprintf("tf-test-tssl-%s", acctest.RandString(t, 10)) + + resourceSuffix := acctest.RandString(t, 10) + var proxy compute.TargetSslProxy + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeTargetSslProxyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeTargetSslProxy_basic1(target, sslPolicy, cert1, backend1, hc), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetSslProxyExists( + t, "google_compute_target_ssl_proxy.foobar", &proxy), + testAccCheckComputeTargetSslProxyHeader(t, "NONE", &proxy), + testAccCheckComputeTargetSslProxyHasSslCertificate(t, cert1, &proxy), + ), + }, + { + Config: testAccComputeTargetSslProxy_basic2(target, sslPolicy, cert1, cert2, backend1, backend2, hc), + Check: resource.ComposeTestCheckFunc( 
+ testAccCheckComputeTargetSslProxyExists( + t, "google_compute_target_ssl_proxy.foobar", &proxy), + testAccCheckComputeTargetSslProxyHeader(t, "PROXY_V1", &proxy), + testAccCheckComputeTargetSslProxyHasSslCertificate(t, cert2, &proxy), + ), + }, + { + Config: testAccComputeTargetSslProxy_certificateMap1(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetSslProxyExists( + t, "google_compute_target_ssl_proxy.with_certificate_map", &proxy), + testAccCheckComputeTargetSslProxyHasCertificateMap(t, "tf-test-certmap-1-"+resourceSuffix, &proxy), + ), + }, + { + Config: testAccComputeTargetSslProxy_certificateMap2(resourceSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeTargetSslProxyExists( + t, "google_compute_target_ssl_proxy.with_certificate_map", &proxy), + testAccCheckComputeTargetSslProxyHasCertificateMap(t, "tf-test-certmap-2-"+resourceSuffix, &proxy), + ), + }, + }, + }) +} + +func testAccCheckComputeTargetSslProxyExists(t *testing.T, n string, proxy *compute.TargetSslProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + name := rs.Primary.Attributes["name"] + + found, err := config.NewComputeClient(config.UserAgent).TargetSslProxies.Get( + config.Project, name).Do() + if err != nil { + return err + } + + if found.Name != name { + return fmt.Errorf("TargetSslProxy not found") + } + + *proxy = *found + + return nil + } +} + +func testAccCheckComputeTargetSslProxyHeader(t *testing.T, proxyHeader string, proxy *compute.TargetSslProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + if proxy.ProxyHeader != proxyHeader { + return fmt.Errorf("Wrong proxy header. 
Expected '%s', got '%s'", proxyHeader, proxy.ProxyHeader) + } + return nil + } +} + +func testAccCheckComputeTargetSslProxyHasSslCertificate(t *testing.T, cert string, proxy *compute.TargetSslProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + certURL := fmt.Sprintf(canonicalSslCertificateTemplate, config.Project, cert) + + for _, sslCertificate := range proxy.SslCertificates { + if tpgresource.ConvertSelfLinkToV1(sslCertificate) == certURL { + return nil + } + } + + return fmt.Errorf("Ssl certificate not found: expected'%s'", certURL) + } +} + +func testAccCheckComputeTargetSslProxyHasCertificateMap(t *testing.T, certificateMap string, proxy *compute.TargetSslProxy) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + wantCertMapURL := fmt.Sprintf(canonicalCertificateMapTemplate, config.Project, certificateMap) + gotCertMapURL := tpgresource.ConvertSelfLinkToV1(proxy.CertificateMap) + if wantCertMapURL != gotCertMapURL { + return fmt.Errorf("certificate map not found: got %q, want %q", gotCertMapURL, wantCertMapURL) + } + return nil + } +} + +func testAccComputeTargetSslProxy_basic1(target, sslPolicy, sslCert, backend, hc string) string { + return fmt.Sprintf(` +resource "google_compute_target_ssl_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + backend_service = google_compute_backend_service.foo.self_link + ssl_certificates = [google_compute_ssl_certificate.foo.self_link] + proxy_header = "NONE" + ssl_policy = google_compute_ssl_policy.foo.self_link +} + +resource "google_compute_ssl_policy" "foo" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + min_tls_version = "TLS_1_2" + profile = "MODERN" +} + +resource "google_compute_ssl_certificate" "foo" { + name = "%s" + private_key = file("test-fixtures/test.key") + certificate = 
file("test-fixtures/test.crt") +} + +resource "google_compute_backend_service" "foo" { + name = "%s" + protocol = "SSL" + health_checks = [google_compute_health_check.zero.self_link] +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "443" + } +} +`, target, sslPolicy, sslCert, backend, hc) +} + +func testAccComputeTargetSslProxy_basic2(target, sslPolicy, sslCert1, sslCert2, backend1, backend2, hc string) string { + return fmt.Sprintf(` +resource "google_compute_target_ssl_proxy" "foobar" { + description = "Resource created for Terraform acceptance testing" + name = "%s" + backend_service = google_compute_backend_service.bar.self_link + ssl_certificates = [google_compute_ssl_certificate.bar.name] + proxy_header = "PROXY_V1" +} + +resource "google_compute_ssl_policy" "foo" { + name = "%s" + description = "Resource created for Terraform acceptance testing" + min_tls_version = "TLS_1_2" + profile = "MODERN" +} + +resource "google_compute_ssl_certificate" "foo" { + name = "%s" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_ssl_certificate" "bar" { + name = "%s" + private_key = file("test-fixtures/test.key") + certificate = file("test-fixtures/test.crt") +} + +resource "google_compute_backend_service" "foo" { + name = "%s" + protocol = "SSL" + health_checks = [google_compute_health_check.zero.self_link] +} + +resource "google_compute_backend_service" "bar" { + name = "%s" + protocol = "SSL" + health_checks = [google_compute_health_check.zero.self_link] +} + +resource "google_compute_health_check" "zero" { + name = "%s" + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "443" + } +} +`, target, sslPolicy, sslCert1, sslCert2, backend1, backend2, hc) +} + +func testAccComputeTargetSslProxy_certificateMap1(id string) string { + return fmt.Sprintf(` +resource 
"google_compute_target_ssl_proxy" "with_certificate_map" { + description = "Resource created for Terraform acceptance testing" + name = "tf-test-ssl-proxy-%s" + backend_service = google_compute_backend_service.foo.self_link + certificate_map = "//certificatemanager.googleapis.com/${google_certificate_manager_certificate_map.map1.id}" +} + +resource "google_compute_backend_service" "foo" { + name = "tf-test-backend-%s" + protocol = "SSL" + health_checks = [google_compute_health_check.zero.self_link] +} + +resource "google_compute_health_check" "zero" { + name = "tf-test-check-%s" + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "443" + } +} + +resource "google_certificate_manager_certificate_map" "map1" { + name = "tf-test-certmap-1-%s" +} +resource "google_certificate_manager_certificate_map_entry" "map_entry" { + name = "tf-test-certmapentry-%s" + map = google_certificate_manager_certificate_map.map1.name + certificates = [google_certificate_manager_certificate.certificate.id] + matcher = "PRIMARY" +} + +resource "google_certificate_manager_certificate" "certificate" { + name = "tf-test-cert-%s" + scope = "DEFAULT" + managed { + domains = [ + google_certificate_manager_dns_authorization.instance.domain, + ] + dns_authorizations = [ + google_certificate_manager_dns_authorization.instance.id, + ] + } +} + +resource "google_certificate_manager_dns_authorization" "instance" { + name = "tf-test-dnsauthz-%s" + domain = "mysite.com" +} +`, id, id, id, id, id, id, id) +} + +func testAccComputeTargetSslProxy_certificateMap2(id string) string { + return fmt.Sprintf(` +resource "google_compute_target_ssl_proxy" "with_certificate_map" { + description = "Resource created for Terraform acceptance testing" + name = "tf-test-ssl-proxy-%s" + backend_service = google_compute_backend_service.foo.self_link + certificate_map = "//certificatemanager.googleapis.com/${google_certificate_manager_certificate_map.map2.id}" +} + +resource 
"google_compute_backend_service" "foo" { + name = "tf-test-backend-%s" + protocol = "SSL" + health_checks = [google_compute_health_check.zero.self_link] +} + +resource "google_compute_health_check" "zero" { + name = "tf-test-check-%s" + check_interval_sec = 1 + timeout_sec = 1 + tcp_health_check { + port = "443" + } +} + +resource "google_certificate_manager_certificate_map" "map1" { + name = "tf-test-certmap-1-%s" +} + +resource "google_certificate_manager_certificate_map" "map2" { + name = "tf-test-certmap-2-%s" +} + +resource "google_certificate_manager_certificate_map_entry" "map_entry" { + name = "tf-test-certmapentry-%s" + map = google_certificate_manager_certificate_map.map1.name + certificates = [google_certificate_manager_certificate.certificate.id] + matcher = "PRIMARY" +} + +resource "google_certificate_manager_certificate" "certificate" { + name = "tf-test-cert-%s" + scope = "DEFAULT" + managed { + domains = [ + google_certificate_manager_dns_authorization.instance.domain, + ] + dns_authorizations = [ + google_certificate_manager_dns_authorization.instance.id, + ] + } +} + +resource "google_certificate_manager_dns_authorization" "instance" { + name = "tf-test-dnsauthz-%s" + domain = "mysite.com" +} +`, id, id, id, id, id, id, id, id) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_url_map_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_url_map_test.go new file mode 100644 index 000000000000..1e54d1a01e05 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_url_map_test.go @@ -0,0 +1,1687 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccComputeUrlMap_update_path_matcher(t *testing.T) { + t.Parallel() + + bsName := fmt.Sprintf("urlmap-test-%s", 
acctest.RandString(t, 10)) + hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + + { + Config: testAccComputeUrlMap_basic2(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_advanced(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_advanced1(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + + { + Config: testAccComputeUrlMap_advanced2(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_defaultRouteActionPathUrlRewrite(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultRouteActionPathUrlRewrite(acctest.RandString(t, 10)), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + { + Config: testAccComputeUrlMap_defaultRouteActionPathUrlRewrite_update(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_defaultRouteActionUrlRewrite(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultRouteActionUrlRewrite(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + + { + Config: testAccComputeUrlMap_defaultRouteActionUrlRewrite_update(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_noPathRulesWithUpdate(t *testing.T) { + t.Parallel() + + bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_noPathRules(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + { + Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeUrlMapExists(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + name := rs.Primary.Attributes["name"] + + found, err := config.NewComputeClient(config.UserAgent).UrlMaps.Get( + config.Project, name).Do() + if err != nil { + return err + } + + if found.Name != name { + return fmt.Errorf("Url map not found") + } + return nil + } +} + +func TestAccComputeUrlMap_defaultRouteActionTrafficDirectorPathUpdate(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultRouteActionTrafficDirectorPath(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_defaultRouteActionTrafficDirectorPathUpdate(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_defaultRouteActionTrafficDirectorUpdate(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", 
randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultRouteActionTrafficDirector(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_defaultRouteActionTrafficDirectorUpdate(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_trafficDirectorUpdate(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_trafficDirector(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_trafficDirectorUpdate(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_trafficDirectorPathUpdate(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) 
}, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_trafficDirectorPath(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_trafficDirectorPathUpdate(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_trafficDirectorRemoveRouteRule(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_trafficDirector(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_trafficDirectorRemoveRouteRule(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_defaultUrlRedirect(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultUrlRedirectConfig(randomSuffix), + }, + { + ResourceName: 
"google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccComputeUrlMap_basic1(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "boop" + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.foobar.self_link + } + } + + test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.foobar.self_link + } +} +`, bsName, hcName, umName) +} + +func testAccComputeUrlMap_basic2(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blip" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_backend_service.foobar.self_link + } + } + + test { + host = "mysite.com" + path = "/test" + service = 
google_compute_backend_service.foobar.self_link + } +} +`, bsName, hcName, umName) +} + +func testAccComputeUrlMap_advanced1(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blop" + } + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blop" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_backend_service.foobar.self_link + } + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blip" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_backend_service.foobar.self_link + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_advanced2(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blep" + } + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + host_rule { + hosts = ["myleastfavoritesite.com"] + 
path_matcher = "blub" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blep" + + path_rule { + paths = ["/home"] + service = google_compute_backend_service.foobar.self_link + } + + path_rule { + paths = ["/login"] + service = google_compute_backend_service.foobar.self_link + } + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blub" + + path_rule { + paths = ["/*", "/blub"] + service = google_compute_backend_service.foobar.self_link + } + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blip" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_backend_service.foobar.self_link + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_defaultRouteActionPathUrlRewrite(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blep" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blep" + + path_rule { + paths = ["/home"] + service = google_compute_backend_service.foobar.self_link + } + + path_rule { + paths = ["/login"] + service = google_compute_backend_service.foobar.self_link + } + + default_route_action { + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_defaultRouteActionPathUrlRewrite_update(suffix string) string { + return 
fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blep" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blep" + + path_rule { + paths = ["/home"] + service = google_compute_backend_service.foobar.self_link + } + + path_rule { + paths = ["/login"] + service = google_compute_backend_service.foobar.self_link + } + + default_route_action { + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_defaultRouteActionUrlRewrite(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + default_route_action { + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_defaultRouteActionUrlRewrite_update(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + 
+resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + default_route_action { + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_noPathRules(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "boop" + } + + test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.foobar.self_link + } +} +`, bsName, hcName, umName) +} + +func testAccComputeUrlMap_trafficDirector(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = "${google_compute_backend_service.home.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_service = "${google_compute_backend_service.home.self_link}" + + route_rules { + priority = 1 + header_action { + request_headers_to_remove = ["RemoveMe2"] + request_headers_to_add { + header_name = "AddSomethingElse" + header_value = "MyOtherValue" + 
replace = true + } + response_headers_to_remove = ["RemoveMe3"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + match_rules { + full_path_match = "a full path" + header_matches { + header_name = "someheader" + exact_match = "match this exactly" + invert_match = true + } + ignore_case = true + metadata_filters { + filter_match_criteria = "MATCH_ANY" + filter_labels { + name = "PLANET" + value = "MARS" + } + } + query_parameter_matches { + name = "a query parameter" + present_match = true + } + } + url_redirect { + host_redirect = "A host" + https_redirect = false + path_redirect = "some/path" + redirect_response_code = "TEMPORARY_REDIRECT" + strip_query = true + } + } + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} + +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_trafficDirectorUpdate(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = "${google_compute_backend_service.home2.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths2" + } + + path_matcher { + name = "allpaths2" + default_service = "${google_compute_backend_service.home2.self_link}" + + route_rules { + 
priority = 2 + header_action { + request_headers_to_remove = ["RemoveMe2", "AndMe"] + request_headers_to_add { + header_name = "AddSomethingElseUpdated" + header_value = "MyOtherValueUpdated" + replace = false + } + response_headers_to_remove = ["RemoveMe3", "AndMe4"] + } + match_rules { + full_path_match = "a full path to match" + header_matches { + header_name = "someheaderfoo" + exact_match = "match this exactly again" + invert_match = false + } + ignore_case = false + metadata_filters { + filter_match_criteria = "MATCH_ALL" + filter_labels { + name = "PLANET" + value = "EARTH" + } + } + } + url_redirect { + host_redirect = "A host again" + https_redirect = true + path_redirect = "some/path/twice" + redirect_response_code = "TEMPORARY_REDIRECT" + strip_query = false + } + } + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_trafficDirectorRemoveRouteRule(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = "${google_compute_backend_service.home2.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths2" + } + + path_matcher { + name = "allpaths2" + default_service = 
"${google_compute_backend_service.home2.self_link}" + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_trafficDirectorPath(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = "${google_compute_backend_service.home.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_service = "${google_compute_backend_service.home.self_link}" + + path_rule { + paths = ["/home"] + route_action { + cors_policy { + allow_credentials = true + allow_headers = ["Allowed content"] + allow_methods = ["GET"] + allow_origin_regexes = ["abc.*"] + allow_origins = ["Allowed origin"] + expose_headers = ["Exposed header"] + max_age = 30 + disabled = true + } + fault_injection_policy { + abort { + http_status = 234 + percentage = 5.6 + } + delay { + fixed_delay { + seconds = 0 + nanos = 50000 + } + percentage = 7.8 + } + } + request_mirror_policy { + backend_service = "${google_compute_backend_service.home.self_link}" + } + retry_policy { + num_retries = 4 + per_try_timeout { + seconds = 30 + } + retry_conditions = ["5xx", "deadline-exceeded"] + } + 
timeout { + seconds = 20 + nanos = 750000000 + } + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + weighted_backend_services { + backend_service = "${google_compute_backend_service.home.self_link}" + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMe"] + request_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = true + } + response_headers_to_remove = ["RemoveMe"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + } + } + } + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} + +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_trafficDirectorPathUpdate(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = "${google_compute_backend_service.home2.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths2" + } + + path_matcher { + name = "allpaths2" + default_service = "${google_compute_backend_service.home.self_link}" + + path_rule { + paths = ["/homeupdated"] + route_action { + cors_policy { + allow_credentials = false + allow_headers = ["Allowed content updated"] + allow_methods = ["PUT"] 
+ allow_origin_regexes = ["abcdef.*"] + allow_origins = ["Allowed origin updated"] + expose_headers = ["Exposed header updated"] + max_age = 31 + disabled = false + } + fault_injection_policy { + abort { + http_status = 235 + percentage = 6.7 + } + delay { + fixed_delay { + seconds = 1 + nanos = 40000 + } + percentage = 8.9 + } + } + request_mirror_policy { + backend_service = "${google_compute_backend_service.home.self_link}" + } + retry_policy { + num_retries = 5 + per_try_timeout { + seconds = 31 + } + retry_conditions = ["5xx"] + } + timeout { + seconds = 21 + nanos = 760000000 + } + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + weighted_backend_services { + backend_service = "${google_compute_backend_service.home.self_link}" + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMeUpdated"] + request_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = false + } + response_headers_to_remove = ["RemoveMeUpdated"] + response_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = true + } + } + } + } + } + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + +func 
testAccComputeUrlMap_defaultRouteActionTrafficDirectorPath(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = google_compute_backend_service.home.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + + default_route_action { + cors_policy { + allow_credentials = true + allow_headers = ["Allowed content"] + allow_methods = ["GET"] + allow_origin_regexes = ["abc.*"] + allow_origins = ["Allowed origin"] + expose_headers = ["Exposed header"] + max_age = 30 + disabled = true + } + fault_injection_policy { + abort { + http_status = 234 + percentage = 5.6 + } + delay { + fixed_delay { + seconds = 0 + nanos = 50000 + } + percentage = 7.8 + } + } + request_mirror_policy { + backend_service = google_compute_backend_service.home.self_link + } + retry_policy { + num_retries = 4 + per_try_timeout { + seconds = 30 + } + retry_conditions = ["5xx", "deadline-exceeded"] + } + timeout { + seconds = 20 + nanos = 750000000 + } + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + weighted_backend_services { + backend_service = google_compute_backend_service.home.self_link + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMe"] + request_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = true + } + response_headers_to_remove = ["RemoveMe"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + } + } + } + + test { + service = google_compute_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + 
+resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} + +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_defaultRouteActionTrafficDirectorPathUpdate(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = google_compute_backend_service.home2.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths2" + } + + path_matcher { + name = "allpaths2" + + default_route_action { + cors_policy { + allow_credentials = false + allow_headers = ["Allowed content updated"] + allow_methods = ["PUT"] + allow_origin_regexes = ["abcdef.*"] + allow_origins = ["Allowed origin updated"] + expose_headers = ["Exposed header updated"] + max_age = 31 + disabled = false + } + fault_injection_policy { + abort { + http_status = 235 + percentage = 6.7 + } + delay { + fixed_delay { + seconds = 1 + nanos = 40000 + } + percentage = 8.9 + } + } + request_mirror_policy { + backend_service = google_compute_backend_service.home.self_link + } + retry_policy { + num_retries = 5 + per_try_timeout { + seconds = 31 + } + retry_conditions = ["5xx"] + } + timeout { + seconds = 21 + nanos = 760000000 + } + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + weighted_backend_services { + backend_service = google_compute_backend_service.home.self_link + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMeUpdated"] + request_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = false + } + response_headers_to_remove = ["RemoveMeUpdated"] + 
response_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = true + } + } + } + } + } + + test { + service = google_compute_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + + +func testAccComputeUrlMap_defaultRouteActionTrafficDirector(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + + default_route_action { + cors_policy { + allow_credentials = true + allow_headers = ["Allowed content"] + allow_methods = ["GET"] + allow_origin_regexes = ["abc.*"] + allow_origins = ["Allowed origin"] + expose_headers = ["Exposed header"] + max_age = 30 + disabled = true + } + fault_injection_policy { + abort { + http_status = 234 + percentage = 5.6 + } + delay { + fixed_delay { + seconds = 0 + nanos = 50000 + } + percentage = 7.8 + } + } + request_mirror_policy { + backend_service = google_compute_backend_service.home.self_link + } + retry_policy { + num_retries = 4 + per_try_timeout { + seconds = 30 + } + retry_conditions = ["5xx", "deadline-exceeded"] + } + timeout { + seconds = 20 + nanos = 750000000 + } + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + weighted_backend_services { + backend_service = 
google_compute_backend_service.home.self_link + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMe"] + request_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = true + } + response_headers_to_remove = ["RemoveMe"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + } + } + + test { + service = google_compute_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} + +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_defaultRouteActionTrafficDirectorUpdate(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + + default_route_action { + cors_policy { + allow_credentials = false + allow_headers = ["Allowed content updated"] + allow_methods = ["PUT"] + allow_origin_regexes = ["abcdef.*"] + allow_origins = ["Allowed origin updated"] + expose_headers = ["Exposed header updated"] + max_age = 31 + disabled = false + } + fault_injection_policy { + abort { + http_status = 235 + percentage = 6.7 + } + delay { + fixed_delay { + seconds = 1 + nanos = 40000 + } + percentage = 8.9 + } + } + request_mirror_policy { + backend_service = google_compute_backend_service.home2.self_link + } + retry_policy { + num_retries = 5 + per_try_timeout { + 
seconds = 31 + } + retry_conditions = ["5xx"] + } + timeout { + seconds = 21 + nanos = 760000000 + } + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + weighted_backend_services { + backend_service = google_compute_backend_service.home2.self_link + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMeUpdated"] + request_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = false + } + response_headers_to_remove = ["RemoveMeUpdated"] + response_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = true + } + } + } + } + + test { + service = google_compute_backend_service.home2.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_defaultUrlRedirectConfig(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_url_redirect { + https_redirect = true + strip_query = false + } +} +`, randomSuffix) +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_usage_export_bucket.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_usage_export_bucket.go.tmpl new file mode 100644 index 000000000000..c14d94a7dd6c --- /dev/null +++ 
b/mmv1/third_party/terraform/services/compute/go/resource_usage_export_bucket.go.tmpl @@ -0,0 +1,165 @@ +package compute + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +func ResourceProjectUsageBucket() *schema.Resource { + return &schema.Resource{ + Create: resourceProjectUsageBucketCreate, + Read: resourceProjectUsageBucketRead, + Delete: resourceProjectUsageBucketDelete, + Importer: &schema.ResourceImporter{ + State: resourceProjectUsageBucketImportState, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(4 * time.Minute), + Delete: schema.DefaultTimeout(4 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "bucket_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The bucket to store reports in.`, + }, + "prefix": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `A prefix for the reports, for instance, the project name.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The project to set the export bucket on. 
If it is not provided, the provider project is used.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceProjectUsageBucketRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + p, err := config.NewComputeClient(userAgent).Projects.Get(project).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Project data for project %s", project)) + } + + if p.UsageExportLocation == nil { + log.Printf("[WARN] Removing usage export location resource %s because it's not enabled server-side.", project) + d.SetId("") + return nil + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("prefix", p.UsageExportLocation.ReportNamePrefix); err != nil { + return fmt.Errorf("Error setting prefix: %s", err) + } + if err := d.Set("bucket_name", p.UsageExportLocation.BucketName); err != nil { + return fmt.Errorf("Error setting bucket_name: %s", err) + } + return nil +} + +func resourceProjectUsageBucketCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + op, err := config.NewComputeClient(userAgent).Projects.SetUsageExportBucket(project, &compute.UsageExportLocation{ + ReportNamePrefix: d.Get("prefix").(string), + BucketName: d.Get("bucket_name").(string), + }).Do() + if err != nil { + return err + } + d.SetId(project) + err = ComputeOperationWaitTime(config, op, project, "Setting usage export bucket.", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + d.SetId("") + return 
err + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + return resourceProjectUsageBucketRead(d, meta) +} + +func resourceProjectUsageBucketDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + op, err := config.NewComputeClient(userAgent).Projects.SetUsageExportBucket(project, nil).Do() + if err != nil { + return err + } + + err = ComputeOperationWaitTime(config, op, project, + "Setting usage export bucket to nil, automatically disabling usage export.", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + d.SetId("") + + return nil +} + +func resourceProjectUsageBucketImportState(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + project := d.Id() + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + return []*schema.ResourceData{d}, nil +} diff --git a/mmv1/third_party/terraform/services/compute/go/security_policy_association_utils.go.tmpl b/mmv1/third_party/terraform/services/compute/go/security_policy_association_utils.go.tmpl new file mode 100644 index 000000000000..0fa4d0c9bd99 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/security_policy_association_utils.go.tmpl @@ -0,0 +1,28 @@ +package compute +{{- if ne $.TargetVersionName "ga" }} + +import ( + "log" + "strings" + + "github.com/hashicorp/errwrap" + "google.golang.org/api/googleapi" +) + +func transformSecurityPolicyAssociationReadError(err error) error { + if gErr, ok := errwrap.GetType(err, &googleapi.Error{}).(*googleapi.Error); ok { + if gErr.Code == 400 && strings.Contains(gErr.Message, "An association with that name does not exist") { + // This error 
occurs when attempting a GET after deleting the security policy association. It leads to + // inconsistent behavior as HandleNotFoundError(...) expects an error code of 404 when a resource does not + // exist. To get the desired behavior from HandleNotFoundError, modify the return code to 404 so that + // HandleNotFoundError(...) will treat this as a NotFound error + gErr.Code = 404 + } + + log.Printf("[DEBUG] Transformed security policy association error") + return gErr + } + + return err +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index e9c1a0b5c3cb..220f8ddf3099 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -1509,7 +1509,7 @@ func resourceComputeInstanceRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("metadata_fingerprint", instance.Metadata.Fingerprint); err != nil { return fmt.Errorf("Error setting metadata_fingerprint: %s", err) } - + <% unless version == 'ga' -%> if instance.PartnerMetadata != nil { partnerMetadata, err := flattenPartnerMetadata(instance.PartnerMetadata) diff --git a/mmv1/third_party/terraform/services/container/go/container_operation.go.tmpl b/mmv1/third_party/terraform/services/container/go/container_operation.go.tmpl new file mode 100644 index 000000000000..8efbfdc4be1c --- /dev/null +++ b/mmv1/third_party/terraform/services/container/go/container_operation.go.tmpl @@ -0,0 +1,130 @@ +package container + +import ( + "context" + "errors" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/container/v1" +{{- else }} + container 
"google.golang.org/api/container/v1beta1" +{{- end }} +) + +type ContainerOperationWaiter struct { + Service *container.Service + Context context.Context + Op *container.Operation + Project string + Location string + UserProjectOverride bool +} + +func (w *ContainerOperationWaiter) State() string { + if w == nil || w.Op == nil { + return "" + } + return w.Op.Status +} + +func (w *ContainerOperationWaiter) Error() error { + if w == nil || w.Op == nil { + return nil + } + + // Error gets called during operation polling to see if there is an error. + // Since container's operation doesn't have an "error" field, we must wait + // until it's done and check the status message + for _, pending := range w.PendingStates() { + if w.Op.Status == pending { + return nil + } + } + + if w.Op.StatusMessage != "" { + return fmt.Errorf(w.Op.StatusMessage) + } + + return nil +} + +func (w *ContainerOperationWaiter) IsRetryable(error) bool { + return false +} + +func (w *ContainerOperationWaiter) SetOp(op interface{}) error { + var ok bool + w.Op, ok = op.(*container.Operation) + if !ok { + return fmt.Errorf("Unable to set operation. 
Bad type!") + } + return nil +} + +func (w *ContainerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil || w.Op == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + name := fmt.Sprintf("projects/%s/locations/%s/operations/%s", + w.Project, w.Location, w.Op.Name) + + var op *container.Operation + select { + case <-w.Context.Done(): + log.Println("[WARN] request has been cancelled early") + return op, errors.New("unable to finish polling, context has been cancelled") + default: + // default must be here to keep the previous case from blocking + } + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (opErr error) { + opGetCall := w.Service.Projects.Locations.Operations.Get(name) + if w.UserProjectOverride { + opGetCall.Header().Add("X-Goog-User-Project", w.Project) + } + op, opErr = opGetCall.Do() + return opErr + }, + Timeout: transport_tpg.DefaultRequestTimeout, + }) + + return op, err +} + +func (w *ContainerOperationWaiter) OpName() string { + if w == nil || w.Op == nil { + return "" + } + return w.Op.Name +} + +func (w *ContainerOperationWaiter) PendingStates() []string { + return []string{"PENDING", "RUNNING"} +} + +func (w *ContainerOperationWaiter) TargetStates() []string { + return []string{"DONE"} +} + +func ContainerOperationWait(config *transport_tpg.Config, op *container.Operation, project, location, activity, userAgent string, timeout time.Duration) error { + w := &ContainerOperationWaiter{ + Service: config.NewContainerClient(userAgent), + Context: config.Context, + Op: op, + Project: project, + Location: location, + UserProjectOverride: config.UserProjectOverride, + } + + if err := w.SetOp(op); err != nil { + return err + } + + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl new file mode 100644 index 
000000000000..a7f233b697fd --- /dev/null +++ b/mmv1/third_party/terraform/services/container/go/node_config.go.tmpl @@ -0,0 +1,1837 @@ +package container + +import ( +{{- if ne $.TargetVersionName "ga" }} + "strings" +{{- end }} + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/container/v1" +{{- else }} + container "google.golang.org/api/container/v1beta1" +{{- end }} +) + +// Matches gke-default scope from https://cloud.google.com/sdk/gcloud/reference/container/clusters/create +var defaultOauthScopes = []string{ + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/trace.append", +} + +func schemaContainerdConfig() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Description: "Parameters for containerd configuration.", + MaxItems: 1, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "private_registry_access_config": { + Type: schema.TypeList, + Optional: true, + Description: "Parameters for private container registries configuration.", + MaxItems: 1, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Whether or not private registries are configured.", + }, + "certificate_authority_domain_config": { + Type: schema.TypeList, + Optional: true, + Description: "Parameters for configuring CA certificate and domains.", + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "fqdns": { + Type: schema.TypeList, + Required: true, + Description: "List of 
fully-qualified-domain-names. IPv4s and port specification are supported.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "gcp_secret_manager_certificate_config": { + Type: schema.TypeList, + Required: true, + Description: "Parameters for configuring a certificate hosted in GCP SecretManager.", + MaxItems: 1, + Elem: &schema.Resource{Schema: map[string]*schema.Schema{ + "secret_uri": { + Type: schema.TypeString, + Required: true, + Description: "URI for the secret that hosts a certificate. Must be in the format 'projects/PROJECT_NUM/secrets/SECRET_NAME/versions/VERSION_OR_LATEST'.", + }, + }}, + }, + }}, + }, + }}, + }, + }}, + } +} + +func schemaLoggingVariant() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Type of logging agent that is used as the default value for node pools in the cluster. Valid values include DEFAULT and MAX_THROUGHPUT.`, + ValidateFunc: validation.StringInSlice([]string{"DEFAULT", "MAX_THROUGHPUT"}, false), + } +} + +func schemaGcfsConfig(forceNew bool) *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `GCFS configuration for this node.`, + ForceNew: forceNew, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: forceNew, + Description: `Whether or not GCFS is enabled`, + }, + }, + }, + } +} + +func schemaNodeConfig() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The configuration of the nodepool`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "containerd_config": schemaContainerdConfig(), + "disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(10), + Description: `Size of the disk attached to each node, specified in GB. 
The smallest allowed disk size is 10GB.`, + }, + + "disk_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Type of the disk attached to each node. Such as pd-standard, pd-balanced or pd-ssd`, + }, + + "guest_accelerator": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + // Legacy config mode allows removing GPU's from an existing resource + // See https://www.terraform.io/docs/configuration/attr-as-blocks.html + ConfigMode: schema.SchemaConfigModeAttr, + Description: `List of the type and count of accelerator cards attached to the instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The number of the accelerator cards exposed to an instance.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The accelerator type resource name.`, + }, + "gpu_driver_installation_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + ConfigMode: schema.SchemaConfigModeAttr, + Description: `Configuration for auto installation of GPU driver.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gpu_driver_version": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Mode for how the GPU driver is installed.`, + ValidateFunc: validation.StringInSlice([]string{"GPU_DRIVER_VERSION_UNSPECIFIED", "INSTALLATION_DISABLED", "DEFAULT", "LATEST"}, false), + }, + }, + }, + }, + "gpu_partition_size": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Size of partitions to create on the GPU. 
Valid values are described in the NVIDIA mig user guide (https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning)`, + }, + "gpu_sharing_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ForceNew: true, + ConfigMode: schema.SchemaConfigModeAttr, + Description: `Configuration for GPU sharing.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gpu_sharing_strategy": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The type of GPU sharing strategy to enable on the GPU node. Possible values are described in the API package (https://pkg.go.dev/google.golang.org/api/container/v1#GPUSharingConfig)`, + }, + "max_shared_clients_per_gpu": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The maximum number of containers that can share a GPU.`, + }, + }, + }, + }, + }, + }, + }, + + "image_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: tpgresource.CaseDiffSuppress, + Description: `The image type to use for this node. Note that for a given image type, the latest version of it will be used.`, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + // Computed=true because GKE Sandbox will automatically add labels to nodes that can/cannot run sandboxed pods. + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The map of Kubernetes labels (key/value pairs) to be applied to each node. 
These will added in addition to any default label(s) that Kubernetes may apply to the node.`, + {{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} + DiffSuppressFunc: containerNodePoolLabelsSuppress, + {{- end }} + }, + + "resource_labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The GCE resource labels (a map of key/value pairs) to be applied to the node pool.`, + }, + + "local_ssd_count": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The number of local SSD disks to be attached to the node.`, + }, + + "logging_variant": schemaLoggingVariant(), + + {{ if ne $.TargetVersionName `ga` -}} + "ephemeral_storage_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_ssd_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.`, + }, + }, + }, + }, + {{- end }} + + "ephemeral_storage_local_ssd_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Parameters for the ephemeral storage filesystem. If unspecified, ephemeral storage is backed by the boot disk.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_ssd_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Number of local SSDs to use to back ephemeral storage. Uses NVMe interfaces. 
Each local SSD must be 375 or 3000 GB in size, and all local SSDs must share the same size.`, + }, + }, + }, + }, + + "local_nvme_ssd_block_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Parameters for raw-block local NVMe SSDs.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "local_ssd_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Number of raw-block local NVMe SSD disks to be attached to the node. Each local SSD is 375 GB in size.`, + }, + }, + }, + }, + + "secondary_boot_disks": { + Type: schema.TypeList, + Optional: true, + MaxItems: 127, + Description: `Secondary boot disks for preloading data or container images.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disk_image": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Disk image to create the secondary boot disk from`, + }, + "mode": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Mode for how the secondary boot disk is used.`, + }, + }, + }, + }, + + "gcfs_config": schemaGcfsConfig(true), + + "gvnic": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Enable or disable gvnic in the node pool.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether or not gvnic is enabled`, + }, + }, + }, + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The name of a Google Compute Engine machine type.`, + }, + + "metadata": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The metadata key/value pairs assigned to instances in the cluster.`, + }, + + "min_cpu_platform": 
{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform.`, + }, + + "oauth_scopes": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The set of Google API scopes to be made available on all of the node VMs.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(v interface{}) string { + return tpgresource.CanonicalizeServiceScope(v.(string)) + }, + }, + DiffSuppressFunc: containerClusterAddedScopesSuppress, + Set: tpgresource.StringScopeHashcode, + }, + + "preemptible": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: `Whether the nodes are created as preemptible VM instances.`, + }, + "reservation_affinity": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The reservation affinity configuration for the node pool.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consume_reservation_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Corresponds to the type of reservation consumption.`, + ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "NO_RESERVATION", "ANY_RESERVATION", "SPECIFIC_RESERVATION"}, false), + }, + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The label key of a reservation resource.`, + }, + "values": { + Type: schema.TypeSet, + Description: "The label values of the reservation resource.", + ForceNew: true, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "spot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: `Whether the nodes are created as spot VM instances.`, + }, + + "service_account": { + Type: schema.TypeString, + Optional: true, 
+ Computed: true, + ForceNew: true, + Description: `The Google Cloud Platform Service Account to be used by the node VMs.`, + }, + + "tags": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of instance tags applied to all nodes.`, + }, + + "shielded_instance_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Shielded Instance options.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: `Defines whether the instance has Secure Boot enabled.`, + }, + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: true, + Description: `Defines whether the instance has integrity monitoring enabled.`, + }, + }, + }, + }, + + "taint": { + Type: schema.TypeList, + Optional: true, + Description: `List of Kubernetes taints to be applied to each node.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + Description: `Key for taint.`, + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: `Value for taint.`, + }, + "effect": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"NO_SCHEDULE", "PREFER_NO_SCHEDULE", "NO_EXECUTE"}, false), + Description: `Effect for taint.`, + }, + }, + }, + }, + + "effective_taints": { + Type: schema.TypeList, + Computed: true, + Description: `List of kubernetes taints applied to each node.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Computed: true, + Description: `Key for taint.`, + }, + "value": { + Type: schema.TypeString, + Computed: true, + Description: `Value for taint.`, + }, + "effect": { + Type: schema.TypeString, + Computed: true, + 
Description: `Effect for taint.`, + }, + }, + }, + }, + + "workload_metadata_config": { + Computed: true, + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The workload metadata configuration for this node.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"MODE_UNSPECIFIED", "GCE_METADATA", "GKE_METADATA"}, false), + Description: `Mode is the configuration for how to expose metadata to workloads running on the node.`, + }, + }, + }, + }, + + {{ if ne $.TargetVersionName `ga` -}} + "sandbox_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `Sandbox configuration for this node.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sandbox_type": { + Type: schema.TypeString, + Required: true, + Description: `Type of the sandbox to use for the node (e.g. 'gvisor')`, + ValidateFunc: validation.StringInSlice([]string{"gvisor"}, false), + }, + }, + }, + }, + {{- end }} + "boot_disk_kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`, + }, + // Note that AtLeastOneOf can't be set because this schema is reused by + // two different resources. 
+ "kubelet_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Node kubelet configs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cpu_manager_policy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"static", "none", ""}, false), + Description: `Control the CPU management policy on the node.`, + }, + "cpu_cfs_quota": { + Type: schema.TypeBool, + Optional: true, + Description: `Enable CPU CFS quota enforcement for containers that specify CPU limits.`, + }, + "cpu_cfs_quota_period": { + Type: schema.TypeString, + Optional: true, + Description: `Set the CPU CFS quota period value 'cpu.cfs_period_us'.`, + }, + "pod_pids_limit": { + Type: schema.TypeInt, + Optional: true, + Description: `Controls the maximum number of processes allowed to run in a pod.`, + }, + }, + }, + }, + + "linux_node_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Parameters that can be configured on Linux nodes.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sysctls": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The Linux kernel parameters to be applied to the nodes and all pods running on the nodes.`, + }, + "cgroup_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"CGROUP_MODE_UNSPECIFIED", "CGROUP_MODE_V1", "CGROUP_MODE_V2"}, false), + Description: `cgroupMode specifies the cgroup mode to be used on the node.`, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("CGROUP_MODE_UNSPECIFIED"), + }, + }, + }, + }, + "node_group": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Setting this field will assign instances of this pool to run on the specified node group. 
This is useful for running workloads on sole tenant nodes.`, + }, + + "advanced_machine_features": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies options for controlling advanced machine features.`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "threads_per_core": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + Description: `The number of threads per physical core. To disable simultaneous multithreading (SMT) set this to 1. If unset, the maximum number of threads supported per core by the underlying processor is assumed.`, + }, + "enable_nested_virtualization": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether the node should have nested virtualization enabled.`, + }, + }, + }, + }, + "sole_tenant_config": { + Type: schema.TypeList, + Optional: true, + Description: `Node affinity options for sole tenant node pools.`, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_affinity": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Description: `.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "key": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `.`, + }, + "operator": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `.`, + ValidateFunc: validation.StringInSlice([]string{"IN", "NOT_IN"}, false), + }, + "values": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: `.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + }, + }, + }, + "host_maintenance_policy": { + Type: schema.TypeList, + Optional: true, + Description: `The maintenance policy for the hosts on which the GKE VMs run on.`, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "maintenance_interval": { + Type: schema.TypeString, + Required: 
true, + ForceNew: true, + Description: `.`, + ValidateFunc: validation.StringInSlice([]string{"MAINTENANCE_INTERVAL_UNSPECIFIED", "AS_NEEDED", "PERIODIC"}, false), + }, + }, + }, + }, + "confidential_nodes": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after pool creation without deleting and recreating the entire pool.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether Confidential Nodes feature is enabled for all nodes in this pool.`, + }, + }, + }, + }, + "fast_socket": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Enable or disable NCCL Fast Socket in the node pool.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not NCCL Fast Socket is enabled`, + }, + }, + }, + }, + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. 
The field is ignored (both PUT & PATCH) when empty.`, + }, + "enable_confidential_storage": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `If enabled boot disks are configured with confidential mode.`, + }, + }, + }, + } +} + +func expandNodeConfigDefaults(configured interface{}) *container.NodeConfigDefaults { + configs := configured.([]interface{}) + if len(configs) == 0 || configs[0] == nil { + return nil + } + config := configs[0].(map[string]interface{}) + + nodeConfigDefaults := &container.NodeConfigDefaults{} + nodeConfigDefaults.ContainerdConfig = expandContainerdConfig(config["containerd_config"]) + if variant, ok := config["logging_variant"]; ok { + nodeConfigDefaults.LoggingConfig = &container.NodePoolLoggingConfig{ + VariantConfig: &container.LoggingVariantConfig{ + Variant: variant.(string), + }, + } + } +{{- if ne $.TargetVersionName "ga" }} + if v, ok := config["gcfs_config"]; ok && len(v.([]interface{})) > 0 { + gcfsConfig := v.([]interface{})[0].(map[string]interface{}) + nodeConfigDefaults.GcfsConfig = &container.GcfsConfig{ + Enabled: gcfsConfig["enabled"].(bool), + } + } +{{- end }} + return nodeConfigDefaults +} + +func expandNodeConfig(v interface{}) *container.NodeConfig { + nodeConfigs := v.([]interface{}) + nc := &container.NodeConfig{ + // Defaults can't be set on a list/set in the schema, so set the default on create here. 
+ OauthScopes: defaultOauthScopes, + } + if len(nodeConfigs) == 0 { + return nc + } + + nodeConfig := nodeConfigs[0].(map[string]interface{}) + + if v, ok := nodeConfig["containerd_config"]; ok { + nc.ContainerdConfig = expandContainerdConfig(v) + } + + if v, ok := nodeConfig["machine_type"]; ok { + nc.MachineType = v.(string) + } + + if v, ok := nodeConfig["guest_accelerator"]; ok { + accels := v.([]interface{}) + guestAccelerators := make([]*container.AcceleratorConfig, 0, len(accels)) + for _, raw := range accels { + data := raw.(map[string]interface{}) + if data["count"].(int) == 0 { + continue + } + guestAcceleratorConfig := &container.AcceleratorConfig{ + AcceleratorCount: int64(data["count"].(int)), + AcceleratorType: data["type"].(string), + GpuPartitionSize: data["gpu_partition_size"].(string), + } + + if v, ok := data["gpu_driver_installation_config"]; ok && len(v.([]interface{})) > 0 { + gpuDriverInstallationConfig := data["gpu_driver_installation_config"].([]interface{})[0].(map[string]interface{}) + guestAcceleratorConfig.GpuDriverInstallationConfig = &container.GPUDriverInstallationConfig{ + GpuDriverVersion: gpuDriverInstallationConfig["gpu_driver_version"].(string), + } + } + + if v, ok := data["gpu_sharing_config"]; ok && len(v.([]interface{})) > 0 { + gpuSharingConfig := data["gpu_sharing_config"].([]interface{})[0].(map[string]interface{}) + guestAcceleratorConfig.GpuSharingConfig = &container.GPUSharingConfig{ + GpuSharingStrategy: gpuSharingConfig["gpu_sharing_strategy"].(string), + MaxSharedClientsPerGpu: int64(gpuSharingConfig["max_shared_clients_per_gpu"].(int)), + } + } + + guestAccelerators = append(guestAccelerators, guestAcceleratorConfig) + } + nc.Accelerators = guestAccelerators + } + + if v, ok := nodeConfig["disk_size_gb"]; ok { + nc.DiskSizeGb = int64(v.(int)) + } + + if v, ok := nodeConfig["disk_type"]; ok { + nc.DiskType = v.(string) + } + + if v, ok := nodeConfig["local_ssd_count"]; ok { + nc.LocalSsdCount = int64(v.(int)) + } + 
+ if v, ok := nodeConfig["logging_variant"]; ok { + nc.LoggingConfig = &container.NodePoolLoggingConfig{ + VariantConfig: &container.LoggingVariantConfig{ + Variant: v.(string), + }, + } + } + +{{ if ne $.TargetVersionName `ga` -}} + if v, ok := nodeConfig["ephemeral_storage_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.EphemeralStorageConfig = &container.EphemeralStorageConfig{ + LocalSsdCount: int64(conf["local_ssd_count"].(int)), + } + } +{{- end }} + if v, ok := nodeConfig["local_nvme_ssd_block_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.LocalNvmeSsdBlockConfig = &container.LocalNvmeSsdBlockConfig{ + LocalSsdCount: int64(conf["local_ssd_count"].(int)), + } + } + + if v, ok := nodeConfig["ephemeral_storage_local_ssd_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.EphemeralStorageLocalSsdConfig = &container.EphemeralStorageLocalSsdConfig{ + LocalSsdCount: int64(conf["local_ssd_count"].(int)), + } + } + + if v, ok := nodeConfig["secondary_boot_disks"]; ok && len(v.([]interface{})) > 0 { + conf, confOK := v.([]interface{})[0].(map[string]interface{}) + if confOK { + modeValue, modeOK := conf["mode"] + diskImage := conf["disk_image"].(string) + if modeOK { + nc.SecondaryBootDisks = append(nc.SecondaryBootDisks, &container.SecondaryBootDisk{ + DiskImage: diskImage, + Mode: modeValue.(string), + }) + } else { + nc.SecondaryBootDisks = append(nc.SecondaryBootDisks, &container.SecondaryBootDisk{ + DiskImage: diskImage, + }) + } + } else { + nc.SecondaryBootDisks = append(nc.SecondaryBootDisks, &container.SecondaryBootDisk{ + DiskImage: "", + }) + } + } + + if v, ok := nodeConfig["gcfs_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.GcfsConfig = &container.GcfsConfig{ + Enabled: conf["enabled"].(bool), + } + } + + if v, ok := 
nodeConfig["gvnic"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.Gvnic = &container.VirtualNIC{ + Enabled: conf["enabled"].(bool), + } + } + + if v, ok := nodeConfig["fast_socket"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.FastSocket = &container.FastSocket{ + Enabled: conf["enabled"].(bool), + } + } + + if v, ok := nodeConfig["reservation_affinity"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + valuesSet := conf["values"].(*schema.Set) + values := make([]string, valuesSet.Len()) + for i, value := range valuesSet.List() { + values[i] = value.(string) + } + + nc.ReservationAffinity = &container.ReservationAffinity{ + ConsumeReservationType: conf["consume_reservation_type"].(string), + Key: conf["key"].(string), + Values: values, + } + } + + if scopes, ok := nodeConfig["oauth_scopes"]; ok { + scopesSet := scopes.(*schema.Set) + scopes := make([]string, scopesSet.Len()) + for i, scope := range scopesSet.List() { + scopes[i] = tpgresource.CanonicalizeServiceScope(scope.(string)) + } + + nc.OauthScopes = scopes + } + + if v, ok := nodeConfig["service_account"]; ok { + nc.ServiceAccount = v.(string) + } + + if v, ok := nodeConfig["metadata"]; ok { + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + nc.Metadata = m + } + + if v, ok := nodeConfig["image_type"]; ok { + nc.ImageType = v.(string) + } + + if v, ok := nodeConfig["labels"]; ok { + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + nc.Labels = m + } + + if v, ok := nodeConfig["resource_labels"]; ok { + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + nc.ResourceLabels = m + } + + if v, ok := nodeConfig["resource_manager_tags"]; ok && len(v.(map[string]interface{})) > 0 { + 
nc.ResourceManagerTags = expandResourceManagerTags(v) + } + + if v, ok := nodeConfig["tags"]; ok { + tagsList := v.([]interface{}) + tags := []string{} + for _, v := range tagsList { + if v != nil { + tags = append(tags, v.(string)) + } + } + nc.Tags = tags + } + + if v, ok := nodeConfig["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{ + EnableSecureBoot: conf["enable_secure_boot"].(bool), + EnableIntegrityMonitoring: conf["enable_integrity_monitoring"].(bool), + } + } + + // Preemptible Is Optional+Default, so it always has a value + nc.Preemptible = nodeConfig["preemptible"].(bool) + + // Spot Is Optional+Default, so it always has a value + nc.Spot = nodeConfig["spot"].(bool) + + if v, ok := nodeConfig["min_cpu_platform"]; ok { + nc.MinCpuPlatform = v.(string) + } + + if v, ok := nodeConfig["taint"]; ok && len(v.([]interface{})) > 0 { + taints := v.([]interface{}) + nodeTaints := make([]*container.NodeTaint, 0, len(taints)) + for _, raw := range taints { + data := raw.(map[string]interface{}) + taint := &container.NodeTaint{ + Key: data["key"].(string), + Value: data["value"].(string), + Effect: data["effect"].(string), + } + + nodeTaints = append(nodeTaints, taint) + } + + nc.Taints = nodeTaints + } + + if v, ok := nodeConfig["workload_metadata_config"]; ok { + nc.WorkloadMetadataConfig = expandWorkloadMetadataConfig(v) + } + +{{ if ne $.TargetVersionName `ga` -}} + if v, ok := nodeConfig["sandbox_config"]; ok && len(v.([]interface{})) > 0 { + conf := v.([]interface{})[0].(map[string]interface{}) + nc.SandboxConfig = &container.SandboxConfig{ + SandboxType: conf["sandbox_type"].(string), + } + } +{{- end }} + if v, ok := nodeConfig["boot_disk_kms_key"]; ok { + nc.BootDiskKmsKey = v.(string) + } + + if v, ok := nodeConfig["kubelet_config"]; ok { + nc.KubeletConfig = expandKubeletConfig(v) + } + + if v, ok := 
nodeConfig["linux_node_config"]; ok { + nc.LinuxNodeConfig = expandLinuxNodeConfig(v) + } + + if v, ok := nodeConfig["node_group"]; ok { + nc.NodeGroup = v.(string) + } + + if v, ok := nodeConfig["advanced_machine_features"]; ok && len(v.([]interface{})) > 0 { + advanced_machine_features := v.([]interface{})[0].(map[string]interface{}) + nc.AdvancedMachineFeatures = &container.AdvancedMachineFeatures{ + ThreadsPerCore: int64(advanced_machine_features["threads_per_core"].(int)), + EnableNestedVirtualization: advanced_machine_features["enable_nested_virtualization"].(bool), + } + } + + if v, ok := nodeConfig["sole_tenant_config"]; ok && len(v.([]interface{})) > 0 { + nc.SoleTenantConfig = expandSoleTenantConfig(v) + } + + if v,ok := nodeConfig["enable_confidential_storage"]; ok { + nc.EnableConfidentialStorage = v.(bool) + } + + {{ if ne $.TargetVersionName `ga` -}} + if v, ok := nodeConfig["host_maintenance_policy"]; ok { + nc.HostMaintenancePolicy = expandHostMaintenancePolicy(v) + } + {{- end }} + + if v, ok := nodeConfig["confidential_nodes"]; ok { + nc.ConfidentialNodes = expandConfidentialNodes(v) + } + + return nc +} + +func expandResourceManagerTags(v interface{}) *container.ResourceManagerTags { + rmts := make(map[string]string) + + if v != nil { + rmts = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + + return &container.ResourceManagerTags{ + Tags: rmts, + ForceSendFields: []string{"Tags"}, + } +} + +func expandWorkloadMetadataConfig(v interface{}) *container.WorkloadMetadataConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + wmc := &container.WorkloadMetadataConfig{} + + cfg := ls[0].(map[string]interface{}) + + if v, ok := cfg["mode"]; ok { + wmc.Mode = v.(string) + } + + return wmc +} + +func expandKubeletConfig(v interface{}) *container.NodeKubeletConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + cfg := 
ls[0].(map[string]interface{}) + kConfig := &container.NodeKubeletConfig{} + if cpuManagerPolicy, ok := cfg["cpu_manager_policy"]; ok { + kConfig.CpuManagerPolicy = cpuManagerPolicy.(string) + } + if cpuCfsQuota, ok := cfg["cpu_cfs_quota"]; ok { + kConfig.CpuCfsQuota = cpuCfsQuota.(bool) + kConfig.ForceSendFields = append(kConfig.ForceSendFields, "CpuCfsQuota") + } + if cpuCfsQuotaPeriod, ok := cfg["cpu_cfs_quota_period"]; ok { + kConfig.CpuCfsQuotaPeriod = cpuCfsQuotaPeriod.(string) + } + if podPidsLimit, ok := cfg["pod_pids_limit"]; ok { + kConfig.PodPidsLimit = int64(podPidsLimit.(int)) + } + return kConfig +} + +func expandLinuxNodeConfig(v interface{}) *container.LinuxNodeConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + if ls[0] == nil { + return &container.LinuxNodeConfig{} + } + cfg := ls[0].(map[string]interface{}) + + linuxNodeConfig := &container.LinuxNodeConfig{} + sysctls := expandSysctls(cfg) + if sysctls != nil { + linuxNodeConfig.Sysctls = sysctls + } + cgroupMode := expandCgroupMode(cfg) + if len(cgroupMode) != 0 { + linuxNodeConfig.CgroupMode = cgroupMode + } + + return linuxNodeConfig +} + +func expandSysctls(cfg map[string]interface{}) map[string]string { + sysCfgRaw, ok := cfg["sysctls"] + if !ok { + return nil + } + sysctls := make(map[string]string) + for k, v := range sysCfgRaw.(map[string]interface{}) { + sysctls[k] = v.(string) + } + return sysctls +} + +func expandCgroupMode(cfg map[string]interface{}) string { + cgroupMode, ok := cfg["cgroup_mode"] + if !ok { + return "" + } + + return cgroupMode.(string) +} + +func expandContainerdConfig(v interface{}) *container.ContainerdConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + if ls[0] == nil { + return &container.ContainerdConfig{} + } + cfg := ls[0].(map[string]interface{}) + + cc := &container.ContainerdConfig{} + cc.PrivateRegistryAccessConfig = 
expandPrivateRegistryAccessConfig(cfg["private_registry_access_config"]) + return cc +} + +func expandPrivateRegistryAccessConfig(v interface{}) *container.PrivateRegistryAccessConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + if ls[0] == nil { + return &container.PrivateRegistryAccessConfig{} + } + cfg := ls[0].(map[string]interface{}) + + pracc := &container.PrivateRegistryAccessConfig{} + if enabled, ok := cfg["enabled"]; ok { + pracc.Enabled = enabled.(bool) + } + if caCfgRaw, ok := cfg["certificate_authority_domain_config"]; ok { + ls := caCfgRaw.([]interface{}) + pracc.CertificateAuthorityDomainConfig = make([]*container.CertificateAuthorityDomainConfig, len(ls)) + for i, caCfg := range ls { + pracc.CertificateAuthorityDomainConfig[i] = expandCADomainConfig(caCfg) + } + } + + return pracc +} + +func expandCADomainConfig(v interface{}) *container.CertificateAuthorityDomainConfig { + if v == nil { + return nil + } + cfg := v.(map[string]interface{}) + + caConfig := &container.CertificateAuthorityDomainConfig{} + if v, ok := cfg["fqdns"]; ok { + fqdns := v.([]interface{}) + caConfig.Fqdns = make([]string, len(fqdns)) + for i, dn := range fqdns { + caConfig.Fqdns[i] = dn.(string) + } + } + + caConfig.GcpSecretManagerCertificateConfig = expandGCPSecretManagerCertificateConfig(cfg["gcp_secret_manager_certificate_config"]) + + return caConfig +} + +func expandGCPSecretManagerCertificateConfig(v interface{}) *container.GCPSecretManagerCertificateConfig { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + if ls[0] == nil { + return &container.GCPSecretManagerCertificateConfig{} + } + cfg := ls[0].(map[string]interface{}) + + gcpSMConfig := &container.GCPSecretManagerCertificateConfig{} + if v, ok := cfg["secret_uri"]; ok { + gcpSMConfig.SecretUri = v.(string) + } + return gcpSMConfig +} + +func expandSoleTenantConfig(v interface{}) *container.SoleTenantConfig { + if 
v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + cfg := ls[0].(map[string]interface{}) + affinitiesRaw, ok := cfg["node_affinity"] + if !ok { + return nil + } + affinities := make([]*container.NodeAffinity, 0) + for _, v := range affinitiesRaw.(*schema.Set).List() { + na := v.(map[string]interface{}) + + affinities = append(affinities, &container.NodeAffinity{ + Key: na["key"].(string), + Operator: na["operator"].(string), + Values: tpgresource.ConvertStringArr(na["values"].([]interface{})), + }) + } + return &container.SoleTenantConfig{ + NodeAffinities: affinities, + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandHostMaintenancePolicy(v interface{}) *container.HostMaintenancePolicy { + if v == nil { + return nil + } + ls := v.([]interface{}) + if len(ls) == 0 { + return nil + } + cfg := ls[0].(map[string]interface{}) + mPolicy := &container.HostMaintenancePolicy{} + if maintenanceInterval, ok := cfg["maintenance_interval"]; ok { + mPolicy.MaintenanceInterval = maintenanceInterval.(string) + } + + return mPolicy +} +{{- end }} + +func expandConfidentialNodes(configured interface{}) *container.ConfidentialNodes { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + config := l[0].(map[string]interface{}) + return &container.ConfidentialNodes{ + Enabled: config["enabled"].(bool), + } +} + +func flattenNodeConfigDefaults(c *container.NodeConfigDefaults) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + + if c == nil { + return result + } + + result = append(result, map[string]interface{}{}) + + result[0]["containerd_config"] = flattenContainerdConfig(c.ContainerdConfig) + + result[0]["logging_variant"] = flattenLoggingVariant(c.LoggingConfig) + +{{ if ne $.TargetVersionName `ga` -}} + result[0]["gcfs_config"] = flattenGcfsConfig(c.GcfsConfig) +{{- end }} + return result +} + +// v == old state of `node_config` +func flattenNodeConfig(c 
*container.NodeConfig, v interface{}) []map[string]interface{} { + config := make([]map[string]interface{}, 0, 1) + + if c == nil { + return config + } + + // default to no prior taint state if there are any issues + oldTaints := []interface{}{} + oldNodeConfigSchemaContainer := v.([]interface{}) + if len(oldNodeConfigSchemaContainer) != 0 { + oldNodeConfigSchema := oldNodeConfigSchemaContainer[0].(map[string]interface{}) + if vt, ok := oldNodeConfigSchema["taint"]; ok && len(vt.([]interface{})) > 0 { + oldTaints = vt.([]interface{}) + } + } + + config = append(config, map[string]interface{}{ + "machine_type": c.MachineType, + "containerd_config": flattenContainerdConfig(c.ContainerdConfig), + "disk_size_gb": c.DiskSizeGb, + "disk_type": c.DiskType, + "guest_accelerator": flattenContainerGuestAccelerators(c.Accelerators), + "local_ssd_count": c.LocalSsdCount, + "logging_variant": flattenLoggingVariant(c.LoggingConfig), +{{- if ne $.TargetVersionName "ga" }} + "ephemeral_storage_config": flattenEphemeralStorageConfig(c.EphemeralStorageConfig), +{{- end }} + "local_nvme_ssd_block_config": flattenLocalNvmeSsdBlockConfig(c.LocalNvmeSsdBlockConfig), + "ephemeral_storage_local_ssd_config": flattenEphemeralStorageLocalSsdConfig(c.EphemeralStorageLocalSsdConfig), + "gcfs_config": flattenGcfsConfig(c.GcfsConfig), + "gvnic": flattenGvnic(c.Gvnic), + "reservation_affinity": flattenGKEReservationAffinity(c.ReservationAffinity), + "service_account": c.ServiceAccount, + "metadata": c.Metadata, + "image_type": c.ImageType, + "labels": c.Labels, + "resource_labels": c.ResourceLabels, + "tags": c.Tags, + "preemptible": c.Preemptible, + "secondary_boot_disks": flattenSecondaryBootDisks(c.SecondaryBootDisks), + "spot": c.Spot, + "min_cpu_platform": c.MinCpuPlatform, + "shielded_instance_config": flattenShieldedInstanceConfig(c.ShieldedInstanceConfig), + "taint": flattenTaints(c.Taints, oldTaints), + "effective_taints": flattenEffectiveTaints(c.Taints), + "workload_metadata_config": 
flattenWorkloadMetadataConfig(c.WorkloadMetadataConfig), +{{- if ne $.TargetVersionName "ga" }} + "sandbox_config": flattenSandboxConfig(c.SandboxConfig), + "host_maintenance_policy": flattenHostMaintenancePolicy(c.HostMaintenancePolicy), +{{- end }} + "confidential_nodes": flattenConfidentialNodes(c.ConfidentialNodes), + "boot_disk_kms_key": c.BootDiskKmsKey, + "kubelet_config": flattenKubeletConfig(c.KubeletConfig), + "linux_node_config": flattenLinuxNodeConfig(c.LinuxNodeConfig), + "node_group": c.NodeGroup, + "advanced_machine_features": flattenAdvancedMachineFeaturesConfig(c.AdvancedMachineFeatures), + "sole_tenant_config": flattenSoleTenantConfig(c.SoleTenantConfig), + "fast_socket": flattenFastSocket(c.FastSocket), + "resource_manager_tags": flattenResourceManagerTags(c.ResourceManagerTags), + "enable_confidential_storage": c.EnableConfidentialStorage, + }) + + if len(c.OauthScopes) > 0 { + config[0]["oauth_scopes"] = schema.NewSet(tpgresource.StringScopeHashcode, tpgresource.ConvertStringArrToInterface(c.OauthScopes)) + } + + return config +} + +func flattenResourceManagerTags(c *container.ResourceManagerTags) map[string]interface{} { + rmt := make(map[string]interface{}) + + if c != nil { + for k, v := range c.Tags { + rmt[k] = v + } + } + + return rmt +} + +func flattenAdvancedMachineFeaturesConfig(c *container.AdvancedMachineFeatures) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "threads_per_core": c.ThreadsPerCore, + "enable_nested_virtualization": c.EnableNestedVirtualization, + }) + } + return result +} + +func flattenContainerGuestAccelerators(c []*container.AcceleratorConfig) []map[string]interface{} { + result := []map[string]interface{}{} + for _, accel := range c { + accelerator := map[string]interface{}{ + "count": accel.AcceleratorCount, + "type": accel.AcceleratorType, + "gpu_partition_size": accel.GpuPartitionSize, + } + if 
accel.GpuDriverInstallationConfig != nil { + accelerator["gpu_driver_installation_config"] = []map[string]interface{}{ + { + "gpu_driver_version": accel.GpuDriverInstallationConfig.GpuDriverVersion, + }, + } + } + if accel.GpuSharingConfig != nil { + accelerator["gpu_sharing_config"] = []map[string]interface{}{ + { + "gpu_sharing_strategy": accel.GpuSharingConfig.GpuSharingStrategy, + "max_shared_clients_per_gpu": accel.GpuSharingConfig.MaxSharedClientsPerGpu, + }, + } + } + result = append(result, accelerator) + } + return result +} + +func flattenShieldedInstanceConfig(c *container.ShieldedInstanceConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enable_secure_boot": c.EnableSecureBoot, + "enable_integrity_monitoring": c.EnableIntegrityMonitoring, + }) + } + return result +} + +{{ if ne $.TargetVersionName `ga` -}} +func flattenEphemeralStorageConfig(c *container.EphemeralStorageConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "local_ssd_count": c.LocalSsdCount, + }) + } + return result +} +{{- end }} + +func flattenLocalNvmeSsdBlockConfig(c *container.LocalNvmeSsdBlockConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "local_ssd_count": c.LocalSsdCount, + }) + } + return result +} + +func flattenEphemeralStorageLocalSsdConfig(c *container.EphemeralStorageLocalSsdConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "local_ssd_count": c.LocalSsdCount, + }) + } + return result +} + +func flattenSecondaryBootDisks(c []*container.SecondaryBootDisk) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + for _, disk := range c { + secondaryBootDisk := map[string]interface{}{ + 
"disk_image": disk.DiskImage, + "mode": disk.Mode, + } + result = append(result, secondaryBootDisk) + } + } + return result +} + +func flattenLoggingVariant(c *container.NodePoolLoggingConfig) string { + variant := "DEFAULT" + if c != nil && c.VariantConfig != nil && c.VariantConfig.Variant != "" { + variant = c.VariantConfig.Variant + } + return variant +} + +func flattenGcfsConfig(c *container.GcfsConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + }) + } + return result +} + +func flattenGvnic(c *container.VirtualNIC) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + }) + } + return result +} + +func flattenGKEReservationAffinity(c *container.ReservationAffinity) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "consume_reservation_type": c.ConsumeReservationType, + "key": c.Key, + "values": c.Values, + }) + } + return result +} + +// flattenTaints records the set of taints already present in state. +func flattenTaints(c []*container.NodeTaint, oldTaints []interface{}) []map[string]interface{} { + taintKeys := map[string]struct{}{} + for _, raw := range oldTaints { + data := raw.(map[string]interface{}) + taintKey := data["key"].(string) + taintKeys[taintKey] = struct{}{} + } + + result := []map[string]interface{}{} + for _, taint := range c { + if _, ok := taintKeys[taint.Key]; ok { + result = append(result, map[string]interface{}{ + "key": taint.Key, + "value": taint.Value, + "effect": taint.Effect, + }) + } + } + + return result +} + +// flattenEffectiveTaints records the complete set of taints returned from GKE. 
+func flattenEffectiveTaints(c []*container.NodeTaint) []map[string]interface{} { + result := []map[string]interface{}{} + for _, taint := range c { + result = append(result, map[string]interface{}{ + "key": taint.Key, + "value": taint.Value, + "effect": taint.Effect, + }) + } + + return result +} + +func flattenWorkloadMetadataConfig(c *container.WorkloadMetadataConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "mode": c.Mode, + }) + } + return result +} +{{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }} +func flattenSandboxConfig(c *container.SandboxConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "sandbox_type": c.SandboxType, + }) + } + return result +} + +func containerNodePoolLabelsSuppress(k, old, new string, d *schema.ResourceData) bool { + // Node configs are embedded into multiple resources (container cluster and + // container node pool) so we determine the node config key dynamically. + idx := strings.Index(k, ".labels.") + if idx < 0 { + return false + } + + root := k[:idx] + + // Right now, GKE only applies its own out-of-band labels when you enable + // Sandbox. We only need to perform diff suppression in this case; + // otherwise, the default Terraform behavior is fine. + o, n := d.GetChange(root + ".sandbox_config.0.sandbox_type") + if o == nil || n == nil { + return false + } + + // Pull the entire changeset as a list rather than trying to deal with each + // element individually. + o, n = d.GetChange(root + ".labels") + if o == nil || n == nil { + return false + } + + labels := n.(map[string]interface{}) + + // Remove all current labels, skipping GKE-managed ones if not present in + // the new configuration. 
	for key, value := range o.(map[string]interface{}) {
		if nv, ok := labels[key]; ok && nv == value {
			delete(labels, key)
		} else if !strings.HasPrefix(key, "sandbox.gke.io/") {
			// User-provided label removed in new configuration.
			return false
		}
	}

	// If, at this point, the map still has elements, the new configuration
	// added an additional label.
	if len(labels) > 0 {
		return false
	}

	return true
}
{{- end }}

// flattenKubeletConfig converts the API NodeKubeletConfig into the
// single-element list form used by the schema; empty list when unset.
func flattenKubeletConfig(c *container.NodeKubeletConfig) []map[string]interface{} {
	result := []map[string]interface{}{}
	if c != nil {
		result = append(result, map[string]interface{}{
			"cpu_cfs_quota":        c.CpuCfsQuota,
			"cpu_cfs_quota_period": c.CpuCfsQuotaPeriod,
			"cpu_manager_policy":   c.CpuManagerPolicy,
			"pod_pids_limit":       c.PodPidsLimit,
		})
	}
	return result
}

// flattenLinuxNodeConfig converts the API LinuxNodeConfig into the
// single-element list form used by the schema; empty list when unset.
func flattenLinuxNodeConfig(c *container.LinuxNodeConfig) []map[string]interface{} {
	result := []map[string]interface{}{}
	if c != nil {
		result = append(result, map[string]interface{}{
			"sysctls":     c.Sysctls,
			"cgroup_mode": c.CgroupMode,
		})
	}
	return result
}

// flattenContainerdConfig converts the API ContainerdConfig into the
// single-element list form used by the schema. A nil input yields an empty
// list; a non-nil input always yields one element, even if empty.
func flattenContainerdConfig(c *container.ContainerdConfig) []map[string]interface{} {
	result := []map[string]interface{}{}
	if c == nil {
		return result
	}
	r := map[string]interface{}{}
	if c.PrivateRegistryAccessConfig != nil {
		r["private_registry_access_config"] = flattenPrivateRegistryAccessConfig(c.PrivateRegistryAccessConfig)
	}
	return append(result, r)
}

// flattenPrivateRegistryAccessConfig converts the API
// PrivateRegistryAccessConfig into the single-element list form used by the
// schema, including any per-domain CA configs.
func flattenPrivateRegistryAccessConfig(c *container.PrivateRegistryAccessConfig) []map[string]interface{} {
	result := []map[string]interface{}{}
	if c == nil {
		return result
	}
	r := map[string]interface{}{
		"enabled": c.Enabled,
	}
	if c.CertificateAuthorityDomainConfig != nil {
		caConfigs := make([]interface{}, len(c.CertificateAuthorityDomainConfig))
		for i, caCfg := range c.CertificateAuthorityDomainConfig {
			caConfigs[i] = flattenCADomainConfig(caCfg)
		}
r["certificate_authority_domain_config"] = caConfigs + } + return append(result, r) +} + +// func flattenCADomainConfig(c *container.CertificateAuthorityDomainConfig) []map[string]interface{} { +// result := []map[string]interface{}{} +// if c == nil { +// return result +// } +// r := map[string]interface{}{ +// "fqdns": c.Fqdns, +// } +// if c.GcpSecretManagerCertificateConfig != nil { +// r["gcp_secret_manager_certificate_config"] = flattenGCPSecretManagerCertificateConfig(c.GcpSecretManagerCertificateConfig) +// } +// return append(result, r) +// } + +func flattenCADomainConfig(c *container.CertificateAuthorityDomainConfig) map[string]interface{} { + if c == nil { + return nil + } + r := map[string]interface{}{ + "fqdns": c.Fqdns, + } + if c.GcpSecretManagerCertificateConfig != nil { + r["gcp_secret_manager_certificate_config"] = flattenGCPSecretManagerCertificateConfig(c.GcpSecretManagerCertificateConfig) + } + return r +} + +func flattenGCPSecretManagerCertificateConfig(c *container.GCPSecretManagerCertificateConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c == nil { + return result + } + r := map[string]interface{}{ + "secret_uri": c.SecretUri, + } + return append(result, r) +} + +func flattenConfidentialNodes(c *container.ConfidentialNodes) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + }) + } + return result +} + +func flattenSoleTenantConfig(c *container.SoleTenantConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c == nil { + return result + } + affinities := []map[string]interface{}{} + for _, affinity := range c.NodeAffinities { + affinities = append(affinities, map[string]interface{}{ + "key": affinity.Key, + "operator": affinity.Operator, + "values": affinity.Values, + }) + } + return append(result, map[string]interface{}{ + "node_affinity": affinities, + }) +} + +func 
flattenFastSocket(c *container.FastSocket) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + }) + } + return result +} + +{{ if ne $.TargetVersionName `ga` -}} +func flattenHostMaintenancePolicy(c *container.HostMaintenancePolicy) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "maintenance_interval": c.MaintenanceInterval, + }) + } + + return result +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl new file mode 100644 index 000000000000..8abea04b38ca --- /dev/null +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl @@ -0,0 +1,6805 @@ +package container + +import ( + "context" + "fmt" + "log" + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-version" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/container/v1" +{{- else }} + container "google.golang.org/api/container/v1beta1" +{{- end }} +) + +var ( + instanceGroupManagerURL = regexp.MustCompile(fmt.Sprintf("projects/(%s)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)", verify.ProjectRegex)) + + masterAuthorizedNetworksConfig = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_blocks": 
{ + Type: schema.TypeSet, + // This should be kept Optional. Expressing the + // parent with no entries and omitting the + // parent entirely are semantically different. + Optional: true, + Elem: cidrBlockConfig, + Description: `External networks that can access the Kubernetes cluster master through HTTPS.`, + }, + "gcp_public_cidrs_access_enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Whether Kubernetes master is accessible via Google Compute Engine Public IPs.`, + }, + }, + } + cidrBlockConfig = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cidr_block": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.IsCIDRNetwork(0, 32), + Description: `External network that can access Kubernetes master through HTTPS. Must be specified in CIDR notation.`, + }, + "display_name": { + Type: schema.TypeString, + Optional: true, + Description: `Field for users to identify CIDR blocks.`, + }, + }, + } + + ipAllocationCidrBlockFields = []string{"ip_allocation_policy.0.cluster_ipv4_cidr_block", "ip_allocation_policy.0.services_ipv4_cidr_block"} + ipAllocationRangeFields = []string{"ip_allocation_policy.0.cluster_secondary_range_name", "ip_allocation_policy.0.services_secondary_range_name"} + + addonsConfigKeys = []string{ + "addons_config.0.http_load_balancing", + "addons_config.0.horizontal_pod_autoscaling", + "addons_config.0.network_policy_config", + "addons_config.0.cloudrun_config", + "addons_config.0.gcp_filestore_csi_driver_config", + "addons_config.0.dns_cache_config", + "addons_config.0.gce_persistent_disk_csi_driver_config", + "addons_config.0.gke_backup_agent_config", + "addons_config.0.config_connector_config", + "addons_config.0.gcs_fuse_csi_driver_config", + "addons_config.0.stateful_ha_config", + {{- if ne $.TargetVersionName "ga" }} + "addons_config.0.istio_config", + "addons_config.0.kalm_config", + {{- end }} + } + + privateClusterConfigKeys = []string{ + 
"private_cluster_config.0.enable_private_endpoint", + "private_cluster_config.0.enable_private_nodes", + "private_cluster_config.0.master_ipv4_cidr_block", + "private_cluster_config.0.private_endpoint_subnetwork", + "private_cluster_config.0.master_global_access_config", + } + + forceNewClusterNodeConfigFields = []string{ + "labels", + "workload_metadata_config", + "resource_manager_tags", + } + + suppressDiffForAutopilot = schema.SchemaDiffSuppressFunc(func(k, oldValue, newValue string, d *schema.ResourceData) bool { + if v, _ := d.Get("enable_autopilot").(bool); v { + return true + } + return false + }) + + suppressDiffForPreRegisteredFleet = schema.SchemaDiffSuppressFunc(func(k, oldValue, newValue string, d *schema.ResourceData) bool { + if v, _ := d.Get("fleet.0.pre_registered").(bool); v { + log.Printf("[DEBUG] fleet suppress pre_registered: %v\n", v) + return true + } + return false + }) +) + +// This uses the node pool nodeConfig schema but sets +// node-pool-only updatable fields to ForceNew +func clusterSchemaNodeConfig() *schema.Schema { + nodeConfigSch := schemaNodeConfig() + schemaMap := nodeConfigSch.Elem.(*schema.Resource).Schema + for _, k := range forceNewClusterNodeConfigFields { + if sch, ok := schemaMap[k]; ok { + tpgresource.ChangeFieldSchemaToForceNew(sch) + } + } + return nodeConfigSch +} + +// Defines default nodel pool settings for the entire cluster. These settings are +// overridden if specified on the specific NodePool object. 
+func clusterSchemaNodePoolDefaults() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `The default nodel pool settings for the entire cluster.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_config_defaults": { + Type: schema.TypeList, + Optional: true, + Description: `Subset of NodeConfig message that has defaults.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "containerd_config": schemaContainerdConfig(), +{{- if ne $.TargetVersionName "ga" }} + "gcfs_config": schemaGcfsConfig(false), +{{- end }} + "logging_variant": schemaLoggingVariant(), + }, + }, + }, + }, + }, + } +} + +func rfc5545RecurrenceDiffSuppress(k, o, n string, d *schema.ResourceData) bool { + // This diff gets applied in the cloud console if you specify + // "FREQ=DAILY" in your config and add a maintenance exclusion. + if o == "FREQ=WEEKLY;BYDAY=MO,TU,WE,TH,FR,SA,SU" && n == "FREQ=DAILY" { + return true + } + // Writing a full diff suppress for identical recurrences would be + // complex and error-prone - it's not a big problem if a user + // changes the recurrence and it's textually difference but semantically + // identical. + return false +} + +// Has the field (e.g. enable_l4_ilb_subsetting and enable_fqdn_network_policy) been enabled before? 
+func isBeenEnabled(_ context.Context, old, new, _ interface{}) bool { + if old == nil || new == nil { + return false + } + + // if subsetting is enabled, but is not now + if old.(bool) && !new.(bool) { + return true + } + + return false +} + +func ResourceContainerCluster() *schema.Resource { + return &schema.Resource{ + UseJSONNumber: true, + Create: resourceContainerClusterCreate, + Read: resourceContainerClusterRead, + Update: resourceContainerClusterUpdate, + Delete: resourceContainerClusterDelete, + + CustomizeDiff: customdiff.All( + resourceNodeConfigEmptyGuestAccelerator, + customdiff.ForceNewIfChange("enable_l4_ilb_subsetting", isBeenEnabled), +{{- if ne $.TargetVersionName "ga" }} + customdiff.ForceNewIfChange("enable_fqdn_network_policy", isBeenEnabled), +{{- end }} + containerClusterAutopilotCustomizeDiff, + containerClusterNodeVersionRemoveDefaultCustomizeDiff, + containerClusterNetworkPolicyEmptyCustomizeDiff, + containerClusterSurgeSettingsCustomizeDiff, + containerClusterEnableK8sBetaApisCustomizeDiff, + containerClusterNodeVersionCustomizeDiff, + ), + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(40 * time.Minute), + Read: schema.DefaultTimeout(40 * time.Minute), + Update: schema.DefaultTimeout(60 * time.Minute), + Delete: schema.DefaultTimeout(40 * time.Minute), + }, + + SchemaVersion: 2, + MigrateState: resourceContainerClusterMigrateState, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceContainerClusterResourceV1().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceContainerClusterUpgradeV1, + Version: 1, + }, + }, + + Importer: &schema.ResourceImporter{ + State: resourceContainerClusterStateImporter, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the cluster, unique within the project and location.`, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if 
len(value) > 40 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 40 characters", k)) + } + if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", k)) + } + if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "operation": { + Type: schema.TypeString, + Computed: true, + }, + + "location": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The location (region or zone) in which the cluster master will be created, as well as the default node location. If you specify a zone (such as us-central1-a), the cluster will be a zonal cluster with a single cluster master. If you specify a region (such as us-west1), the cluster will be a regional cluster with multiple masters spread across zones in the region, and with default node locations in those zones as well.`, + }, + + "node_locations": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If this is specified for a zonal cluster, omit the cluster's zone.`, + }, + + "deletion_protection": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `When the field is set to true or unset in Terraform state, a terraform apply or terraform destroy that would delete the cluster will fail. 
When the field is set to false, deleting the cluster is allowed.`, + }, + + "addons_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `The configuration for addons supported by GKE.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_load_balancing": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster. It is enabled by default; set disabled = true to disable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "horizontal_pod_autoscaling": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Horizontal Pod Autoscaling addon, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. It ensures that a Heapster pod is running in the cluster, which is also used by the Cloud Monitoring service. It is enabled by default; set disabled = true to disable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "network_policy_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `Whether we should enable the network policy addon for the master. This must be enabled in order to enable network policy for the nodes. To enable this, you must also define a network_policy block, otherwise nothing will happen. It can only be disabled if the nodes already do not have network policies enabled. 
Defaults to disabled; set disabled = false to enable.`, + ConflictsWith: []string{"enable_autopilot"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "gcp_filestore_csi_driver_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Filestore CSI driver addon, which allows the usage of filestore instance as volumes. Defaults to disabled; set enabled = true to enable.`, + ConflictsWith: []string{"enable_autopilot"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "cloudrun_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the CloudRun addon. It is disabled by default. Set disabled = false to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + "load_balancer_type": { + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"LOAD_BALANCER_TYPE_INTERNAL"}, false), + Optional: true, + }, + }, + }, + }, + "dns_cache_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the NodeLocal DNSCache addon. It is disabled by default. 
Set enabled = true to enable.`, + ConflictsWith: []string{"enable_autopilot"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "gce_persistent_disk_csi_driver_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Set enabled = true to enable. The Compute Engine persistent disk CSI Driver is enabled by default on newly created clusters for the following versions: Linux clusters: GKE version 1.18.10-gke.2100 or later, or 1.19.3-gke.2100 or later.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "gke_backup_agent_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Backup for GKE Agent addon. It is disabled by default. Set enabled = true to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "gcs_fuse_csi_driver_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the GCS Fuse CSI driver addon, which allows the usage of gcs bucket as volumes. 
Defaults to disabled; set enabled = true to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + {{- if ne $.TargetVersionName "ga" }} + "istio_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Istio addon.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + Description: `The status of the Istio addon, which makes it easy to set up Istio for services in a cluster. It is disabled by default. Set disabled = false to enable.`, + }, + "auth": { + Type: schema.TypeString, + Optional: true, + // We can't use a Terraform-level default because it won't be true when the block is disabled: true + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("AUTH_NONE"), + ValidateFunc: validation.StringInSlice([]string{"AUTH_NONE", "AUTH_MUTUAL_TLS"}, false), + Description: `The authentication type between services in Istio. Available options include AUTH_MUTUAL_TLS.`, + }, + }, + }, + }, + "kalm_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `Configuration for the KALM addon, which manages the lifecycle of k8s. 
It is disabled by default; Set enabled = true to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + {{- end }} + "config_connector_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The of the Config Connector addon.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "stateful_ha_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Stateful HA addon, which provides automatic configurable failover for stateful applications. Defaults to disabled; set enabled = true to enable.`, + ConflictsWith: []string{"enable_autopilot"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + "cluster_autoscaling": { + Type: schema.TypeList, + MaxItems: 1, + // This field is Optional + Computed because we automatically set the + // enabled value to false if the block is not returned in API responses. + Optional: true, + Computed: true, + Description: `Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to automatically adjust the size of the cluster and create/delete node pools based on the current needs of the cluster's workload. See the guide to using Node Auto-Provisioning for more details.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ConflictsWith: []string{"enable_autopilot"}, + Description: `Whether node auto-provisioning is enabled. 
Resource limits for cpu and memory must be defined to enable node auto-provisioning.`, + }, + "resource_limits": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"enable_autopilot"}, + DiffSuppressFunc: suppressDiffForAutopilot, + Description: `Global constraints for machine resources in the cluster. Configuring the cpu and memory types is required if node auto-provisioning is enabled. These limits will apply to node pool autoscaling in addition to node auto-provisioning.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_type": { + Type: schema.TypeString, + Required: true, + Description: `The type of the resource. For example, cpu and memory. See the guide to using Node Auto-Provisioning for a list of types.`, + }, + "minimum": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum amount of the resource in the cluster.`, + }, + "maximum": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum amount of the resource in the cluster.`, + }, + }, + }, + }, + "auto_provisioning_defaults": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Contains defaults for a node pool created by NAP.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oauth_scopes": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + DiffSuppressFunc: containerClusterAddedScopesSuppress, + Description: `Scopes that are used by NAP when creating node pools.`, + }, + "service_account": { + Type: schema.TypeString, + Optional: true, + Default: "default", + Description: `The Google Cloud Platform Service Account to be used by the node VMs.`, + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + Default: 100, + Description: `Size of the disk attached to each node, specified in GB. 
The smallest allowed disk size is 10GB.`, + DiffSuppressFunc: suppressDiffForAutopilot, + ValidateFunc: validation.IntAtLeast(10), + }, + "disk_type": { + Type: schema.TypeString, + Optional: true, + Default: "pd-standard", + Description: `Type of the disk attached to each node.`, + DiffSuppressFunc: suppressDiffForAutopilot, + ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", "pd-balanced"}, false), + }, + "image_type": { + Type: schema.TypeString, + Optional: true, + Default: "COS_CONTAINERD", + Description: `The default image type used by NAP once a new node pool is being created.`, + DiffSuppressFunc: suppressDiffForAutopilot, + ValidateFunc: validation.StringInSlice([]string{"COS_CONTAINERD", "COS", "UBUNTU_CONTAINERD", "UBUNTU"}, false), + }, + "min_cpu_platform": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("automatic"), + Description: `Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. 
Applicable values are the friendly names of CPU platforms, such as Intel Haswell.`, + }, + "boot_disk_kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`, + }, + "shielded_instance_config": { + Type: schema.TypeList, + Optional: true, + Description: `Shielded Instance options.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Defines whether the instance has Secure Boot enabled.`, + AtLeastOneOf: []string{ + "cluster_autoscaling.0.auto_provisioning_defaults.0.shielded_instance_config.0.enable_secure_boot", + "cluster_autoscaling.0.auto_provisioning_defaults.0.shielded_instance_config.0.enable_integrity_monitoring", + }, + }, + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Defines whether the instance has integrity monitoring enabled.`, + AtLeastOneOf: []string{ + "cluster_autoscaling.0.auto_provisioning_defaults.0.shielded_instance_config.0.enable_secure_boot", + "cluster_autoscaling.0.auto_provisioning_defaults.0.shielded_instance_config.0.enable_integrity_monitoring", + }, + }, + }, + }, + }, + "management": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `NodeManagement configuration for this NodePool.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_upgrade": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Specifies whether node auto-upgrade is enabled for the node pool. 
If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes.`, + }, + "auto_repair": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Specifies whether the node auto-repair is enabled for the node pool. If enabled, the nodes in this node pool will be monitored and, if they fail health checks too many times, an automatic repair action will be triggered.`, + }, + "upgrade_options": { + Type: schema.TypeList, + Computed: true, + Description: `Specifies the Auto Upgrade knobs for the node pool.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_upgrade_start_time": { + Type: schema.TypeString, + Computed: true, + Description: `This field is set when upgrades are about to commence with the approximate start time for the upgrades, in RFC3339 text format.`, + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: `This field is set when upgrades are about to commence with the description of the upgrade.`, + }, + }, + }, + }, + }, + }, + }, + "upgrade_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the upgrade settings for NAP created node pools`, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_surge": { + Type: schema.TypeInt, + Optional: true, + Description: `The maximum number of nodes that can be created beyond the current size of the node pool during the upgrade process.`, + }, + "max_unavailable": { + Type: schema.TypeInt, + Optional: true, + Description: `The maximum number of nodes that can be simultaneously unavailable during the upgrade process.`, + }, + "strategy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Update strategy of the node pool.`, + ValidateFunc: validation.StringInSlice([]string{"NODE_POOL_UPDATE_STRATEGY_UNSPECIFIED", "BLUE_GREEN", "SURGE"}, false), + }, + "blue_green_settings": { 
+ Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Settings for blue-green upgrade strategy.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_pool_soak_duration": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Time needed after draining entire blue pool. After this period, blue pool will be cleaned up. + + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`, + }, + "standard_rollout_policy": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Standard policy for the blue-green upgrade.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_percentage": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + ValidateFunc: validation.FloatBetween(0.0, 1.0), + ExactlyOneOf: []string{ + "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_percentage", + "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_node_count", + }, + Description: `Percentage of the bool pool nodes to drain in a batch. The range of this field should be (0.0, 1.0].`, + }, + "batch_node_count": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ExactlyOneOf: []string{ + "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_percentage", + "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_node_count", + }, + Description: `Number of blue nodes to drain in a batch.`, + }, + "batch_soak_duration": { + Type: schema.TypeString, + Optional: true, + Default: "0s", + Description: `Soak time after each batch gets drained. + + A duration in seconds with up to nine fractional digits, ending with 's'. 
Example: "3.5s".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "autoscaling_profile": { + Type: schema.TypeString, + Default: "BALANCED", + Optional: true, + DiffSuppressFunc: suppressDiffForAutopilot, + ValidateFunc: validation.StringInSlice([]string{"BALANCED", "OPTIMIZE_UTILIZATION"}, false), + Description: `Configuration options for the Autoscaling profile feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability when deciding to remove nodes from a cluster. Can be BALANCED or OPTIMIZE_UTILIZATION. Defaults to BALANCED.`, + }, + }, + }, + }, + + "cluster_ipv4_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.OrEmpty(verify.ValidateRFC1918Network(8, 32)), + ConflictsWith: []string{"ip_allocation_policy"}, + Description: `The IP address range of the Kubernetes pods in this cluster in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify a /14 block in 10.0.0.0/8. 
This field will only work for routes-based clusters, where ip_allocation_policy is not defined.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: ` Description of the cluster.`, + }, + + "binary_authorization": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: BinaryAuthorizationDiffSuppress, + MaxItems: 1, + Description: "Configuration options for the Binary Authorization feature.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Deprecated: "Deprecated in favor of evaluation_mode.", + Description: "Enable Binary Authorization for this cluster.", + ConflictsWith: []string{"enable_autopilot", "binary_authorization.0.evaluation_mode"}, + }, + "evaluation_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "PROJECT_SINGLETON_POLICY_ENFORCE"}, false), + Description: "Mode of operation for Binary Authorization policy evaluation.", + ConflictsWith: []string{"binary_authorization.0.enabled"}, + }, + }, + }, + }, + + "enable_kubernetes_alpha": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: `Whether to enable Kubernetes Alpha features for this cluster. 
Note that when this option is enabled, the cluster cannot be upgraded and will be automatically deleted after 30 days.`, + }, + + "enable_k8s_beta_apis": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Configuration for Kubernetes Beta APIs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled_apis": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Enabled Kubernetes Beta APIs.`, + }, + }, + }, + }, + + "enable_tpu": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to enable Cloud TPU resources in this cluster.`, +{{- if ne $.TargetVersionName "ga" }} + ConflictsWith: []string{"tpu_config"}, + Computed: true, + // TODO: deprecate when tpu_config is correctly returned by the API + // Deprecated: "Deprecated in favor of tpu_config", +{{- end }} + }, + +{{ if ne $.TargetVersionName `ga` -}} + "tpu_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `TPU configuration for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether Cloud TPU integration is enabled or not`, + }, + "ipv4_cidr_block": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 CIDR block reserved for Cloud TPU in the VPC.`, + }, + "use_service_networking": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to use service networking for Cloud TPU or not`, + }, + }, + }, + }, +{{- end }} + + "enable_legacy_abac": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether the ABAC authorizer is enabled for this cluster. When enabled, identities in the system, including service accounts, nodes, and controllers, will have statically granted permissions beyond those provided by the RBAC configuration or IAM. 
Defaults to false.`, + }, + + "enable_shielded_nodes": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Enable Shielded Nodes features on all nodes in this cluster. Defaults to true.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + + "enable_autopilot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Enable Autopilot for this cluster.`, + // ConflictsWith: many fields, see https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison. The conflict is only set one-way, on other fields w/ this field. + }, + + "allow_net_admin": { + Type: schema.TypeBool, + Optional: true, + Description: `Enable NET_ADMIN for this cluster.`, + }, + + "authenticator_groups_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration for the Google Groups for GKE feature.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "security_group": { + Type: schema.TypeString, + Required: true, + Description: `The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com.`, + }, + }, + }, + }, + + "initial_node_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The number of nodes to create in this cluster's default node pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Must be set if node_pool is not set. 
If you're using google_container_node_pool objects with no default node pool, you'll need to set this to a value of at least 1, alongside setting remove_default_node_pool to true.`, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Logging configuration for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_components": { + Type: schema.TypeList, + Required: true, + Description: `GKE components exposing logs. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER", "WORKLOADS"}, false), + }, + }, + }, + }, + }, + + "logging_service": { + Type: schema.TypeString, + Optional: true, + Computed: true, +{{- if ne $.TargetVersionName "ga" }} + ConflictsWith: []string{"cluster_telemetry"}, +{{- end }} + ValidateFunc: validation.StringInSlice([]string{"logging.googleapis.com", "logging.googleapis.com/kubernetes", "none"}, false), + Description: `The logging service that the cluster should write logs to. Available options include logging.googleapis.com(Legacy Stackdriver), logging.googleapis.com/kubernetes(Stackdriver Kubernetes Engine Logging), and none. Defaults to logging.googleapis.com/kubernetes.`, + }, + + "maintenance_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The maintenance policy to use for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "daily_maintenance_window": { + Type: schema.TypeList, + Optional: true, + ExactlyOneOf: []string{ + "maintenance_policy.0.daily_maintenance_window", + "maintenance_policy.0.recurring_window", + }, + MaxItems: 1, + Description: `Time window specified for daily maintenance operations. 
Specify start_time in RFC3339 format "HH:MM”, where HH : [00-23] and MM : [00-59] GMT.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Time, + DiffSuppressFunc: tpgresource.Rfc3339TimeDiffSuppress, + }, + "duration": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "recurring_window": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{ + "maintenance_policy.0.daily_maintenance_window", + "maintenance_policy.0.recurring_window", + }, + Description: `Time window for recurring maintenance operations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Date, + }, + "end_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Date, + }, + "recurrence": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: rfc5545RecurrenceDiffSuppress, + }, + }, + }, + }, + "maintenance_exclusion": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 20, + Description: `Exceptions to maintenance window. 
Non-emergency maintenance should not occur in these windows.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "exclusion_name": { + Type: schema.TypeString, + Required: true, + }, + "start_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Date, + }, + "end_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Date, + }, + "exclusion_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Maintenance exclusion related options.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scope": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"NO_UPGRADES", "NO_MINOR_UPGRADES", "NO_MINOR_OR_NODE_UPGRADES"}, false), + Description: `The scope of automatic upgrades to restrict in the exclusion window.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + +{{ if ne $.TargetVersionName `ga` -}} + "protect_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Enable/Disable Protect API features for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "workload_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `WorkloadConfig defines which actions are enabled for a cluster's workload configurations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audit_mode": { + Type: schema.TypeString, + Required: true, + Description: `Sets which mode of auditing should be used for the cluster's workloads. 
Accepted values are DISABLED, BASIC.`, + }, + }, + }, + AtLeastOneOf: []string{ + "protect_config.0.workload_config", + "protect_config.0.workload_vulnerability_mode", + }, + }, + "workload_vulnerability_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Sets which mode to use for Protect workload vulnerability scanning feature. Accepted values are DISABLED, BASIC.`, + AtLeastOneOf: []string{ + "protect_config.0.workload_config", + "protect_config.0.workload_vulnerability_mode", + }, + }, + }, + }, + }, +{{- end }} + + "security_posture_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Description: `Defines the config needed to enable/disable features for the Security Posture API`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "BASIC", "ENTERPRISE", "MODE_UNSPECIFIED"}, false), + Description: `Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include DISABLED, BASIC, and ENTERPRISE.`, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("MODE_UNSPECIFIED"), + }, + "vulnerability_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"VULNERABILITY_DISABLED", "VULNERABILITY_BASIC", "VULNERABILITY_ENTERPRISE", "VULNERABILITY_MODE_UNSPECIFIED"}, false), + Description: `Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. 
Available options include VULNERABILITY_DISABLED, VULNERABILITY_BASIC and VULNERABILITY_ENTERPRISE.`, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("VULNERABILITY_MODE_UNSPECIFIED"), + }, + }, + }, + }, + "monitoring_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Monitoring configuration for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_components": { + Type: schema.TypeList, + Optional: true, + Computed: true, +{{- if eq $.TargetVersionName "ga" }} + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, KUBELET and CADVISOR.`, +{{- else }} + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, WORKLOADS, KUBELET and CADVISOR.`, +{{- end }} + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "managed_prometheus": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration for Google Cloud Managed Services for Prometheus.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not the managed collection is enabled.`, + }, + }, + }, + }, + "advanced_datapath_observability_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration of Advanced Datapath Observability features.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_metrics": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not the advanced datapath metrics are enabled.`, + }, + "enable_relay": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not Relay is enabled.`, 
+ Default: false, + ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.relay_mode"}, + }, + "relay_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Deprecated in favor of enable_relay field. Remove this attribute's configuration as this field will be removed in the next major release and enable_relay will become a required field.", + Description: `Mode used to make Relay available.`, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "INTERNAL_VPC_LB", "EXTERNAL_LB"}, false), + ConflictsWith: []string{"monitoring_config.0.advanced_datapath_observability_config.0.enable_relay"}, + }, + }, + }, + }, + }, + }, + }, + + "notification_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `The notification config for sending cluster upgrade notifications`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: `Notification config for Cloud Pub/Sub`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not the notification config is enabled`, + }, + "topic": { + Type: schema.TypeString, + Optional: true, + Description: `The pubsub topic to push upgrade notifications to. Must be in the same project as the cluster. Must be in the format: projects/{project}/topics/{topic}.`, + }, + "filter": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Allows filtering to one or more specific event types. If event types are present, those and only those event types will be transmitted to the cluster. Other types will be skipped. 
If no filter is specified, or no event types are present, all event types will be sent`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "event_type": { + Type: schema.TypeList, + Required: true, + Description: `Can be used to filter what notifications are sent. Valid values include include UPGRADE_AVAILABLE_EVENT, UPGRADE_EVENT and SECURITY_BULLETIN_EVENT`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"UPGRADE_AVAILABLE_EVENT", "UPGRADE_EVENT", "SECURITY_BULLETIN_EVENT"}, false), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + "confidential_nodes": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after cluster creation without deleting and recreating the entire cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether Confidential Nodes feature is enabled for all nodes in this cluster.`, + }, + }, + }, + }, + + "master_auth": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Description: `The authentication information for accessing the Kubernetes master. Some values in this block are only returned by the API if your service account has permission to get credentials for your GKE cluster. 
If you see an unexpected diff unsetting your client cert, ensure you have the container.clusters.getCredentials permission.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_certificate_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Description: `Whether client certificate authorization is enabled for this cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issue_client_certificate": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether client certificate authorization is enabled for this cluster.`, + }, + }, + }, + }, + + "client_certificate": { + Type: schema.TypeString, + Computed: true, + Description: `Base64 encoded public certificate used by clients to authenticate to the cluster endpoint.`, + }, + + "client_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: `Base64 encoded private key used by clients to authenticate to the cluster endpoint.`, + }, + + "cluster_ca_certificate": { + Type: schema.TypeString, + Computed: true, + Description: `Base64 encoded public certificate that is the root of trust for the cluster.`, + }, + }, + }, + }, + + "master_authorized_networks_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: masterAuthorizedNetworksConfig, + Description: `The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists).`, + }, + + "min_master_version": { + Type: schema.TypeString, + Optional: true, + Description: `The minimum version of the master. GKE will auto-update the master to new versions, so this does not guarantee the current master version--use the read-only master_version field to obtain that. 
If unset, the cluster's version will be set by GKE to the version of the most recent official release (which is not necessarily the latest version).`, + }, + + "monitoring_service": { + Type: schema.TypeString, + Optional: true, + Computed: true, +{{- if ne $.TargetVersionName "ga" }} + ConflictsWith: []string{"cluster_telemetry"}, +{{- end }} + ValidateFunc: validation.StringInSlice([]string{"monitoring.googleapis.com", "monitoring.googleapis.com/kubernetes", "none"}, false), + Description: `The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com(Legacy Stackdriver), monitoring.googleapis.com/kubernetes(Stackdriver Kubernetes Engine Monitoring), and none. Defaults to monitoring.googleapis.com/kubernetes.`, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + Default: "default", + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the Google Compute Engine network to which the cluster is connected. 
For Shared VPC, set this to the self link of the shared network.`, + }, + + "network_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Configuration options for the NetworkPolicy feature.`, + ConflictsWith: []string{"enable_autopilot"}, + DiffSuppressFunc: containerClusterNetworkPolicyDiffSuppress, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether network policy is enabled on the cluster.`, + }, + "provider": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"PROVIDER_UNSPECIFIED", "CALICO"}, false), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("PROVIDER_UNSPECIFIED"), + Description: `The selected network policy provider.`, + }, + }, + }, + }, + + "node_config": clusterSchemaNodeConfig(), + + "node_pool": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, // TODO: Add ability to add/remove nodePools + Elem: &schema.Resource{ + Schema: schemaNodePool, + }, + Description: `List of node pools associated with this cluster. See google_container_node_pool for schema. Warning: node pools defined inside a cluster can't be changed (or added/removed) after cluster creation without deleting and recreating the entire cluster. 
Unless you absolutely need the ability to say "these are the only node pools associated with this cluster", use the google_container_node_pool resource instead of this property.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + + "node_pool_defaults": clusterSchemaNodePoolDefaults(), + + "node_pool_auto_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Node pool configs that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_tags": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Collection of Compute Engine network tags that can be applied to a node's underlying VM instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tags": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `List of network tags applied to auto-provisioned node pools.`, + }, + }, + }, + }, + "resource_manager_tags": { + Type: schema.TypeMap, + Optional: true, + Description: `A map of resource manager tags. Resource manager tag keys and values have the same definition as resource manager tags. Keys must be in the format tagKeys/{tag_key_id}, and values are in the format tagValues/456. The field is ignored (both PUT & PATCH) when empty.`, + }, + }, + }, + }, + + "node_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The Kubernetes version on the nodes. Must either be unset or set to the same value as min_master_version on create. Defaults to the default version set by GKE which is not necessarily the latest version. This only affects nodes in the default node pool. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. 
See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way. To update nodes in other node pools, use the version attribute on the node pool.`, + }, + +{{ if ne $.TargetVersionName `ga` -}} + "pod_security_policy_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the PodSecurityPolicy feature.`, + MaxItems: 1, + DiffSuppressFunc: podSecurityPolicyCfgSuppress, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created.`, + }, + }, + }, + }, +{{- end }} +{{- if ne $.TargetVersionName "ga" }} + "secret_manager_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the Secret Manager feature.`, + MaxItems: 1, + DiffSuppressFunc: SecretManagerCfgSuppress, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Enable the Secret manager csi component.`, + }, + }, + }, + }, +{{- end }} + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the Google Compute Engine subnetwork in which the cluster's instances are launched.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL for the resource.`, + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address of this cluster's Kubernetes master.`, + }, + + "master_version": { + Type: schema.TypeString, + Computed: true, + Description: `The current version of the master in the cluster. This may be different than the min_master_version set in the config if the master has been updated by GKE.`, + }, + + "services_ipv4_cidr": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address range of the Kubernetes services in this cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are typically put in the last /16 from the container CIDR.`, + }, + + "ip_allocation_policy": { + Type: schema.TypeList, + MaxItems: 1, + ForceNew: true, + Computed: true, + Optional: true, + ConflictsWith: []string{"cluster_ipv4_cidr"}, + Description: `Configuration of cluster IP allocation for VPC-native clusters. Adding this block enables IP aliasing, making the cluster VPC-native instead of routes-based.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // GKE creates/deletes secondary ranges in VPC + "cluster_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: ipAllocationRangeFields, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, + Description: `The IP address range for the cluster pod IPs. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. 
/14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, + }, + + "services_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: ipAllocationRangeFields, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, + Description: `The IP address range of the services IPs in this cluster. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, + }, + + // User manages secondary ranges manually + "cluster_secondary_range_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: ipAllocationCidrBlockFields, + Description: `The name of the existing secondary range in the cluster's subnetwork to use for pod IP addresses. Alternatively, cluster_ipv4_cidr_block can be used to automatically create a GKE-managed one.`, + }, + + "services_secondary_range_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: ipAllocationCidrBlockFields, + Description: `The name of the existing secondary range in the cluster's subnetwork to use for service ClusterIPs. Alternatively, services_ipv4_cidr_block can be used to automatically create a GKE-managed one.`, + }, + + "stack_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "IPV4", + ValidateFunc: validation.StringInSlice([]string{"IPV4", "IPV4_IPV6"}, false), + Description: `The IP Stack type of the cluster. Choose between IPV4 and IPV4_IPV6. 
Defaults to IPV4 if not set
If you're using google_container_node_pool resources with no default node pool, this should be set to true, alongside setting initial_node_count to at least 1.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + + "private_cluster_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, + Description: `Configuration for private clusters, clusters with private nodes.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // enable_private_endpoint is orthogonal to private_endpoint_subnetwork. + // User can create a private_cluster_config block without including + // either one of those two fields. Both fields are optional. + // At the same time, we use 'AtLeastOneOf' to prevent an empty block + // like 'private_cluster_config{}' + "enable_private_endpoint": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: privateClusterConfigKeys, + DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, + Description: `When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used.`, + }, + "enable_private_nodes": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: privateClusterConfigKeys, + DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, + Description: `Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.`, + }, + "master_ipv4_cidr_block": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + AtLeastOneOf: privateClusterConfigKeys, + ValidateFunc: verify.OrEmpty(validation.IsCIDRNetwork(28, 28)), + Description: `The IP range in CIDR notation to use for the hosted master network. 
This range will be used for assigning private IP addresses to the cluster master(s) and the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network, and it must be a /28 subnet. See Private Cluster Limitations for more details. This field only applies to private clusters, when enable_private_nodes is true.`, + }, + "peering_name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the peering between this cluster and the Google owned VPC.`, + }, + "private_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `The internal IP address of this cluster's master endpoint.`, + }, + "private_endpoint_subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: privateClusterConfigKeys, + DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, + Description: `Subnetwork in cluster's network where master's endpoint will be provisioned.`, + }, + "public_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `The external IP address of this cluster's master endpoint.`, + }, + "master_global_access_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + AtLeastOneOf: privateClusterConfigKeys, + Description: "Controls cluster master global access settings.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the cluster master is accessible globally or not.`, + }, + }, + }, + }, + }, + }, + }, + + "resource_labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The GCE resource labels (a map of key/value pairs) to be applied to the cluster.`, + }, + + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint of the set of labels for this cluster.`, + }, + + "default_max_pods_per_node": { + Type: schema.TypeInt, 
+ Optional: true, + ForceNew: true, + Computed: true, + Description: `The default maximum number of pods per node in this cluster. This doesn't work on "routes-based" clusters, clusters that don't have IP Aliasing enabled.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + + "vertical_pod_autoscaling": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Enables vertical pod autoscaling.`, + }, + }, + }, + }, + "workload_identity_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + // Computed is unsafe to remove- this API may return `"workloadIdentityConfig": {},` or omit the key entirely + // and both will be valid. Note that we don't handle the case where the API returns nothing & the user has defined + // workload_identity_config today. 
+ Computed: true, + Description: `Configuration for the use of Kubernetes Service Accounts in GCP IAM policies.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "workload_pool": { + Type: schema.TypeString, + Optional: true, + Description: "The workload pool to attach all Kubernetes service accounts to.", + }, + }, + }, + }, + + "identity_service_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Configuration for Identity Service which allows customers to use external identity providers with the K8S API.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable the Identity Service component.", + }, + }, + }, + }, + + "service_external_ips_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `If set, and enabled=true, services with external ips field will not be blocked`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `When enabled, services with external ips specified will be allowed.`, + }, + }, + }, + }, + + "mesh_certificates": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `If set, and enable_certificates=true, the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_certificates": { + Type: schema.TypeBool, + Required: true, + Description: `When enabled the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.`, + }, + }, + }, + }, + + "database_encryption": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. 
Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"ENCRYPTED", "DECRYPTED"}, false), + Description: `ENCRYPTED or DECRYPTED.`, + }, + "key_name": { + Type: schema.TypeString, + Optional: true, + Description: `The key to use to encrypt/decrypt secrets.`, + }, + }, + }, + }, + + "release_channel": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration options for the Release channel feature, which provide more control over automatic upgrades of your GKE clusters. Note that removing this field from your config will not unenroll it. Instead, use the "UNSPECIFIED" channel.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "channel": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE"}, false), + Description: `The selected release channel. Accepted values are: +* UNSPECIFIED: Not set. +* RAPID: Weekly upgrade cadence; Early testers and developers who requires new features. +* REGULAR: Multiple per month upgrade cadence; Production users who need features not yet offered in the Stable channel. +* STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky.`, + }, + }, + }, + }, + + "tpu_ipv4_cidr_block": { + Computed: true, + Type: schema.TypeString, + Description: `The IP address range of the Cloud TPUs in this cluster, in CIDR notation (e.g. 
1.2.3.4/29).`, + }, + +{{ if ne $.TargetVersionName `ga` -}} + "cluster_telemetry": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `Telemetry integration for the cluster.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED","ENABLED","SYSTEM_ONLY"}, false), + Description: `Type of the integration.`, + }, + }, + }, + }, +{{- end }} + + "default_snat_status": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Whether the cluster disables default in-node sNAT rules. In-node sNAT rules will be disabled when defaultSnatStatus is disabled.`, + Elem: &schema.Resource { + Schema: map[string]*schema.Schema { + "disabled": { + Type: schema.TypeBool, + Required: true, + Description: `When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic.`, + }, + }, + }, + }, + + "datapath_provider": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.`, + ValidateFunc: validation.StringInSlice([]string{"DATAPATH_PROVIDER_UNSPECIFIED", "LEGACY_DATAPATH", "ADVANCED_DATAPATH"}, false), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("DATAPATH_PROVIDER_UNSPECIFIED"), + }, + "enable_cilium_clusterwide_network_policy": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether Cilium cluster-wide network policy is enabled on this cluster.`, + Default: false, + }, + "enable_intranode_visibility": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Whether Intra-node visibility is enabled for this cluster. 
This makes same node pod to pod traffic visible for VPC network.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + "enable_l4_ilb_subsetting": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether L4ILB Subsetting is enabled for this cluster.`, + Default: false, + }, +{{- if ne $.TargetVersionName "ga" }} + "enable_multi_networking": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether multi-networking is enabled for this cluster.`, + Default: false, + }, + "enable_fqdn_network_policy": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether FQDN Network Policy is enabled on this cluster.`, + Default: false, + }, +{{- end }} + "private_ipv6_google_access": { + Type: schema.TypeString, + Optional: true, + Description: `The desired state of IPv6 connectivity to Google Services. By default, no private IPv6 access to or from Google Services (all access will be via IPv4).`, + Computed: true, + }, + + "cost_management_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Cost management configuration for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether to enable GKE cost allocation. When you enable GKE cost allocation, the cluster name and namespace of your GKE workloads appear in the labels field of the billing export to BigQuery. Defaults to false.`, + }, + }, + }, + }, + + "resource_usage_export_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: `Configuration for the ResourceUsageExportConfig feature.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_network_egress_metering": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether to enable network egress metering for this cluster. 
If enabled, a daemonset will be created in the cluster to meter network egress traffic.`, + }, + "enable_resource_consumption_metering": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Whether to enable resource consumption metering on this cluster. When enabled, a table will be created in the resource export BigQuery dataset to store resource consumption data. The resulting table can be joined with the resource usage table or with BigQuery billing export. Defaults to true.`, + }, + "bigquery_destination": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Description: `Parameters for using BigQuery as the destination of resource usage export.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of a BigQuery Dataset.`, + }, + }, + }, + }, + }, + }, + }, + "dns_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: suppressDiffForAutopilot, + Description: `Configuration for Cloud DNS for Kubernetes Engine.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + {{- if ne $.TargetVersionName "ga" }} + "additive_vpc_scope_dns_domain": { + Type: schema.TypeString, + Description: `Enable additive VPC scope DNS in a GKE cluster.`, + Optional: true, + }, + {{- end }} + "cluster_dns": { + Type: schema.TypeString, + Default: "PROVIDER_UNSPECIFIED", + ValidateFunc: validation.StringInSlice([]string{"PROVIDER_UNSPECIFIED", "PLATFORM_DEFAULT", "CLOUD_DNS"}, false), + Description: `Which in-cluster DNS provider should be used.`, + Optional: true, + }, + "cluster_dns_scope": { + Type: schema.TypeString, + Default: "DNS_SCOPE_UNSPECIFIED", + ValidateFunc: validation.StringInSlice([]string{"DNS_SCOPE_UNSPECIFIED", "CLUSTER_SCOPE", "VPC_SCOPE"}, false), + Description: `The scope of access to cluster DNS records.`, + Optional: true, + }, + "cluster_dns_domain": { + Type: schema.TypeString, + 
Description: `The suffix used for all cluster service records.`, + Optional: true, + }, + }, + }, + }, + "gateway_api_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration for GKE Gateway API controller.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "channel": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"CHANNEL_DISABLED", "CHANNEL_EXPERIMENTAL", "CHANNEL_STANDARD"}, false), + Description: `The Gateway API release channel to use for Gateway API.`, + }, + }, + }, + }, + "fleet": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Fleet configuration of the cluster.`, + DiffSuppressFunc: suppressDiffForPreRegisteredFleet, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: `The Fleet host project of the cluster.`, + }, + "membership": { + Type: schema.TypeString, + Computed: true, + Description: `Full resource name of the registered fleet membership of the cluster.`, + }, + "pre_registered": { + Type: schema.TypeBool, + Computed: true, + Description: `Whether the cluster has been registered via the fleet API.`, + }, + "membership_id": { + Type: schema.TypeString, + Computed: true, + Description: `Short name of the fleet membership, for example "member-1".`, + }, + "membership_location": { + Type: schema.TypeString, + Computed: true, + Description: `Location of the fleet membership, for example "us-central1".`, + }, + }, + }, + }, + {{- if ne $.TargetVersionName "ga" }} + "workload_alts_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration for direct-path (via ALTS) with workload identity.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_alts": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the alts handshaker should 
be enabled or not for direct-path. Requires Workload Identity (workloadPool must be non-empty).`, + }, + }, + }, + }, + {{- end }} + }, + } +} + +// Setting a guest accelerator block to count=0 is the equivalent to omitting the block: it won't get +// sent to the API and it won't be stored in state. This diffFunc will try to compare the old + new state +// by only comparing the blocks with a positive count and ignoring those with count=0 +// +// One quirk with this approach is that configs with mixed count=0 and count>0 accelerator blocks will +// show a confusing diff if one of there are config changes that result in a legitimate diff as the count=0 +// blocks will not be in state. +// +// This could also be modelled by setting `guest_accelerator = []` in the config. However since the +// previous syntax requires that schema.SchemaConfigModeAttr is set on the field it is advisable that +// we have a work around for removing guest accelerators. Also Terraform 0.11 cannot use dynamic blocks +// so this isn't a solution for module authors who want to dynamically omit guest accelerators +// See https://github.com/hashicorp/terraform-provider-google/issues/3786 +func resourceNodeConfigEmptyGuestAccelerator(_ context.Context, diff *schema.ResourceDiff, meta interface{}) error { + old, new := diff.GetChange("node_config.0.guest_accelerator") + oList := old.([]interface{}) + nList := new.([]interface{}) + + if len(nList) == len(oList) || len(nList) == 0 { + return nil + } + var hasAcceleratorWithEmptyCount bool + // the list of blocks in a desired state with count=0 accelerator blocks in it + // will be longer than the current state. 
+ // this index tracks the location of positive count accelerator blocks + index := 0 + for i, item := range nList { + accel := item.(map[string]interface{}) + if accel["count"].(int) == 0 { + hasAcceleratorWithEmptyCount = true + // Ignore any 'empty' accelerators because they aren't sent to the API + continue + } + if index >= len(oList) { + // Return early if there are more positive count accelerator blocks in the desired state + // than the current state since a difference in 'legit' blocks is a valid diff. + // This will prevent array index overruns + return nil + } + if !reflect.DeepEqual(nList[i], oList[index]) { + return nil + } + index += 1 + } + + if hasAcceleratorWithEmptyCount && index == len(oList) { + // If the number of count>0 blocks match, there are count=0 blocks present and we + // haven't already returned due to a legitimate diff + err := diff.Clear("node_config.0.guest_accelerator") + if err != nil { + return err + } + } + + return nil +} + +func resourceContainerClusterCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return err + } + + clusterName := d.Get("name").(string) + + // Default to VPC_NATIVE mode during initial creation + // This solution (a conditional default) should not be considered to set a precedent on its own. + // If you're considering a similar approach on a different resource, strongly weigh making the field required. + // GKE tends to require exceptional handling in general- and this default was a breaking change in their API + // that was compounded on by numerous product developments afterwards. We have not seen a similar case + // since, after several years. 
+ networkingMode := d.Get("networking_mode").(string) + clusterIpv4Cidr := d.Get("cluster_ipv4_cidr").(string) + if networkingMode == "" && clusterIpv4Cidr == "" { + err := d.Set("networking_mode", "VPC_NATIVE") + if err != nil { + return fmt.Errorf("Error setting networking mode during creation: %s", err) + } + } + + ipAllocationBlock, err := expandIPAllocationPolicy(d.Get("ip_allocation_policy"), d.Get("networking_mode").(string), d.Get("enable_autopilot").(bool)) + if err != nil { + return err + } + + var workloadPolicyConfig *container.WorkloadPolicyConfig + if allowed := d.Get("allow_net_admin").(bool); allowed { + workloadPolicyConfig = &container.WorkloadPolicyConfig{ + AllowNetAdmin: allowed, + } + } + + cluster := &container.Cluster{ + Name: clusterName, + InitialNodeCount: int64(d.Get("initial_node_count").(int)), + MaintenancePolicy: expandMaintenancePolicy(d, meta), + MasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(d.Get("master_authorized_networks_config"), d), + InitialClusterVersion: d.Get("min_master_version").(string), + ClusterIpv4Cidr: d.Get("cluster_ipv4_cidr").(string), + Description: d.Get("description").(string), + LegacyAbac: &container.LegacyAbac{ + Enabled: d.Get("enable_legacy_abac").(bool), + ForceSendFields: []string{"Enabled"}, + }, + LoggingService: d.Get("logging_service").(string), + MonitoringService: d.Get("monitoring_service").(string), + NetworkPolicy: expandNetworkPolicy(d.Get("network_policy")), + AddonsConfig: expandClusterAddonsConfig(d.Get("addons_config")), + EnableKubernetesAlpha: d.Get("enable_kubernetes_alpha").(bool), + IpAllocationPolicy: ipAllocationBlock, +{{- if ne $.TargetVersionName "ga" }} + PodSecurityPolicyConfig: expandPodSecurityPolicyConfig(d.Get("pod_security_policy_config")), +{{- end }} +{{- if ne $.TargetVersionName "ga" }} + SecretManagerConfig: expandSecretManagerConfig(d.Get("secret_manager_config")), +{{- end }} + Autoscaling: 
expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), + BinaryAuthorization: expandBinaryAuthorization(d.Get("binary_authorization")), + Autopilot: &container.Autopilot{ + Enabled: d.Get("enable_autopilot").(bool), + WorkloadPolicyConfig: workloadPolicyConfig, + ForceSendFields: []string{"Enabled"}, + }, + ReleaseChannel: expandReleaseChannel(d.Get("release_channel")), +{{- if ne $.TargetVersionName "ga" }} + ClusterTelemetry: expandClusterTelemetry(d.Get("cluster_telemetry")), +{{- end }} + EnableTpu: d.Get("enable_tpu").(bool), + NetworkConfig: &container.NetworkConfig{ + EnableIntraNodeVisibility: d.Get("enable_intranode_visibility").(bool), + DefaultSnatStatus: expandDefaultSnatStatus(d.Get("default_snat_status")), + DatapathProvider: d.Get("datapath_provider").(string), + EnableCiliumClusterwideNetworkPolicy: d.Get("enable_cilium_clusterwide_network_policy").(bool), + PrivateIpv6GoogleAccess: d.Get("private_ipv6_google_access").(string), + EnableL4ilbSubsetting: d.Get("enable_l4_ilb_subsetting").(bool), + DnsConfig: expandDnsConfig(d.Get("dns_config")), + GatewayApiConfig: expandGatewayApiConfig(d.Get("gateway_api_config")), +{{- if ne $.TargetVersionName "ga" }} + EnableMultiNetworking: d.Get("enable_multi_networking").(bool), + EnableFqdnNetworkPolicy: d.Get("enable_fqdn_network_policy").(bool), +{{- end }} + }, + MasterAuth: expandMasterAuth(d.Get("master_auth")), + NotificationConfig: expandNotificationConfig(d.Get("notification_config")), + ConfidentialNodes: expandConfidentialNodes(d.Get("confidential_nodes")), + ResourceLabels: tpgresource.ExpandStringMap(d, "resource_labels"), + NodePoolAutoConfig: expandNodePoolAutoConfig(d.Get("node_pool_auto_config")), +{{- if ne $.TargetVersionName "ga" }} + ProtectConfig: expandProtectConfig(d.Get("protect_config")), +{{- end }} + CostManagementConfig: expandCostManagementConfig(d.Get("cost_management_config")), + EnableK8sBetaApis: expandEnableK8sBetaApis(d.Get("enable_k8s_beta_apis"), nil), + } + + v:= 
d.Get("enable_shielded_nodes") + cluster.ShieldedNodes = &container.ShieldedNodes{ + Enabled: v.(bool), + ForceSendFields: []string{"Enabled"}, + } + + if v, ok := d.GetOk("default_max_pods_per_node"); ok { + cluster.DefaultMaxPodsConstraint = expandDefaultMaxPodsConstraint(v) + } + + // Only allow setting node_version on create if it's set to the equivalent master version, + // since `InitialClusterVersion` only accepts valid master-style versions. + if v, ok := d.GetOk("node_version"); ok { + // ignore -gke.X suffix for now. if it becomes a problem later, we can fix it. + mv := strings.Split(cluster.InitialClusterVersion, "-")[0] + nv := strings.Split(v.(string), "-")[0] + if mv != nv { + return fmt.Errorf("node_version and min_master_version must be set to equivalent values on create") + } + } + + if v, ok := d.GetOk("node_locations"); ok { + locationsSet := v.(*schema.Set) + if locationsSet.Contains(location) { + return fmt.Errorf("when using a multi-zonal cluster, node_locations should not contain the original 'zone'") + } + + // GKE requires a full list of node locations + // but when using a multi-zonal cluster our schema only asks for the + // additional zones, so append the cluster location if it's a zone + if tpgresource.IsZone(location) { + locationsSet.Add(location) + } + cluster.Locations = tpgresource.ConvertStringSet(locationsSet) + } + + if v, ok := d.GetOk("network"); ok { + network, err := tpgresource.ParseNetworkFieldValue(v.(string), d, config) + if err != nil { + return err + } + cluster.Network = network.RelativeLink() + } + + if v, ok := d.GetOk("subnetwork"); ok { + subnetwork, err := tpgresource.ParseRegionalFieldValue("subnetworks", v.(string), "project", "location", "location", d, config, true) // variant of ParseSubnetworkFieldValue + if err != nil { + return err + } + cluster.Subnetwork = subnetwork.RelativeLink() + } + + nodePoolsCount := d.Get("node_pool.#").(int) + if nodePoolsCount > 0 { + nodePools := make([]*container.NodePool, 0, 
nodePoolsCount) + for i := 0; i < nodePoolsCount; i++ { + prefix := fmt.Sprintf("node_pool.%d.", i) + nodePool, err := expandNodePool(d, prefix) + if err != nil { + return err + } + nodePools = append(nodePools, nodePool) + } + cluster.NodePools = nodePools + } else { + // Node Configs have default values that are set in the expand function, + // but can only be set if node pools are unspecified. + cluster.NodeConfig = expandNodeConfig([]interface{}{}) + } + + if v, ok := d.GetOk("node_pool_defaults"); ok { + cluster.NodePoolDefaults = expandNodePoolDefaults(v) + } + + if v, ok := d.GetOk("node_config"); ok { + cluster.NodeConfig = expandNodeConfig(v) + } + + if v, ok := d.GetOk("authenticator_groups_config"); ok { + cluster.AuthenticatorGroupsConfig = expandAuthenticatorGroupsConfig(v) + } + + if v, ok := d.GetOk("private_cluster_config"); ok { + cluster.PrivateClusterConfig = expandPrivateClusterConfig(v) + } + + if v, ok := d.GetOk("vertical_pod_autoscaling"); ok { + cluster.VerticalPodAutoscaling = expandVerticalPodAutoscaling(v) + } + + if v, ok := d.GetOk("service_external_ips_config"); ok { + cluster.NetworkConfig.ServiceExternalIpsConfig = expandServiceExternalIpsConfig(v) + } + + if v, ok := d.GetOk("mesh_certificates"); ok { + cluster.MeshCertificates = expandMeshCertificates(v) + } + + if v, ok := d.GetOk("database_encryption"); ok { + cluster.DatabaseEncryption = expandDatabaseEncryption(v) + } + + if v, ok := d.GetOk("workload_identity_config"); ok { + cluster.WorkloadIdentityConfig = expandWorkloadIdentityConfig(v) + } + + if v, ok := d.GetOk("identity_service_config"); ok { + cluster.IdentityServiceConfig = expandIdentityServiceConfig(v) + } + +{{ if ne $.TargetVersionName `ga` -}} + if v, ok := d.GetOk("tpu_config"); ok { + cluster.TpuConfig = expandContainerClusterTpuConfig(v) + } +{{- end }} + + if v, ok := d.GetOk("resource_usage_export_config"); ok { + cluster.ResourceUsageExportConfig = expandResourceUsageExportConfig(v) + } + + if v, ok := 
d.GetOk("logging_config"); ok { + cluster.LoggingConfig = expandContainerClusterLoggingConfig(v) + } + + if v, ok := d.GetOk("monitoring_config"); ok { + cluster.MonitoringConfig = expandMonitoringConfig(v) + } + + if v, ok := d.GetOk("fleet"); ok { + cluster.Fleet = expandFleet(v) + } + + if err := validateNodePoolAutoConfig(cluster); err != nil { + return err + } + + if err := validatePrivateClusterConfig(cluster); err != nil { + return err + } + + if v, ok := d.GetOk("security_posture_config"); ok { + cluster.SecurityPostureConfig = expandSecurityPostureConfig(v) + } + + needUpdateAfterCreate := false + + // For now PSC based cluster don't support `enable_private_endpoint` on `create`, but only on `update` API call. + // If cluster is PSC based and enable_private_endpoint is set to true we will ignore it on `create` call and update cluster right after creation. + enablePrivateEndpointPSCCluster := isEnablePrivateEndpointPSCCluster(cluster) + if enablePrivateEndpointPSCCluster { + cluster.PrivateClusterConfig.EnablePrivateEndpoint = false + needUpdateAfterCreate = true + } + + enablePDCSI := isEnablePDCSI(cluster); + if !enablePDCSI { + // GcePersistentDiskCsiDriver cannot be disabled at cluster create, only on cluster update. Ignore on create then update after creation. + // If pdcsi is disabled, the config should be defined. But we will be paranoid and double-check. 
+ needUpdateAfterCreate = true + if cluster.AddonsConfig == nil { + cluster.AddonsConfig = &container.AddonsConfig{} + } + if cluster.AddonsConfig.GcePersistentDiskCsiDriverConfig == nil { + cluster.AddonsConfig.GcePersistentDiskCsiDriverConfig = &container.GcePersistentDiskCsiDriverConfig{} + } + cluster.AddonsConfig.GcePersistentDiskCsiDriverConfig.Enabled = true + } + + {{ if ne $.TargetVersionName `ga` -}} + if v, ok := d.GetOk("workload_alts_config"); ok { + cluster.WorkloadAltsConfig = expandWorkloadAltsConfig(v) + } + {{- end }} + + req := &container.CreateClusterRequest{ + Cluster: cluster, + } + + transport_tpg.MutexStore.Lock(containerClusterMutexKey(project, location, clusterName)) + defer transport_tpg.MutexStore.Unlock(containerClusterMutexKey(project, location, clusterName)) + + parent := fmt.Sprintf("projects/%s/locations/%s", project, location) + var op *container.Operation + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + clusterCreateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Create(parent, req) + if config.UserProjectOverride { + clusterCreateCall.Header().Add("X-Goog-User-Project", project) + } + op, err = clusterCreateCall.Do() + return err + }, + }) + if err != nil { + return err + } + + d.SetId(containerClusterFullName(project, location, clusterName)) + + // Wait until it's created + waitErr := ContainerOperationWait(config, op, project, location, "creating GKE cluster", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + // Check if the create operation failed because Terraform was prematurely terminated. If it was we can persist the + // operation id to state so that a subsequent refresh of this resource will wait until the operation has terminated + // before attempting to Read the state of the cluster. This allows a graceful resumption of a Create that was killed + // by the upstream Terraform process exiting early such as a sigterm. 
+ select { + case <-config.Context.Done(): + log.Printf("[DEBUG] Persisting %s so this operation can be resumed \n", op.Name) + if err := d.Set("operation", op.Name); err != nil { + return fmt.Errorf("Error setting operation: %s", err) + } + + return nil + default: + // leaving default case to ensure this is non blocking + } + + // Try a GET on the cluster so we can see the state in debug logs. This will help classify error states. + clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(containerClusterFullName(project, location, clusterName)) + if config.UserProjectOverride { + clusterGetCall.Header().Add("X-Goog-User-Project", project) + } + + _, getErr := clusterGetCall.Do() + if getErr != nil { + log.Printf("[WARN] Cluster %s was created in an error state and not found", clusterName) + d.SetId("") + } + + // Don't clear cluster id, this will taint the resource + log.Printf("[WARN] GKE cluster %s was created in an error state, and has been marked as tainted", clusterName) + return waitErr + } + + log.Printf("[INFO] GKE cluster %s has been created", clusterName) + + if d.Get("remove_default_node_pool").(bool) { + parent := fmt.Sprintf("%s/nodePools/%s", containerClusterFullName(project, location, clusterName), "default-pool") + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + clusterNodePoolDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Delete(parent) + if config.UserProjectOverride { + clusterNodePoolDeleteCall.Header().Add("X-Goog-User-Project", project) + } + op, err = clusterNodePoolDeleteCall.Do() + return err + }, + }) + if err != nil { + return errwrap.Wrapf("Error deleting default node pool: {{"{{"}}err{{"}}"}}", err) + } + err = ContainerOperationWait(config, op, project, location, "removing default node pool", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf("Error while waiting to delete default node pool: 
{{"{{"}}err{{"}}"}}", err) + } + } + + if needUpdateAfterCreate { + name := containerClusterFullName(project, location, clusterName) + update := &container.ClusterUpdate{} + if enablePrivateEndpointPSCCluster { + update.DesiredEnablePrivateEndpoint = true + update.ForceSendFields = append(update.ForceSendFields, "DesiredEnablePrivateEndpoint"); + } + if !enablePDCSI { + update.DesiredAddonsConfig = &container.AddonsConfig{ + GcePersistentDiskCsiDriverConfig: &container.GcePersistentDiskCsiDriverConfig{ + Enabled: false, + }, + } + update.ForceSendFields = append(update.ForceSendFields, "DesiredAddonsConfig.GcePersistentDiskCsiDriverConfig.Enabled"); + } + req := &container.UpdateClusterRequest{Update: update} + + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err = clusterUpdateCall.Do() + return err + }, + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error updating cluster for %v: {{"{{"}}err{{"}}"}}", update.ForceSendFields), err) + } + + err = ContainerOperationWait(config, op, project, location, "updating enable private endpoint", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error while waiting on cluster update for %v: {{"{{"}}err{{"}}"}}", update.ForceSendFields), err) + } + } + + if names, ok := d.GetOk("ip_allocation_policy.0.additional_pod_ranges_config.0.pod_range_names"); ok { + name := containerClusterFullName(project, location, clusterName) + additionalPodRangesConfig := &container.AdditionalPodRangesConfig{ + PodRangeNames: tpgresource.ConvertStringSet(names.(*schema.Set)), + } + + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + AdditionalPodRangesConfig: additionalPodRangesConfig, + }, + } + + err = 
transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err = clusterUpdateCall.Do() + return err + }, + }) + if err != nil { + return errwrap.Wrapf("Error updating AdditionalPodRangesConfig: {{"{{"}}err{{"}}"}}", err) + } + + err = ContainerOperationWait(config, op, project, location, "updating AdditionalPodRangesConfig", userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return errwrap.Wrapf("Error while waiting to update AdditionalPodRangesConfig: {{"{{"}}err{{"}}"}}", err) + } + } + + if err := resourceContainerClusterRead(d, meta); err != nil { + return err + } + + state, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + if containerClusterRestingStates[state] == ErrorState { + return fmt.Errorf("Cluster %s was created in the error state %q", clusterName, state) + } + + return nil +} + +func resourceContainerClusterRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return err + } + + clusterName := d.Get("name").(string) + + operation := d.Get("operation").(string) + if operation != "" { + log.Printf("[DEBUG] in progress operation detected at %v, attempting to resume", operation) + op := &container.Operation{ + Name: operation, + } + if err := d.Set("operation", ""); err != nil { + return fmt.Errorf("Error setting operation: %s", err) + } + waitErr := ContainerOperationWait(config, op, 
project, location, "resuming GKE cluster", userAgent, d.Timeout(schema.TimeoutRead)) + if waitErr != nil { + // Try a GET on the cluster so we can see the state in debug logs. This will help classify error states. + clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(containerClusterFullName(project, location, clusterName)) + if config.UserProjectOverride { + clusterGetCall.Header().Add("X-Goog-User-Project", project) + } + _, getErr := clusterGetCall.Do() + if getErr != nil { + log.Printf("[WARN] Cluster %s was created in an error state and not found", clusterName) + d.SetId("") + } + + // Don't clear cluster id, this will taint the resource + log.Printf("[WARN] GKE cluster %s was created in an error state, and has been marked as tainted", clusterName) + return waitErr + } + } + + name := containerClusterFullName(project, location, clusterName) + clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(name) + if config.UserProjectOverride { + clusterGetCall.Header().Add("X-Goog-User-Project", project) + } + + cluster, err := clusterGetCall.Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Container Cluster %q", d.Get("name").(string))) + } + + if err := d.Set("name", cluster.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("network_policy", flattenNetworkPolicy(cluster.NetworkPolicy)); err != nil { + return err + } + + if err := d.Set("location", cluster.Location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + + locations := schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(cluster.Locations)) + locations.Remove(cluster.Zone) // Remove the original zone since we only store additional zones + if err := d.Set("node_locations", locations); err != nil { + return fmt.Errorf("Error setting node_locations: %s", err) + } + + if err := d.Set("endpoint", cluster.Endpoint); err != 
nil { + return fmt.Errorf("Error setting endpoint: %s", err) + } + if err := d.Set("self_link", cluster.SelfLink); err != nil { + return fmt.Errorf("Error setting self link: %s", err) + } + if err := d.Set("maintenance_policy", flattenMaintenancePolicy(cluster.MaintenancePolicy)); err != nil { + return err + } + if err := d.Set("master_auth", flattenMasterAuth(cluster.MasterAuth)); err != nil { + return err + } + if err := d.Set("master_authorized_networks_config", flattenMasterAuthorizedNetworksConfig(cluster.MasterAuthorizedNetworksConfig)); err != nil { + return err + } + if err := d.Set("initial_node_count", cluster.InitialNodeCount); err != nil { + return fmt.Errorf("Error setting initial_node_count: %s", err) + } + if err := d.Set("master_version", cluster.CurrentMasterVersion); err != nil { + return fmt.Errorf("Error setting master_version: %s", err) + } + if err := d.Set("node_version", cluster.CurrentNodeVersion); err != nil { + return fmt.Errorf("Error setting node_version: %s", err) + } + if err := d.Set("cluster_ipv4_cidr", cluster.ClusterIpv4Cidr); err != nil { + return fmt.Errorf("Error setting cluster_ipv4_cidr: %s", err) + } + if err := d.Set("services_ipv4_cidr", cluster.ServicesIpv4Cidr); err != nil { + return fmt.Errorf("Error setting services_ipv4_cidr: %s", err) + } + if err := d.Set("description", cluster.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("enable_kubernetes_alpha", cluster.EnableKubernetesAlpha); err != nil { + return fmt.Errorf("Error setting enable_kubernetes_alpha: %s", err) + } + if err := d.Set("enable_legacy_abac", cluster.LegacyAbac.Enabled); err != nil { + return fmt.Errorf("Error setting enable_legacy_abac: %s", err) + } + if err := d.Set("logging_service", cluster.LoggingService); err != nil { + return fmt.Errorf("Error setting logging_service: %s", err) + } + if err := d.Set("monitoring_service", cluster.MonitoringService); err != nil { + return 
fmt.Errorf("Error setting monitoring_service: %s", err) + } + if err := d.Set("network", cluster.NetworkConfig.Network); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("subnetwork", cluster.NetworkConfig.Subnetwork); err != nil { + return fmt.Errorf("Error setting subnetwork: %s", err) + } + if err := d.Set("cluster_autoscaling", flattenClusterAutoscaling(cluster.Autoscaling)); err != nil { + return err + } + if err := d.Set("binary_authorization", flattenBinaryAuthorization(cluster.BinaryAuthorization)); err != nil { + return err + } + if autopilot := cluster.Autopilot; autopilot != nil { + if err := d.Set("enable_autopilot", autopilot.Enabled); err != nil { + return fmt.Errorf("Error setting enable_autopilot: %s", err) + } + if autopilot.WorkloadPolicyConfig != nil { + if err := d.Set("allow_net_admin", autopilot.WorkloadPolicyConfig.AllowNetAdmin); err != nil { + return fmt.Errorf("Error setting allow_net_admin: %s", err) + } + } + } + if cluster.ShieldedNodes != nil { + if err := d.Set("enable_shielded_nodes", cluster.ShieldedNodes.Enabled); err != nil { + return fmt.Errorf("Error setting enable_shielded_nodes: %s", err) + } + } + if err := d.Set("release_channel", flattenReleaseChannel(cluster.ReleaseChannel)); err != nil { + return err + } + if err := d.Set("notification_config", flattenNotificationConfig(cluster.NotificationConfig)); err != nil { + return err + } + if err := d.Set("enable_l4_ilb_subsetting", cluster.NetworkConfig.EnableL4ilbSubsetting); err != nil { + return fmt.Errorf("Error setting enable_l4_ilb_subsetting: %s", err) + } + if err := d.Set("cost_management_config", flattenManagementConfig(cluster.CostManagementConfig)); err != nil { + return fmt.Errorf("Error setting cost_management_config: %s", err) + } + if err := d.Set("confidential_nodes", flattenConfidentialNodes(cluster.ConfidentialNodes)); err != nil { + return err + } + if err := d.Set("enable_tpu", cluster.EnableTpu); err != nil { + return 
fmt.Errorf("Error setting enable_tpu: %s", err) + } + if err := d.Set("tpu_ipv4_cidr_block", cluster.TpuIpv4CidrBlock); err != nil { + return fmt.Errorf("Error setting tpu_ipv4_cidr_block: %s", err) + } + if err := d.Set("datapath_provider", cluster.NetworkConfig.DatapathProvider); err != nil { + return fmt.Errorf("Error setting datapath_provider: %s", err) + } + if err := d.Set("enable_cilium_clusterwide_network_policy", cluster.NetworkConfig.EnableCiliumClusterwideNetworkPolicy); err != nil { + return fmt.Errorf("Error setting enable_cilium_clusterwide_network_policy: %s", err) + } + if err := d.Set("default_snat_status", flattenDefaultSnatStatus(cluster.NetworkConfig.DefaultSnatStatus)); err != nil { + return err + } + if err := d.Set("enable_intranode_visibility", cluster.NetworkConfig.EnableIntraNodeVisibility); err != nil { + return fmt.Errorf("Error setting enable_intranode_visibility: %s", err) + } +{{- if ne $.TargetVersionName "ga" }} + if err := d.Set("enable_multi_networking", cluster.NetworkConfig.EnableMultiNetworking); err != nil { + return fmt.Errorf("Error setting enable_multi_networking: %s", err) + } + if err := d.Set("enable_fqdn_network_policy", cluster.NetworkConfig.EnableFqdnNetworkPolicy); err != nil { + return fmt.Errorf("Error setting enable_fqdn_network_policy: %s", err) + } +{{- end }} + if err := d.Set("private_ipv6_google_access", cluster.NetworkConfig.PrivateIpv6GoogleAccess); err != nil { + return fmt.Errorf("Error setting private_ipv6_google_access: %s", err) + } + if err := d.Set("authenticator_groups_config", flattenAuthenticatorGroupsConfig(cluster.AuthenticatorGroupsConfig)); err != nil { + return err + } + if cluster.DefaultMaxPodsConstraint != nil { + if err := d.Set("default_max_pods_per_node", cluster.DefaultMaxPodsConstraint.MaxPodsPerNode); err != nil { + return fmt.Errorf("Error setting default_max_pods_per_node: %s", err) + } + } + if err := d.Set("node_config", flattenNodeConfig(cluster.NodeConfig, 
d.Get("node_config"))); err != nil { + return err + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("addons_config", flattenClusterAddonsConfig(cluster.AddonsConfig)); err != nil { + return err + } + nps, err := flattenClusterNodePools(d, config, cluster.NodePools) + if err != nil { + return err + } + if err := d.Set("node_pool", nps); err != nil { + return err + } + + ipAllocPolicy, err := flattenIPAllocationPolicy(cluster, d, config) + if err != nil { + return err + } + if err := d.Set("ip_allocation_policy", ipAllocPolicy); err != nil { + return err + } + + if err := d.Set("private_cluster_config", flattenPrivateClusterConfig(cluster.PrivateClusterConfig)); err != nil { + return err + } + + if err := d.Set("vertical_pod_autoscaling", flattenVerticalPodAutoscaling(cluster.VerticalPodAutoscaling)); err != nil { + return err + } + + if err := d.Set("workload_identity_config", flattenWorkloadIdentityConfig(cluster.WorkloadIdentityConfig, d, config)); err != nil { + return err + } + + if err := d.Set("identity_service_config", flattenIdentityServiceConfig(cluster.IdentityServiceConfig, d, config)); err != nil { + return err + } + + if err := d.Set("service_external_ips_config", flattenServiceExternalIpsConfig(cluster.NetworkConfig.ServiceExternalIpsConfig)); err != nil { + return err + } + + if err := d.Set("mesh_certificates", flattenMeshCertificates(cluster.MeshCertificates)); err != nil { + return err + } + + if err := d.Set("database_encryption", flattenDatabaseEncryption(cluster.DatabaseEncryption)); err != nil { + return err + } + +{{ if ne $.TargetVersionName `ga` -}} + if err := d.Set("pod_security_policy_config", flattenPodSecurityPolicyConfig(cluster.PodSecurityPolicyConfig)); err != nil { + return err + } + + if err := d.Set("cluster_telemetry", flattenClusterTelemetry(cluster.ClusterTelemetry)); err != nil { + return err + } + + if err := d.Set("secret_manager_config", 
flattenSecretManagerConfig(cluster.SecretManagerConfig)); err != nil { + return err + } +{{- end }} + + if err := d.Set("resource_labels", cluster.ResourceLabels); err != nil { + return fmt.Errorf("Error setting resource_labels: %s", err) + } + if err := d.Set("label_fingerprint", cluster.LabelFingerprint); err != nil { + return fmt.Errorf("Error setting label_fingerprint: %s", err) + } + + if err := d.Set("resource_usage_export_config", flattenResourceUsageExportConfig(cluster.ResourceUsageExportConfig)); err != nil { + return err + } + if err := d.Set("dns_config", flattenDnsConfig(cluster.NetworkConfig.DnsConfig)); err != nil { + return err + } + if err := d.Set("gateway_api_config", flattenGatewayApiConfig(cluster.NetworkConfig.GatewayApiConfig)); err != nil { + return err + } + if err := d.Set("fleet", flattenFleet(cluster.Fleet)); err != nil { + return err + } + if err := d.Set("enable_k8s_beta_apis", flattenEnableK8sBetaApis(cluster.EnableK8sBetaApis)); err != nil { + return err + } + if err := d.Set("logging_config", flattenContainerClusterLoggingConfig(cluster.LoggingConfig)); err != nil { + return err + } + + if err := d.Set("monitoring_config", flattenMonitoringConfig(cluster.MonitoringConfig)); err != nil { + return err + } + + if err := d.Set("node_pool_auto_config", flattenNodePoolAutoConfig(cluster.NodePoolAutoConfig)); err != nil { + return err + } + + if err := d.Set("node_pool_defaults", flattenNodePoolDefaults(cluster.NodePoolDefaults)); err != nil { + return err + } + + if err := d.Set("security_posture_config", flattenSecurityPostureConfig(cluster.SecurityPostureConfig)); err != nil { + return err + } + +{{ if ne $.TargetVersionName `ga` -}} + if err := d.Set("protect_config", flattenProtectConfig(cluster.ProtectConfig)); err != nil { + return err + } +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} + if err := d.Set("workload_alts_config", flattenWorkloadAltsConfig(cluster.WorkloadAltsConfig)); err != nil { + return err + } +{{- end }} + + 
return nil +} + +func resourceContainerClusterUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return err + } + + clusterName := d.Get("name").(string) + + if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil { + return err + } + + d.Partial(true) + + lockKey := containerClusterMutexKey(project, location, clusterName) + + updateFunc := func(req *container.UpdateClusterRequest, updateDescription string) func() error { + return func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, updateDescription, userAgent, d.Timeout(schema.TimeoutUpdate)) + } + } + + // The ClusterUpdate object that we use for most of these updates only allows updating one field at a time, + // so we have to make separate calls for each field that we want to update. The order here is fairly arbitrary- + // if the order of updating fields does matter, it is called out explicitly. 
+ if d.HasChange("master_authorized_networks_config") { + c := d.Get("master_authorized_networks_config") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredMasterAuthorizedNetworksConfig: expandMasterAuthorizedNetworksConfig(c, d), + }, + } + + updateF := updateFunc(req, "updating GKE cluster master authorized networks") + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s master authorized networks config has been updated", d.Id()) + } + + if d.HasChange("addons_config") { + if ac, ok := d.GetOk("addons_config"); ok { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredAddonsConfig: expandClusterAddonsConfig(ac), + }, + } + + updateF := updateFunc(req, "updating GKE cluster addons") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s addons have been updated", d.Id()) + } + } + + if d.HasChange("cluster_autoscaling") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredClusterAutoscaling: expandClusterAutoscaling(d.Get("cluster_autoscaling"), d), + }} + + updateF := updateFunc(req, "updating GKE cluster autoscaling") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's cluster-wide autoscaling has been updated", d.Id()) + } + + if d.HasChange("dns_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredDnsConfig: expandDnsConfig(d.Get("dns_config")), + }, + } + + updateF := updateFunc(req, "updating GKE cluster DNSConfig") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's DNSConfig has been updated", d.Id()) + } + + if d.HasChange("allow_net_admin") { + allowed := d.Get("allow_net_admin").(bool) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredAutopilotWorkloadPolicyConfig: &container.WorkloadPolicyConfig{ + AllowNetAdmin: allowed, + }, + }, + } + + updateF := updateFunc(req, "updating net admin for GKE autopilot workload policy config") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's autopilot workload policy config allow_net_admin has been set to %v", d.Id(), allowed) + } + + if d.HasChange("private_cluster_config.0.enable_private_endpoint") { + enabled := d.Get("private_cluster_config.0.enable_private_endpoint").(bool) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredEnablePrivateEndpoint: enabled, + ForceSendFields: []string{"DesiredEnablePrivateEndpoint"}, + }, + } + + updateF := updateFunc(req, "updating enable private endpoint") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's enable private endpoint has been updated to %v", d.Id(), enabled) + } + + if d.HasChange("private_cluster_config") && d.HasChange("private_cluster_config.0.master_global_access_config") { + config := d.Get("private_cluster_config.0.master_global_access_config") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredPrivateClusterConfig: &container.PrivateClusterConfig{ + MasterGlobalAccessConfig: expandPrivateClusterConfigMasterGlobalAccessConfig(config), + ForceSendFields: []string{"MasterGlobalAccessConfig"}, + }, + }, + } + + updateF := updateFunc(req, "updating master global access config") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's master global access config has been updated to %v", d.Id(), config) + } + + if d.HasChange("binary_authorization") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredBinaryAuthorization: expandBinaryAuthorization(d.Get("binary_authorization")), + }, + } + + updateF := updateFunc(req, "updating GKE binary authorization") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's binary authorization has been updated to %v", d.Id(), req.Update.DesiredBinaryAuthorization) + } + + if d.HasChange("enable_shielded_nodes") { + enabled := d.Get("enable_shielded_nodes").(bool) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredShieldedNodes: &container.ShieldedNodes{ + Enabled: enabled, + ForceSendFields: []string{"Enabled"}, + }, + }, + } + + updateF := updateFunc(req, "updating GKE shielded nodes") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's shielded nodes has been updated to %v", d.Id(), enabled) + } + + if d.HasChange("release_channel") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredReleaseChannel: expandReleaseChannel(d.Get("release_channel")), + }, + } + updateF := func() error { + log.Println("[DEBUG] updating release_channel") + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + err = ContainerOperationWait(config, op, project, location, "updating Release Channel", userAgent, d.Timeout(schema.TimeoutUpdate)) + log.Println("[DEBUG] done updating release_channel") + return err + } + + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Release Channel has been updated to %#v", d.Id(), req.Update.DesiredReleaseChannel) + } + + if d.HasChange("enable_intranode_visibility") { + enabled := d.Get("enable_intranode_visibility").(bool) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredIntraNodeVisibilityConfig: &container.IntraNodeVisibilityConfig{ + Enabled: enabled, + ForceSendFields: []string{"Enabled"}, + }, + }, + } + updateF := func() error { + log.Println("[DEBUG] updating enable_intranode_visibility") + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + err = ContainerOperationWait(config, op, project, location, "updating GKE Intra Node Visibility", userAgent, d.Timeout(schema.TimeoutUpdate)) + log.Println("[DEBUG] done updating enable_intranode_visibility") + return err + } + + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Intra Node Visibility has been updated to %v", d.Id(), enabled) + } + + if d.HasChange("private_ipv6_google_access") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredPrivateIpv6GoogleAccess: d.Get("private_ipv6_google_access").(string), + }, + } + updateF := func() error { + log.Println("[DEBUG] updating private_ipv6_google_access") + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + err = ContainerOperationWait(config, op, project, location, "updating GKE Private IPv6 Google Access", userAgent, d.Timeout(schema.TimeoutUpdate)) + log.Println("[DEBUG] done updating private_ipv6_google_access") + return err + } + + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Private IPv6 Google Access has been updated", d.Id()) + } + + if d.HasChange("enable_l4_ilb_subsetting") { + // This field can be changed from false to true but not from true to false. CustomizeDiff handles that check. 
+ enabled := d.Get("enable_l4_ilb_subsetting").(bool) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredL4ilbSubsettingConfig: &container.ILBSubsettingConfig{ + Enabled: enabled, + ForceSendFields: []string{"Enabled"}, + }, + }, + } + updateF := func() error { + log.Println("[DEBUG] updating enable_l4_ilb_subsetting") + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + err = ContainerOperationWait(config, op, project, location, "updating L4", userAgent, d.Timeout(schema.TimeoutUpdate)) + log.Println("[DEBUG] done updating enable_l4_ilb_subsetting") + return err + } + + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s L4 ILB Subsetting has been updated to %v", d.Id(), enabled) + } + +{{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("enable_fqdn_network_policy") { + enabled := d.Get("enable_fqdn_network_policy").(bool) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredEnableFqdnNetworkPolicy: enabled, + }, + } + updateF := updateFunc(req, "updating fqdn network policy") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s FQDN Network Policy has been updated to %v", d.Id(), enabled) + } +{{- end }} + + if d.HasChange("enable_cilium_clusterwide_network_policy") { + enabled := d.Get("enable_cilium_clusterwide_network_policy").(bool) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredEnableCiliumClusterwideNetworkPolicy: enabled, + }, + } + updateF := updateFunc(req, "updating cilium clusterwide network policy") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Cilium Clusterwide Network Policy has been updated to %v", d.Id(), enabled) + } + + if d.HasChange("cost_management_config") { + c := d.Get("cost_management_config") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredCostManagementConfig: expandCostManagementConfig(c), + }, + } + + updateF := updateFunc(req, "updating cost management config") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s cost management config has been updated", d.Id()) + } + + if d.HasChange("authenticator_groups_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredAuthenticatorGroupsConfig: expandContainerClusterAuthenticatorGroupsConfig(d.Get("authenticator_groups_config")), + }, + } + updateF := updateFunc(req, "updating GKE cluster authenticator groups config") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s authenticator groups config has been updated", d.Id()) + } + + if d.HasChange("default_snat_status") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredDefaultSnatStatus: expandDefaultSnatStatus(d.Get("default_snat_status")), + }, + } + updateF := func() error { + log.Println("[DEBUG] updating default_snat_status") + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + err = ContainerOperationWait(config, op, project, location, "updating GKE Default SNAT status", userAgent, d.Timeout(schema.TimeoutUpdate)) + log.Println("[DEBUG] done updating default_snat_status") + return err + } + + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Default SNAT status has been updated", d.Id()) + } + + if d.HasChange("maintenance_policy") { + req := &container.SetMaintenancePolicyRequest{ + MaintenancePolicy: expandMaintenancePolicy(d, meta), + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterSetMaintenancePolicyCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.SetMaintenancePolicy(name, req) + if config.UserProjectOverride { + clusterSetMaintenancePolicyCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterSetMaintenancePolicyCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating GKE cluster maintenance policy", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s maintenance policy has been updated", d.Id()) + } + + if d.HasChange("node_locations") { + azSetOldI, azSetNewI := d.GetChange("node_locations") + azSetNew := azSetNewI.(*schema.Set) + azSetOld := azSetOldI.(*schema.Set) + if azSetNew.Contains(location) { + return fmt.Errorf("for multi-zonal clusters, node_locations should not contain the primary 'zone'") + } + // Since we can't add & remove zones in the same request, first add all the + // zones, then remove the ones we aren't using anymore. + azSet := azSetOld.Union(azSetNew) + + if tpgresource.IsZone(location) { + azSet.Add(location) + } + + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredLocations: tpgresource.ConvertStringSet(azSet), + }, + } + + updateF := updateFunc(req, "updating GKE cluster node locations") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + if tpgresource.IsZone(location) { + azSetNew.Add(location) + } + if !azSet.Equal(azSetNew) { + req = &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredLocations: tpgresource.ConvertStringSet(azSetNew), + }, + } + + updateF := updateFunc(req, "updating GKE cluster node locations") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + } + + log.Printf("[INFO] GKE cluster %s node locations have been updated to %v", d.Id(), azSet.List()) + } + + if d.HasChange("enable_legacy_abac") { + enabled := d.Get("enable_legacy_abac").(bool) + req := &container.SetLegacyAbacRequest{ + Enabled: enabled, + ForceSendFields: []string{"Enabled"}, + } + + updateF := func() error { + log.Println("[DEBUG] updating enable_legacy_abac") + name := containerClusterFullName(project, location, clusterName) + clusterSetLegacyAbacCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.SetLegacyAbac(name, req) + if config.UserProjectOverride { + clusterSetLegacyAbacCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterSetLegacyAbacCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + err = ContainerOperationWait(config, op, project, location, "updating GKE legacy ABAC", userAgent, d.Timeout(schema.TimeoutUpdate)) + log.Println("[DEBUG] done updating enable_legacy_abac") + return err + } + + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s legacy ABAC has been updated to %v", d.Id(), enabled) + } + + if d.HasChange("monitoring_service") || d.HasChange("logging_service") { + logging := d.Get("logging_service").(string) + monitoring := d.Get("monitoring_service").(string) + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredMonitoringService: monitoring, + DesiredLoggingService: logging, + }, + } + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating GKE logging+monitoring service", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s: logging service has been updated to %s, monitoring service has been updated to %s", d.Id(), logging, monitoring) + } + + if d.HasChange("network_policy") { + np := d.Get("network_policy") + req := &container.SetNetworkPolicyRequest{ + NetworkPolicy: expandNetworkPolicy(np), + } + + updateF := func() error { + log.Println("[DEBUG] updating network_policy") + name := containerClusterFullName(project, location, clusterName) + clusterSetNetworkPolicyCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.SetNetworkPolicy(name, req) + if config.UserProjectOverride { + clusterSetNetworkPolicyCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterSetNetworkPolicyCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + err = ContainerOperationWait(config, op, project, location, "updating GKE cluster network policy", userAgent, d.Timeout(schema.TimeoutUpdate)) + log.Println("[DEBUG] done updating network_policy") + return err + } + + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Network policy for GKE cluster %s has been updated", d.Id()) + + } + + if d.HasChange("ip_allocation_policy.0.additional_pod_ranges_config") { + o, n := d.GetChange("ip_allocation_policy.0.additional_pod_ranges_config.0.pod_range_names") + old_names := o.(*schema.Set) + new_names := n.(*schema.Set) + + // Filter unchanged names. 
+ removed_names := old_names.Difference(new_names) + added_names := new_names.Difference(old_names) + + var additional_config *container.AdditionalPodRangesConfig + var removed_config *container.AdditionalPodRangesConfig + if added_names.Len() > 0 { + var names []string + for _, name := range added_names.List() { + names = append(names, name.(string)) + } + additional_config = &container.AdditionalPodRangesConfig{ + PodRangeNames: names, + } + } + if removed_names.Len() > 0 { + var names []string + for _, name := range removed_names.List() { + names = append(names, name.(string)) + } + removed_config = &container.AdditionalPodRangesConfig{ + PodRangeNames: names, + } + } + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + AdditionalPodRangesConfig: additional_config, + RemovedAdditionalPodRangesConfig: removed_config, + }, + } + + updateF := updateFunc(req, "updating AdditionalPodRangesConfig") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's AdditionalPodRangesConfig has been updated", d.Id()) + } + + if n, ok := d.GetOk("node_pool.#"); ok { + for i := 0; i < n.(int); i++ { + nodePoolInfo, err := extractNodePoolInformationFromCluster(d, config, clusterName) + if err != nil { + return err + } + + if err := nodePoolUpdate(d, meta, nodePoolInfo, fmt.Sprintf("node_pool.%d.", i), d.Timeout(schema.TimeoutUpdate)); err != nil { + return err + } + } + } + + // The master must be updated before the nodes + // If set to "", skip this step- any master version satisfies that minimum. 
+ if ver := d.Get("min_master_version").(string); d.HasChange("min_master_version") && ver != "" { + des, err := version.NewVersion(ver) + if err != nil { + return err + } + + currentMasterVersion := d.Get("master_version").(string) + cur, err := version.NewVersion(currentMasterVersion) + if err != nil { + return err + } + + // Only upgrade the master if the current version is lower than the desired version + if cur.LessThan(des) { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredMasterVersion: ver, + }, + } + + updateF := updateFunc(req, "updating GKE master version") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s: master has been updated to %s", d.Id(), ver) + } + } + + // It's not super important that this come after updating the node pools, but it still seems like a better + // idea than doing it before. + if d.HasChange("node_version") { + foundDefault := false + if n, ok := d.GetOk("node_pool.#"); ok { + for i := 0; i < n.(int); i++ { + key := fmt.Sprintf("node_pool.%d.", i) + if d.Get(key+"name").(string) == "default-pool" { + desiredNodeVersion := d.Get("node_version").(string) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodeVersion: desiredNodeVersion, + DesiredNodePoolId: "default-pool", + }, + } + updateF := updateFunc(req, "updating GKE default node pool node version") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s: default node pool has been updated to %s", d.Id(), + desiredNodeVersion) + foundDefault = true + } + } + } + + if !foundDefault { + return fmt.Errorf("node_version was updated but default-pool was not found. 
To update the version for a non-default pool, use the version attribute on that pool.") + } + } + + if d.HasChange("node_config") { + if d.HasChange("node_config.0.image_type") { + it := d.Get("node_config.0.image_type").(string) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredImageType: it, + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating GKE image type", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s: image type has been updated to %s", d.Id(), it) + } + } + + if d.HasChange("notification_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNotificationConfig: expandNotificationConfig(d.Get("notification_config")), + }, + } + updateF := func() error { + log.Println("[DEBUG] updating notification_config") + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + err = ContainerOperationWait(config, op, project, location, "updating Notification Config", userAgent, d.Timeout(schema.TimeoutUpdate)) + log.Println("[DEBUG] done updating notification_config") + return err + } + + // Call 
update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Notification Config has been updated to %#v", d.Id(), req.Update.DesiredNotificationConfig) + } + + if d.HasChange("vertical_pod_autoscaling") { + if ac, ok := d.GetOk("vertical_pod_autoscaling"); ok { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredVerticalPodAutoscaling: expandVerticalPodAutoscaling(ac), + }, + } + + updateF := updateFunc(req, "updating GKE cluster vertical pod autoscaling") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s vertical pod autoscaling has been updated", d.Id()) + } + } + + if d.HasChange("service_external_ips_config") { + c := d.Get("service_external_ips_config") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredServiceExternalIpsConfig: expandServiceExternalIpsConfig(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating GKE cluster service externalips config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s service externalips config has been updated", d.Id()) + } + + if d.HasChange("mesh_certificates") { + c := d.Get("mesh_certificates") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredMeshCertificates: expandMeshCertificates(c), + 
}, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating GKE cluster mesh certificates config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s mesh certificates config has been updated", d.Id()) + } + + if d.HasChange("database_encryption") { + c := d.Get("database_encryption") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredDatabaseEncryption: expandDatabaseEncryption(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating GKE cluster database encryption config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s database encryption config has been updated", d.Id()) + } + +{{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("pod_security_policy_config") { + c := d.Get("pod_security_policy_config") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredPodSecurityPolicyConfig: 
expandPodSecurityPolicyConfig(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating GKE cluster pod security policy config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s pod security policy config has been updated", d.Id()) + } +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("secret_manager_config") { + c := d.Get("secret_manager_config") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredSecretManagerConfig: expandSecretManagerConfig(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating secret manager csi driver config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s secret manager csi add-on has been updated", d.Id()) + } +{{- end }} + + if d.HasChange("workload_identity_config") { + // Because GKE uses a non-RESTful update function, when removing the + // feature you need to specify a fairly full 
request body or it fails: + // "update": {"desiredWorkloadIdentityConfig": {"identityNamespace": ""}} + req := &container.UpdateClusterRequest{} + if v, ok := d.GetOk("workload_identity_config"); !ok { + req.Update = &container.ClusterUpdate{ + DesiredWorkloadIdentityConfig: &container.WorkloadIdentityConfig{ + WorkloadPool: "", + ForceSendFields: []string{"WorkloadPool"}, + }, + } + } else { + req.Update = &container.ClusterUpdate{ + DesiredWorkloadIdentityConfig: expandWorkloadIdentityConfig(v), + } + } + + updateF := updateFunc(req, "updating GKE cluster workload identity config") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s workload identity config has been updated", d.Id()) + } + + if d.HasChange("identity_service_config") { + req := &container.UpdateClusterRequest{} + if v, ok := d.GetOk("identity_service_config"); !ok { + req.Update = &container.ClusterUpdate{ + DesiredIdentityServiceConfig: &container.IdentityServiceConfig{ + Enabled: false, + }, + } + } else { + req.Update = &container.ClusterUpdate{ + DesiredIdentityServiceConfig: expandIdentityServiceConfig(v), + } + } + + updateF := updateFunc(req, "updating GKE cluster identity service config") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s identity service config has been updated", d.Id()) + } + + if d.HasChange("logging_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredLoggingConfig: expandContainerClusterLoggingConfig(d.Get("logging_config")), + }, + } + updateF := updateFunc(req, "updating GKE cluster logging config") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s logging config has been updated", d.Id()) + } + + if d.HasChange("monitoring_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredMonitoringConfig: expandMonitoringConfig(d.Get("monitoring_config")), + }, + } + updateF := updateFunc(req, "updating GKE cluster monitoring config") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s monitoring config has been updated", d.Id()) + } + + if d.HasChange("resource_labels") { + resourceLabels := d.Get("resource_labels").(map[string]interface{}) + labelFingerprint := d.Get("label_fingerprint").(string) + req := &container.SetLabelsRequest{ + ResourceLabels: tpgresource.ConvertStringMap(resourceLabels), + LabelFingerprint: labelFingerprint, + } + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterSetResourceLabelsCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.SetResourceLabels(name, req) + if config.UserProjectOverride { + clusterSetResourceLabelsCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterSetResourceLabelsCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating GKE resource labels", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + } + + if d.HasChange("remove_default_node_pool") && d.Get("remove_default_node_pool").(bool) { + name := fmt.Sprintf("%s/nodePools/%s", containerClusterFullName(project, location, clusterName), "default-pool") + clusterNodePoolDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Delete(name) + if config.UserProjectOverride { + clusterNodePoolDeleteCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterNodePoolDeleteCall.Do() + if err != nil { + if !transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + return errwrap.Wrapf("Error deleting default node pool: {{"{{"}}err{{"}}"}}", err) + } + log.Printf("[WARN] Container cluster %q default node pool already removed, no change", d.Id()) + } else { + err = ContainerOperationWait(config, op, project, location, "removing default node pool", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return errwrap.Wrapf("Error deleting default node pool: {{"{{"}}err{{"}}"}}", err) + } + } + } + + if d.HasChange("resource_usage_export_config") { + c := d.Get("resource_usage_export_config") + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredResourceUsageExportConfig: expandResourceUsageExportConfig(c), + }, + } + + updateF := func() error { + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + // Wait until it's updated + return ContainerOperationWait(config, op, project, location, "updating GKE cluster resource usage export config", userAgent, d.Timeout(schema.TimeoutUpdate)) + } + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return 
err + } + log.Printf("[INFO] GKE cluster %s resource usage export config has been updated", d.Id()) + } + + if d.HasChange("gateway_api_config") { + if gac, ok := d.GetOk("gateway_api_config"); ok { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredGatewayApiConfig: expandGatewayApiConfig(gac), + }, + } + + updateF := updateFunc(req, "updating GKE Gateway API") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Gateway API has been updated", d.Id()) + } + } + + if d.HasChange("fleet") { + // Because GKE uses a non-RESTful update function, when removing the + // feature you need to specify a fairly full request body or it fails: + // "update": {"desiredFleet": {"project": ""}} + req := &container.UpdateClusterRequest{} + if v, ok := d.GetOk("fleet"); !ok { + req.Update = &container.ClusterUpdate{ + DesiredFleet: &container.Fleet{ + Project: "", + }, + } + } else { + req.Update = &container.ClusterUpdate{ + DesiredFleet: expandFleet(v), + } + } + updateF := updateFunc(req, "updating GKE cluster fleet config") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s fleet config has been updated", d.Id()) + } + + if d.HasChange("enable_k8s_beta_apis") { + log.Print("[INFO] Enable Kubernetes Beta APIs") + if v, ok := d.GetOk("enable_k8s_beta_apis"); ok { + name := containerClusterFullName(project, location, clusterName) + clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(name) + if config.UserProjectOverride { + clusterGetCall.Header().Add("X-Goog-User-Project", project) + } + // Fetch the cluster information to get the already enabled Beta APIs. 
+ cluster, err := clusterGetCall.Do() + if err != nil { + return err + } + + // To avoid an already enabled Beta APIs error, we need to deduplicate the requested APIs + // with those that are already enabled. + var enabledAPIs []string + if cluster.EnableK8sBetaApis != nil && len(cluster.EnableK8sBetaApis.EnabledApis) > 0 { + enabledAPIs = cluster.EnableK8sBetaApis.EnabledApis + } + enableK8sBetaAPIs := expandEnableK8sBetaApis(v, enabledAPIs) + + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredK8sBetaApis: enableK8sBetaAPIs, + }, + } + + updateF := updateFunc(req, "updating enabled Kubernetes Beta APIs") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s enabled Kubernetes Beta APIs has been updated", d.Id()) + } + } + + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.logging_variant") { + if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.logging_variant"); ok { + loggingVariant := v.(string) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolLoggingConfig: &container.NodePoolLoggingConfig{ + VariantConfig: &container.LoggingVariantConfig{ + Variant: loggingVariant, + }, + }, + }, + } + + updateF := updateFunc(req, "updating GKE cluster desired node pool logging configuration defaults.") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool logging configuration defaults have been updated", d.Id()) + } + } + +{{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.gcfs_config") { + if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.gcfs_config"); ok { + gcfsConfig := v.([]interface{})[0].(map[string]interface{}) + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredGcfsConfig: &container.GcfsConfig{ + Enabled: gcfsConfig["enabled"].(bool), + }, + }, + } + + updateF := updateFunc(req, "updating GKE cluster desired gcfs config.") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s default gcfs config has been updated", d.Id()) + } + } +{{- end }} + + if d.HasChange("security_posture_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredSecurityPostureConfig: expandSecurityPostureConfig(d.Get("security_posture_config")), + }, + } + updateF := updateFunc(req, "updating GKE cluster master Security Posture Config") + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Security Posture Config has been updated to %#v", d.Id(), req.Update.DesiredSecurityPostureConfig) + } + + if d.HasChange("node_pool_defaults") && d.HasChange("node_pool_defaults.0.node_config_defaults.0.containerd_config") { + if v, ok := d.GetOk("node_pool_defaults.0.node_config_defaults.0.containerd_config"); ok { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredContainerdConfig: expandContainerdConfig(v), + }, + } + updateF := updateFunc(req, "updating GKE cluster containerd config") + if err := transport_tpg.LockedCall(lockKey, 
updateF); err != nil { + return err + } + log.Printf("[INFO] GKE cluster %s containerd config has been updated to %#v", d.Id(), req.Update.DesiredContainerdConfig) + } + } + + if d.HasChange("node_pool_auto_config.0.network_tags.0.tags") { + tags := d.Get("node_pool_auto_config.0.network_tags.0.tags").([]interface{}) + + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolAutoConfigNetworkTags: &container.NetworkTags{ + Tags: tpgresource.ConvertStringArr(tags), + ForceSendFields: []string{"Tags"}, + }, + }, + } + + updateF := updateFunc(req, "updating GKE cluster node pool auto config network tags") + // Call update serially. + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool auto config network tags have been updated", d.Id()) + } + + if d.HasChange("node_pool_auto_config.0.resource_manager_tags") { + rmtags := d.Get("node_pool_auto_config.0.resource_manager_tags") + + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolAutoConfigResourceManagerTags: expandResourceManagerTags(rmtags), + }, + } + + updateF := updateFunc(req, "updating GKE cluster node pool auto config resource manager tags") + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s node pool auto config resource manager tags have been updated", d.Id()) + } + + d.Partial(false) + +{{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("cluster_telemetry") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredClusterTelemetry: expandClusterTelemetry(d.Get("cluster_telemetry")), + }, + } + updateF := func() error { + log.Println("[DEBUG] updating cluster_telemetry") + name := containerClusterFullName(project, location, clusterName) + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(name, req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + err = ContainerOperationWait(config, op, project, location, "updating Cluster Telemetry", userAgent, d.Timeout(schema.TimeoutUpdate)) + log.Println("[DEBUG] done updating cluster_telemetry") + return err + } + + // Call update serially. 
+ if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Cluster Telemetry has been updated to %#v", d.Id(), req.Update.DesiredClusterTelemetry) + } +{{- end }} + + if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil { + return err + } + +{{ if ne $.TargetVersionName `ga` -}} + if d.HasChange("protect_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredProtectConfig: expandProtectConfig(d.Get("protect_config")), + }, + } + updateF := updateFunc(req, "updating GKE cluster master protect_config") + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s Protect Config has been updated to %#v", d.Id(), req.Update.DesiredProtectConfig) + } +{{- end }} +{{- if ne $.TargetVersionName "ga" }} + if d.HasChange("workload_alts_config") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredWorkloadAltsConfig: expandWorkloadAltsConfig(d.Get("workload_alts_config")), + }, + } + + updateF := updateFunc(req, "updating GKE cluster WorkloadALTSConfig") + if err := transport_tpg.LockedCall(lockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] GKE cluster %s's WorkloadALTSConfig has been updated", d.Id()) + } +{{- end }} + return resourceContainerClusterRead(d, meta) +} + +func resourceContainerClusterDelete(d *schema.ResourceData, meta interface{}) error { + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("Cannot destroy cluster because deletion_protection is set to true. 
Set it to false to proceed with cluster deletion.")
	}
	config := meta.(*transport_tpg.Config)
	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
	if err != nil {
		return err
	}

	project, err := tpgresource.GetProject(d, config)
	if err != nil {
		return err
	}

	location, err := tpgresource.GetLocation(d, config)
	if err != nil {
		return err
	}

	clusterName := d.Get("name").(string)

	// Wait for the cluster to reach a resting state before deleting; a 404 here
	// means the cluster is already gone, which is treated as a successful delete.
	if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutDelete)); err != nil {
		if transport_tpg.IsGoogleApiErrorWithCode(err, 404) {
			log.Printf("[INFO] GKE cluster %s doesn't exist to delete", d.Id())
			return nil
		}
		return err
	}

	log.Printf("[DEBUG] Deleting GKE cluster %s", d.Get("name").(string))
	// Serialize operations on the same cluster via the shared mutex store.
	transport_tpg.MutexStore.Lock(containerClusterMutexKey(project, location, clusterName))
	defer transport_tpg.MutexStore.Unlock(containerClusterMutexKey(project, location, clusterName))

	var op *container.Operation
	var count = 0
	// Retry the Delete call for up to 30s: the API rejects deletion while other
	// operations are in flight. NOTE(review): the count==15 guard is only reached
	// after a successful Do(), so it never fires on a failing call — confirm the
	// intended retry cap upstream.
	err = retry.Retry(30*time.Second, func() *retry.RetryError {
		count++

		name := containerClusterFullName(project, location, clusterName)
		clusterDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Delete(name)
		if config.UserProjectOverride {
			clusterDeleteCall.Header().Add("X-Goog-User-Project", project)
		}
		op, err = clusterDeleteCall.Do()

		if err != nil {
			log.Printf("[WARNING] Cluster is still not ready to delete, retrying %s", clusterName)
			return retry.RetryableError(err)
		}

		if count == 15 {
			return retry.NonRetryableError(fmt.Errorf("Error retrying to delete cluster %s", clusterName))
		}
		return nil
	})

	if err != nil {
		return fmt.Errorf("Error deleting Cluster: %s", err)
	}

	// Wait until it's deleted
	waitErr := ContainerOperationWait(config, op, project, location, "deleting GKE cluster", userAgent, d.Timeout(schema.TimeoutDelete))
	if waitErr != nil {
		return waitErr
	}

	log.Printf("[INFO] GKE cluster %s has been deleted", d.Id())

	d.SetId("")

	return nil
}

// Terminal cluster statuses: RUNNING is the healthy resting state; DEGRADED and
// ERROR are terminal error states. Any other status is treated as transitional.
var containerClusterRestingStates = RestingStates{
	"RUNNING":  ReadyState,
	"DEGRADED": ErrorState,
	"ERROR":    ErrorState,
}

// returns a state with no error if the state is a resting state, and the last state with an error otherwise
func containerClusterAwaitRestingState(config *transport_tpg.Config, project, location, clusterName, userAgent string, timeout time.Duration) (state string, err error) {
	err = retry.Retry(timeout, func() *retry.RetryError {
		name := containerClusterFullName(project, location, clusterName)
		clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(name)
		if config.UserProjectOverride {
			clusterGetCall.Header().Add("X-Goog-User-Project", project)
		}
		cluster, gErr := clusterGetCall.Do()
		// Read errors abort the wait immediately rather than retrying.
		if gErr != nil {
			return retry.NonRetryableError(gErr)
		}

		state = cluster.Status

		// Both ready and error states end the wait without error; only
		// transitional (unlisted) states keep polling until timeout.
		switch stateType := containerClusterRestingStates[cluster.Status]; stateType {
		case ReadyState:
			log.Printf("[DEBUG] Cluster %q has status %q with message %q.", clusterName, state, cluster.StatusMessage)
			return nil
		case ErrorState:
			log.Printf("[DEBUG] Cluster %q has error state %q with message %q.", clusterName, state, cluster.StatusMessage)
			return nil
		default:
			return retry.RetryableError(fmt.Errorf("Cluster %q has state %q with message %q", clusterName, state, cluster.StatusMessage))
		}
	})

	return state, err
}

// expandClusterAddonsConfig converts the schema's addons_config block into the
// API AddonsConfig. Each addon force-sends its Enabled/Disabled field so that
// explicit zero values are transmitted instead of omitted.
func expandClusterAddonsConfig(configured interface{}) *container.AddonsConfig {
	l := configured.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil
	}

	config := l[0].(map[string]interface{})
	ac := &container.AddonsConfig{}

	if v, ok := config["http_load_balancing"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.HttpLoadBalancing = &container.HttpLoadBalancing{
			Disabled: addon["disabled"].(bool),
ForceSendFields: []string{"Disabled"},
		}
	}

	if v, ok := config["horizontal_pod_autoscaling"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.HorizontalPodAutoscaling = &container.HorizontalPodAutoscaling{
			Disabled:        addon["disabled"].(bool),
			ForceSendFields: []string{"Disabled"},
		}
	}

	if v, ok := config["network_policy_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.NetworkPolicyConfig = &container.NetworkPolicyConfig{
			Disabled:        addon["disabled"].(bool),
			ForceSendFields: []string{"Disabled"},
		}
	}

	if v, ok := config["gcp_filestore_csi_driver_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.GcpFilestoreCsiDriverConfig = &container.GcpFilestoreCsiDriverConfig{
			Enabled:         addon["enabled"].(bool),
			ForceSendFields: []string{"Enabled"},
		}
	}

	if v, ok := config["cloudrun_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.CloudRunConfig = &container.CloudRunConfig{
			Disabled:        addon["disabled"].(bool),
			ForceSendFields: []string{"Disabled"},
		}
		// load_balancer_type is optional; only send it when the user set one.
		if addon["load_balancer_type"] != "" {
			ac.CloudRunConfig.LoadBalancerType = addon["load_balancer_type"].(string)
		}
	}

	if v, ok := config["dns_cache_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.DnsCacheConfig = &container.DnsCacheConfig{
			Enabled:         addon["enabled"].(bool),
			ForceSendFields: []string{"Enabled"},
		}
	}

	if v, ok := config["gce_persistent_disk_csi_driver_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.GcePersistentDiskCsiDriverConfig = &container.GcePersistentDiskCsiDriverConfig{
			Enabled:         addon["enabled"].(bool),
			ForceSendFields: []string{"Enabled"},
		}
	}
	if v, ok := config["gke_backup_agent_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.GkeBackupAgentConfig = &container.GkeBackupAgentConfig{
			Enabled:         addon["enabled"].(bool),
			ForceSendFields: []string{"Enabled"},
		}
	}
	if v, ok := config["config_connector_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.ConfigConnectorConfig = &container.ConfigConnectorConfig{
			Enabled:         addon["enabled"].(bool),
			ForceSendFields: []string{"Enabled"},
		}
	}
	if v, ok := config["gcs_fuse_csi_driver_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.GcsFuseCsiDriverConfig = &container.GcsFuseCsiDriverConfig{
			Enabled:         addon["enabled"].(bool),
			ForceSendFields: []string{"Enabled"},
		}
	}

	if v, ok := config["stateful_ha_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.StatefulHaConfig = &container.StatefulHAConfig{
			Enabled:         addon["enabled"].(bool),
			ForceSendFields: []string{"Enabled"},
		}
	}

{{ if ne $.TargetVersionName `ga` -}}
	// Beta-only addons (excluded from the GA provider build).
	if v, ok := config["istio_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.IstioConfig = &container.IstioConfig{
			Disabled:        addon["disabled"].(bool),
			Auth:            addon["auth"].(string),
			ForceSendFields: []string{"Disabled"},
		}
	}

	if v, ok := config["kalm_config"]; ok && len(v.([]interface{})) > 0 {
		addon := v.([]interface{})[0].(map[string]interface{})
		ac.KalmConfig = &container.KalmConfig{
			Enabled:         addon["enabled"].(bool),
			ForceSendFields: []string{"Enabled"},
		}
	}
{{- end }}

	return ac
}

// expandPodCidrOverprovisionConfig maps the schema's "disabled" flag onto the
// API's Disable field, force-sending it so an explicit false is not dropped.
func expandPodCidrOverprovisionConfig(configured interface{}) *container.PodCIDROverprovisionConfig {
	l := configured.([]interface{})
	if len(l) == 0 || l[0] == nil {
		return nil
	}
	config := l[0].(map[string]interface{})
	return &container.PodCIDROverprovisionConfig{
		Disable:
config["disabled"].(bool),
		ForceSendFields: []string{"Disable"},
	}
}

// expandIPAllocationPolicy builds the IPAllocationPolicy from configuration.
// With no block configured, VPC_NATIVE clusters get nil (API defaults) while
// other modes get explicit routes-based defaults.
// NOTE(review): the autopilot parameter is not referenced in this body —
// confirm whether it is still needed by callers.
func expandIPAllocationPolicy(configured interface{}, networkingMode string, autopilot bool) (*container.IPAllocationPolicy, error) {
	l := configured.([]interface{})
	if len(l) == 0 || l[0] == nil {
		if networkingMode == "VPC_NATIVE" {
			return nil, nil
		}
		return &container.IPAllocationPolicy{
			UseIpAliases:    false,
			UseRoutes:       true,
			StackType:       "IPV4",
			ForceSendFields: []string{"UseIpAliases"},
		}, nil
	}

	config := l[0].(map[string]interface{})
	stackType := config["stack_type"].(string)

	return &container.IPAllocationPolicy{
		UseIpAliases:               networkingMode == "VPC_NATIVE" || networkingMode == "",
		ClusterIpv4CidrBlock:       config["cluster_ipv4_cidr_block"].(string),
		ServicesIpv4CidrBlock:      config["services_ipv4_cidr_block"].(string),
		ClusterSecondaryRangeName:  config["cluster_secondary_range_name"].(string),
		ServicesSecondaryRangeName: config["services_secondary_range_name"].(string),
		ForceSendFields:            []string{"UseIpAliases"},
		UseRoutes:                  networkingMode == "ROUTES",
		StackType:                  stackType,
		PodCidrOverprovisionConfig: expandPodCidrOverprovisionConfig(config["pod_cidr_overprovision_config"]),
	}, nil
}

// expandMaintenancePolicy assembles a MaintenancePolicy, re-reading the live
// cluster to pick up the current policy fingerprint (ResourceVersion) and any
// maintenance exclusions already present server-side.
func expandMaintenancePolicy(d *schema.ResourceData, meta interface{}) *container.MaintenancePolicy {
	config := meta.(*transport_tpg.Config)
	// We have to perform a full Get() as part of this, to get the fingerprint. We can't do this
	// at any other time, because the fingerprint update might happen between plan and apply.
	// We can omit error checks, since to have gotten this far, a project is definitely configured.
	project, _ := tpgresource.GetProject(d, config)
	location, _ := tpgresource.GetLocation(d, config)
	clusterName := d.Get("name").(string)
	name := containerClusterFullName(project, location, clusterName)
	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
	if err != nil {
		return nil
	}
	clusterGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Get(name)
	if config.UserProjectOverride {
		clusterGetCall.Header().Add("X-Goog-User-Project", project)
	}
	cluster, _ := clusterGetCall.Do()
	resourceVersion := ""
	exclusions := make(map[string]container.TimeWindow)
	if cluster != nil && cluster.MaintenancePolicy != nil {
		// If the cluster doesn't exist or if there is a read error of any kind, we will pass in an empty
		// resourceVersion. If there happens to be a change to maintenance policy, we will fail at that
		// point. This is a compromise between code cleanliness and a slightly worse user experience in
		// an unlikely error case - we choose code cleanliness.
		resourceVersion = cluster.MaintenancePolicy.ResourceVersion

		// Having a MaintenancePolicy doesn't mean that you need MaintenanceExclusions, but if they were set,
		// they need to be assigned to exclusions.
		if cluster.MaintenancePolicy.Window != nil && cluster.MaintenancePolicy.Window.MaintenanceExclusions != nil {
			exclusions = cluster.MaintenancePolicy.Window.MaintenanceExclusions
		}
	}

	configured := d.Get("maintenance_policy")
	l := configured.([]interface{})
	if len(l) == 0 || l[0] == nil {
		// No policy configured: keep the fingerprint and server-side exclusions.
		return &container.MaintenancePolicy{
			ResourceVersion: resourceVersion,
			Window: &container.MaintenanceWindow{
				MaintenanceExclusions: exclusions,
			},
		}
	}
	maintenancePolicy := l[0].(map[string]interface{})

	if maintenanceExclusions, ok := maintenancePolicy["maintenance_exclusion"]; ok {
		// Configured exclusions fully replace whatever was read from the server.
		for k := range exclusions {
			delete(exclusions, k)
		}
		for _, me := range maintenanceExclusions.(*schema.Set).List() {
			exclusion := me.(map[string]interface{})
			exclusions[exclusion["exclusion_name"].(string)] = container.TimeWindow{
				StartTime: exclusion["start_time"].(string),
				EndTime:   exclusion["end_time"].(string),
			}
			if exclusionOptions, ok := exclusion["exclusion_options"]; ok && len(exclusionOptions.([]interface{})) > 0 {
				meo := exclusionOptions.([]interface{})[0].(map[string]interface{})
				// Map values are not addressable: copy, mutate, then store back.
				mex := exclusions[exclusion["exclusion_name"].(string)]
				mex.MaintenanceExclusionOptions = &container.MaintenanceExclusionOptions{
					Scope:           meo["scope"].(string),
					ForceSendFields: []string{"Scope"},
				}
				exclusions[exclusion["exclusion_name"].(string)] = mex
			}
		}
	}

	if dailyMaintenanceWindow, ok := maintenancePolicy["daily_maintenance_window"]; ok && len(dailyMaintenanceWindow.([]interface{})) > 0 {
		dmw := dailyMaintenanceWindow.([]interface{})[0].(map[string]interface{})
		startTime := dmw["start_time"].(string)
		return &container.MaintenancePolicy{
			Window: &container.MaintenanceWindow{
				MaintenanceExclusions: exclusions,
				DailyMaintenanceWindow: &container.DailyMaintenanceWindow{
					StartTime: startTime,
				},
			},
			ResourceVersion: resourceVersion,
		}
	}
	if recurringWindow, ok := maintenancePolicy["recurring_window"]; ok &&
len(recurringWindow.([]interface{})) > 0 {
		rw := recurringWindow.([]interface{})[0].(map[string]interface{})
		return &container.MaintenancePolicy{
			Window: &container.MaintenanceWindow{
				MaintenanceExclusions: exclusions,
				RecurringWindow: &container.RecurringTimeWindow{
					Window: &container.TimeWindow{
						StartTime: rw["start_time"].(string),
						EndTime:   rw["end_time"].(string),
					},
					Recurrence: rw["recurrence"].(string),
				},
			},
			ResourceVersion: resourceVersion,
		}
	}
	// Neither a daily nor a recurring window was configured.
	return nil
}

// expandClusterAutoscaling builds cluster_autoscaling. With Autopilot enabled
// and no explicit block, nil is returned; otherwise node auto-provisioning is
// explicitly force-sent as disabled.
func expandClusterAutoscaling(configured interface{}, d *schema.ResourceData) *container.ClusterAutoscaling {
	l, ok := configured.([]interface{})
	enableAutopilot := false
	if v, ok := d.GetOk("enable_autopilot"); ok && v == true {
		enableAutopilot = true
	}
	if !ok || l == nil || len(l) == 0 || l[0] == nil {
		if enableAutopilot {
			return nil
		}
		return &container.ClusterAutoscaling{
			EnableNodeAutoprovisioning: false,
			ForceSendFields:            []string{"EnableNodeAutoprovisioning"},
		}
	}

	config := l[0].(map[string]interface{})

	// Conditionally provide an empty list to preserve a legacy 2.X behaviour
	// when `enabled` is false and resource_limits is unset, allowing users to
	// explicitly disable the feature. resource_limits don't work when node
	// auto-provisioning is disabled at time of writing. This may change API-side
	// in the future though, as the feature is intended to apply to both node
	// auto-provisioning and node autoscaling.
	var resourceLimits []*container.ResourceLimit
	if limits, ok := config["resource_limits"]; ok {
		resourceLimits = make([]*container.ResourceLimit, 0)
		if lmts, ok := limits.([]interface{}); ok {
			for _, v := range lmts {
				limit := v.(map[string]interface{})
				resourceLimits = append(resourceLimits,
					&container.ResourceLimit{
						ResourceType: limit["resource_type"].(string),
						// Here we're relying on *not* setting ForceSendFields for 0-values.
						Minimum: int64(limit["minimum"].(int)),
						Maximum: int64(limit["maximum"].(int)),
					})
			}
		}
	}
	return &container.ClusterAutoscaling{
		EnableNodeAutoprovisioning:       config["enabled"].(bool),
		ResourceLimits:                   resourceLimits,
		AutoscalingProfile:               config["autoscaling_profile"].(string),
		AutoprovisioningNodePoolDefaults: expandAutoProvisioningDefaults(config["auto_provisioning_defaults"], d),
	}
}

// expandAutoProvisioningDefaults maps auto_provisioning_defaults onto the node
// pool defaults used by node auto-provisioning.
func expandAutoProvisioningDefaults(configured interface{}, d *schema.ResourceData) *container.AutoprovisioningNodePoolDefaults {
	l, ok := configured.([]interface{})
	if !ok || l == nil || len(l) == 0 || l[0] == nil {
		return &container.AutoprovisioningNodePoolDefaults{}
	}
	config := l[0].(map[string]interface{})

	npd := &container.AutoprovisioningNodePoolDefaults{
		OauthScopes:     tpgresource.ConvertStringArr(config["oauth_scopes"].([]interface{})),
		ServiceAccount:  config["service_account"].(string),
		DiskSizeGb:      int64(config["disk_size"].(int)),
		DiskType:        config["disk_type"].(string),
		ImageType:       config["image_type"].(string),
		BootDiskKmsKey:  config["boot_disk_kms_key"].(string),
		Management:      expandManagement(config["management"]),
		UpgradeSettings: expandUpgradeSettings(config["upgrade_settings"]),
	}

	if v, ok := config["shielded_instance_config"]; ok && len(v.([]interface{})) > 0 {
		conf := v.([]interface{})[0].(map[string]interface{})
		npd.ShieldedInstanceConfig = &container.ShieldedInstanceConfig{
			EnableSecureBoot:          conf["enable_secure_boot"].(bool),
			EnableIntegrityMonitoring: conf["enable_integrity_monitoring"].(bool),
		}
	}

	cpu := config["min_cpu_platform"].(string)
	// the only way to unset the field is to pass "automatic" as its value
	if cpu == "" {
		cpu = "automatic"
	}
	npd.MinCpuPlatform = cpu

	return npd
}

// expandUpgradeSettings converts an upgrade_settings block to the API type; an
// absent block yields an empty (zero-value) UpgradeSettings.
func expandUpgradeSettings(configured interface{}) *container.UpgradeSettings {
	l, ok := configured.([]interface{})
	if !ok || l == nil || len(l) == 0 || l[0] == nil {
		return
&container.UpgradeSettings{} + } + config := l[0].(map[string]interface{}) + + upgradeSettings := &container.UpgradeSettings{ + MaxSurge: int64(config["max_surge"].(int)), + MaxUnavailable: int64(config["max_unavailable"].(int)), + Strategy: config["strategy"].(string), + BlueGreenSettings: expandBlueGreenSettings(config["blue_green_settings"]), + } + + return upgradeSettings +} + +func expandBlueGreenSettings(configured interface{}) *container.BlueGreenSettings { + l, ok := configured.([]interface{}) + if !ok || l == nil || len(l) == 0 || l[0] == nil { + return &container.BlueGreenSettings{} + } + config := l[0].(map[string]interface{}) + + blueGreenSettings := &container.BlueGreenSettings{ + NodePoolSoakDuration: config["node_pool_soak_duration"].(string), + StandardRolloutPolicy: expandStandardRolloutPolicy(config["standard_rollout_policy"]), + } + + return blueGreenSettings +} + +func expandStandardRolloutPolicy(configured interface{}) *container.StandardRolloutPolicy { + l, ok := configured.([]interface{}) + if !ok || l == nil || len(l) == 0 || l[0] == nil { + return &container.StandardRolloutPolicy{} + } + + config := l[0].(map[string]interface{}) + standardRolloutPolicy := &container.StandardRolloutPolicy{ + BatchPercentage: config["batch_percentage"].(float64), + BatchNodeCount: int64(config["batch_node_count"].(int)), + BatchSoakDuration: config["batch_soak_duration"].(string), + } + + return standardRolloutPolicy +} + +func expandManagement(configured interface{}) *container.NodeManagement { + l, ok := configured.([]interface{}) + if !ok || l == nil || len(l) == 0 || l[0] == nil { + return nil + } + config := l[0].(map[string]interface{}) + + mng := &container.NodeManagement{ + AutoUpgrade: config["auto_upgrade"].(bool), + AutoRepair: config["auto_repair"].(bool), + UpgradeOptions: expandUpgradeOptions(config["upgrade_options"]), + } + + return mng +} + +func expandUpgradeOptions(configured interface{}) *container.AutoUpgradeOptions { + l, ok := 
configured.([]interface{})
+	if !ok || l == nil || len(l) == 0 || l[0] == nil {
+		return &container.AutoUpgradeOptions{}
+	}
+	config := l[0].(map[string]interface{})
+
+	upgradeOptions := &container.AutoUpgradeOptions{
+		AutoUpgradeStartTime: config["auto_upgrade_start_time"].(string),
+		Description:          config["description"].(string),
+	}
+
+	return upgradeOptions
+}
+
+// expandAuthenticatorGroupsConfig builds the AuthenticatorGroupsConfig from the
+// singleton authenticator_groups_config block. Returns nil when the block is absent.
+func expandAuthenticatorGroupsConfig(configured interface{}) *container.AuthenticatorGroupsConfig {
+	l := configured.([]interface{})
+	// Guard l[0] == nil as well: an empty `authenticator_groups_config {}` block
+	// yields a one-element list whose entry is nil, and the map assertion below
+	// would panic. Matches expandContainerClusterAuthenticatorGroupsConfig.
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+	result := &container.AuthenticatorGroupsConfig{}
+	config := l[0].(map[string]interface{})
+	if securityGroup, ok := config["security_group"]; ok {
+		result.Enabled = true
+		result.SecurityGroup = securityGroup.(string)
+	}
+	return result
+}
+
+{{ if ne $.TargetVersionName `ga` -}}
+// expandProtectConfig builds the (beta) ProtectConfig from the protect_config block.
+func expandProtectConfig(configured interface{}) *container.ProtectConfig {
+	l := configured.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+
+	pc := &container.ProtectConfig{}
+	protectConfig := l[0].(map[string]interface{})
+	pc.WorkloadConfig = expandProtectConfigWorkloadConfig(protectConfig["workload_config"])
+	if v, ok := protectConfig["workload_vulnerability_mode"]; ok {
+		pc.WorkloadVulnerabilityMode = v.(string)
+	}
+	return pc
+}
+
+func expandProtectConfigWorkloadConfig(configured interface{}) *container.WorkloadConfig {
+	l := configured.([]interface{})
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+	workloadConfig := l[0].(map[string]interface{})
+	return &container.WorkloadConfig{
+		AuditMode: workloadConfig["audit_mode"].(string),
+	}
+}
+
+func flattenProtectConfig(pc *container.ProtectConfig) []map[string]interface{} {
+	if pc == nil {
+		return nil
+	}
+
+	result := make(map[string]interface{})
+
+	result["workload_config"] = flattenProtectConfigWorkloadConfig(pc.WorkloadConfig)
+	result["workload_vulnerability_mode"] = pc.WorkloadVulnerabilityMode
+
+	return []map[string]interface{}{result}
+}
+
+func 
flattenProtectConfigWorkloadConfig(wc *container.WorkloadConfig) []map[string]interface{} { + if wc == nil { + return nil + } + + result := make(map[string]interface{}) + result["audit_mode"] = wc.AuditMode + + return []map[string]interface{}{result} +} +{{- end }} + +func expandSecurityPostureConfig(configured interface{}) *container.SecurityPostureConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + spc := &container.SecurityPostureConfig{} + spConfig := l[0].(map[string]interface{}) + if v, ok := spConfig["mode"]; ok { + spc.Mode = v.(string) + } + + if v, ok := spConfig["vulnerability_mode"]; ok { + spc.VulnerabilityMode = v.(string) + } + return spc +} + +func flattenSecurityPostureConfig(spc *container.SecurityPostureConfig) []map[string]interface{} { + if spc == nil { + return nil + } + result := make(map[string]interface{}) + + result["mode"] = spc.Mode + result["vulnerability_mode"] = spc.VulnerabilityMode + + return []map[string]interface{}{result} +} + +func flattenAdditionalPodRangesConfig(ipAllocationPolicy *container.IPAllocationPolicy) []map[string]interface{} { + if ipAllocationPolicy == nil { + return nil + } + result := make(map[string]interface{}) + + if aprc := ipAllocationPolicy.AdditionalPodRangesConfig; aprc != nil { + if len(aprc.PodRangeNames) > 0 { + result["pod_range_names"] = aprc.PodRangeNames + } else { + return nil + } + } else { + return nil + } + + return []map[string]interface{}{result} +} + +func expandNotificationConfig(configured interface{}) *container.NotificationConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return &container.NotificationConfig{ + Pubsub: &container.PubSub{ + Enabled: false, + }, + } + } + + notificationConfig := l[0].(map[string]interface{}) + if v, ok := notificationConfig["pubsub"]; ok { + if len(v.([]interface{})) > 0 { + pubsub := notificationConfig["pubsub"].([]interface{})[0].(map[string]interface{}) + + nc := 
&container.NotificationConfig{ + Pubsub: &container.PubSub{ + Enabled: pubsub["enabled"].(bool), + Topic: pubsub["topic"].(string), + }, + } + + if vv, ok := pubsub["filter"]; ok && len(vv.([]interface{})) > 0 { + filter := vv.([]interface{})[0].(map[string]interface{}) + eventType := filter["event_type"].([]interface{}) + nc.Pubsub.Filter = &container.Filter{ + EventType: tpgresource.ConvertStringArr(eventType), + } + } + + return nc + } + } + + return &container.NotificationConfig{ + Pubsub: &container.PubSub{ + Enabled: false, + }, + } +} + +func expandBinaryAuthorization(configured interface{}) *container.BinaryAuthorization { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return &container.BinaryAuthorization{ + Enabled: false, + ForceSendFields: []string{"Enabled"}, + } + } + config := l[0].(map[string]interface{}) + return &container.BinaryAuthorization{ + Enabled: config["enabled"].(bool), + EvaluationMode: config["evaluation_mode"].(string), + } +} + +func expandMasterAuth(configured interface{}) *container.MasterAuth { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + masterAuth := l[0].(map[string]interface{}) + result := &container.MasterAuth{} + + if v, ok := masterAuth["client_certificate_config"]; ok { + if len(v.([]interface{})) > 0 { + clientCertificateConfig := masterAuth["client_certificate_config"].([]interface{})[0].(map[string]interface{}) + + result.ClientCertificateConfig = &container.ClientCertificateConfig{ + IssueClientCertificate: clientCertificateConfig["issue_client_certificate"].(bool), + } + } + } + + return result +} + +func expandMasterAuthorizedNetworksConfig(configured interface{}, d *schema.ResourceData) *container.MasterAuthorizedNetworksConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return &container.MasterAuthorizedNetworksConfig{ + Enabled: false, + } + } + result := &container.MasterAuthorizedNetworksConfig{ + Enabled: true, + } + if config, ok 
:= l[0].(map[string]interface{}); ok {
+		if _, ok := config["cidr_blocks"]; ok {
+			cidrBlocks := config["cidr_blocks"].(*schema.Set).List()
+			result.CidrBlocks = make([]*container.CidrBlock, 0)
+			for _, v := range cidrBlocks {
+				cidrBlock := v.(map[string]interface{})
+				result.CidrBlocks = append(result.CidrBlocks, &container.CidrBlock{
+					CidrBlock:   cidrBlock["cidr_block"].(string),
+					DisplayName: cidrBlock["display_name"].(string),
+				})
+			}
+		}
+		if v, ok := d.GetOkExists("master_authorized_networks_config.0.gcp_public_cidrs_access_enabled"); ok {
+			result.GcpPublicCidrsAccessEnabled = v.(bool)
+			result.ForceSendFields = []string{"GcpPublicCidrsAccessEnabled"}
+		}
+	}
+	return result
+}
+
+// expandNetworkPolicy builds the NetworkPolicy from the network_policy block.
+// The provider field is only honored when enabled is true.
+func expandNetworkPolicy(configured interface{}) *container.NetworkPolicy {
+	result := &container.NetworkPolicy{}
+	l := configured.([]interface{})
+	if len(l) == 0 {
+		return result
+	}
+	config := l[0].(map[string]interface{})
+	if enabled, ok := config["enabled"]; ok && enabled.(bool) {
+		result.Enabled = true
+		if provider, ok := config["provider"]; ok {
+			result.Provider = provider.(string)
+		}
+	}
+	return result
+}
+
+// isEnablePrivateEndpointPSCCluster reports whether the cluster is a PSC cluster
+// (no private nodes and no master CIDR) with the private endpoint enabled.
+func isEnablePrivateEndpointPSCCluster(cluster *container.Cluster) bool {
+	// EnablePrivateEndpoint not provided
+	if cluster == nil || cluster.PrivateClusterConfig == nil {
+		return false
+	}
+	// Not a PSC cluster
+	if cluster.PrivateClusterConfig.EnablePrivateNodes || len(cluster.PrivateClusterConfig.MasterIpv4CidrBlock) > 0 {
+		return false
+	}
+	// PSC Cluster with EnablePrivateEndpoint
+	if cluster.PrivateClusterConfig.EnablePrivateEndpoint {
+		return true
+	}
+	return false
+}
+
+// isEnablePDCSI reports whether the GCE Persistent Disk CSI driver addon is enabled.
+func isEnablePDCSI(cluster *container.Cluster) bool {
+	if cluster.AddonsConfig == nil || cluster.AddonsConfig.GcePersistentDiskCsiDriverConfig == nil {
+		// gofmt: Go does not use explicit statement-terminating semicolons.
+		return true // PDCSI is enabled by default.
+ } + return cluster.AddonsConfig.GcePersistentDiskCsiDriverConfig.Enabled +} + +func expandPrivateClusterConfig(configured interface{}) *container.PrivateClusterConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + config := l[0].(map[string]interface{}) + return &container.PrivateClusterConfig{ + EnablePrivateEndpoint: config["enable_private_endpoint"].(bool), + EnablePrivateNodes: config["enable_private_nodes"].(bool), + MasterIpv4CidrBlock: config["master_ipv4_cidr_block"].(string), + MasterGlobalAccessConfig: expandPrivateClusterConfigMasterGlobalAccessConfig(config["master_global_access_config"]), + PrivateEndpointSubnetwork: config["private_endpoint_subnetwork"].(string), + ForceSendFields: []string{"EnablePrivateEndpoint", "EnablePrivateNodes", "MasterIpv4CidrBlock", "MasterGlobalAccessConfig"}, + } +} + +func expandPrivateClusterConfigMasterGlobalAccessConfig(configured interface{}) *container.PrivateClusterMasterGlobalAccessConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + config := l[0].(map[string]interface{}) + return &container.PrivateClusterMasterGlobalAccessConfig{ + Enabled: config["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } +} + +func expandVerticalPodAutoscaling(configured interface{}) *container.VerticalPodAutoscaling { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + config := l[0].(map[string]interface{}) + return &container.VerticalPodAutoscaling{ + Enabled: config["enabled"].(bool), + } +} + +func expandServiceExternalIpsConfig(configured interface{}) *container.ServiceExternalIPsConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + config := l[0].(map[string]interface{}) + return &container.ServiceExternalIPsConfig{ + Enabled: config["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } +} + +func expandMeshCertificates(configured interface{}) *container.MeshCertificates { + l := configured.([]interface{}) 
+ if len(l) == 0 { + return nil + } + config := l[0].(map[string]interface{}) + return &container.MeshCertificates{ + EnableCertificates: config["enable_certificates"].(bool), + ForceSendFields: []string{"EnableCertificates"}, + } +} + +func expandDatabaseEncryption(configured interface{}) *container.DatabaseEncryption { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + config := l[0].(map[string]interface{}) + return &container.DatabaseEncryption{ + State: config["state"].(string), + KeyName: config["key_name"].(string), + } +} + +func expandReleaseChannel(configured interface{}) *container.ReleaseChannel { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + config := l[0].(map[string]interface{}) + return &container.ReleaseChannel{ + Channel: config["channel"].(string), + } +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandClusterTelemetry(configured interface{}) *container.ClusterTelemetry { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + config := l[0].(map[string]interface{}) + return &container.ClusterTelemetry{ + Type: config["type"].(string), + } +} + +{{ end }} +func expandDefaultSnatStatus(configured interface{}) *container.DefaultSnatStatus { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + config := l[0].(map[string]interface{}) + return &container.DefaultSnatStatus{ + Disabled: config["disabled"].(bool), + ForceSendFields: []string{"Disabled"}, + } + +} + +func expandWorkloadIdentityConfig(configured interface{}) *container.WorkloadIdentityConfig { + l := configured.([]interface{}) + v := &container.WorkloadIdentityConfig{} + + // this API considers unset and set-to-empty equivalent. 
Note that it will + // always return an empty block given that we always send one, but clusters + // not created in TF will not always return one (and may return nil) + if len(l) == 0 || l[0] == nil { + return v + } + + config := l[0].(map[string]interface{}) + v.WorkloadPool = config["workload_pool"].(string) + + return v +} + +func expandIdentityServiceConfig(configured interface{}) *container.IdentityServiceConfig { + l := configured.([]interface{}) + v := &container.IdentityServiceConfig{} + + config := l[0].(map[string]interface{}) + v.Enabled = config["enabled"].(bool) + + return v +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandPodSecurityPolicyConfig(configured interface{}) *container.PodSecurityPolicyConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.PodSecurityPolicyConfig{ + Enabled: config["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func expandSecretManagerConfig(configured interface{}) *container.SecretManagerConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.SecretManagerConfig{ + Enabled: config["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } +} +{{- end }} + +func expandDefaultMaxPodsConstraint(v interface{}) *container.MaxPodsConstraint { + if v == nil { + return nil + } + + return &container.MaxPodsConstraint{ + MaxPodsPerNode: int64(v.(int)), + } +} + +func expandCostManagementConfig(configured interface{}) *container.CostManagementConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.CostManagementConfig{ + Enabled: config["enabled"].(bool), + ForceSendFields: []string{"Enabled"}, + } +} + +func expandResourceUsageExportConfig(configured interface{}) 
*container.ResourceUsageExportConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return &container.ResourceUsageExportConfig{} + } + + resourceUsageConfig := l[0].(map[string]interface{}) + + result := &container.ResourceUsageExportConfig{ + EnableNetworkEgressMetering: resourceUsageConfig["enable_network_egress_metering"].(bool), + ConsumptionMeteringConfig: &container.ConsumptionMeteringConfig{ + Enabled: resourceUsageConfig["enable_resource_consumption_metering"].(bool), + ForceSendFields: []string{"Enabled"}, + }, + ForceSendFields: []string{"EnableNetworkEgressMetering"}, + } + if _, ok := resourceUsageConfig["bigquery_destination"]; ok { + destinationArr := resourceUsageConfig["bigquery_destination"].([]interface{}) + if len(destinationArr) > 0 && destinationArr[0] != nil { + bigqueryDestination := destinationArr[0].(map[string]interface{}) + if _, ok := bigqueryDestination["dataset_id"]; ok { + result.BigqueryDestination = &container.BigQueryDestination{ + DatasetId: bigqueryDestination["dataset_id"].(string), + } + } + } + } + return result +} + +func expandDnsConfig(configured interface{}) *container.DNSConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.DNSConfig{ +{{- if ne $.TargetVersionName "ga" }} + AdditiveVpcScopeDnsDomain: config["additive_vpc_scope_dns_domain"].(string), +{{- end }} + ClusterDns: config["cluster_dns"].(string), + ClusterDnsScope: config["cluster_dns_scope"].(string), + ClusterDnsDomain: config["cluster_dns_domain"].(string), + } +} + +func expandGatewayApiConfig(configured interface{}) *container.GatewayAPIConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.GatewayAPIConfig{ + Channel: config["channel"].(string), + } +} + +func expandFleet(configured interface{}) *container.Fleet { + l := 
configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.Fleet{ + Project: config["project"].(string), + } +} + +func expandEnableK8sBetaApis(configured interface{}, enabledAPIs []string) *container.K8sBetaAPIConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + result := &container.K8sBetaAPIConfig{} + if v, ok := config["enabled_apis"]; ok { + notEnabledAPIsSet := v.(*schema.Set) + for _, enabledAPI := range enabledAPIs { + if notEnabledAPIsSet.Contains(enabledAPI) { + notEnabledAPIsSet.Remove(enabledAPI) + } + } + + result.EnabledApis = tpgresource.ConvertStringSet(notEnabledAPIsSet) + } + + return result +} + +func expandContainerClusterLoggingConfig(configured interface{}) *container.LoggingConfig { + l := configured.([]interface{}) + if len(l) == 0 { + return nil + } + + var components []string + if l[0] != nil { + config := l[0].(map[string]interface{}) + components = tpgresource.ConvertStringArr(config["enable_components"].([]interface{})) + } + + return &container.LoggingConfig{ + ComponentConfig: &container.LoggingComponentConfig{ + EnableComponents: components, + }, + } +} + +func expandMonitoringConfig(configured interface{}) *container.MonitoringConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + mc := &container.MonitoringConfig{} + config := l[0].(map[string]interface{}) +{{/* In version == 'ga' enable_components will always be specified. 
*/}} + if v, ok := config["enable_components"]; ok { + enable_components := v.([]interface{}) + mc.ComponentConfig = &container.MonitoringComponentConfig{ + EnableComponents: tpgresource.ConvertStringArr(enable_components), + } + } + if v, ok := config["managed_prometheus"]; ok && len(v.([]interface{})) > 0 { + managed_prometheus := v.([]interface{})[0].(map[string]interface{}) + mc.ManagedPrometheusConfig = &container.ManagedPrometheusConfig{ + Enabled: managed_prometheus["enabled"].(bool), + } + } + + if v, ok := config["advanced_datapath_observability_config"]; ok && len(v.([]interface{})) > 0 { + advanced_datapath_observability_config := v.([]interface{})[0].(map[string]interface{}) + + mc.AdvancedDatapathObservabilityConfig = &container.AdvancedDatapathObservabilityConfig{ + EnableMetrics: advanced_datapath_observability_config["enable_metrics"].(bool), + } + + enable_relay := advanced_datapath_observability_config["enable_relay"].(bool) + relay_mode := advanced_datapath_observability_config["relay_mode"].(string) + if enable_relay { + mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay + } else if relay_mode == "INTERNAL_VPC_LB" || relay_mode == "EXTERNAL_LB" { + mc.AdvancedDatapathObservabilityConfig.RelayMode = relay_mode + } else { + mc.AdvancedDatapathObservabilityConfig.EnableRelay = enable_relay + mc.AdvancedDatapathObservabilityConfig.RelayMode = "DISABLED" + mc.AdvancedDatapathObservabilityConfig.ForceSendFields = []string{"EnableRelay"} + } + } + + return mc +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandContainerClusterTpuConfig(configured interface{}) *container.TpuConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.TpuConfig{ + Enabled: config["enabled"].(bool), + UseServiceNetworking: config["use_service_networking"].(bool), + } +} +{{- end }} + +func expandContainerClusterAuthenticatorGroupsConfig(configured 
interface{}) *container.AuthenticatorGroupsConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + result := &container.AuthenticatorGroupsConfig{} + if securityGroup, ok := config["security_group"]; ok { + if securityGroup == nil || securityGroup.(string) == "" { + result.Enabled = false + } else { + result.Enabled = true + result.SecurityGroup = securityGroup.(string) + } + } + return result +} + +func expandNodePoolDefaults(configured interface{}) *container.NodePoolDefaults { + l, ok := configured.([]interface{}) + if !ok || l == nil || len(l) == 0 || l[0] == nil { + return nil + } + nodePoolDefaults := &container.NodePoolDefaults{} + config := l[0].(map[string]interface{}) + if v, ok := config["node_config_defaults"]; ok && len(v.([]interface{})) > 0 { + nodePoolDefaults.NodeConfigDefaults = expandNodeConfigDefaults(v) + } + return nodePoolDefaults +} + +func flattenNodePoolDefaults(c *container.NodePoolDefaults) []map[string]interface{} { + if c == nil { + return nil + } + + result := make(map[string]interface{}) + if c.NodeConfigDefaults != nil { + result["node_config_defaults"] = flattenNodeConfigDefaults(c.NodeConfigDefaults) + } + + return []map[string]interface{}{result} +} + +func expandNodePoolAutoConfig(configured interface{}) *container.NodePoolAutoConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + npac := &container.NodePoolAutoConfig{} + config := l[0].(map[string]interface{}) + + if v, ok := config["network_tags"]; ok && len(v.([]interface{})) > 0 { + npac.NetworkTags = expandNodePoolAutoConfigNetworkTags(v) + } + + if v, ok := config["resource_manager_tags"]; ok && len(v.(map[string]interface{})) > 0 { + npac.ResourceManagerTags = expandResourceManagerTags(v) + } + + return npac +} + +func expandNodePoolAutoConfigNetworkTags(configured interface{}) *container.NetworkTags { + l := configured.([]interface{}) + if len(l) 
== 0 || l[0] == nil { + return nil + } + nt := &container.NetworkTags{} + config := l[0].(map[string]interface{}) + + if v, ok := config["tags"]; ok && len(v.([]interface{})) > 0 { + nt.Tags = tpgresource.ConvertStringArr(v.([]interface{})) + } + return nt +} + +{{ if ne $.TargetVersionName `ga` -}} +func expandWorkloadAltsConfig(configured interface{}) *container.WorkloadALTSConfig { + l := configured.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil + } + + config := l[0].(map[string]interface{}) + return &container.WorkloadALTSConfig{ + EnableAlts: config["enable_alts"].(bool), + ForceSendFields: []string{"EnableAlts"}, + } +} +{{- end }} + +func flattenNotificationConfig(c *container.NotificationConfig) []map[string]interface{} { + if c == nil { + return nil + } + + if c.Pubsub.Filter != nil { + filter := []map[string]interface{}{} + if len(c.Pubsub.Filter.EventType) > 0 { + filter = append(filter, map[string]interface{}{ + "event_type": c.Pubsub.Filter.EventType, + }) + } + + return []map[string]interface{}{ + { + "pubsub": []map[string]interface{}{ + { + "enabled": c.Pubsub.Enabled, + "topic": c.Pubsub.Topic, + "filter": filter, + }, + }, + }, + } + } + + return []map[string]interface{}{ + { + "pubsub": []map[string]interface{}{ + { + "enabled": c.Pubsub.Enabled, + "topic": c.Pubsub.Topic, + }, + }, + }, + } +} + +func flattenBinaryAuthorization(c *container.BinaryAuthorization) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + "evaluation_mode": c.EvaluationMode, + }) + } + return result +} + +func flattenNetworkPolicy(c *container.NetworkPolicy) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "enabled": c.Enabled, + "provider": c.Provider, + }) + } else { + // Explicitly set the network policy to the default. 
+ result = append(result, map[string]interface{}{ + "enabled": false, + "provider": "PROVIDER_UNSPECIFIED", + }) + } + return result +} + +func flattenClusterAddonsConfig(c *container.AddonsConfig) []map[string]interface{} { + result := make(map[string]interface{}) + if c == nil { + return nil + } + if c.HorizontalPodAutoscaling != nil { + result["horizontal_pod_autoscaling"] = []map[string]interface{}{ + { + "disabled": c.HorizontalPodAutoscaling.Disabled, + }, + } + } + if c.HttpLoadBalancing != nil { + result["http_load_balancing"] = []map[string]interface{}{ + { + "disabled": c.HttpLoadBalancing.Disabled, + }, + } + } + if c.NetworkPolicyConfig != nil { + result["network_policy_config"] = []map[string]interface{}{ + { + "disabled": c.NetworkPolicyConfig.Disabled, + }, + } + } + + if c.GcpFilestoreCsiDriverConfig != nil { + result["gcp_filestore_csi_driver_config"] = []map[string]interface{}{ + { + "enabled": c.GcpFilestoreCsiDriverConfig.Enabled, + }, + } + } + + if c.CloudRunConfig != nil { + cloudRunConfig := map[string]interface{}{ + "disabled": c.CloudRunConfig.Disabled, + } + if c.CloudRunConfig.LoadBalancerType == "LOAD_BALANCER_TYPE_INTERNAL" { + // Currently we only allow setting load_balancer_type to LOAD_BALANCER_TYPE_INTERNAL + cloudRunConfig["load_balancer_type"] = "LOAD_BALANCER_TYPE_INTERNAL" + } + result["cloudrun_config"] = []map[string]interface{}{cloudRunConfig} + } + + if c.DnsCacheConfig != nil { + result["dns_cache_config"] = []map[string]interface{}{ + { + "enabled": c.DnsCacheConfig.Enabled, + }, + } + } + + if c.GcePersistentDiskCsiDriverConfig != nil { + result["gce_persistent_disk_csi_driver_config"] = []map[string]interface{}{ + { + "enabled": c.GcePersistentDiskCsiDriverConfig.Enabled, + }, + } + } + if c.GkeBackupAgentConfig != nil { + result["gke_backup_agent_config"] = []map[string]interface{}{ + { + "enabled": c.GkeBackupAgentConfig.Enabled, + }, + } + } + if c.ConfigConnectorConfig != nil { + result["config_connector_config"] = 
[]map[string]interface{}{ + { + "enabled": c.ConfigConnectorConfig.Enabled, + }, + } + } + if c.GcsFuseCsiDriverConfig != nil { + result["gcs_fuse_csi_driver_config"] = []map[string]interface{}{ + { + "enabled": c.GcsFuseCsiDriverConfig.Enabled, + }, + } + } + if c.StatefulHaConfig != nil { + result["stateful_ha_config"] = []map[string]interface{}{ + { + "enabled": c.StatefulHaConfig.Enabled, + }, + } + } + +{{ if ne $.TargetVersionName `ga` -}} + if c.IstioConfig != nil { + result["istio_config"] = []map[string]interface{}{ + { + "disabled": c.IstioConfig.Disabled, + "auth": c.IstioConfig.Auth, + }, + } + } + + if c.KalmConfig != nil { + result["kalm_config"] = []map[string]interface{}{ + { + "enabled": c.KalmConfig.Enabled, + }, + } + } +{{- end }} + + return []map[string]interface{}{result} +} + +func flattenClusterNodePools(d *schema.ResourceData, config *transport_tpg.Config, c []*container.NodePool) ([]map[string]interface{}, error) { + nodePools := make([]map[string]interface{}, 0, len(c)) + + for i, np := range c { + nodePool, err := flattenNodePool(d, config, np, fmt.Sprintf("node_pool.%d.", i)) + if err != nil { + return nil, err + } + nodePools = append(nodePools, nodePool) + } + + return nodePools, nil +} + +func flattenAuthenticatorGroupsConfig(c *container.AuthenticatorGroupsConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "security_group": c.SecurityGroup, + }, + } +} + +func flattenPrivateClusterConfig(c *container.PrivateClusterConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enable_private_endpoint": c.EnablePrivateEndpoint, + "enable_private_nodes": c.EnablePrivateNodes, + "master_ipv4_cidr_block": c.MasterIpv4CidrBlock, + "master_global_access_config": flattenPrivateClusterConfigMasterGlobalAccessConfig(c.MasterGlobalAccessConfig), + "peering_name": c.PeeringName, + "private_endpoint": c.PrivateEndpoint, + 
"private_endpoint_subnetwork": c.PrivateEndpointSubnetwork, + "public_endpoint": c.PublicEndpoint, + }, + } +} + +// Like most GKE blocks, this is not returned from the API at all when false. This causes trouble +// for users who've set enabled = false in config as they will get a permadiff. Always setting the +// field resolves that. We can assume if it was not returned, it's false. +func flattenPrivateClusterConfigMasterGlobalAccessConfig(c *container.PrivateClusterMasterGlobalAccessConfig) []map[string]interface{} { + return []map[string]interface{}{ + { + "enabled": c != nil && c.Enabled, + }, + } +} + +func flattenVerticalPodAutoscaling(c *container.VerticalPodAutoscaling) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enabled": c.Enabled, + }, + } +} + +func flattenReleaseChannel(c *container.ReleaseChannel) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil && c.Channel != "" { + result = append(result, map[string]interface{}{ + "channel": c.Channel, + }) + } else { + // Explicitly set the release channel to the UNSPECIFIED. 
+ result = append(result, map[string]interface{}{ + "channel": "UNSPECIFIED", + }) + } + return result +} + +{{ if ne $.TargetVersionName `ga` -}} + +func flattenClusterTelemetry(c *container.ClusterTelemetry) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "type": c.Type, + }) + } + return result +} + +{{ end }} + +func flattenDefaultSnatStatus(c *container.DefaultSnatStatus) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "disabled": c.Disabled, + }) + } + return result +} + +func flattenWorkloadIdentityConfig(c *container.WorkloadIdentityConfig, d *schema.ResourceData, config *transport_tpg.Config) []map[string]interface{} { + if c == nil { + return nil + } + + return []map[string]interface{}{ + { + "workload_pool": c.WorkloadPool, + }, + } +} + +func flattenIdentityServiceConfig(c *container.IdentityServiceConfig, d *schema.ResourceData, config *transport_tpg.Config) []map[string]interface{} { + if c == nil { + return nil + } + + return []map[string]interface{}{ + { + "enabled": c.Enabled, + }, + } +} + +func flattenPodCidrOverprovisionConfig(c *container.PodCIDROverprovisionConfig) []map[string]interface{} { + if c == nil { + return nil + } + + return []map[string]interface{}{ + { + "disabled": c.Disable, + }, + } +} + +func flattenIPAllocationPolicy(c *container.Cluster, d *schema.ResourceData, config *transport_tpg.Config) ([]map[string]interface{}, error) { + // If IP aliasing isn't enabled, none of the values in this block can be set. 
+ if c == nil || c.IpAllocationPolicy == nil || !c.IpAllocationPolicy.UseIpAliases { + if err := d.Set("networking_mode", "ROUTES"); err != nil { + return nil, fmt.Errorf("Error setting networking_mode: %s", err) + } + return nil, nil + } + if err := d.Set("networking_mode", "VPC_NATIVE"); err != nil { + return nil, fmt.Errorf("Error setting networking_mode: %s", err) + } + + p := c.IpAllocationPolicy + + // handle older clusters that return JSON null + // corresponding to "STACK_TYPE_UNSPECIFIED" due to GKE declining to backfill + // equivalent to default_if_empty + if p.StackType == "" { + p.StackType = "IPV4" + } + + return []map[string]interface{}{ + { + "cluster_ipv4_cidr_block": p.ClusterIpv4CidrBlock, + "services_ipv4_cidr_block": p.ServicesIpv4CidrBlock, + "cluster_secondary_range_name": p.ClusterSecondaryRangeName, + "services_secondary_range_name": p.ServicesSecondaryRangeName, + "stack_type": p.StackType, + "pod_cidr_overprovision_config": flattenPodCidrOverprovisionConfig(p.PodCidrOverprovisionConfig), + "additional_pod_ranges_config": flattenAdditionalPodRangesConfig(c.IpAllocationPolicy), + }, + }, nil +} + +func flattenMaintenancePolicy(mp *container.MaintenancePolicy) []map[string]interface{} { + if mp == nil || mp.Window == nil { + return nil + } + + exclusions := []map[string]interface{}{} + if mp.Window.MaintenanceExclusions != nil { + for wName, window := range mp.Window.MaintenanceExclusions { + exclusion := map[string]interface{}{ + "start_time": window.StartTime, + "end_time": window.EndTime, + "exclusion_name": wName, + } + if window.MaintenanceExclusionOptions != nil { + // When the scope is set to NO_UPGRADES which is the default value, + // the maintenance exclusion returned by GCP will be empty. + // This seems like a bug. 
To workaround this, assign NO_UPGRADES to the scope explicitly + scope := "NO_UPGRADES" + if window.MaintenanceExclusionOptions.Scope != "" { + scope = window.MaintenanceExclusionOptions.Scope + } + exclusion["exclusion_options"] = []map[string]interface{}{ + { + "scope": scope, + }, + } + } + exclusions = append(exclusions, exclusion) + } + } + + if mp.Window.DailyMaintenanceWindow != nil { + return []map[string]interface{}{ + { + "daily_maintenance_window": []map[string]interface{}{ + { + "start_time": mp.Window.DailyMaintenanceWindow.StartTime, + "duration": mp.Window.DailyMaintenanceWindow.Duration, + }, + }, + "maintenance_exclusion": exclusions, + }, + } + } + if mp.Window.RecurringWindow != nil { + return []map[string]interface{}{ + { + "recurring_window": []map[string]interface{}{ + { + "start_time": mp.Window.RecurringWindow.Window.StartTime, + "end_time": mp.Window.RecurringWindow.Window.EndTime, + "recurrence": mp.Window.RecurringWindow.Recurrence, + }, + }, + "maintenance_exclusion": exclusions, + }, + } + } + return nil +} + +func flattenMasterAuth(ma *container.MasterAuth) []map[string]interface{} { + if ma == nil { + return nil + } + masterAuth := []map[string]interface{}{ + { + "client_certificate": ma.ClientCertificate, + "client_key": ma.ClientKey, + "cluster_ca_certificate": ma.ClusterCaCertificate, + }, + } + + // No version of the GKE API returns the client_certificate_config value. + // Instead, we need to infer whether or not it was set based on the + // client cert being returned from the API or not. + // Previous versions of the provider didn't record anything in state when + // a client cert was enabled, only setting the block when it was false. 
+ masterAuth[0]["client_certificate_config"] = []map[string]interface{}{ + { + "issue_client_certificate": len(ma.ClientCertificate) != 0, + }, + } + + return masterAuth +} + +func flattenClusterAutoscaling(a *container.ClusterAutoscaling) []map[string]interface{} { + r := make(map[string]interface{}) + if a == nil { + r["enabled"] = false + return []map[string]interface{}{r} + } + + if a.EnableNodeAutoprovisioning { + resourceLimits := make([]interface{}, 0, len(a.ResourceLimits)) + for _, rl := range a.ResourceLimits { + resourceLimits = append(resourceLimits, map[string]interface{}{ + "resource_type": rl.ResourceType, + "minimum": rl.Minimum, + "maximum": rl.Maximum, + }) + } + r["resource_limits"] = resourceLimits + r["enabled"] = true + r["auto_provisioning_defaults"] = flattenAutoProvisioningDefaults(a.AutoprovisioningNodePoolDefaults) + } else { + r["enabled"] = false + } + r["autoscaling_profile"] = a.AutoscalingProfile + + return []map[string]interface{}{r} +} + +func flattenAutoProvisioningDefaults(a *container.AutoprovisioningNodePoolDefaults) []map[string]interface{} { + r := make(map[string]interface{}) + r["oauth_scopes"] = a.OauthScopes + r["service_account"] = a.ServiceAccount + r["disk_size"] = a.DiskSizeGb + r["disk_type"] = a.DiskType + r["image_type"] = a.ImageType + r["min_cpu_platform"] = a.MinCpuPlatform + r["boot_disk_kms_key"] = a.BootDiskKmsKey + r["shielded_instance_config"] = flattenShieldedInstanceConfig(a.ShieldedInstanceConfig) + r["management"] = flattenManagement(a.Management) + r["upgrade_settings"] = flattenUpgradeSettings(a.UpgradeSettings) + + return []map[string]interface{}{r} +} + +func flattenUpgradeSettings(a *container.UpgradeSettings) []map[string]interface{} { + if a == nil { + return nil + } + r := make(map[string]interface{}) + r["max_surge"] = a.MaxSurge + r["max_unavailable"] = a.MaxUnavailable + r["strategy"] = a.Strategy + r["blue_green_settings"] = flattenBlueGreenSettings(a.BlueGreenSettings) + + return 
[]map[string]interface{}{r} +} + +func flattenBlueGreenSettings(a *container.BlueGreenSettings) []map[string]interface{} { + if a == nil { + return nil + } + + r := make(map[string]interface{}) + r["node_pool_soak_duration"] = a.NodePoolSoakDuration + r["standard_rollout_policy"] = flattenStandardRolloutPolicy(a.StandardRolloutPolicy) + + return []map[string]interface{}{r} +} + +func flattenStandardRolloutPolicy(a *container.StandardRolloutPolicy) []map[string]interface{} { + if a == nil { + return nil + } + + r := make(map[string]interface{}) + r["batch_percentage"] = a.BatchPercentage + r["batch_node_count"] = a.BatchNodeCount + r["batch_soak_duration"] = a.BatchSoakDuration + + return []map[string]interface{}{r} +} + +func flattenManagement(a *container.NodeManagement) []map[string]interface{} { + if a == nil { + return nil + } + r := make(map[string]interface{}) + r["auto_upgrade"] = a.AutoUpgrade + r["auto_repair"] = a.AutoRepair + r["upgrade_options"] = flattenUpgradeOptions(a.UpgradeOptions) + + return []map[string]interface{}{r} +} + +func flattenUpgradeOptions(a *container.AutoUpgradeOptions) []map[string]interface{} { + if a == nil { + return nil + } + + r := make(map[string]interface{}) + r["auto_upgrade_start_time"] = a.AutoUpgradeStartTime + r["description"] = a.Description + + return []map[string]interface{}{r} +} + +func flattenMasterAuthorizedNetworksConfig(c *container.MasterAuthorizedNetworksConfig) []map[string]interface{} { + if c == nil || !c.Enabled { + return nil + } + result := make(map[string]interface{}) + cidrBlocks := make([]interface{}, 0, len(c.CidrBlocks)) + for _, v := range c.CidrBlocks { + cidrBlocks = append(cidrBlocks, map[string]interface{}{ + "cidr_block": v.CidrBlock, + "display_name": v.DisplayName, + }) + } + result["cidr_blocks"] = schema.NewSet(schema.HashResource(cidrBlockConfig), cidrBlocks) + result["gcp_public_cidrs_access_enabled"] = c.GcpPublicCidrsAccessEnabled + return []map[string]interface{}{result} +} + +{{ if 
ne $.TargetVersionName `ga` -}} +func flattenPodSecurityPolicyConfig(c *container.PodSecurityPolicyConfig) []map[string]interface{} { + if c == nil { + return []map[string]interface{}{ + { + "enabled": false, + }, + } + } + return []map[string]interface{}{ + { + "enabled": c.Enabled, + }, + } +} + +{{ end }} + +{{ if ne $.TargetVersionName `ga` -}} +func flattenSecretManagerConfig(c *container.SecretManagerConfig) []map[string]interface{} { + if c == nil { + return []map[string]interface{}{ + { + "enabled": false, + }, + } + } + return []map[string]interface{}{ + { + "enabled": c.Enabled, + }, + } +} + +{{ end }} + +func flattenResourceUsageExportConfig(c *container.ResourceUsageExportConfig) []map[string]interface{} { + if c == nil { + return nil + } + + enableResourceConsumptionMetering := false + if c.ConsumptionMeteringConfig != nil && c.ConsumptionMeteringConfig.Enabled == true { + enableResourceConsumptionMetering = true + } + + return []map[string]interface{}{ + { + "enable_network_egress_metering": c.EnableNetworkEgressMetering, + "enable_resource_consumption_metering": enableResourceConsumptionMetering, + "bigquery_destination": []map[string]interface{}{ + {"dataset_id": c.BigqueryDestination.DatasetId}, + }, + }, + } +} + +func flattenServiceExternalIpsConfig(c *container.ServiceExternalIPsConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enabled": c.Enabled, + }, + } +} + +func flattenMeshCertificates(c *container.MeshCertificates) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enable_certificates": c.EnableCertificates, + }, + } +} + +func flattenManagementConfig(c *container.CostManagementConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enabled": c.Enabled, + }, + } +} + +func flattenDatabaseEncryption(c *container.DatabaseEncryption) []map[string]interface{} { + if c == nil { + 
return nil + } + return []map[string]interface{}{ + { + "state": c.State, + "key_name": c.KeyName, + }, + } +} + +func flattenDnsConfig(c *container.DNSConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { +{{- if ne $.TargetVersionName "ga" }} + "additive_vpc_scope_dns_domain": c.AdditiveVpcScopeDnsDomain, +{{- end }} + "cluster_dns": c.ClusterDns, + "cluster_dns_scope": c.ClusterDnsScope, + "cluster_dns_domain": c.ClusterDnsDomain, + }, + } +} + +func flattenGatewayApiConfig(c *container.GatewayAPIConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "channel": c.Channel, + }, + } +} + +func flattenFleet(c *container.Fleet) []map[string]interface{} { + if c == nil { + return nil + } + + // Parse membership_id and membership_location from full membership name. + var membership_id, membership_location string + membershipRE := regexp.MustCompile(`^(//[a-zA-Z0-9\.\-]+)?/?projects/([^/]+)/locations/([a-zA-Z0-9\-]+)/memberships/([^/]+)$`) + if match := membershipRE.FindStringSubmatch(c.Membership); match != nil { + membership_id = match[4] + membership_location = match[3] + } + + return []map[string]interface{}{ + { + "project": c.Project, + "membership": c.Membership, + "membership_id": membership_id, + "membership_location": membership_location, + "pre_registered": c.PreRegistered, + }, + } +} + +func flattenEnableK8sBetaApis(c *container.K8sBetaAPIConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enabled_apis": c.EnabledApis, + }, + } +} + +func flattenContainerClusterLoggingConfig(c *container.LoggingConfig) []map[string]interface{} { + if c == nil { + return nil + } + + return []map[string]interface{}{ + { + "enable_components": c.ComponentConfig.EnableComponents, + }, + } +} + +func flattenMonitoringConfig(c *container.MonitoringConfig) []map[string]interface{} { + if c == nil { + return nil + } + 
+ result := make(map[string]interface{}) + if c.ComponentConfig != nil { + result["enable_components"] = c.ComponentConfig.EnableComponents + } + if c.ManagedPrometheusConfig != nil { + result["managed_prometheus"] = flattenManagedPrometheusConfig(c.ManagedPrometheusConfig) + } + if c.AdvancedDatapathObservabilityConfig != nil { + result["advanced_datapath_observability_config"] = flattenAdvancedDatapathObservabilityConfig(c.AdvancedDatapathObservabilityConfig) + } + + return []map[string]interface{}{result} +} + +func flattenAdvancedDatapathObservabilityConfig(c *container.AdvancedDatapathObservabilityConfig) []map[string]interface{} { + if c == nil { + return nil + } + + if c.EnableRelay { + return []map[string]interface{}{ + { + "enable_metrics": c.EnableMetrics, + "enable_relay": c.EnableRelay, + }, + } + } + + if c.RelayMode == "INTERNAL_VPC_LB" || c.RelayMode == "EXTERNAL_LB" { + return []map[string]interface{}{ + { + "enable_metrics": c.EnableMetrics, + "relay_mode": c.RelayMode, + }, + } + } + + return []map[string]interface{}{ + { + "enable_metrics": c.EnableMetrics, + "enable_relay": false, + "relay_mode": "DISABLED", + }, + } +} + +func flattenManagedPrometheusConfig(c *container.ManagedPrometheusConfig) []map[string]interface{} { + return []map[string]interface{}{ + { + "enabled": c != nil && c.Enabled, + }, + } +} + +func flattenNodePoolAutoConfig(c *container.NodePoolAutoConfig) []map[string]interface{} { + if c == nil { + return nil + } + + result := make(map[string]interface{}) + if c.NetworkTags != nil { + result["network_tags"] = flattenNodePoolAutoConfigNetworkTags(c.NetworkTags) + } + if c.ResourceManagerTags != nil { + result["resource_manager_tags"] = flattenResourceManagerTags(c.ResourceManagerTags) + } + + return []map[string]interface{}{result} +} + +func flattenNodePoolAutoConfigNetworkTags(c *container.NetworkTags) []map[string]interface{} { + if c == nil { + return nil + } + + result := make(map[string]interface{}) + if c.Tags != nil { + 
result["tags"] = c.Tags + } + return []map[string]interface{}{result} +} + +{{ if ne $.TargetVersionName `ga` -}} +func flattenWorkloadAltsConfig(c *container.WorkloadALTSConfig) []map[string]interface{} { + if c == nil { + return nil + } + return []map[string]interface{}{ + { + "enable_alts": c.EnableAlts, + }, + } +} +{{- end }} + +func resourceContainerClusterStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return nil, err + } + + clusterName := d.Get("name").(string) + + if err := d.Set("location", location); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) + } + + if err := d.Set("deletion_protection", true); err != nil { + return nil, fmt.Errorf("Error setting deletion_protection: %s", err) + } + + if _, err := containerClusterAwaitRestingState(config, project, location, clusterName, userAgent, d.Timeout(schema.TimeoutCreate)); err != nil { + return nil, err + } + + d.SetId(containerClusterFullName(project, location, clusterName)) + + return []*schema.ResourceData{d}, nil +} + +func containerClusterMutexKey(project, location, clusterName string) string { + return fmt.Sprintf("google-container-cluster/%s/%s/%s", project, location, clusterName) +} + +func containerClusterFullName(project, location, cluster string) string { + return fmt.Sprintf("projects/%s/locations/%s/clusters/%s", project, location, cluster) +} + +func extractNodePoolInformationFromCluster(d 
*schema.ResourceData, config *transport_tpg.Config, clusterName string) (*NodePoolInformation, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return nil, err + } + + return &NodePoolInformation{ + project: project, + location: location, + cluster: d.Get("name").(string), + }, nil +} + +// Suppress unremovable default scope values from GCP. +// If the default service account would not otherwise have it, the `monitoring.write` scope +// is added to a GKE cluster's scopes regardless of what the user provided. +// monitoring.write is inherited from monitoring (rw) and cloud-platform, so it won't always +// be present. +// Enabling Stackdriver features through logging_service and monitoring_service may enable +// monitoring or logging.write. We've chosen not to suppress in those cases because they're +// removable by disabling those features. +func containerClusterAddedScopesSuppress(k, old, new string, d *schema.ResourceData) bool { + o, n := d.GetChange("cluster_autoscaling.0.auto_provisioning_defaults.0.oauth_scopes") + if o == nil || n == nil { + return false + } + + addedScopes := []string{ + "https://www.googleapis.com/auth/monitoring.write", + } + + // combine what the default scopes are with what was passed + m := tpgresource.GolangSetFromStringSlice(append(addedScopes, tpgresource.ConvertStringArr(n.([]interface{}))...)) + combined := tpgresource.StringSliceFromGolangSet(m) + + // compare if the combined new scopes and default scopes differ from the old scopes + if len(combined) != len(tpgresource.ConvertStringArr(o.([]interface{}))) { + return false + } + + for _, i := range combined { + if tpgresource.StringInSlice(tpgresource.ConvertStringArr(o.([]interface{})), i) { + continue + } + + return false + } + + return true +} + +// We want to suppress diffs for empty/disabled private cluster config. 
+func containerClusterPrivateClusterConfigSuppress(k, old, new string, d *schema.ResourceData) bool { + o, n := d.GetChange("private_cluster_config.0.enable_private_endpoint") + suppressEndpoint := !o.(bool) && !n.(bool) + + o, n = d.GetChange("private_cluster_config.0.enable_private_nodes") + suppressNodes := !o.(bool) && !n.(bool) + + // Do not suppress diffs when private_endpoint_subnetwork is configured + _, hasSubnet := d.GetOk("private_cluster_config.0.private_endpoint_subnetwork") + + // Do not suppress diffs when master_global_access_config is configured + _, hasGlobalAccessConfig := d.GetOk("private_cluster_config.0.master_global_access_config") + + if k == "private_cluster_config.0.enable_private_endpoint" { + return suppressEndpoint && !hasSubnet + } else if k == "private_cluster_config.0.enable_private_nodes" { + return suppressNodes && !hasSubnet + } else if k == "private_cluster_config.#" { + return suppressEndpoint && suppressNodes && !hasSubnet && !hasGlobalAccessConfig + } else if k == "private_cluster_config.0.private_endpoint_subnetwork" { + // Before regular compare, for the sake of private flexible cluster, + // suppress diffs in private_endpoint_subnetwork when + // master_ipv4_cidr_block is set + // && private_endpoint_subnetwork is unset in terraform (new value == "") + // && private_endpoint_subnetwork is returned from resource (old value != "") + _, hasMasterCidr := d.GetOk("private_cluster_config.0.master_ipv4_cidr_block") + return (hasMasterCidr && new == "" && old != "") || tpgresource.CompareSelfLinkOrResourceName(k, old, new, d) + } + return false +} + +func validatePrivateClusterConfig(cluster *container.Cluster) error { + if cluster == nil || cluster.PrivateClusterConfig == nil { + return nil + } + if !cluster.PrivateClusterConfig.EnablePrivateNodes && len(cluster.PrivateClusterConfig.MasterIpv4CidrBlock) > 0 { + return fmt.Errorf("master_ipv4_cidr_block can only be set if enable_private_nodes is true") + } + if 
cluster.PrivateClusterConfig.EnablePrivateNodes && len(cluster.PrivateClusterConfig.MasterIpv4CidrBlock) == 0 { + if len(cluster.PrivateClusterConfig.PrivateEndpointSubnetwork) > 0 { + return nil + } + if cluster.Autopilot == nil || !cluster.Autopilot.Enabled { + return fmt.Errorf("master_ipv4_cidr_block must be set if enable_private_nodes is true") + } + } + return nil +} + +// Autopilot clusters have preconfigured defaults: https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison. +// This function modifies the diff so users can see what these will be during plan time. +func containerClusterAutopilotCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + if d.HasChange("enable_autopilot") && d.Get("enable_autopilot").(bool) { + if err := d.SetNew("enable_intranode_visibility", true); err != nil { + return err + } + if err := d.SetNew("networking_mode", "VPC_NATIVE"); err != nil { + return err + } + } + return nil +} + +// node_version only applies to the default node pool, so it should conflict with remove_default_node_pool = true +func containerClusterNodeVersionRemoveDefaultCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + // node_version is computed, so we can only check this on initial creation + o, _ := d.GetChange("name") + if o != "" { + return nil + } + if d.Get("node_version").(string) != "" && d.Get("remove_default_node_pool").(bool) { + return fmt.Errorf("node_version can only be specified if remove_default_node_pool is not true") + } + return nil +} + +func containerClusterNetworkPolicyEmptyCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + // we want to set computed only in the case that there wasn't a previous network_policy configured + // because we default a returned empty network policy to a configured false, this will only apply + // on the first run, if network_policy is not configured - all other runs will store 
empty configurations + // as enabled=false and provider=PROVIDER_UNSPECIFIED + o, n := d.GetChange("network_policy") + if o == nil && n == nil { + return d.SetNewComputed("network_policy") + } + return nil +} + +{{ if ne $.TargetVersionName `ga` -}} +func podSecurityPolicyCfgSuppress(k, old, new string, r *schema.ResourceData) bool { + if k == "pod_security_policy_config.#" && old == "1" && new == "0" { + if v, ok := r.GetOk("pod_security_policy_config"); ok { + cfgList := v.([]interface{}) + if len(cfgList) > 0 { + d := cfgList[0].(map[string]interface{}) + // Suppress if old value was {enabled == false} + return !d["enabled"].(bool) + } + } + } + return false +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func SecretManagerCfgSuppress(k, old, new string, r *schema.ResourceData) bool { + if k == "secret_manager_config.#" && old == "1" && new == "0" { + if v, ok := r.GetOk("secret_manager_config"); ok { + cfgList := v.([]interface{}) + if len(cfgList) > 0 { + d := cfgList[0].(map[string]interface{}) + // Suppress if old value was {enabled == false} + return !d["enabled"].(bool) + } + } + } + return false +} +{{- end }} + +func containerClusterNetworkPolicyDiffSuppress(k, old, new string, r *schema.ResourceData) bool { + // if network_policy configuration is empty, we store it as populated and enabled=false, and + // provider=PROVIDER_UNSPECIFIED, in the case that it was previously stored with this state, + // and the configuration removed, we want to suppress the diff + if k == "network_policy.#" && old == "1" && new == "0" { + o, _ := r.GetChange("network_policy.0.enabled") + if !o.(bool) { + return true + } + } + + return false +} + +func BinaryAuthorizationDiffSuppress(k, old, new string, r *schema.ResourceData) bool { + // An empty config is equivalent to a config with enabled set to false. 
+ if k == "binary_authorization.#" && old == "1" && new == "0" { + o, _ := r.GetChange("binary_authorization.0.enabled") + if !o.(bool) && !r.HasChange("binary_authorization.0.evaluation_mode") { + return true + } + } + + return false +} + +func validateNodePoolAutoConfig(cluster *container.Cluster) error { + if cluster == nil || cluster.NodePoolAutoConfig == nil { + return nil + } + if cluster.NodePoolAutoConfig != nil && cluster.NodePoolAutoConfig.NetworkTags != nil && len(cluster.NodePoolAutoConfig.NetworkTags.Tags) > 0 { + if (cluster.Autopilot == nil || !cluster.Autopilot.Enabled) && (cluster.Autoscaling == nil || !cluster.Autoscaling.EnableNodeAutoprovisioning) { + return fmt.Errorf("node_pool_auto_config network tags can only be set if enable_autopilot or cluster_autoscaling is enabled") + } + } + + return nil +} + +func containerClusterSurgeSettingsCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + if v, ok := d.GetOk("cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.strategy"); ok { + if v != "SURGE" { + if _, maxSurgeIsPresent := d.GetOk("cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.max_surge"); maxSurgeIsPresent { + return fmt.Errorf("Surge upgrade settings max_surge/max_unavailable can only be used when strategy is set to SURGE") + } + } + if v != "SURGE" { + if _, maxSurgeIsPresent := d.GetOk("cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.max_unavailable"); maxSurgeIsPresent { + return fmt.Errorf("Surge upgrade settings max_surge/max_unavailable can only be used when strategy is set to SURGE") + } + } + } + + return nil +} + +func containerClusterEnableK8sBetaApisCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + // separate func to allow unit testing + return containerClusterEnableK8sBetaApisCustomizeDiffFunc(d) +} + +func containerClusterEnableK8sBetaApisCustomizeDiffFunc(d tpgresource.TerraformResourceDiff) error 
{ + // The Kubernetes Beta APIs cannot be disabled once they have been enabled by users. + // The reason why we don't allow disabling is that the controller does not have the + // ability to clean up the Kubernetes objects created by the APIs. If the user + // removes the already enabled Kubernetes Beta API from the list, we need to force + // a new cluster. + if !d.HasChange("enable_k8s_beta_apis.0.enabled_apis") { + return nil + } + old, new := d.GetChange("enable_k8s_beta_apis.0.enabled_apis") + if old != "" && new != "" { + oldAPIsSet := old.(*schema.Set) + newAPIsSet := new.(*schema.Set) + for _, oldAPI := range oldAPIsSet.List() { + if !newAPIsSet.Contains(oldAPI) { + return d.ForceNew("enable_k8s_beta_apis.0.enabled_apis") + } + } + } + + return nil +} + +func containerClusterNodeVersionCustomizeDiff(_ context.Context,diff *schema.ResourceDiff, meta interface{}) error { + // separate func to allow unit testing + return containerClusterNodeVersionCustomizeDiffFunc(diff) +} + +func containerClusterNodeVersionCustomizeDiffFunc(diff tpgresource.TerraformResourceDiff) error { + oldValueName, _ := diff.GetChange("name") + if oldValueName != "" { + return nil + } + + _, newValueNode := diff.GetChange("node_version") + _, newValueMaster := diff.GetChange("min_master_version") + + if newValueNode == "" || newValueMaster == "" { + return nil + } + + //ignore -gke.X suffix for now. If it becomes a problem later, we can fix it + masterVersion := strings.Split(newValueMaster.(string), "-")[0] + nodeVersion := strings.Split(newValueNode.(string), "-")[0] + + if masterVersion != nodeVersion { + return fmt.Errorf("Resource argument node_version (value: %s) must either be unset or set to the same value as min_master_version (value: %s) on create." 
, newValueNode, newValueMaster) + } + + return nil +} diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_internal_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_internal_test.go.tmpl new file mode 100644 index 000000000000..52eb636947b2 --- /dev/null +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_internal_test.go.tmpl @@ -0,0 +1,297 @@ +package container + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +{{- if ne $.TargetVersionName "ga" }} + container "google.golang.org/api/container/v1beta1" +{{- end }} +) + +{{ if ne $.TargetVersionName `ga` -}} +func TestValidateNodePoolAutoConfig(t *testing.T) { + withTags := &container.NodePoolAutoConfig{ + NetworkTags: &container.NetworkTags{ + Tags: []string{"not-empty"}, + }, + } + noTags := &container.NodePoolAutoConfig{} + + cases := map[string]struct { + Input *container.Cluster + ExpectError bool + }{ + "with tags, nap nil, autopilot nil": { + Input: &container.Cluster{NodePoolAutoConfig: withTags}, + ExpectError: true, + }, + "with tags, autopilot disabled": { + Input: &container.Cluster{ + Autopilot: &container.Autopilot{Enabled: false}, + NodePoolAutoConfig: withTags, + }, + ExpectError: true, + }, + "with tags, nap disabled": { + Input: &container.Cluster{ + Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: false}, + NodePoolAutoConfig: withTags, + }, + ExpectError: true, + }, + "with tags, autopilot enabled": { + Input: &container.Cluster{ + Autopilot: &container.Autopilot{Enabled: true}, + NodePoolAutoConfig: withTags, + }, + ExpectError: false, + }, + "with tags, nap enabled": { + Input: &container.Cluster{ + Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: true}, + NodePoolAutoConfig: withTags, + }, + ExpectError: false, + }, + "no tags, 
autopilot enabled": { + Input: &container.Cluster{ + Autopilot: &container.Autopilot{Enabled: true}, + NodePoolAutoConfig: noTags, + }, + ExpectError: false, + }, + "no tags, nap enabled": { + Input: &container.Cluster{ + Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: true}, + NodePoolAutoConfig: noTags, + }, + ExpectError: false, + }, + "no tags, autopilot disabled": { + Input: &container.Cluster{ + Autopilot: &container.Autopilot{Enabled: false}, + NodePoolAutoConfig: noTags, + }, + ExpectError: false, + }, + "no tags, nap disabled": { + Input: &container.Cluster{ + Autoscaling: &container.ClusterAutoscaling{EnableNodeAutoprovisioning: false}, + NodePoolAutoConfig: noTags, + }, + ExpectError: false, + }, + } + + for tn, tc := range cases { + if err := validateNodePoolAutoConfig(tc.Input); (err != nil) != tc.ExpectError { + t.Fatalf("bad: '%s', expected error: %t, received error: %t", tn, tc.ExpectError, (err != nil)) + } + } +} +{{- end }} + + +func TestContainerClusterEnableK8sBetaApisCustomizeDiff(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + before *schema.Set + after *schema.Set + expectedForceNew bool + }{ + "no need to force new from nil to empty apis": { + before: schema.NewSet(schema.HashString, nil), + after: schema.NewSet(schema.HashString, []interface{}{}), + expectedForceNew: false, + }, + "no need to force new from empty apis to nil": { + before: schema.NewSet(schema.HashString, []interface{}{}), + after: schema.NewSet(schema.HashString, nil), + expectedForceNew: false, + }, + "no need to force new from empty apis to empty apis": { + before: schema.NewSet(schema.HashString, []interface{}{}), + after: schema.NewSet(schema.HashString, []interface{}{}), + expectedForceNew: false, + }, + "no need to force new from nil to empty string apis": { + before: schema.NewSet(schema.HashString, nil), + after: schema.NewSet(schema.HashString, []interface{}{""}), + expectedForceNew: false, + }, + "no need to force new from 
empty string apis to empty string apis": { + before: schema.NewSet(schema.HashString, []interface{}{""}), + after: schema.NewSet(schema.HashString, []interface{}{""}), + expectedForceNew: false, + }, + "no need to force new for enabling new api from empty apis": { + before: schema.NewSet(schema.HashString, []interface{}{}), + after: schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/foo"}), + expectedForceNew: false, + }, + "no need to force new for enabling new api from nil": { + before: schema.NewSet(schema.HashString, nil), + after: schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/foo"}), + expectedForceNew: false, + }, + "no need to force new for passing same apis": { + before: schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/foo"}), + after: schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/foo"}), + expectedForceNew: false, + }, + "no need to force new for passing same apis with inconsistent order": { + before: schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/foo", "dummy.k8s.io/v1beta1/bar"}), + after: schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/bar", "dummy.k8s.io/v1beta1/foo"}), + expectedForceNew: false, + }, + "need to force new from empty string apis to nil": { + before: schema.NewSet(schema.HashString, []interface{}{""}), + after: schema.NewSet(schema.HashString, nil), + expectedForceNew: true, + }, + "need to force new for disabling existing api": { + before: schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/foo"}), + after: schema.NewSet(schema.HashString, []interface{}{}), + expectedForceNew: true, + }, + "need to force new for disabling existing api with nil": { + before: schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/foo"}), + after: schema.NewSet(schema.HashString, nil), + expectedForceNew: true, + }, + "need to force new for disabling existing apis": { + before: 
schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/foo", "dummy.k8s.io/v1beta1/bar", "dummy.k8s.io/v1beta1/baz"}), + after: schema.NewSet(schema.HashString, []interface{}{"dummy.k8s.io/v1beta1/foo"}), + expectedForceNew: true, + }, + } + + for tn, tc := range cases { + d := &tpgresource.ResourceDiffMock{ + Before: map[string]interface{}{ + "enable_k8s_beta_apis.0.enabled_apis": tc.before, + }, + After: map[string]interface{}{ + "enable_k8s_beta_apis.0.enabled_apis": tc.after, + }, + } + err := containerClusterEnableK8sBetaApisCustomizeDiffFunc(d) + if err != nil { + t.Errorf("%s failed, found unexpected error: %s", tn, err) + } + if d.IsForceNew != tc.expectedForceNew { + t.Errorf("%v: expected d.IsForceNew to be %v, but was %v", tn, tc.expectedForceNew, d.IsForceNew) + } + } +} + +func TestContainerCluster_NodeVersionCustomizeDiff(t* testing.T) { + t.Parallel() + + cases := map[string]struct{ + BeforeName string + AfterName string + MasterVersion string + NodeVersion string + ExpectError bool + }{ + "Master version and node version are exactly the same" : { + BeforeName : "", + AfterName : "test", + MasterVersion : "1.10.9-gke.5", + NodeVersion : "1.10.9-gke.5", + ExpectError : false, + }, + "Master version and node version have the same Kubernetes patch version but not the same gke-N suffix " : { + BeforeName : "", + AfterName : "test", + MasterVersion : "1.10.9-gke.5", + NodeVersion : "1.10.9-gke.9", + ExpectError : false, + }, + "Master version and node version have different minor versions" :{ + BeforeName : "", + AfterName : "test", + MasterVersion : "1.10.9-gke.5", + NodeVersion : "1.11.6-gke.11", + ExpectError : true, + }, + "Master version and node version have different Kubernetes Patch Versions" :{ + BeforeName : "", + AfterName : "test", + MasterVersion : "1.10.9-gke.5", + NodeVersion : "1.10.6-gke.11", + ExpectError : true, + }, + "Master version is not set, but node version is" : { + BeforeName : "", + AfterName : "test", + 
MasterVersion : "", + NodeVersion : "1.10.6-gke.11", + ExpectError : false, + }, + "Node version is not set, but master version is" : { + BeforeName : "", + AfterName : "test", + MasterVersion : "1.10.6-gke.11", + NodeVersion : "", + ExpectError : false, + }, + "Node version and master version match, both do not have -gke.X suffix" :{ + BeforeName : "", + AfterName : "test", + MasterVersion : "1.10.6", + NodeVersion : "1.10.6", + ExpectError : false, + + }, + "Node version and master version do not match, both do not have -gke.X suffix" : { + BeforeName : "", + AfterName : "test", + MasterVersion : "1.10.6", + NodeVersion : "1.11.6", + ExpectError : true, + + }, + "Node version and master version do not match, node version has -gke.X suffix but master version doesn't" : { + BeforeName : "", + AfterName : "test", + MasterVersion : "1.11.6", + NodeVersion : "1.10.6-gke.11", + ExpectError : true, + }, + "Diff is executed in non-create scenario, master version and node version do not match" : { + BeforeName : "test", + AfterName : "test-1", + MasterVersion : "1.11.6-gke.11", + NodeVersion : "1.10.6-gke.11", + ExpectError : false, + }, + } + + for tn,tc := range cases { + d := &tpgresource.ResourceDiffMock{ + Before: map[string]interface{}{ + "name" : tc.BeforeName, + "min_master_version": "", + "node_version": "", + }, + After: map[string]interface{}{ + "name" : tc.AfterName, + "min_master_version": tc.MasterVersion, + "node_version": tc.NodeVersion, + }, + } + err := containerClusterNodeVersionCustomizeDiffFunc(d) + + if tc.ExpectError && err == nil { + t.Errorf("%s failed, expected error but was none", tn) + } + if !tc.ExpectError && err != nil { + t.Errorf("%s failed, found unexpected error: %s", tn, err) + } + } +} diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl new file mode 100644 index 
000000000000..e61fc211f072 --- /dev/null +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl @@ -0,0 +1,1845 @@ +package container + +import ( + "context" + "fmt" + "log" + "regexp" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func ResourceContainerClusterUpgradeV1(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + log.Printf("[DEBUG] Applying container cluster migration to schema version V2.") + + rawState["deletion_protection"] = true + return rawState, nil +} + + +func resourceContainerClusterResourceV1() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the cluster, unique within the project and location.`, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + if len(value) > 40 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 40 characters", k)) + } + if !regexp.MustCompile("^[a-z0-9-]+$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q can only contain lowercase letters, numbers and hyphens", k)) + } + if !regexp.MustCompile("^[a-z]").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must start with a letter", k)) + } + if !regexp.MustCompile("[a-z0-9]$").MatchString(value) { + errors = append(errors, fmt.Errorf( + "%q must end with a number or a letter", k)) + } + return + }, + }, + + "operation": { + Type: schema.TypeString, + Computed: true, + }, + + "location": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The location (region or zone) in which 
the cluster master will be created, as well as the default node location. If you specify a zone (such as us-central1-a), the cluster will be a zonal cluster with a single cluster master. If you specify a region (such as us-west1), the cluster will be a regional cluster with multiple masters spread across zones in the region, and with default node locations in those zones as well.`, + }, + + "node_locations": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of zones in which the cluster's nodes are located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If this is specified for a zonal cluster, omit the cluster's zone.`, + }, + + "addons_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `The configuration for addons supported by GKE.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_load_balancing": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the HTTP (L7) load balancing controller addon, which makes it easy to set up HTTP load balancers for services in a cluster. It is enabled by default; set disabled = true to disable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "horizontal_pod_autoscaling": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Horizontal Pod Autoscaling addon, which increases or decreases the number of replica pods a replication controller has based on the resource usage of the existing pods. It ensures that a Heapster pod is running in the cluster, which is also used by the Cloud Monitoring service. 
It is enabled by default; set disabled = true to disable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "network_policy_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `Whether we should enable the network policy addon for the master. This must be enabled in order to enable network policy for the nodes. To enable this, you must also define a network_policy block, otherwise nothing will happen. It can only be disabled if the nodes already do not have network policies enabled. Defaults to disabled; set disabled = false to enable.`, + ConflictsWith: []string{"enable_autopilot"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "gcp_filestore_csi_driver_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Filestore CSI driver addon, which allows the usage of filestore instance as volumes. Defaults to disabled; set enabled = true to enable.`, + ConflictsWith: []string{"enable_autopilot"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "cloudrun_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the CloudRun addon. It is disabled by default. 
Set disabled = false to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + "load_balancer_type": { + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"LOAD_BALANCER_TYPE_INTERNAL"}, false), + Optional: true, + }, + }, + }, + }, + "dns_cache_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the NodeLocal DNSCache addon. It is disabled by default. Set enabled = true to enable.`, + ConflictsWith: []string{"enable_autopilot"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "gce_persistent_disk_csi_driver_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `Whether this cluster should enable the Google Compute Engine Persistent Disk Container Storage Interface (CSI) Driver. Set enabled = true to enable. The Compute Engine persistent disk CSI Driver is enabled by default on newly created clusters for the following versions: Linux clusters: GKE version 1.18.10-gke.2100 or later, or 1.19.3-gke.2100 or later.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "gke_backup_agent_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Backup for GKE Agent addon. It is disabled by default. 
Set enabled = true to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "gcs_fuse_csi_driver_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the GCS Fuse CSI driver addon, which allows the usage of gcs bucket as volumes. Defaults to disabled; set enabled = true to enable.`, + ConflictsWith: []string{"enable_autopilot"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + {{- if ne $.TargetVersionName "ga" }} + "istio_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The status of the Istio addon.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + Description: `The status of the Istio addon, which makes it easy to set up Istio for services in a cluster. It is disabled by default. Set disabled = false to enable.`, + }, + "auth": { + Type: schema.TypeString, + Optional: true, + // We can't use a Terraform-level default because it won't be true when the block is disabled: true + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("AUTH_NONE"), + ValidateFunc: validation.StringInSlice([]string{"AUTH_NONE", "AUTH_MUTUAL_TLS"}, false), + Description: `The authentication type between services in Istio. Available options include AUTH_MUTUAL_TLS.`, + }, + }, + }, + }, + "kalm_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `Configuration for the KALM addon, which manages the lifecycle of k8s. 
It is disabled by default; Set enabled = true to enable.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + {{- end }} + "config_connector_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: addonsConfigKeys, + MaxItems: 1, + Description: `The of the Config Connector addon.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + }, + }, + }, + + "cluster_autoscaling": { + Type: schema.TypeList, + MaxItems: 1, + // This field is Optional + Computed because we automatically set the + // enabled value to false if the block is not returned in API responses. + Optional: true, + Computed: true, + Description: `Per-cluster configuration of Node Auto-Provisioning with Cluster Autoscaler to automatically adjust the size of the cluster and create/delete node pools based on the current needs of the cluster's workload. See the guide to using Node Auto-Provisioning for more details.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + ConflictsWith: []string{"enable_autopilot"}, + Description: `Whether node auto-provisioning is enabled. Resource limits for cpu and memory must be defined to enable node auto-provisioning.`, + }, + "resource_limits": { + Type: schema.TypeList, + Optional: true, + ConflictsWith: []string{"enable_autopilot"}, + DiffSuppressFunc: suppressDiffForAutopilot, + Description: `Global constraints for machine resources in the cluster. Configuring the cpu and memory types is required if node auto-provisioning is enabled. 
These limits will apply to node pool autoscaling in addition to node auto-provisioning.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "resource_type": { + Type: schema.TypeString, + Required: true, + Description: `The type of the resource. For example, cpu and memory. See the guide to using Node Auto-Provisioning for a list of types.`, + }, + "minimum": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum amount of the resource in the cluster.`, + }, + "maximum": { + Type: schema.TypeInt, + Optional: true, + Description: `Maximum amount of the resource in the cluster.`, + }, + }, + }, + }, + "auto_provisioning_defaults": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Contains defaults for a node pool created by NAP.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oauth_scopes": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + DiffSuppressFunc: containerClusterAddedScopesSuppress, + Description: `Scopes that are used by NAP when creating node pools.`, + }, + "service_account": { + Type: schema.TypeString, + Optional: true, + Default: "default", + Description: `The Google Cloud Platform Service Account to be used by the node VMs.`, + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + Default: 100, + Description: `Size of the disk attached to each node, specified in GB. 
The smallest allowed disk size is 10GB.`, + DiffSuppressFunc: suppressDiffForAutopilot, + ValidateFunc: validation.IntAtLeast(10), + }, + "disk_type": { + Type: schema.TypeString, + Optional: true, + Default: "pd-standard", + Description: `Type of the disk attached to each node.`, + DiffSuppressFunc: suppressDiffForAutopilot, + ValidateFunc: validation.StringInSlice([]string{"pd-standard", "pd-ssd", "pd-balanced"}, false), + }, + "image_type": { + Type: schema.TypeString, + Optional: true, + Default: "COS_CONTAINERD", + Description: `The default image type used by NAP once a new node pool is being created.`, + DiffSuppressFunc: suppressDiffForAutopilot, + ValidateFunc: validation.StringInSlice([]string{"COS_CONTAINERD", "COS", "UBUNTU_CONTAINERD", "UBUNTU"}, false), + }, + "min_cpu_platform": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("automatic"), + Description: `Minimum CPU platform to be used by this instance. The instance may be scheduled on the specified or newer CPU platform. 
Applicable values are the friendly names of CPU platforms, such as Intel Haswell.`, + }, + "boot_disk_kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The Customer Managed Encryption Key used to encrypt the boot disk attached to each node in the node pool.`, + }, + "shielded_instance_config": { + Type: schema.TypeList, + Optional: true, + Description: `Shielded Instance options.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Defines whether the instance has Secure Boot enabled.`, + AtLeastOneOf: []string{ + "cluster_autoscaling.0.auto_provisioning_defaults.0.shielded_instance_config.0.enable_secure_boot", + "cluster_autoscaling.0.auto_provisioning_defaults.0.shielded_instance_config.0.enable_integrity_monitoring", + }, + }, + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Defines whether the instance has integrity monitoring enabled.`, + AtLeastOneOf: []string{ + "cluster_autoscaling.0.auto_provisioning_defaults.0.shielded_instance_config.0.enable_secure_boot", + "cluster_autoscaling.0.auto_provisioning_defaults.0.shielded_instance_config.0.enable_integrity_monitoring", + }, + }, + }, + }, + }, + "management": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `NodeManagement configuration for this NodePool.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_upgrade": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Specifies whether node auto-upgrade is enabled for the node pool. 
If enabled, node auto-upgrade helps keep the nodes in your node pool up to date with the latest release version of Kubernetes.`, + }, + "auto_repair": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Specifies whether the node auto-repair is enabled for the node pool. If enabled, the nodes in this node pool will be monitored and, if they fail health checks too many times, an automatic repair action will be triggered.`, + }, + "upgrade_options": { + Type: schema.TypeList, + Computed: true, + Description: `Specifies the Auto Upgrade knobs for the node pool.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_upgrade_start_time": { + Type: schema.TypeString, + Computed: true, + Description: `This field is set when upgrades are about to commence with the approximate start time for the upgrades, in RFC3339 text format.`, + }, + "description": { + Type: schema.TypeString, + Computed: true, + Description: `This field is set when upgrades are about to commence with the description of the upgrade.`, + }, + }, + }, + }, + }, + }, + }, + "upgrade_settings": { + Type: schema.TypeList, + Optional: true, + Description: `Specifies the upgrade settings for NAP created node pools`, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_surge": { + Type: schema.TypeInt, + Optional: true, + Description: `The maximum number of nodes that can be created beyond the current size of the node pool during the upgrade process.`, + }, + "max_unavailable": { + Type: schema.TypeInt, + Optional: true, + Description: `The maximum number of nodes that can be simultaneously unavailable during the upgrade process.`, + }, + "strategy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Update strategy of the node pool.`, + ValidateFunc: validation.StringInSlice([]string{"NODE_POOL_UPDATE_STRATEGY_UNSPECIFIED", "BLUE_GREEN", "SURGE"}, false), + }, + "blue_green_settings": { 
+ Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Settings for blue-green upgrade strategy.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_pool_soak_duration": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Time needed after draining entire blue pool. After this period, blue pool will be cleaned up. + + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`, + }, + "standard_rollout_policy": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Standard policy for the blue-green upgrade.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_percentage": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + ValidateFunc: validation.FloatBetween(0.0, 1.0), + ExactlyOneOf: []string{ + "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_percentage", + "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_node_count", + }, + Description: `Percentage of the bool pool nodes to drain in a batch. The range of this field should be (0.0, 1.0].`, + }, + "batch_node_count": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ExactlyOneOf: []string{ + "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_percentage", + "cluster_autoscaling.0.auto_provisioning_defaults.0.upgrade_settings.0.blue_green_settings.0.standard_rollout_policy.0.batch_node_count", + }, + Description: `Number of blue nodes to drain in a batch.`, + }, + "batch_soak_duration": { + Type: schema.TypeString, + Optional: true, + Default: "0s", + Description: `Soak time after each batch gets drained. + + A duration in seconds with up to nine fractional digits, ending with 's'. 
Example: "3.5s".`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + {{- if ne $.TargetVersionName "ga" }} + "autoscaling_profile": { + Type: schema.TypeString, + Default: "BALANCED", + Optional: true, + DiffSuppressFunc: suppressDiffForAutopilot, + ValidateFunc: validation.StringInSlice([]string{"BALANCED", "OPTIMIZE_UTILIZATION"}, false), + Description: `Configuration options for the Autoscaling profile feature, which lets you choose whether the cluster autoscaler should optimize for resource utilization or resource availability when deciding to remove nodes from a cluster. Can be BALANCED or OPTIMIZE_UTILIZATION. Defaults to BALANCED.`, + }, + {{- end }} + }, + }, + }, + + "cluster_ipv4_cidr": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.OrEmpty(verify.ValidateRFC1918Network(8, 32)), + ConflictsWith: []string{"ip_allocation_policy"}, + Description: `The IP address range of the Kubernetes pods in this cluster in CIDR notation (e.g. 10.96.0.0/14). Leave blank to have one automatically chosen or specify a /14 block in 10.0.0.0/8. 
This field will only work for routes-based clusters, where ip_allocation_policy is not defined.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: ` Description of the cluster.`, + }, + + "binary_authorization": { + Type: schema.TypeList, + Optional: true, + DiffSuppressFunc: BinaryAuthorizationDiffSuppress, + MaxItems: 1, + Description: "Configuration options for the Binary Authorization feature.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Deprecated: "Deprecated in favor of evaluation_mode.", + Description: "Enable Binary Authorization for this cluster.", + ConflictsWith: []string{"enable_autopilot", "binary_authorization.0.evaluation_mode"}, + }, + "evaluation_mode": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "PROJECT_SINGLETON_POLICY_ENFORCE"}, false), + Description: "Mode of operation for Binary Authorization policy evaluation.", + ConflictsWith: []string{"binary_authorization.0.enabled"}, + }, + }, + }, + }, + + "enable_kubernetes_alpha": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Default: false, + Description: `Whether to enable Kubernetes Alpha features for this cluster. 
Note that when this option is enabled, the cluster cannot be upgraded and will be automatically deleted after 30 days.`, + }, + + "enable_k8s_beta_apis": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Configuration for Kubernetes Beta APIs.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled_apis": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Enabled Kubernetes Beta APIs.`, + }, + }, + }, + }, + + "enable_tpu": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to enable Cloud TPU resources in this cluster.`, +{{- if ne $.TargetVersionName "ga" }} + ConflictsWith: []string{"tpu_config"}, + Computed: true, + // TODO: deprecate when tpu_config is correctly returned by the API + // Deprecated: "Deprecated in favor of tpu_config", +{{- end }} + }, + +{{ if ne $.TargetVersionName `ga` -}} + "tpu_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `TPU configuration for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether Cloud TPU integration is enabled or not`, + }, + "ipv4_cidr_block": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 CIDR block reserved for Cloud TPU in the VPC.`, + }, + "use_service_networking": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to use service networking for Cloud TPU or not`, + }, + }, + }, + }, +{{- end }} + + "enable_legacy_abac": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether the ABAC authorizer is enabled for this cluster. When enabled, identities in the system, including service accounts, nodes, and controllers, will have statically granted permissions beyond those provided by the RBAC configuration or IAM. 
Defaults to false.`, + }, + + "enable_shielded_nodes": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Enable Shielded Nodes features on all nodes in this cluster. Defaults to true.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + + "enable_autopilot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Enable Autopilot for this cluster.`, + // ConflictsWith: many fields, see https://cloud.google.com/kubernetes-engine/docs/concepts/autopilot-overview#comparison. The conflict is only set one-way, on other fields w/ this field. + }, + + "allow_net_admin": { + Type: schema.TypeBool, + Optional: true, + Description: `Enable NET_ADMIN for this cluster.`, + }, + + "authenticator_groups_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration for the Google Groups for GKE feature.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "security_group": { + Type: schema.TypeString, + Required: true, + Description: `The name of the RBAC security group for use with Google security groups in Kubernetes RBAC. Group name must be in format gke-security-groups@yourdomain.com.`, + }, + }, + }, + }, + + "initial_node_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: `The number of nodes to create in this cluster's default node pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Must be set if node_pool is not set. 
If you're using google_container_node_pool objects with no default node pool, you'll need to set this to a value of at least 1, alongside setting remove_default_node_pool to true.`, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Logging configuration for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_components": { + Type: schema.TypeList, + Required: true, + Description: `GKE components exposing logs. Valid values include SYSTEM_COMPONENTS, APISERVER, CONTROLLER_MANAGER, SCHEDULER, and WORKLOADS.`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER", "WORKLOADS"}, false), + }, + }, + }, + }, + }, + + "logging_service": { + Type: schema.TypeString, + Optional: true, + Computed: true, +{{- if ne $.TargetVersionName "ga" }} + ConflictsWith: []string{"cluster_telemetry"}, +{{- end }} + ValidateFunc: validation.StringInSlice([]string{"logging.googleapis.com", "logging.googleapis.com/kubernetes", "none"}, false), + Description: `The logging service that the cluster should write logs to. Available options include logging.googleapis.com(Legacy Stackdriver), logging.googleapis.com/kubernetes(Stackdriver Kubernetes Engine Logging), and none. Defaults to logging.googleapis.com/kubernetes.`, + }, + + "maintenance_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `The maintenance policy to use for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "daily_maintenance_window": { + Type: schema.TypeList, + Optional: true, + ExactlyOneOf: []string{ + "maintenance_policy.0.daily_maintenance_window", + "maintenance_policy.0.recurring_window", + }, + MaxItems: 1, + Description: `Time window specified for daily maintenance operations. 
Specify start_time in RFC3339 format "HH:MM”, where HH : [00-23] and MM : [00-59] GMT.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Time, + DiffSuppressFunc: tpgresource.Rfc3339TimeDiffSuppress, + }, + "duration": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + "recurring_window": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ExactlyOneOf: []string{ + "maintenance_policy.0.daily_maintenance_window", + "maintenance_policy.0.recurring_window", + }, + Description: `Time window for recurring maintenance operations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Date, + }, + "end_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Date, + }, + "recurrence": { + Type: schema.TypeString, + Required: true, + DiffSuppressFunc: rfc5545RecurrenceDiffSuppress, + }, + }, + }, + }, + "maintenance_exclusion": { + Type: schema.TypeSet, + Optional: true, + MaxItems: 20, + Description: `Exceptions to maintenance window. 
Non-emergency maintenance should not occur in these windows.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "exclusion_name": { + Type: schema.TypeString, + Required: true, + }, + "start_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Date, + }, + "end_time": { + Type: schema.TypeString, + Required: true, + ValidateFunc: verify.ValidateRFC3339Date, + }, + "exclusion_options": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Maintenance exclusion related options.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scope": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"NO_UPGRADES", "NO_MINOR_UPGRADES", "NO_MINOR_OR_NODE_UPGRADES"}, false), + Description: `The scope of automatic upgrades to restrict in the exclusion window.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + +{{ if ne $.TargetVersionName `ga` -}} + "protect_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Enable/Disable Protect API features for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "workload_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `WorkloadConfig defines which actions are enabled for a cluster's workload configurations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "audit_mode": { + Type: schema.TypeString, + Required: true, + Description: `Sets which mode of auditing should be used for the cluster's workloads. 
Accepted values are DISABLED, BASIC.`, + }, + }, + }, + AtLeastOneOf: []string{ + "protect_config.0.workload_config", + "protect_config.0.workload_vulnerability_mode", + }, + }, + "workload_vulnerability_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Sets which mode to use for Protect workload vulnerability scanning feature. Accepted values are DISABLED, BASIC.`, + AtLeastOneOf: []string{ + "protect_config.0.workload_config", + "protect_config.0.workload_vulnerability_mode", + }, + }, + }, + }, + }, +{{- end }} + + "security_posture_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Description: `Defines the config needed to enable/disable features for the Security Posture API`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "BASIC", "ENTERPRISE", "MODE_UNSPECIFIED"}, false), + Description: `Sets the mode of the Kubernetes security posture API's off-cluster features. Available options include DISABLED, BASIC, and ENTERPRISE.`, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("MODE_UNSPECIFIED"), + }, + "vulnerability_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"VULNERABILITY_DISABLED", "VULNERABILITY_BASIC", "VULNERABILITY_ENTERPRISE", "VULNERABILITY_MODE_UNSPECIFIED"}, false), + Description: `Sets the mode of the Kubernetes security posture API's workload vulnerability scanning. 
Available options include VULNERABILITY_DISABLED, VULNERABILITY_BASIC and VULNERABILITY_ENTERPRISE.`, + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("VULNERABILITY_MODE_UNSPECIFIED"), + }, + }, + }, + }, + "monitoring_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Monitoring configuration for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_components": { + Type: schema.TypeList, + Optional: true, + Computed: true, +{{- if eq $.TargetVersionName "ga" }} + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT and STATEFULSET.`, +{{- else }} + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET and WORKLOADS.`, +{{- end }} + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "managed_prometheus": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration for Google Cloud Managed Services for Prometheus.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not the managed collection is enabled.`, + }, + }, + }, + }, + "advanced_datapath_observability_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 2, + Description: `Configuration of Advanced Datapath Observability features.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_metrics": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not the advanced datapath metrics are enabled.`, + }, + "enable_relay": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not Relay is enabled.`, + }, + "relay_mode": { + Type: 
schema.TypeString, + Optional: true, + Computed: true, + Description: `Mode used to make Relay available.`, + ValidateFunc: validation.StringInSlice([]string{"DISABLED", "INTERNAL_VPC_LB", "EXTERNAL_LB"}, false), + }, + }, + }, + }, + }, + }, + }, + + "notification_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `The notification config for sending cluster upgrade notifications`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: `Notification config for Cloud Pub/Sub`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether or not the notification config is enabled`, + }, + "topic": { + Type: schema.TypeString, + Optional: true, + Description: `The pubsub topic to push upgrade notifications to. Must be in the same project as the cluster. Must be in the format: projects/{project}/topics/{topic}.`, + }, + "filter": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Allows filtering to one or more specific event types. If event types are present, those and only those event types will be transmitted to the cluster. Other types will be skipped. If no filter is specified, or no event types are present, all event types will be sent`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "event_type": { + Type: schema.TypeList, + Required: true, + Description: `Can be used to filter what notifications are sent. 
Valid values include include UPGRADE_AVAILABLE_EVENT, UPGRADE_EVENT and SECURITY_BULLETIN_EVENT`, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"UPGRADE_AVAILABLE_EVENT", "UPGRADE_EVENT", "SECURITY_BULLETIN_EVENT"}, false), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + "confidential_nodes": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `Configuration for the confidential nodes feature, which makes nodes run on confidential VMs. Warning: This configuration can't be changed (or added/removed) after cluster creation without deleting and recreating the entire cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether Confidential Nodes feature is enabled for all nodes in this cluster.`, + }, + }, + }, + }, + + "master_auth": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Description: `The authentication information for accessing the Kubernetes master. Some values in this block are only returned by the API if your service account has permission to get credentials for your GKE cluster. 
If you see an unexpected diff unsetting your client cert, ensure you have the container.clusters.getCredentials permission.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_certificate_config": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + ForceNew: true, + Description: `Whether client certificate authorization is enabled for this cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "issue_client_certificate": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether client certificate authorization is enabled for this cluster.`, + }, + }, + }, + }, + + "client_certificate": { + Type: schema.TypeString, + Computed: true, + Description: `Base64 encoded public certificate used by clients to authenticate to the cluster endpoint.`, + }, + + "client_key": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + Description: `Base64 encoded private key used by clients to authenticate to the cluster endpoint.`, + }, + + "cluster_ca_certificate": { + Type: schema.TypeString, + Computed: true, + Description: `Base64 encoded public certificate that is the root of trust for the cluster.`, + }, + }, + }, + }, + + "master_authorized_networks_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: masterAuthorizedNetworksConfig, + Description: `The desired configuration options for master authorized networks. Omit the nested cidr_blocks attribute to disallow external access (except the cluster node IPs, which GKE automatically whitelists).`, + }, + + "min_master_version": { + Type: schema.TypeString, + Optional: true, + Description: `The minimum version of the master. GKE will auto-update the master to new versions, so this does not guarantee the current master version--use the read-only master_version field to obtain that. 
If unset, the cluster's version will be set by GKE to the version of the most recent official release (which is not necessarily the latest version).`, + }, + + "monitoring_service": { + Type: schema.TypeString, + Optional: true, + Computed: true, +{{- if ne $.TargetVersionName "ga" }} + ConflictsWith: []string{"cluster_telemetry"}, +{{- end }} + ValidateFunc: validation.StringInSlice([]string{"monitoring.googleapis.com", "monitoring.googleapis.com/kubernetes", "none"}, false), + Description: `The monitoring service that the cluster should write metrics to. Automatically send metrics from pods in the cluster to the Google Cloud Monitoring API. VM metrics will be collected by Google Compute Engine regardless of this setting Available options include monitoring.googleapis.com(Legacy Stackdriver), monitoring.googleapis.com/kubernetes(Stackdriver Kubernetes Engine Monitoring), and none. Defaults to monitoring.googleapis.com/kubernetes.`, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + Default: "default", + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the Google Compute Engine network to which the cluster is connected. 
For Shared VPC, set this to the self link of the shared network.`, + }, + + "network_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Configuration options for the NetworkPolicy feature.`, + ConflictsWith: []string{"enable_autopilot"}, + DiffSuppressFunc: containerClusterNetworkPolicyDiffSuppress, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether network policy is enabled on the cluster.`, + }, + "provider": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"PROVIDER_UNSPECIFIED", "CALICO"}, false), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("PROVIDER_UNSPECIFIED"), + Description: `The selected network policy provider.`, + }, + }, + }, + }, + + "node_config": clusterSchemaNodeConfig(), + + "node_pool": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, // TODO: Add ability to add/remove nodePools + Elem: &schema.Resource{ + Schema: schemaNodePool, + }, + Description: `List of node pools associated with this cluster. See google_container_node_pool for schema. Warning: node pools defined inside a cluster can't be changed (or added/removed) after cluster creation without deleting and recreating the entire cluster. 
Unless you absolutely need the ability to say "these are the only node pools associated with this cluster", use the google_container_node_pool resource instead of this property.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + + "node_pool_defaults": clusterSchemaNodePoolDefaults(), + + "node_pool_auto_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Node pool configs that apply to all auto-provisioned node pools in autopilot clusters and node auto-provisioning enabled clusters.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network_tags": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Collection of Compute Engine network tags that can be applied to a node's underlying VM instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tags": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `List of network tags applied to auto-provisioned node pools.`, + }, + }, + }, + }, + }, + }, + }, + + "node_version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The Kubernetes version on the nodes. Must either be unset or set to the same value as min_master_version on create. Defaults to the default version set by GKE which is not necessarily the latest version. This only affects nodes in the default node pool. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way. 
To update nodes in other node pools, use the version attribute on the node pool.`, + }, + +{{ if ne $.TargetVersionName `ga` -}} + "pod_security_policy_config": { + Type: schema.TypeList, + Optional: true, + Description: `Configuration for the PodSecurityPolicy feature.`, + MaxItems: 1, + DiffSuppressFunc: podSecurityPolicyCfgSuppress, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Enable the PodSecurityPolicy controller for this cluster. If enabled, pods must be valid under a PodSecurityPolicy to be created.`, + }, + }, + }, + }, +{{- end }} + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The name or self_link of the Google Compute Engine subnetwork in which the cluster's instances are launched.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `Server-defined URL for the resource.`, + }, + + "endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address of this cluster's Kubernetes master.`, + }, + + "master_version": { + Type: schema.TypeString, + Computed: true, + Description: `The current version of the master in the cluster. This may be different than the min_master_version set in the config if the master has been updated by GKE.`, + }, + + "services_ipv4_cidr": { + Type: schema.TypeString, + Computed: true, + Description: `The IP address range of the Kubernetes services in this cluster, in CIDR notation (e.g. 1.2.3.4/29). 
Service addresses are typically put in the last /16 from the container CIDR.`, + }, + + "ip_allocation_policy": { + Type: schema.TypeList, + MaxItems: 1, + ForceNew: true, + Computed: true, + Optional: true, + ConflictsWith: []string{"cluster_ipv4_cidr"}, + Description: `Configuration of cluster IP allocation for VPC-native clusters. Adding this block enables IP aliasing, making the cluster VPC-native instead of routes-based.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // GKE creates/deletes secondary ranges in VPC + "cluster_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: ipAllocationRangeFields, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, + Description: `The IP address range for the cluster pod IPs. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, + }, + + "services_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: ipAllocationRangeFields, + DiffSuppressFunc: tpgresource.CidrOrSizeDiffSuppress, + Description: `The IP address range of the services IPs in this cluster. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) from the RFC-1918 private networks (e.g. 
10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16) to pick a specific range to use.`, + }, + + // User manages secondary ranges manually + "cluster_secondary_range_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: ipAllocationCidrBlockFields, + Description: `The name of the existing secondary range in the cluster's subnetwork to use for pod IP addresses. Alternatively, cluster_ipv4_cidr_block can be used to automatically create a GKE-managed one.`, + }, + + "services_secondary_range_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ConflictsWith: ipAllocationCidrBlockFields, + Description: `The name of the existing secondary range in the cluster's subnetwork to use for service ClusterIPs. Alternatively, services_ipv4_cidr_block can be used to automatically create a GKE-managed one.`, + }, + + "stack_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: "IPV4", + ValidateFunc: validation.StringInSlice([]string{"IPV4", "IPV4_IPV6"}, false), + Description: `The IP Stack type of the cluster. Choose between IPV4 and IPV4_IPV6. Default type is IPV4 Only if not set`, + }, + "pod_cidr_overprovision_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `Configuration for cluster level pod cidr overprovision. 
Default is disabled=false.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "additional_pod_ranges_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: `AdditionalPodRangesConfig is the configuration for additional pod secondary ranges supporting the ClusterUpdate message.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pod_range_names": { + Type: schema.TypeSet, + MinItems: 1, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Name for pod secondary ipv4 range which has the actual range defined ahead.`, + }, + }, + }, + }, + }, + }, + }, + + "networking_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"VPC_NATIVE", "ROUTES"}, false), + Description: `Determines whether alias IPs or routes will be used for pod IPs in the cluster.`, + }, + + "remove_default_node_pool": { + Type: schema.TypeBool, + Optional: true, + Description: `If true, deletes the default node pool upon cluster creation. If you're using google_container_node_pool resources with no default node pool, this should be set to true, alongside setting initial_node_count to at least 1.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + + "private_cluster_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, + Description: `Configuration for private clusters, clusters with private nodes.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // enable_private_endpoint is orthogonal to private_endpoint_subnetwork. + // User can create a private_cluster_config block without including + // either one of those two fields. Both fields are optional. 
+ // At the same time, we use 'AtLeastOneOf' to prevent an empty block + // like 'private_cluster_config{}' + "enable_private_endpoint": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: privateClusterConfigKeys, + DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, + Description: `When true, the cluster's private endpoint is used as the cluster endpoint and access through the public endpoint is disabled. When false, either endpoint can be used.`, + }, + "enable_private_nodes": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: privateClusterConfigKeys, + DiffSuppressFunc: containerClusterPrivateClusterConfigSuppress, + Description: `Enables the private cluster feature, creating a private endpoint on the cluster. In a private cluster, nodes only have RFC 1918 private addresses and communicate with the master's private endpoint via private networking.`, + }, + "master_ipv4_cidr_block": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + AtLeastOneOf: privateClusterConfigKeys, + ValidateFunc: verify.OrEmpty(validation.IsCIDRNetwork(28, 28)), + Description: `The IP range in CIDR notation to use for the hosted master network. This range will be used for assigning private IP addresses to the cluster master(s) and the ILB VIP. This range must not overlap with any other ranges in use within the cluster's network, and it must be a /28 subnet. See Private Cluster Limitations for more details. 
This field only applies to private clusters, when enable_private_nodes is true.`, + }, + "peering_name": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the peering between this cluster and the Google owned VPC.`, + }, + "private_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `The internal IP address of this cluster's master endpoint.`, + }, + "private_endpoint_subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: privateClusterConfigKeys, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `Subnetwork in cluster's network where master's endpoint will be provisioned.`, + }, + "public_endpoint": { + Type: schema.TypeString, + Computed: true, + Description: `The external IP address of this cluster's master endpoint.`, + }, + "master_global_access_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + AtLeastOneOf: privateClusterConfigKeys, + Description: "Controls cluster master global access settings.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the cluster master is accessible globally or not.`, + }, + }, + }, + }, + }, + }, + }, + + "resource_labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The GCE resource labels (a map of key/value pairs) to be applied to the cluster.`, + }, + + "label_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `The fingerprint of the set of labels for this cluster.`, + }, + + "default_max_pods_per_node": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The default maximum number of pods per node in this cluster. 
This doesn't work on "routes-based" clusters, clusters that don't have IP Aliasing enabled.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + + "vertical_pod_autoscaling": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Vertical Pod Autoscaling automatically adjusts the resources of pods controlled by it.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Enables vertical pod autoscaling.`, + }, + }, + }, + }, + "workload_identity_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + // Computed is unsafe to remove- this API may return `"workloadIdentityConfig": {},` or omit the key entirely + // and both will be valid. Note that we don't handle the case where the API returns nothing & the user has defined + // workload_identity_config today. + Computed: true, + Description: `Configuration for the use of Kubernetes Service Accounts in GCP IAM policies.`, + ConflictsWith: []string{"enable_autopilot"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "workload_pool": { + Type: schema.TypeString, + Optional: true, + Description: "The workload pool to attach all Kubernetes service accounts to.", + }, + }, + }, + }, + + "identity_service_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Configuration for Identity Service which allows customers to use external identity providers with the K8S API.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to enable the Identity Service component.", + }, + }, + }, + }, + + "service_external_ips_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `If set, and enabled=true, services with external ips field will not be blocked`, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `When enabled, services with external ips specified will be allowed.`, + }, + }, + }, + }, + + "mesh_certificates": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `If set, and enable_certificates=true, the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_certificates": { + Type: schema.TypeBool, + Required: true, + Description: `When enabled the GKE Workload Identity Certificates controller and node agent will be deployed in the cluster.`, + }, + }, + }, + }, + + "database_encryption": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Application-layer Secrets Encryption settings. The object format is {state = string, key_name = string}. Valid values of state are: "ENCRYPTED"; "DECRYPTED". key_name is the name of a CloudKMS key.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"ENCRYPTED", "DECRYPTED"}, false), + Description: `ENCRYPTED or DECRYPTED.`, + }, + "key_name": { + Type: schema.TypeString, + Optional: true, + Description: `The key to use to encrypt/decrypt secrets.`, + }, + }, + }, + }, + + "release_channel": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration options for the Release channel feature, which provide more control over automatic upgrades of your GKE clusters. Note that removing this field from your config will not unenroll it. 
Instead, use the "UNSPECIFIED" channel.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "channel": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"UNSPECIFIED", "RAPID", "REGULAR", "STABLE"}, false), + Description: `The selected release channel. Accepted values are: +* UNSPECIFIED: Not set. +* RAPID: Weekly upgrade cadence; Early testers and developers who requires new features. +* REGULAR: Multiple per month upgrade cadence; Production users who need features not yet offered in the Stable channel. +* STABLE: Every few months upgrade cadence; Production users who need stability above all else, and for whom frequent upgrades are too risky.`, + }, + }, + }, + }, + + "tpu_ipv4_cidr_block": { + Computed: true, + Type: schema.TypeString, + Description: `The IP address range of the Cloud TPUs in this cluster, in CIDR notation (e.g. 1.2.3.4/29).`, + }, + +{{ if ne $.TargetVersionName `ga` -}} + "cluster_telemetry": { + Type: schema.TypeList, + Optional: true, + Computed: true, + Description: `Telemetry integration for the cluster.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"DISABLED","ENABLED","SYSTEM_ONLY"}, false), + Description: `Type of the integration.`, + }, + }, + }, + }, +{{- end }} + + "default_snat_status": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Whether the cluster disables default in-node sNAT rules. 
In-node sNAT rules will be disabled when defaultSnatStatus is disabled.`, + Elem: &schema.Resource { + Schema: map[string]*schema.Schema { + "disabled": { + Type: schema.TypeBool, + Required: true, + Description: `When disabled is set to false, default IP masquerade rules will be applied to the nodes to prevent sNAT on cluster internal traffic.`, + }, + }, + }, + }, + + "datapath_provider": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The desired datapath provider for this cluster. By default, uses the IPTables-based kube-proxy implementation.`, + ValidateFunc: validation.StringInSlice([]string{"DATAPATH_PROVIDER_UNSPECIFIED", "LEGACY_DATAPATH", "ADVANCED_DATAPATH"}, false), + DiffSuppressFunc: tpgresource.EmptyOrDefaultStringSuppress("DATAPATH_PROVIDER_UNSPECIFIED"), + }, + + "enable_intranode_visibility": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Whether Intra-node visibility is enabled for this cluster. This makes same node pod to pod traffic visible for VPC network.`, + ConflictsWith: []string{"enable_autopilot"}, + }, + "enable_l4_ilb_subsetting": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether L4ILB Subsetting is enabled for this cluster.`, + Default: false, + }, +{{- if ne $.TargetVersionName "ga" }} + "enable_multi_networking": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether multi-networking is enabled for this cluster.`, + Default: false, + }, + "enable_fqdn_network_policy": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether FQDN Network Policy is enabled on this cluster.`, + Default: false, + }, +{{- end }} + "private_ipv6_google_access": { + Type: schema.TypeString, + Optional: true, + Description: `The desired state of IPv6 connectivity to Google Services. 
By default, no private IPv6 access to or from Google Services (all access will be via IPv4).`, + Computed: true, + }, + + "cost_management_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `Cost management configuration for the cluster.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `Whether to enable GKE cost allocation. When you enable GKE cost allocation, the cluster name and namespace of your GKE workloads appear in the labels field of the billing export to BigQuery. Defaults to false.`, + }, + }, + }, + }, + + "resource_usage_export_config": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: `Configuration for the ResourceUsageExportConfig feature.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_network_egress_metering": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether to enable network egress metering for this cluster. If enabled, a daemonset will be created in the cluster to meter network egress traffic.`, + }, + "enable_resource_consumption_metering": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Whether to enable resource consumption metering on this cluster. When enabled, a table will be created in the resource export BigQuery dataset to store resource consumption data. The resulting table can be joined with the resource usage table or with BigQuery billing export. 
Defaults to true.`, + }, + "bigquery_destination": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Description: `Parameters for using BigQuery as the destination of resource usage export.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataset_id": { + Type: schema.TypeString, + Required: true, + Description: `The ID of a BigQuery Dataset.`, + }, + }, + }, + }, + }, + }, + }, + "dns_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: suppressDiffForAutopilot, + Description: `Configuration for Cloud DNS for Kubernetes Engine.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_dns": { + Type: schema.TypeString, + Default: "PROVIDER_UNSPECIFIED", + ValidateFunc: validation.StringInSlice([]string{"PROVIDER_UNSPECIFIED", "PLATFORM_DEFAULT", "CLOUD_DNS"}, false), + Description: `Which in-cluster DNS provider should be used.`, + Optional: true, + }, + "cluster_dns_scope": { + Type: schema.TypeString, + Default: "DNS_SCOPE_UNSPECIFIED", + ValidateFunc: validation.StringInSlice([]string{"DNS_SCOPE_UNSPECIFIED", "CLUSTER_SCOPE", "VPC_SCOPE"}, false), + Description: `The scope of access to cluster DNS records.`, + Optional: true, + }, + "cluster_dns_domain": { + Type: schema.TypeString, + Description: `The suffix used for all cluster service records.`, + Optional: true, + }, + }, + }, + }, + "gateway_api_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Configuration for GKE Gateway API controller.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "channel": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"CHANNEL_DISABLED", "CHANNEL_EXPERIMENTAL", "CHANNEL_STANDARD"}, false), + Description: `The Gateway API release channel to use for Gateway API.`, + }, + }, + }, + }, + }, + } +} diff --git 
a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl new file mode 100644 index 000000000000..8f4d6808b5a9 --- /dev/null +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl @@ -0,0 +1,11037 @@ +package container_test + +import ( + "bytes" + "fmt" + "testing" + "regexp" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccContainerCluster_basic(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "services_ipv4_cidr"), + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "self_link"), + resource.TestCheckResourceAttr("google_container_cluster.primary", "networking_mode", "VPC_NATIVE"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportStateId: fmt.Sprintf("us-central1-a/%s", clusterName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + ResourceName: "google_container_cluster.primary", + ImportStateId: 
fmt.Sprintf("%s/us-central1-a/%s", envvar.GetTestProjectFromEnv(), clusterName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_resourceManagerTags(t *testing.T) { + t.Parallel() + + pid := envvar.GetTestProjectFromEnv() + + randomSuffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", randomSuffix) + + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_resourceManagerTags(pid, clusterName, networkName, subnetworkName, randomSuffix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "self_link"), + resource.TestCheckResourceAttrSet("google_container_cluster.primary", "node_config.0.resource_manager_tags.%"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportStateId: fmt.Sprintf("us-central1-a/%s", clusterName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_networkingModeRoutes(t *testing.T) { + t.Parallel() + + firstClusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + secondClusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_networkingModeRoutes(firstClusterName, secondClusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "networking_mode", "ROUTES"), + resource.TestCheckResourceAttr("google_container_cluster.secondary", "networking_mode", "ROUTES"), ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + ResourceName: "google_container_cluster.secondary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_misc(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_misc(clusterName, networkName, subnetworkName), + // Explicitly check removing the default node pool since we won't + // catch it by just importing. 
+ Check: resource.TestCheckResourceAttr( + "google_container_cluster.primary", "node_pool.#", "0"), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_misc_update(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withAddons(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + pid := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAddons(pid, clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + // TODO: clean up this list in `4.0.0`, remove both `workload_identity_config` fields (same for below) + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_updateAddons(pid, clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + // Issue with cloudrun_config addon: 
https://github.com/hashicorp/terraform-provider-google/issues/11943 + // { + // Config: testAccContainerCluster_withInternalLoadBalancer(pid, clusterName, networkName, subnetworkName), + // }, + // { + // ResourceName: "google_container_cluster.primary", + // ImportState: true, + // ImportStateVerify: true, + // ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + // }, + }, + }) +} + +func TestAccContainerCluster_withDeletionProtection(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withDeletionProtection(clusterName, networkName, subnetworkName, "false"), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withDeletionProtection(clusterName, networkName, subnetworkName, "true"), + }, + { + Config: testAccContainerCluster_withDeletionProtection(clusterName, networkName, subnetworkName, "true"), + Destroy: true, + ExpectError: regexp.MustCompile("Cannot destroy cluster because deletion_protection is set to true. 
Set it to false to proceed with cluster deletion."), + }, + { + Config: testAccContainerCluster_withDeletionProtection(clusterName, networkName, subnetworkName, "false"), + }, + }, + }) +} + +func TestAccContainerCluster_withNotificationConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + newTopic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNotificationConfig(clusterName, topic, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNotificationConfig(clusterName, newTopic, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_disableNotificationConfig(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNotificationConfig(clusterName, newTopic, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.notification_config", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withFilteredNotificationConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + topic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + newTopic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withFilteredNotificationConfig(clusterName, topic, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.filtered_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withFilteredNotificationConfigUpdate(clusterName, newTopic, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.filtered_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_disableFilteredNotificationConfig(clusterName, newTopic, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.filtered_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withConfidentialNodes(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + npName := 
fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withConfidentialNodes(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.confidential_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_disableConfidentialNodes(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.confidential_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withConfidentialNodes(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.confidential_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withILBSubsetting(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + npName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccContainerCluster_disableILBSubSetting(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.confidential_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withILBSubSetting(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.confidential_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_disableILBSubSetting(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.confidential_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerCluster_withMultiNetworking(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_enableMultiNetworking(clusterName), + }, + { + ResourceName: "google_container_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerCluster_withFQDNNetworkPolicy(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withFQDNNetworkPolicy(clusterName, false), + }, + { + ResourceName: "google_container_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withFQDNNetworkPolicy(clusterName, true), + }, + { + ResourceName: "google_container_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerCluster_withAdditiveVPC(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAdditiveVPC(clusterName), + }, + { + ResourceName: "google_container_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} +{{- end }} + +func TestAccContainerCluster_withMasterAuthConfig_NoCert(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccContainerCluster_withMasterAuthNoCert(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_master_auth_no_cert", "master_auth.0.client_certificate", ""), + ), + }, + { + ResourceName: "google_container_cluster.with_master_auth_no_cert", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withAuthenticatorGroupsConfig(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + orgDomain := envvar.GetTestOrgDomainFromEnv(t) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_cluster.primary", + "authenticator_groups_config.0.enabled"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAuthenticatorGroupsConfigUpdate(clusterName, orgDomain, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", + "authenticator_groups_config.0.security_group", fmt.Sprintf("gke-security-groups@%s", orgDomain)), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAuthenticatorGroupsConfigUpdate2(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_cluster.primary", + "authenticator_groups_config.0.enabled"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_enableMultiNetworking(clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s-nw" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } + + secondary_ip_range { + range_name = "another-pod" + ip_cidr_range = "10.1.32.0/22" + } + + lifecycle { + ignore_changes = [ + # The auto nodepool creates a secondary range which diffs this resource. 
+ secondary_ip_range, + ] + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + release_channel { + channel = "RAPID" + } + enable_multi_networking = true + datapath_provider = "ADVANCED_DATAPATH" + deletion_protection = false +} +`, clusterName, clusterName) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withAdditiveVPC(clusterName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + dns_config { + cluster_dns = "CLOUD_DNS" + additive_vpc_scope_dns_domain = "test.com" + cluster_dns_scope = "CLUSTER_SCOPE" + } + deletion_protection = false +} +`, clusterName) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withFQDNNetworkPolicy(clusterName string, enabled bool) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_compute_network" "container_network" { + name = "%s-nw" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } + + 
secondary_ip_range { + range_name = "another-pod" + ip_cidr_range = "10.1.32.0/22" + } + + lifecycle { + ignore_changes = [ + # The auto nodepool creates a secondary range which diffs this resource. + secondary_ip_range, + ] + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + enable_fqdn_network_policy = %t + datapath_provider = "ADVANCED_DATAPATH" + deletion_protection = false +} +`, clusterName, clusterName, enabled) +} +{{- end }} + +func TestAccContainerCluster_withNetworkPolicyEnabled(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNetworkPolicyEnabled(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_network_policy_enabled", + "network_policy.#", "1"), + ), + }, + { + ResourceName: "google_container_cluster.with_network_policy_enabled", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_removeNetworkPolicy(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_network_policy_enabled", + "network_policy.0.enabled", "false"), + ), + }, + { + ResourceName: "google_container_cluster.with_network_policy_enabled", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNetworkPolicyDisabled(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_network_policy_enabled", + "network_policy.0.enabled", "false"), + ), + }, + { + ResourceName: "google_container_cluster.with_network_policy_enabled", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNetworkPolicyConfigDisabled(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_network_policy_enabled", + "addons_config.0.network_policy_config.0.disabled", "true"), + ), + }, + { + ResourceName: "google_container_cluster.with_network_policy_enabled", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNetworkPolicyConfigDisabled(clusterName, networkName, subnetworkName), + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + }, + }) +} + + +func TestAccContainerCluster_withReleaseChannelEnabled(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := 
acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "STABLE", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_release_channel", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "UNSPECIFIED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_release_channel", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withReleaseChannelEnabledDefaultVersion(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withReleaseChannelEnabledDefaultVersion(clusterName, "REGULAR", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_release_channel", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "REGULAR", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_release_channel", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "UNSPECIFIED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_release_channel", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withInvalidReleaseChannel(t *testing.T) { + // This is essentially a unit test, no interactions + acctest.SkipIfVcr(t) + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withReleaseChannelEnabled(clusterName, "CANARY", networkName, subnetworkName), + ExpectError: regexp.MustCompile(`expected release_channel\.0\.channel to be one of \["?UNSPECIFIED"? "?RAPID"? "?REGULAR"? 
"?STABLE"?\], got CANARY`), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerCluster_withTelemetryEnabled(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withTelemetryEnabled(clusterName, "ENABLED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_cluster_telemetry", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withTelemetryEnabled(clusterName, "DISABLED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_cluster_telemetry", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withTelemetryEnabled(clusterName, "SYSTEM_ONLY", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_cluster_telemetry", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} +{{- end }} + +func TestAccContainerCluster_withMasterAuthorizedNetworksConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName, []string{}, ""), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_master_authorized_networks", + "master_authorized_networks_config.#", "1"), + resource.TestCheckResourceAttr("google_container_cluster.with_master_authorized_networks", + "master_authorized_networks_config.0.cidr_blocks.#", "0"), + ), + }, + { + Config: testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName, []string{"8.8.8.8/32"}, ""), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_master_authorized_networks", + "master_authorized_networks_config.0.cidr_blocks.#", "1"), + ), + }, + { + ResourceName: "google_container_cluster.with_master_authorized_networks", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName, []string{"10.0.0.0/8", "8.8.8.8/32"}, ""), + }, + { + ResourceName: "google_container_cluster.with_master_authorized_networks", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName, []string{}, ""), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_master_authorized_networks", + "master_authorized_networks_config.0.cidr_blocks.#", "0"), + ), + }, + { + ResourceName: "google_container_cluster.with_master_authorized_networks", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + 
Config: testAccContainerCluster_removeMasterAuthorizedNetworksConfig(clusterName), + }, + { + ResourceName: "google_container_cluster.with_master_authorized_networks", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withGcpPublicCidrsAccessEnabledToggle(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withoutGcpPublicCidrsAccessEnabled(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_gcp_public_cidrs_access_enabled", + "master_authorized_networks_config.#", "0"), + ), + }, + { + ResourceName: "google_container_cluster.with_gcp_public_cidrs_access_enabled", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withGcpPublicCidrsAccessEnabled(clusterName, "false", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_gcp_public_cidrs_access_enabled", + "master_authorized_networks_config.0.gcp_public_cidrs_access_enabled", "false"), + ), + }, + { + ResourceName: "google_container_cluster.with_gcp_public_cidrs_access_enabled", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: 
testAccContainerCluster_withGcpPublicCidrsAccessEnabled(clusterName, "true", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_gcp_public_cidrs_access_enabled", + "master_authorized_networks_config.0.gcp_public_cidrs_access_enabled", "true"), + ), + }, + }, + }) +} + +func testAccContainerCluster_withGcpPublicCidrsAccessEnabled(clusterName string, flag, networkName, subnetworkName string) string { + + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_gcp_public_cidrs_access_enabled" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + + master_authorized_networks_config { + gcp_public_cidrs_access_enabled = %s + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, flag, networkName, subnetworkName) +} + +func testAccContainerCluster_withoutGcpPublicCidrsAccessEnabled(clusterName, networkName, subnetworkName string) string { + + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_gcp_public_cidrs_access_enabled" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func TestAccContainerCluster_regional(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-regional-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_regional(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.regional", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_regionalWithNodePool(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-regional-%s", acctest.RandString(t, 10)) + npName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_regionalWithNodePool(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.regional", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_regionalWithNodeLocations(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_regionalNodeLocations(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_locations", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_regionalUpdateNodeLocations(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_locations", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerCluster_withTpu(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withTpu(containerNetName, clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_tpu", "enable_tpu", "true"), + ), + }, + { + ResourceName: "google_container_cluster.with_tpu", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} +{{- end }} + +func TestAccContainerCluster_withPrivateClusterConfigBasic(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withPrivateClusterConfig(containerNetName, clusterName, false), + }, + { + ResourceName: "google_container_cluster.with_private_cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withPrivateClusterConfig(containerNetName, clusterName, true), + }, + { + ResourceName: "google_container_cluster.with_private_cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withPrivateClusterConfigMissingCidrBlock(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withPrivateClusterConfigMissingCidrBlock(containerNetName, clusterName, "us-central1-a", false), + ExpectError: regexp.MustCompile("master_ipv4_cidr_block must be set if enable_private_nodes is true"), + }, + }, + }) +} + +func TestAccContainerCluster_withPrivateClusterConfigMissingCidrBlock_withAutopilot(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withPrivateClusterConfigMissingCidrBlock(containerNetName, clusterName, "us-central1", true), + }, + { + ResourceName: "google_container_cluster.with_private_cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withPrivateClusterConfigGlobalAccessEnabledOnly(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withPrivateClusterConfigGlobalAccessEnabledOnly(clusterName, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.with_private_cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withPrivateClusterConfigGlobalAccessEnabledOnly(clusterName, networkName, subnetworkName, false), + }, + { + ResourceName: "google_container_cluster.with_private_cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withIntraNodeVisibility(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withIntraNodeVisibility(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_intranode_visibility", "enable_intranode_visibility", "true"), + ), + }, + { + ResourceName: "google_container_cluster.with_intranode_visibility", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_updateIntraNodeVisibility(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_intranode_visibility", "enable_intranode_visibility", "false"), + ), + }, + { + ResourceName: "google_container_cluster.with_intranode_visibility", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withVersion(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withVersion(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", 
"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_updateVersion(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withLowerVersion(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_updateVersion(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_version", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodeConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodeConfig(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"node_config.0.taint", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNodeConfigUpdate(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"node_config.0.taint", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withLoggingVariantInNodeConfig(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, "MAX_THROUGHPUT", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_logging_variant_in_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withLoggingVariantInNodePool(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePoolName := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, "MAX_THROUGHPUT", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_logging_variant_in_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withLoggingVariantUpdates(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "DEFAULT", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_logging_variant_node_pool_default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "MAX_THROUGHPUT", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_logging_variant_node_pool_default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, "DEFAULT", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_logging_variant_node_pool_default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withAdvancedMachineFeaturesInNodePool(t 
*testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePoolName := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAdvancedMachineFeaturesInNodePool(clusterName, nodePoolName, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.with_advanced_machine_features_in_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerCluster_withNodePoolDefaults(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_cluster.primary", + "node_pool_defaults.0.node_config_defaults.0.gcfs_config.0.enabled"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportStateId: fmt.Sprintf("us-central1-a/%s", clusterName), + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNodePoolDefaults(clusterName, "true", networkName, subnetworkName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool_defaults", + "node_pool_defaults.0.node_config_defaults.0.gcfs_config.#", "1"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool_defaults", + "node_pool_defaults.0.node_config_defaults.0.gcfs_config.0.enabled", "true"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool_defaults", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNodePoolDefaults(clusterName, "false", networkName, subnetworkName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool_defaults", + "node_pool_defaults.0.node_config_defaults.0.gcfs_config.#", "1"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool_defaults", + "node_pool_defaults.0.node_config_defaults.0.gcfs_config.0.enabled", "false"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool_defaults", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, +}, + }) +} +{{- end }} + +func TestAccContainerCluster_withNodeConfigScopeAlias(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ 
+ { + Config: testAccContainerCluster_withNodeConfigScopeAlias(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_config_scope_alias", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodeConfigShieldedInstanceConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodeConfigShieldedInstanceConfig(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodeConfigReservationAffinity(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodeConfigReservationAffinity(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("google_container_cluster.with_node_config", + "node_config.0.reservation_affinity.#", "1"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_config", + "node_config.0.reservation_affinity.0.consume_reservation_type", "ANY_RESERVATION"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodeConfigReservationAffinitySpecific(t *testing.T) { + t.Parallel() + + reservationName := fmt.Sprintf("tf-test-reservation-%s", acctest.RandString(t, 10)) + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodeConfigReservationAffinitySpecific(reservationName, clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_node_config", + "node_config.0.reservation_affinity.#", "1"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_config", + "node_config.0.reservation_affinity.0.consume_reservation_type", "SPECIFIC_RESERVATION"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_config", + "node_config.0.reservation_affinity.0.key", "compute.googleapis.com/reservation-name"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_config", + "node_config.0.reservation_affinity.0.values.#", "1"), + 
resource.TestCheckResourceAttr("google_container_cluster.with_node_config", + "node_config.0.reservation_affinity.0.values.0", reservationName), + ), + }, + { + ResourceName: "google_container_cluster.with_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withWorkloadMetadataConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withWorkloadMetadataConfig(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_workload_metadata_config", + "node_config.0.workload_metadata_config.0.mode", "GCE_METADATA"), + ), + }, + { + ResourceName: "google_container_cluster.with_workload_metadata_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func TestAccContainerCluster_withSandboxConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + 
CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withSandboxConfig(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_sandbox_config", + "node_config.0.sandbox_config.0.sandbox_type", "gvisor"), + resource.TestCheckResourceAttr("google_container_cluster.with_sandbox_config", + "node_pool.0.node_config.0.sandbox_config.0.sandbox_type", "gvisor"), + ), + }, + { + ResourceName: "google_container_cluster.with_sandbox_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "node_config.0.taint", "deletion_protection"}, + }, + { + // GKE sets automatic labels and taints on nodes. This makes + // sure we ignore the automatic ones and keep our own. + Config: testAccContainerCluster_withSandboxConfig(clusterName, networkName, subnetworkName), + // When we use PlanOnly without ExpectNonEmptyPlan, we're + // guaranteeing that the computed fields of the resources don't + // force an unintentional change to the plan. That is, we + // expect this part of the test to pass only if the plan + // doesn't change. + PlanOnly: true, + }, + { + // Now we'll modify the labels, which should force a change to + // the plan. We make sure we don't over-suppress and end up + // eliminating the labels or taints we asked for. This will + // destroy and recreate the cluster as labels are immutable. 
+ Config: testAccContainerCluster_withSandboxConfig_changeLabels(clusterName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_sandbox_config", + "node_config.0.labels.test.terraform.io/gke-sandbox", "true"), + resource.TestCheckResourceAttr("google_container_cluster.with_sandbox_config", + "node_config.0.labels.test.terraform.io/gke-sandbox-amended", "also-true"), + resource.TestCheckResourceAttr("google_container_cluster.with_sandbox_config", + "node_config.0.taint.0.key", "test.terraform.io/gke-sandbox"), + ), + }, + }, + }) +} +{{- end }} + +func TestAccContainerCluster_withBootDiskKmsKey(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withBootDiskKmsKey(clusterName, kms.CryptoKey.Name, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_boot_disk_kms_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_network(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-net-%s", 
acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_networkRef(clusterName, network), + }, + { + ResourceName: "google_container_cluster.with_net_ref_by_url", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + ResourceName: "google_container_cluster.with_net_ref_by_name", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_backend(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_backendRef(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolBasic(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + npName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodePoolBasic(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolUpdateVersion(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + npName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodePoolLowerVersion(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNodePoolUpdateVersion(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolResize(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + npName := 
fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodePoolNodeLocations(clusterName, npName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.node_count", "2"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNodePoolResize(clusterName, npName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.node_count", "3"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolAutoscaling(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + npName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + 
Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodePoolAutoscaling(clusterName, npName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "1"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "3"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNodePoolUpdateAutoscaling(clusterName, npName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "1"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "5"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNodePoolBasic(clusterName, npName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count"), + resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolCIA(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + npName := 
fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerRegionalCluster_withNodePoolCIA(clusterName, npName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "0"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "0"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_min_node_count", "3"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_max_node_count", "21"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.location_policy", "BALANCED"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerRegionalClusterUpdate_withNodePoolCIA(clusterName, npName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count", "0"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count", "0"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", 
"node_pool.0.autoscaling.0.total_min_node_count", "4"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_max_node_count", "32"), + resource.TestCheckResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.location_policy", "ANY"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerRegionalCluster_withNodePoolBasic(clusterName, npName, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.min_node_count"), + resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.max_node_count"), + resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_min_node_count"), + resource.TestCheckNoResourceAttr("google_container_cluster.with_node_pool", "node_pool.0.autoscaling.0.total_max_node_count"), + ), + }, + { + ResourceName: "google_container_cluster.with_node_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolNamePrefix(t *testing.T) { + // Randomness + acctest.SkipIfVcr(t) + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + npNamePrefix := "tf-test-np-" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodePoolNamePrefix(clusterName, npNamePrefix, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_pool_name_prefix", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"node_pool.0.name_prefix", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolMultiple(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + npNamePrefix := "tf-test-np-" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodePoolMultiple(clusterName, npNamePrefix, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_pool_multiple", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withNodePoolConflictingNameFields(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + npPrefix := "tf-test-np" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodePoolConflictingNameFields(clusterName, npPrefix), + ExpectError: regexp.MustCompile("Cannot specify both name and name_prefix for a node_pool"), + }, + }, + 
}) +} + +func TestAccContainerCluster_withNodePoolNodeConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withNodePoolNodeConfig(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_node_pool_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withMaintenanceWindow(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_maintenance_window" + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withMaintenanceWindow(clusterName, "03:00", networkName, subnetworkName), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMaintenanceWindow(clusterName, "", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckNoResourceAttr(resourceName, + "maintenance_policy.0.daily_maintenance_window.0.start_time"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + // maintenance_policy.# = 0 is equivalent to no maintenance policy at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"maintenance_policy.#", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withRecurringMaintenanceWindow(t *testing.T) { + t.Parallel() + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_recurring_maintenance_window" + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withRecurringMaintenanceWindow(cluster, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr(resourceName, + "maintenance_policy.0.daily_maintenance_window.0.start_time"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + + }, + { + Config: testAccContainerCluster_withRecurringMaintenanceWindow(cluster, "", "", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr(resourceName, + "maintenance_policy.0.daily_maintenance_window.0.start_time"), + resource.TestCheckNoResourceAttr(resourceName, + "maintenance_policy.0.recurring_window.0.start_time"), + ), + }, + { + ResourceName: resourceName, + 
ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + // maintenance_policy.# = 0 is equivalent to no maintenance policy at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"maintenance_policy.#", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withMaintenanceExclusionWindow(t *testing.T) { + t.Parallel() + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_maintenance_exclusion_window" + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withExclusion_RecurringMaintenanceWindow(cluster, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z", "2019-05-01T00:00:00Z", "2019-05-02T00:00:00Z", networkName, subnetworkName), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withExclusion_DailyMaintenanceWindow(cluster, "2020-01-01T00:00:00Z", "2020-01-02T00:00:00Z", networkName, subnetworkName), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withMaintenanceExclusionOptions(t *testing.T) { + t.Parallel() + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_maintenance_exclusion_options" + networkName := 
acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withExclusionOptions_RecurringMaintenanceWindow( + cluster, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z", "2019-05-01T00:00:00Z", "2019-05-02T00:00:00Z", "NO_MINOR_UPGRADES", "NO_MINOR_OR_NODE_UPGRADES", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.0.exclusion_options.0.scope", "NO_MINOR_UPGRADES"), + resource.TestCheckResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.1.exclusion_options.0.scope", "NO_MINOR_OR_NODE_UPGRADES"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_deleteMaintenanceExclusionOptions(t *testing.T) { + t.Parallel() + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_maintenance_exclusion_options" + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withExclusionOptions_RecurringMaintenanceWindow( + cluster, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z", 
"2019-05-01T00:00:00Z", "2019-05-02T00:00:00Z", "NO_UPGRADES", "NO_MINOR_OR_NODE_UPGRADES", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.0.exclusion_options.0.scope", "NO_UPGRADES"), + resource.TestCheckResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.1.exclusion_options.0.scope", "NO_MINOR_OR_NODE_UPGRADES"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_NoExclusionOptions_RecurringMaintenanceWindow( + cluster, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z", "2019-05-01T00:00:00Z", "2019-05-02T00:00:00Z", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.0.exclusion_options.0.scope"), + resource.TestCheckNoResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.1.exclusion_options.0.scope"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_updateMaintenanceExclusionOptions(t *testing.T) { + t.Parallel() + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_maintenance_exclusion_options" + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + // step1: create a new cluster and initialize the maintenceExclusion without exclusion scopes, + // step2: add exclusion scopes to the maintenancePolicy, + // step3: update the maintenceExclusion with new scopes + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_NoExclusionOptions_RecurringMaintenanceWindow( + cluster, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z", "2019-05-01T00:00:00Z", "2019-05-02T00:00:00Z", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.0.exclusion_options.0.scope"), + resource.TestCheckNoResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.1.exclusion_options.0.scope"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withExclusionOptions_RecurringMaintenanceWindow( + cluster, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z", "2019-05-01T00:00:00Z", "2019-05-02T00:00:00Z", "NO_MINOR_UPGRADES", "NO_MINOR_OR_NODE_UPGRADES", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.0.exclusion_options.0.scope", "NO_MINOR_UPGRADES"), + resource.TestCheckResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.1.exclusion_options.0.scope", "NO_MINOR_OR_NODE_UPGRADES"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_updateExclusionOptions_RecurringMaintenanceWindow( + cluster, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z", "2019-05-01T00:00:00Z", "2019-05-02T00:00:00Z", "NO_UPGRADES", 
"NO_MINOR_UPGRADES", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.0.exclusion_options.0.scope", "NO_UPGRADES"), + resource.TestCheckResourceAttr(resourceName, + "maintenance_policy.0.maintenance_exclusion.1.exclusion_options.0.scope", "NO_MINOR_UPGRADES"), + ), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + + +func TestAccContainerCluster_deleteExclusionWindow(t *testing.T) { + t.Parallel() + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_maintenance_exclusion_window" + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withExclusion_DailyMaintenanceWindow(cluster, "2020-01-01T00:00:00Z", "2020-01-02T00:00:00Z", networkName, subnetworkName), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withExclusion_RecurringMaintenanceWindow(cluster, "2019-01-01T00:00:00Z", "2019-01-02T00:00:00Z", "2019-05-01T00:00:00Z", "2019-05-02T00:00:00Z", networkName, subnetworkName), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: 
testAccContainerCluster_withExclusion_NoMaintenanceWindow(cluster, "2020-01-01T00:00:00Z", "2020-01-02T00:00:00Z", networkName, subnetworkName), + }, + { + ResourceName: resourceName, + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withIPAllocationPolicy_existingSecondaryRanges(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withIPAllocationPolicy_existingSecondaryRanges(containerNetName, clusterName), + }, + { + ResourceName: "google_container_cluster.with_ip_allocation_policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withIPAllocationPolicy_specificIPRanges(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withIPAllocationPolicy_specificIPRanges(containerNetName, clusterName), + }, + { + ResourceName: "google_container_cluster.with_ip_allocation_policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withIPAllocationPolicy_specificSizes(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withIPAllocationPolicy_specificSizes(containerNetName, clusterName), + }, + { + ResourceName: "google_container_cluster.with_ip_allocation_policy", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_stackType_withDualStack(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_stack_type" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_stackType_withDualStack(containerNetName, clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "ip_allocation_policy.0.stack_type", "IPV4_IPV6"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_stackType_withSingleStack(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", 
acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_stack_type" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_stackType_withSingleStack(containerNetName, clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "ip_allocation_policy.0.stack_type", "IPV4"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_with_PodCIDROverprovisionDisabled(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + resourceName := "google_container_cluster.with_pco_disabled" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_with_PodCIDROverprovisionDisabled(containerNetName, clusterName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(resourceName, "ip_allocation_policy.0.pod_cidr_overprovision_config.0.disabled", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_nodeAutoprovisioning(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", 
acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioning(clusterName, networkName, subnetworkName, true, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning", + "cluster_autoscaling.0.enabled", "true"), + ), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioning(clusterName, networkName, subnetworkName, false, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning", + "cluster_autoscaling.0.enabled", "false"), + ), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_nodeAutoprovisioningDefaults(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + includeMinCpuPlatform := true + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + 
Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningDefaults(clusterName, networkName, subnetworkName, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning", + "cluster_autoscaling.0.enabled", "true"), + ), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioningDefaults(clusterName, networkName, subnetworkName, true), + PlanOnly: true, + ExpectNonEmptyPlan: false, + }, + { + Config: testAccContainerCluster_autoprovisioningDefaultsMinCpuPlatform(clusterName, networkName, subnetworkName, includeMinCpuPlatform), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioningDefaultsMinCpuPlatform(clusterName, networkName, subnetworkName, !includeMinCpuPlatform), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_autoprovisioningDefaultsUpgradeSettings(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + 
Config: testAccContainerCluster_autoprovisioningDefaultsUpgradeSettings(clusterName, networkName, subnetworkName, 2, 1, "SURGE"), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning_upgrade_settings", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioningDefaultsUpgradeSettings(clusterName, networkName, subnetworkName, 2, 1, "BLUE_GREEN"), + ExpectError: regexp.MustCompile(`Surge upgrade settings max_surge/max_unavailable can only be used when strategy is set to SURGE`), + }, + { + Config: testAccContainerCluster_autoprovisioningDefaultsUpgradeSettingsWithBlueGreenStrategy(clusterName, networkName, subnetworkName, "3.500s", "BLUE_GREEN"), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning_upgrade_settings", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_nodeAutoprovisioningNetworkTags(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioning(clusterName, networkName, subnetworkName, true, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autoprovisioning", + "node_pool_auto_config.0.network_tags.0.tags.0", "test-network-tag"), + ), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: 
true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withShieldedNodes(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withShieldedNodes(clusterName, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.with_shielded_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withShieldedNodes(clusterName, networkName, subnetworkName, false), + }, + { + ResourceName: "google_container_cluster.with_shielded_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withAutopilot(t *testing.T) { + t.Parallel() + + pid := envvar.GetTestProjectFromEnv() + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, false, ""), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("google_container_cluster.with_autopilot", "networking_mode", "VPC_NATIVE"), + ), + }, + { + ResourceName: "google_container_cluster.with_autopilot", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerClusterCustomServiceAccount_withAutopilot(t *testing.T) { + t.Parallel() + + pid := envvar.GetTestProjectFromEnv() + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + serviceAccountName := fmt.Sprintf("tf-test-sa-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, false, serviceAccountName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autopilot", + "cluster_autoscaling.0.enabled", "true"), + resource.TestCheckResourceAttr("google_container_cluster.with_autopilot", + "cluster_autoscaling.0.auto_provisioning_defaults.0.service_account", + fmt.Sprintf("%s@%s.iam.gserviceaccount.com", serviceAccountName, pid)), + resource.TestCheckResourceAttr("google_container_cluster.with_autopilot", + "cluster_autoscaling.0.auto_provisioning_defaults.0.oauth_scopes.0", "https://www.googleapis.com/auth/cloud-platform"), + ), + }, + { + ResourceName: "google_container_cluster.with_autopilot", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_errorAutopilotLocation(t *testing.T) { + t.Parallel() + + pid 
:= envvar.GetTestProjectFromEnv() + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1-a", true, false, ""), + ExpectError: regexp.MustCompile(`Autopilot clusters must be regional clusters.`), + }, + }, + }) +} + +func TestAccContainerCluster_withAutopilotNetworkTags(t *testing.T) { + t.Parallel() + + pid := envvar.GetTestProjectFromEnv() + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutopilot(pid, containerNetName, clusterName, "us-central1", true, true, ""), + }, + { + ResourceName: "google_container_cluster.with_autopilot", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withAutopilotResourceManagerTags(t *testing.T) { + t.Parallel() + + pid := envvar.GetTestProjectFromEnv() + + randomSuffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", randomSuffix) + clusterNetName := fmt.Sprintf("tf-test-container-net-%s", randomSuffix) + clusterSubnetName := fmt.Sprintf("tf-test-container-subnet-%s", randomSuffix) + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutopilotResourceManagerTags(pid, clusterName, clusterNetName, clusterSubnetName, randomSuffix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_cluster.with_autopilot", "self_link"), + resource.TestCheckResourceAttrSet("google_container_cluster.with_autopilot", "node_pool_auto_config.0.resource_manager_tags.%"), + ), + }, + { + ResourceName: "google_container_cluster.with_autopilot", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAutopilotResourceManagerTagsUpdate1(pid, clusterName, clusterNetName, clusterSubnetName, randomSuffix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_cluster.with_autopilot", "node_pool_auto_config.0.resource_manager_tags.%"), + ), + }, + { + ResourceName: "google_container_cluster.with_autopilot", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAutopilotResourceManagerTagsUpdate2(pid, clusterName, clusterNetName, clusterSubnetName, randomSuffix), + }, + { + ResourceName: "google_container_cluster.with_autopilot", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withWorkloadIdentityConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := 
acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + pid := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withWorkloadIdentityConfigEnabled(pid, clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_workload_identity_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_updateWorkloadIdentityConfig(pid, clusterName, networkName, subnetworkName, false), + }, + { + ResourceName: "google_container_cluster.with_workload_identity_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_updateWorkloadIdentityConfig(pid, clusterName, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.with_workload_identity_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withWorkloadIdentityConfigAutopilot(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + pid := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerCluster_withWorkloadIdentityConfigEnabledAutopilot(pid, clusterName), + }, + { + ResourceName: "google_container_cluster.with_workload_identity_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withIdentityServiceConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withIdentityServiceConfigEnabled(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withIdentityServiceConfigUpdated(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + 
+{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerCluster_withSecretManagerConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withSecretManagerConfigEnabled(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withSecretManagerConfigUpdated(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} +{{- end }} + +func TestAccContainerCluster_withLoggingConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, 
"gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withLoggingConfigEnabled(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withLoggingConfigDisabled(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withLoggingConfigUpdated(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabled(clusterName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabled(clusterName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabledOld(clusterName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabledOld(clusterName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withMonitoringConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigEnabled(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigDisabled(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigUpdated(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigPrometheusUpdated(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + // Back to basic settings to test setting Prometheus on its own + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigPrometheusOnly(clusterName, networkName, subnetworkName), + }, + { + ResourceName: 
"google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withMonitoringConfigPrometheusOnly2(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withSoleTenantGroup(t *testing.T) { + t.Parallel() + + resourceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withSoleTenantGroup(resourceName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withAutoscalingProfile(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("cluster-test-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutoscalingProfile(clusterName, "BALANCED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.autoscaling_with_profile", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withAutoscalingProfile(clusterName, "OPTIMIZE_UTILIZATION", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.autoscaling_with_profile", + ImportStateIdPrefix: "us-central1-a/", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withInvalidAutoscalingProfile(t *testing.T) { + // This is essentially a unit test, no interactions + acctest.SkipIfVcr(t) + t.Parallel() + clusterName := fmt.Sprintf("cluster-test-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withAutoscalingProfile(clusterName, "AS_CHEAP_AS_POSSIBLE", networkName, subnetworkName), + ExpectError: regexp.MustCompile(`expected cluster_autoscaling\.0\.autoscaling_profile to be one of \["?BALANCED"? 
"?OPTIMIZE_UTILIZATION"?\], got AS_CHEAP_AS_POSSIBLE`), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} + +func TestAccContainerCluster_sharedVpc(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + org := envvar.GetTestOrgFromEnv(t) + billingId := envvar.GetTestBillingAccountFromEnv(t) + projectName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + suffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_sharedVpc(org, billingId, projectName, clusterName, suffix), + }, + { + ResourceName: "google_container_cluster.shared_vpc_cluster", + ImportStateId: fmt.Sprintf("%s-service/us-central1-a/%s", projectName, clusterName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withBinaryAuthorizationEnabledBool(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withBinaryAuthorizationEnabledBool(clusterName, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_enabled_bool", + ImportState: true, + ImportStateVerify: true, 
+ ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withBinaryAuthorizationEnabledBool(clusterName, networkName, subnetworkName, false), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_enabled_bool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withBinaryAuthorizationEvaluationModeAutopilot(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName, true, "PROJECT_SINGLETON_POLICY_ENFORCE", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_evaluation_mode", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName, true, "DISABLED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_evaluation_mode", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withBinaryAuthorizationEvaluationModeClassic(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := 
acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName, false, "PROJECT_SINGLETON_POLICY_ENFORCE", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_evaluation_mode", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName, false, "DISABLED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_binary_authorization_evaluation_mode", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withFlexiblePodCIDR(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withFlexiblePodCIDR(containerNetName, clusterName), + }, + { + ResourceName: "google_container_cluster.with_flexible_cidr", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} +{{- end }} + +func TestAccContainerCluster_nodeAutoprovisioningDefaultsDiskSizeGb(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", 
acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + includeDiskSizeGb := true + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningDefaultsDiskSizeGb(clusterName, networkName, subnetworkName, includeDiskSizeGb), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioningDefaultsDiskSizeGb(clusterName, networkName, subnetworkName, !includeDiskSizeGb), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_nodeAutoprovisioningDefaultsDiskType(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + includeDiskType := true + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningDefaultsDiskType(clusterName, networkName, subnetworkName, includeDiskType), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioningDefaultsDiskType(clusterName, networkName, subnetworkName, !includeDiskType), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_nodeAutoprovisioningDefaultsImageType(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + includeImageType := true + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningDefaultsImageType(clusterName, networkName, subnetworkName, includeImageType), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioningDefaultsImageType(clusterName, networkName, subnetworkName, !includeImageType), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_nodeAutoprovisioningDefaultsBootDiskKmsKey(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + kms := 
acctest.BootstrapKMSKeyInLocation(t, "us-central1") + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningDefaultsBootDiskKmsKey(clusterName, kms.CryptoKey.Name, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.nap_boot_disk_kms_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "min_master_version", + "deletion_protection", + "node_pool", // cluster_autoscaling (node auto-provisioning) creates new node pools automatically + }, + }, + }, + }) +} + +func TestAccContainerCluster_nodeAutoprovisioningDefaultsShieldedInstance(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningDefaultsShieldedInstance(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.nap_shielded_instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", 
"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_autoprovisioningDefaultsManagement(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autoprovisioningDefaultsManagement(clusterName, networkName, subnetworkName, false, false), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning_management", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autoprovisioningDefaultsManagement(clusterName, networkName, subnetworkName, true, true), + }, + { + ResourceName: "google_container_cluster.with_autoprovisioning_management", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +// This resource originally cleaned up the dangling cluster directly, but now +// taints it, having Terraform clean it up during the next apply. This test +// name is now inexact, but is being preserved to maintain the test history. 
+func TestAccContainerCluster_errorCleanDanglingCluster(t *testing.T) { + acctest.SkipIfVcr(t) // skipped because the timeout step doesn't record operation GET interactions + t.Parallel() + + suffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", suffix) + clusterNameError := fmt.Sprintf("tf-test-cluster-err-%s", suffix) + clusterNameErrorWithTimeout := fmt.Sprintf("tf-test-cluster-timeout-%s", suffix) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + + initConfig := testAccContainerCluster_withInitialCIDR(containerNetName, clusterName) + overlapConfig := testAccContainerCluster_withCIDROverlap(initConfig, clusterNameError) + overlapConfigWithTimeout := testAccContainerCluster_withCIDROverlapWithTimeout(initConfig, clusterNameErrorWithTimeout, "1s") + + checkTaintApplied := func(st *terraform.State) error { + // Return an error if there is no tainted (i.e. marked for deletion) cluster. + ms := st.RootModule() + errCluster, ok := ms.Resources["google_container_cluster.cidr_error_overlap"] + if !ok { + var resourceNames []string + for rn := range ms.Resources { + resourceNames = append(resourceNames, rn) + } + return fmt.Errorf("could not find google_container_cluster.cidr_error_overlap in resources: %v", resourceNames) + } + if !errCluster.Primary.Tainted { + return fmt.Errorf("cluster with ID %s should be tainted, but is not", errCluster.Primary.ID) + } + return nil + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: initConfig, + }, + { + ResourceName: "google_container_cluster.cidr_error_preempt", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + // First attempt to create the overlapping cluster with no 
timeout, this should fail and taint the resource. + Config: overlapConfig, + ExpectError: regexp.MustCompile("Error waiting for creating GKE cluster"), + }, + { + // Check that the tainted resource is in the config. + Config: overlapConfig, + PlanOnly: true, + ExpectNonEmptyPlan: true, + Check: checkTaintApplied, + }, + { + // Next attempt to create the overlapping cluster with a 1s timeout. This will fail with a different error. + Config: overlapConfigWithTimeout, + ExpectError: regexp.MustCompile("timeout while waiting for state to become 'DONE'"), + }, + { + // Check that the tainted resource is in the config. + Config: overlapConfig, + PlanOnly: true, + ExpectNonEmptyPlan: true, + Check: checkTaintApplied, + }, + }, + }) +} + +func TestAccContainerCluster_errorNoClusterCreated(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withInvalidLocation("wonderland"), + ExpectError: regexp.MustCompile(`(Location "wonderland" does not exist)|(Permission denied on 'locations\/wonderland' \(or it may not exist\))`), + }, + }, + }) +} + +func TestAccContainerCluster_withExternalIpsConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + pid := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withExternalIpsConfig(pid, clusterName, 
networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.with_external_ips_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withExternalIpsConfig(pid, clusterName, networkName, subnetworkName, false), + }, + { + ResourceName: "google_container_cluster.with_external_ips_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withMeshCertificatesConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + pid := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withMeshCertificatesConfigEnabled(pid, clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_mesh_certificates_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_updateMeshCertificatesConfig(pid, clusterName, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.with_mesh_certificates_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_updateMeshCertificatesConfig(pid, clusterName, networkName, subnetworkName, false), + }, + { + ResourceName: 
"google_container_cluster.with_mesh_certificates_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"remove_default_node_pool", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withCostManagementConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + pid := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_updateCostManagementConfig(pid, clusterName, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.with_cost_management_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_updateCostManagementConfig(pid, clusterName, networkName, subnetworkName, false), + }, + { + ResourceName: "google_container_cluster.with_cost_management_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withDatabaseEncryption(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + // Use the bootstrapped KMS key so we can avoid creating keys needlessly + // as they will pile up in the project because they can not be completely + // deleted. 
Also, we need to create the key in the same location as the + // cluster as GKE does not support the "global" location for KMS keys. + // See https://cloud.google.com/kubernetes-engine/docs/how-to/encrypting-secrets#creating_a_key + kmsData := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withDatabaseEncryption(clusterName, kmsData, networkName, subnetworkName), + Check: resource.TestCheckResourceAttrSet("data.google_kms_key_ring_iam_policy.test_key_ring_iam_policy", "policy_data"), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withAdvancedDatapath(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withDatapathProvider(clusterName, "ADVANCED_DATAPATH", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_enableCiliumPolicies(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withDatapathProvider(clusterName, "ADVANCED_DATAPATH", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "enable_cilium_clusterwide_network_policy", "false"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_enableCiliumPolicies(clusterName, networkName, subnetworkName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.primary", "enable_cilium_clusterwide_network_policy", "true"), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_enableCiliumPolicies_withAutopilot(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", randomSuffix) + clusterNetName := fmt.Sprintf("tf-test-container-net-%s", randomSuffix) + clusterSubnetName := fmt.Sprintf("tf-test-container-subnet-%s", randomSuffix) + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_enableCiliumPolicies_withAutopilot(clusterName, clusterNetName, clusterSubnetName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autopilot", "enable_cilium_clusterwide_network_policy", "false"), + ), + }, + { + ResourceName: "google_container_cluster.with_autopilot", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_enableCiliumPolicies_withAutopilotUpdate(clusterName, clusterNetName, clusterSubnetName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.with_autopilot", "enable_cilium_clusterwide_network_policy", "true"), + ), + }, + { + ResourceName: "google_container_cluster.with_autopilot", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withResourceUsageExportConfig(t *testing.T) { + t.Parallel() + + suffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", suffix) + datasetId := fmt.Sprintf("tf_test_cluster_resource_usage_%s", suffix) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId, "true", networkName, subnetworkName), + }, + { + ResourceName: 
"google_container_cluster.with_resource_usage_export_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId, "false", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_resource_usage_export_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withResourceUsageExportConfigNoConfig(clusterName, datasetId, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_resource_usage_export_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withMasterAuthorizedNetworksDisabled(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withMasterAuthorizedNetworksDisabled(containerNetName, clusterName), + Check: resource.ComposeTestCheckFunc( + testAccContainerCluster_masterAuthorizedNetworksDisabled(t, "google_container_cluster.with_private_cluster"), + ), + }, + { + ResourceName: "google_container_cluster.with_private_cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withEnableKubernetesAlpha(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 
10)) + npName := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withEnableKubernetesAlpha(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withEnableKubernetesBetaAPIs(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withEnableKubernetesBetaAPIs(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withEnableKubernetesBetaAPIsOnExistingCluster(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withoutEnableKubernetesBetaAPIs(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withEnableKubernetesBetaAPIs(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withIncompatibleMasterVersionNodeVersion(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func(){acctest.AccTestPreCheck(t)}, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withIncompatibleMasterVersionNodeVersion(clusterName), + PlanOnly: true, + ExpectError: regexp.MustCompile(`Resource argument node_version`), + }, + }, + }) +} + +func TestAccContainerCluster_withIPv4Error(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withIPv4Error(clusterName), + ExpectError: 
regexp.MustCompile("master_ipv4_cidr_block can only be set if"), + }, + }, + }) +} + +func TestAccContainerCluster_withDNSConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + domainName := fmt.Sprintf("tf-test-domain-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_basic(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withDNSConfig(clusterName, "CLOUD_DNS", domainName, "VPC_SCOPE", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withGatewayApiConfig(t *testing.T) { + t.Parallel() + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withGatewayApiConfig(clusterName, "CANARY", networkName, subnetworkName), + ExpectError: 
regexp.MustCompile(`expected gateway_api_config\.0\.channel to be one of [^,]+, got CANARY`), + }, + { + Config: testAccContainerCluster_withGatewayApiConfig(clusterName, "CHANNEL_DISABLED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withGatewayApiConfig(clusterName, "CHANNEL_STANDARD", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerCluster_withTPUConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withTPUConfig(containerNetName, clusterName), + }, + { + ResourceName: "google_container_cluster.with_tpu_config", + ImportState: true, + ImportStateVerify: true, + // TODO: remove when tpu_config can be read from the API + ImportStateVerifyIgnore: []string{"tpu_config", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withProtectConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: 
func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withProtectConfig(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withProtectConfigUpdated(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} +{{- end }} + +func TestAccContainerCluster_withSecurityPostureConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_SetSecurityPostureToStandard(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_security_posture_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_SetSecurityPostureToEnterprise(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_security_posture_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + 
{ + Config: testAccContainerCluster_SetWorkloadVulnerabilityToStandard(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_security_posture_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_SetWorkloadVulnerabilityToEnterprise(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_security_posture_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_DisableALL(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_security_posture_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_withFleetConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + projectID := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withFleetConfig(clusterName, projectID, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withFleetConfig(clusterName, "random-project", networkName, subnetworkName), + ExpectError: regexp.MustCompile(`changing existing 
fleet host project is not supported`), + }, + { + Config: testAccContainerCluster_DisableFleet(clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerCluster_withWorkloadALTSConfig(t *testing.T) { + t.Parallel() + + networkName := "gke-cluster-alts" + subnetworkName := "gke-cluster-alts" + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + pid := envvar.GetTestProjectFromEnv() + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withWorkloadALTSConfig(pid, networkName, subnetworkName, clusterName, true), + }, + { + ResourceName: "google_container_cluster.with_workload_alts_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + Check: resource.TestCheckResourceAttr( + "google_container_cluster.with_workload_alts_config", "workload_alts_config.enable_alts", "true"), + }, + { + Config: testAccContainerCluster_withWorkloadALTSConfig(pid, networkName, subnetworkName, clusterName, false), + }, + { + ResourceName: "google_container_cluster.with_workload_alts_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + Check: resource.TestCheckResourceAttr( + "google_container_cluster.with_workload_alts_config", "workload_alts_config.enable_alts", "false"), + }, + }, + }) +} + +func TestAccContainerCluster_withWorkloadALTSConfigAutopilot(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + pid := 
envvar.GetTestProjectFromEnv() + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withWorkloadALTSConfigAutopilot(pid, clusterName, true), + }, + { + ResourceName: "google_container_cluster.with_workload_alts_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_cluster.with_workload_alts_config", "workload_identity_config.workload_pool", fmt.Sprintf("%s.svc.id.goog", pid)), + resource.TestCheckResourceAttr( + "google_container_cluster.with_workload_alts_config", "workload_alts_config.enable_alts", "true")), + }, + }, + }) +} +{{- end }} + +func testAccContainerCluster_withFleetConfig(name, projectID, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + fleet { + project = "%s" + } + + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, projectID, networkName, subnetworkName) +} + +func testAccContainerCluster_DisableFleet(resource_name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, resource_name, networkName, subnetworkName) +} + +func testAccContainerCluster_withIncompatibleMasterVersionNodeVersion(name string) string { + return fmt.Sprintf(` + resource "google_container_cluster" "gke_cluster" { + name = "%s" + location = "us-central1" + + min_master_version = "1.10.9-gke.5" + node_version = 
"1.10.6-gke.11" + initial_node_count = 1 + + } + `, name) +} + +func testAccContainerCluster_SetSecurityPostureToStandard(resource_name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_security_posture_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + security_posture_config { + mode = "BASIC" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, resource_name, networkName, subnetworkName) +} + +func testAccContainerCluster_SetSecurityPostureToEnterprise(resource_name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_security_posture_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + security_posture_config { + mode = "ENTERPRISE" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, resource_name, networkName, subnetworkName) +} + +func testAccContainerCluster_SetWorkloadVulnerabilityToStandard(resource_name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_security_posture_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + security_posture_config { + vulnerability_mode = "VULNERABILITY_BASIC" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, resource_name, networkName, subnetworkName) +} + +func testAccContainerCluster_SetWorkloadVulnerabilityToEnterprise(resource_name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_security_posture_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + security_posture_config { + vulnerability_mode = "VULNERABILITY_ENTERPRISE" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, resource_name, networkName, subnetworkName) +} + +func 
testAccContainerCluster_DisableALL(resource_name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_security_posture_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + security_posture_config { + mode = "DISABLED" + vulnerability_mode = "VULNERABILITY_DISABLED" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, resource_name, networkName, subnetworkName) +} + +func TestAccContainerCluster_autopilot_minimal(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autopilot_minimal(clusterName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_autopilot_net_admin(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_autopilot_net_admin(clusterName, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", 
"deletion_protection"}, + }, + { + Config: testAccContainerCluster_autopilot_net_admin(clusterName, networkName, subnetworkName, false), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_autopilot_net_admin(clusterName, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_additional_pod_ranges_config_on_create(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_additional_pod_ranges_config(clusterName, 1), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func TestAccContainerCluster_additional_pod_ranges_config_on_update(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_additional_pod_ranges_config(clusterName, 0), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_additional_pod_ranges_config(clusterName, 2), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_additional_pod_ranges_config(clusterName, 0), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_additional_pod_ranges_config(clusterName, 1), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_additional_pod_ranges_config(clusterName, 0), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_masterAuthorizedNetworksDisabled(t *testing.T, resource_name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resource_name] + if !ok { + return fmt.Errorf("can't find %s in state", resource_name) + } + + config := acctest.GoogleProviderConfig(t) + attributes := rs.Primary.Attributes + + cluster, err := config.NewContainerClient(config.UserAgent).Projects.Zones.Clusters.Get( + config.Project, attributes["location"], attributes["name"]).Do() + if err != nil { + return err + } + + if cluster.MasterAuthorizedNetworksConfig.Enabled { + return fmt.Errorf("Cluster's master authorized networks config is enabled, but expected to be disabled.") + } + + return nil + } +} + +func testAccCheckContainerClusterDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s 
*terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_container_cluster" { + continue + } + + attributes := rs.Primary.Attributes + _, err := config.NewContainerClient(config.UserAgent).Projects.Locations.Clusters.Get( + fmt.Sprintf("projects/%s/locations/%s/clusters/%s", config.Project, attributes["location"], attributes["name"])).Do() + if err == nil { + return fmt.Errorf("Cluster still exists") + } + } + + return nil + } +} + +func testAccContainerCluster_basic(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_networkingModeRoutes(firstName, secondName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + networking_mode = "ROUTES" + deletion_protection = false +} + +resource "google_container_cluster" "secondary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + cluster_ipv4_cidr = "10.96.0.0/14" + deletion_protection = false + } +`, firstName, secondName) +} + +func testAccContainerCluster_misc(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + remove_default_node_pool = true + + node_locations = [ + "us-central1-b", + "us-central1-c", + ] + + enable_legacy_abac = true + + resource_labels = { + created-by = "terraform" + } + + vertical_pod_autoscaling { + enabled = true + } + + binary_authorization { + evaluation_mode = "PROJECT_SINGLETON_POLICY_ENFORCE" + } +{{- if ne $.TargetVersionName "ga" }} + enable_intranode_visibility 
= true +{{- end }} + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_misc_update(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + remove_default_node_pool = true # Not worth updating + + node_locations = [ + "us-central1-f", + "us-central1-c", + ] + + enable_legacy_abac = false + + resource_labels = { + created-by = "terraform-update" + new-label = "update" + } + + vertical_pod_autoscaling { + enabled = true + } + + binary_authorization { + evaluation_mode = "PROJECT_SINGLETON_POLICY_ENFORCE" + } +{{- if ne $.TargetVersionName "ga" }} + enable_intranode_visibility = true +{{- end }} + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withAddons(projectID, clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + min_master_version = "latest" + + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + + addons_config { + http_load_balancing { + disabled = true + } + horizontal_pod_autoscaling { + disabled = true + } + network_policy_config { + disabled = true + } + gcp_filestore_csi_driver_config { + enabled = false + } + cloudrun_config { + disabled = true + } + dns_cache_config { + enabled = false + } + gce_persistent_disk_csi_driver_config { + enabled = false + } + gke_backup_agent_config { + enabled = false + } + config_connector_config { + enabled = false + } + gcs_fuse_csi_driver_config { + enabled = false + } + stateful_ha_config { + enabled = false + } +{{- if ne $.TargetVersionName 
"ga" }} + istio_config { + disabled = true + auth = "AUTH_MUTUAL_TLS" + } + kalm_config { + enabled = false + } +{{- end }} + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, projectID, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_updateAddons(projectID, clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + min_master_version = "latest" + + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + + addons_config { + http_load_balancing { + disabled = false + } + horizontal_pod_autoscaling { + disabled = false + } + network_policy_config { + disabled = false + } + gcp_filestore_csi_driver_config { + enabled = true + } + cloudrun_config { + # https://github.com/hashicorp/terraform-provider-google/issues/11943 + # disabled = false + disabled = true + } + dns_cache_config { + enabled = true + } + gce_persistent_disk_csi_driver_config { + enabled = true + } + gke_backup_agent_config { + enabled = true + } + config_connector_config { + enabled = true + } + gcs_fuse_csi_driver_config { + enabled = true + } + stateful_ha_config { + enabled = true + } +{{- if ne $.TargetVersionName "ga" }} + istio_config { + disabled = false + auth = "AUTH_NONE" + } + kalm_config { + enabled = true + } +{{- end }} + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, projectID, clusterName, networkName, subnetworkName) +} + +// Issue with cloudrun_config addon: https://github.com/hashicorp/terraform-provider-google/issues/11943/ +// func testAccContainerCluster_withInternalLoadBalancer(projectID string, clusterName, networkName, subnetworkName string) string { +// return fmt.Sprintf(` +// data "google_project" "project" { +// project_id = "%s" +// } + +// 
resource "google_container_cluster" "primary" { +// name = "%s" +// location = "us-central1-a" +// initial_node_count = 1 + +// min_master_version = "latest" + +// workload_identity_config { +// workload_pool = "${data.google_project.project.project_id}.svc.id.goog" +// } + +// addons_config { +// http_load_balancing { +// disabled = false +// } +// horizontal_pod_autoscaling { +// disabled = false +// } +// network_policy_config { +// disabled = false +// } +// cloudrun_config { +// disabled = false +// load_balancer_type = "LOAD_BALANCER_TYPE_INTERNAL" +// } +// } +// deletion_protection = false +// network = "%s" +// subnetwork = "%s" +// } +// `, projectID, clusterName, networkName, subnetworkName) +// } + +func testAccContainerCluster_withNotificationConfig(clusterName, topic, networkName, subnetworkName string) string { + return fmt.Sprintf(` + +resource "google_pubsub_topic" "%s" { + name = "%s" +} + +resource "google_container_cluster" "notification_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + notification_config { + pubsub { + enabled = true + topic = google_pubsub_topic.%s.id + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, topic, topic, clusterName, topic, networkName, subnetworkName) +} + +func testAccContainerCluster_disableNotificationConfig(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "notification_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + notification_config { + pubsub { + enabled = false + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withFilteredNotificationConfig(clusterName, topic, networkName, subnetworkName string) string { + + return fmt.Sprintf(` + +resource "google_pubsub_topic" "%s" { + name = "%s" +} + +resource "google_container_cluster" 
"filtered_notification_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + notification_config { + pubsub { + enabled = true + topic = google_pubsub_topic.%s.id + filter { + event_type = ["UPGRADE_EVENT", "SECURITY_BULLETIN_EVENT"] + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, topic, topic, clusterName, topic, networkName, subnetworkName) +} + +func testAccContainerCluster_withFilteredNotificationConfigUpdate(clusterName, topic, networkName, subnetworkName string) string { + + return fmt.Sprintf(` + +resource "google_pubsub_topic" "%s" { + name = "%s" +} + +resource "google_container_cluster" "filtered_notification_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + notification_config { + pubsub { + enabled = true + topic = google_pubsub_topic.%s.id + filter { + event_type = ["UPGRADE_AVAILABLE_EVENT"] + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, topic, topic, clusterName, topic, networkName, subnetworkName) +} + +func testAccContainerCluster_disableFilteredNotificationConfig(clusterName, topic, networkName, subnetworkName string) string { + + return fmt.Sprintf(` + +resource "google_pubsub_topic" "%s" { + name = "%s" +} + +resource "google_container_cluster" "filtered_notification_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + notification_config { + pubsub { + enabled = true + topic = google_pubsub_topic.%s.id + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, topic, topic, clusterName, topic, networkName, subnetworkName) +} + +func testAccContainerCluster_withConfidentialNodes(clusterName, npName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "confidential_nodes" { + name = "%s" + location = "us-central1-a" + release_channel { + channel = "RAPID" + } + + node_pool { + name = "%s" + initial_node_count = 1 + 
node_config { + machine_type = "n2d-standard-2" // can't be e2 because Confidential Nodes require AMD CPUs + } + } + + confidential_nodes { + enabled = true + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, npName, networkName, subnetworkName) +} + +func testAccContainerCluster_disableConfidentialNodes(clusterName, npName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "confidential_nodes" { + name = "%s" + location = "us-central1-a" + release_channel { + channel = "RAPID" + } + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + machine_type = "n2d-standard-2" + } + } + + confidential_nodes { + enabled = false + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, npName, networkName, subnetworkName) +} + +func testAccContainerCluster_withILBSubSetting(clusterName, npName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "confidential_nodes" { + name = "%s" + location = "us-central1-a" + release_channel { + channel = "RAPID" + } + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + machine_type = "e2-medium" + } + } + + enable_l4_ilb_subsetting = true + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, npName, networkName, subnetworkName) +} + +func testAccContainerCluster_disableILBSubSetting(clusterName, npName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "confidential_nodes" { + name = "%s" + location = "us-central1-a" + release_channel { + channel = "RAPID" + } + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + machine_type = "e2-medium" + } + } + + enable_l4_ilb_subsetting = false + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, npName, networkName, subnetworkName) +} + +func 
testAccContainerCluster_withNetworkPolicyEnabled(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_network_policy_enabled" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + remove_default_node_pool = true + + network_policy { + enabled = true + provider = "CALICO" + } + + addons_config { + network_policy_config { + disabled = false + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withDeletionProtection(clusterName, networkName, subnetworkName, deletionProtection string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + deletion_protection = %s + network = "%s" + subnetwork = "%s" +} +`, clusterName, deletionProtection, networkName, subnetworkName) +} + +func testAccContainerCluster_withReleaseChannelEnabled(clusterName, channel, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_release_channel" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + release_channel { + channel = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, channel, networkName, subnetworkName) +} + +func testAccContainerCluster_withReleaseChannelEnabledDefaultVersion(clusterName, channel, networkName, subnetworkName string) string { + return fmt.Sprintf(` + +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_release_channel" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.release_channel_default_version["%s"] + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, 
channel, networkName, subnetworkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withTelemetryEnabled(clusterName, telemetryType, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_cluster_telemetry" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + cluster_telemetry { + type = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, telemetryType, networkName, subnetworkName) +} +{{- end }} + +func testAccContainerCluster_removeNetworkPolicy(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_network_policy_enabled" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + remove_default_node_pool = true + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNetworkPolicyDisabled(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_network_policy_enabled" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + remove_default_node_pool = true + + network_policy { + enabled = false + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNetworkPolicyConfigDisabled(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_network_policy_enabled" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + remove_default_node_pool = true + + network_policy { + enabled = false + } + + addons_config { + network_policy_config { + disabled = true + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + 
+func testAccContainerCluster_withAuthenticatorGroupsConfigUpdate(name, orgDomain, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + authenticator_groups_config { + security_group = "gke-security-groups@%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, orgDomain, networkName, subnetworkName) +} + +func testAccContainerCluster_withAuthenticatorGroupsConfigUpdate2(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + authenticator_groups_config { + security_group = "" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + + +func testAccContainerCluster_withMasterAuthorizedNetworksConfig(clusterName string, cidrs []string, emptyValue string) string { + + cidrBlocks := emptyValue + if len(cidrs) > 0 { + var buf bytes.Buffer + for _, c := range cidrs { + buf.WriteString(fmt.Sprintf(` + cidr_blocks { + cidr_block = "%s" + display_name = "disp-%s" + }`, c, c)) + } + cidrBlocks = buf.String() + } + + return fmt.Sprintf(` +resource "google_container_cluster" "with_master_authorized_networks" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + master_authorized_networks_config { + %s + } + deletion_protection = false +} +`, clusterName, cidrBlocks) +} + +func testAccContainerCluster_removeMasterAuthorizedNetworksConfig(clusterName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_master_authorized_networks" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false +} +`, clusterName) +} + +func testAccContainerCluster_regional(clusterName, networkName, subnetworkName string) string { + return 
fmt.Sprintf(` +resource "google_container_cluster" "regional" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func TestAccContainerCluster_withPrivateEndpointSubnetwork(t *testing.T) { + t.Parallel() + + r := acctest.RandString(t, 10) + + subnet1Name := fmt.Sprintf("tf-test-container-subnetwork1-%s", r) + subnet1Cidr := "10.0.36.0/24" + + subnet2Name := fmt.Sprintf("tf-test-container-subnetwork2-%s", r) + subnet2Cidr := "10.9.26.0/24" + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", r) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withPrivateEndpointSubnetwork(containerNetName, clusterName, subnet1Name, subnet1Cidr, subnet2Name, subnet2Cidr), + }, + { + ResourceName: "google_container_cluster.with_private_endpoint_subnetwork", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_withPrivateEndpointSubnetwork(containerNetName, clusterName, s1Name, s1Cidr, s2Name, s2Cidr string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork1" { + name = "%s" + network = google_compute_network.container_network.name + ip_cidr_range = "%s" + region = "us-central1" + private_ip_google_access = true +} + +resource "google_compute_subnetwork" 
"container_subnetwork2" { + name = "%s" + network = google_compute_network.container_network.name + ip_cidr_range = "%s" + region = "us-central1" + private_ip_google_access = true +} + +resource "google_container_cluster" "with_private_endpoint_subnetwork" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork1.name + + private_cluster_config { + private_endpoint_subnetwork = google_compute_subnetwork.container_subnetwork2.name + } + deletion_protection = false +} +`, containerNetName, s1Name, s1Cidr, s2Name, s2Cidr, clusterName) +} + +func TestAccContainerCluster_withPrivateClusterConfigPrivateEndpointSubnetwork(t *testing.T) { + t.Parallel() + + r := acctest.RandString(t, 10) + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", r) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withPrivateClusterConfigPrivateEndpointSubnetwork(containerNetName, clusterName), + }, + { + ResourceName: "google_container_cluster.with_private_endpoint_subnetwork", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_withPrivateClusterConfigPrivateEndpointSubnetwork(containerNetName, clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" 
"container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "with_private_endpoint_subnetwork" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + networking_mode = "VPC_NATIVE" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + private_cluster_config { + enable_private_nodes = true + enable_private_endpoint = true + private_endpoint_subnetwork = google_compute_subnetwork.container_subnetwork.name + } + master_authorized_networks_config { + gcp_public_cidrs_access_enabled = false + } + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + deletion_protection = false +} +`, containerNetName, clusterName) +} + +func TestAccContainerCluster_withCidrBlockWithoutPrivateEndpointSubnetwork(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + containerNetName := fmt.Sprintf("tf-test-container-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withCidrBlockWithoutPrivateEndpointSubnetwork(containerNetName, clusterName, "us-central1-a"), + }, + { + ResourceName: 
"google_container_cluster.with_private_flexible_cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_withCidrBlockWithoutPrivateEndpointSubnetwork(containerNetName, clusterName, location string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" +} + +resource "google_container_cluster" "with_private_flexible_cluster" { + name = "%s" + location = "%s" + min_master_version = "1.29" + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + private_cluster_config { + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } + deletion_protection = false +} +`, containerNetName, clusterName, location) +} + +func TestAccContainerCluster_withEnablePrivateEndpointToggle(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withEnablePrivateEndpoint(clusterName, "true", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_enable_private_endpoint", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + { + Config: testAccContainerCluster_withEnablePrivateEndpoint(clusterName, "false", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_enable_private_endpoint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_withEnablePrivateEndpoint(clusterName, flag, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_enable_private_endpoint" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + + master_authorized_networks_config { + gcp_public_cidrs_access_enabled = false + } + + private_cluster_config { + enable_private_endpoint = %s + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, flag, networkName, subnetworkName) +} + +func testAccContainerCluster_regionalWithNodePool(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "regional" { + name = "%s" + location = "us-central1" + + node_pool { + name = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, nodePool, networkName, subnetworkName) +} + +func testAccContainerCluster_regionalNodeLocations(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_locations" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + node_locations = [ + "us-central1-f", + "us-central1-c", + ] + deletion_protection = false + network = "%s" + subnetwork = 
"%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_regionalUpdateNodeLocations(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_locations" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + node_locations = [ + "us-central1-f", + "us-central1-b", + ] + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withTpu(containerNetName string, clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + region = "us-central1" + + ip_cidr_range = "10.0.35.0/24" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.1.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.2.0.0/22" + } +} + +resource "google_container_cluster" "with_tpu" { + name = "%s" + location = "us-central1-b" + initial_node_count = 1 + + enable_tpu = true + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + networking_mode = "VPC_NATIVE" + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } + + master_authorized_networks_config { + } + + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + deletion_protection = false +} +`, 
containerNetName, clusterName) +} + +{{ end }} +func testAccContainerCluster_withIntraNodeVisibility(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_intranode_visibility" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + enable_intranode_visibility = true + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_updateIntraNodeVisibility(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_intranode_visibility" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + enable_intranode_visibility = false + private_ipv6_google_access = "PRIVATE_IPV6_GOOGLE_ACCESS_BIDIRECTIONAL" + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withVersion(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_version" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withLowerVersion(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_version" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.central1a.release_channel_default_version["STABLE"] + node_version = 
data.google_container_engine_versions.central1a.release_channel_default_version["STABLE"] + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withMasterAuthNoCert(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_master_auth_no_cert" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + master_auth { + client_certificate_config { + issue_client_certificate = false + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_updateVersion(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_version" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.central1a.release_channel_latest_version["STABLE"] + node_version = data.google_container_engine_versions.central1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodeConfig(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_config" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + disk_type = "pd-ssd" + local_ssd_count = 1 + oauth_scopes = [ + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + 
"https://www.googleapis.com/auth/logging.write", + ] + service_account = "default" + metadata = { + foo = "bar" + disable-legacy-endpoints = "true" + } + labels = { + foo = "bar" + } + tags = ["foo", "bar"] + preemptible = true + min_cpu_platform = "Intel Broadwell" + + taint { + key = "taint_key" + value = "taint_value" + effect = "PREFER_NO_SCHEDULE" + } + + taint { + key = "taint_key2" + value = "taint_value2" + effect = "NO_EXECUTE" + } + + // Updatable fields + image_type = "COS_CONTAINERD" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withLoggingVariantInNodeConfig(clusterName, loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant_in_node_config" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_config { + logging_variant = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, loggingVariant, networkName, subnetworkName) +} + +func testAccContainerCluster_withLoggingVariantInNodePool(clusterName, nodePoolName, loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant_in_node_pool" { + name = "%s" + location = "us-central1-f" + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + logging_variant = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, nodePoolName, loggingVariant, networkName, subnetworkName) +} + +func testAccContainerCluster_withLoggingVariantNodePoolDefault(clusterName, loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant_node_pool_default" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_pool_defaults { + 
node_config_defaults { + logging_variant = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, loggingVariant, networkName, subnetworkName) +} + +func testAccContainerCluster_withAdvancedMachineFeaturesInNodePool(clusterName, nodePoolName, networkName, subnetworkName string, nvEnabled bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_advanced_machine_features_in_node_pool" { + name = "%s" + location = "us-central1-f" + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + machine_type = "c2-standard-4" + advanced_machine_features { + threads_per_core = 1 + enable_nested_virtualization = "%t" + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, nodePoolName, nvEnabled, networkName, subnetworkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withNodePoolDefaults(clusterName, enabled, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_defaults" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_pool_defaults { + node_config_defaults { + gcfs_config { + enabled = "%s" + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, enabled, networkName, subnetworkName) +} +{{- end }} + +func testAccContainerCluster_withNodeConfigUpdate(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_config" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + disk_type = "pd-ssd" + local_ssd_count = 1 + oauth_scopes = [ + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + 
"https://www.googleapis.com/auth/logging.write", + ] + service_account = "default" + metadata = { + foo = "bar" + disable-legacy-endpoints = "true" + } + labels = { + foo = "bar" + } + tags = ["foo", "bar"] + preemptible = true + min_cpu_platform = "Intel Broadwell" + + taint { + key = "taint_key" + value = "taint_value" + effect = "PREFER_NO_SCHEDULE" + } + + taint { + key = "taint_key2" + value = "taint_value2" + effect = "NO_EXECUTE" + } + + // Updatable fields + image_type = "UBUNTU_CONTAINERD" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodeConfigScopeAlias(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_config_scope_alias" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_config { + machine_type = "e2-medium" + disk_size_gb = 15 + oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodeConfigShieldedInstanceConfig(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_config" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_config { + machine_type = "e2-medium" + disk_size_gb = 15 + disk_type = "pd-ssd" + oauth_scopes = [ + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + ] + service_account = "default" + metadata = { + foo = "bar" + disable-legacy-endpoints = "true" + } + labels = { + foo = "bar" + } + tags = ["foo", "bar"] + preemptible = true + + // Updatable fields + image_type = "COS_CONTAINERD" + + 
shielded_instance_config { + enable_secure_boot = true + enable_integrity_monitoring = true + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodeConfigReservationAffinity(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_config" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_config { + machine_type = "e2-medium" + disk_size_gb = 15 + disk_type = "pd-ssd" + oauth_scopes = [ + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + ] + service_account = "default" + metadata = { + foo = "bar" + disable-legacy-endpoints = "true" + } + labels = { + foo = "bar" + } + tags = ["foo", "bar"] + preemptible = true + + // Updatable fields + image_type = "COS_CONTAINERD" + + reservation_affinity { + consume_reservation_type = "ANY_RESERVATION" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodeConfigReservationAffinitySpecific(reservation, clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` + +resource "google_project_service" "compute" { + service = "compute.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "container" { + service = "container.googleapis.com" + disable_on_destroy = false + depends_on = [google_project_service.compute] +} + + +resource "google_compute_reservation" "gce_reservation" { + name = "%s" + zone = "us-central1-f" + + specific_reservation { + count = 1 + instance_properties { + machine_type = "n1-standard-1" + } + } + + specific_reservation_required = true + depends_on = [google_project_service.compute] +} + 
+resource "google_container_cluster" "with_node_config" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-1" + disk_size_gb = 15 + disk_type = "pd-ssd" + oauth_scopes = [ + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + ] + service_account = "default" + metadata = { + foo = "bar" + disable-legacy-endpoints = "true" + } + labels = { + foo = "bar" + } + tags = ["foo", "bar"] + + // Updatable fields + image_type = "COS_CONTAINERD" + + reservation_affinity { + consume_reservation_type = "SPECIFIC_RESERVATION" + key = "compute.googleapis.com/reservation-name" + values = [ + google_compute_reservation.gce_reservation.name + ] + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" + depends_on = [google_project_service.container] +} +`, reservation, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withWorkloadMetadataConfig(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_workload_metadata_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + + workload_metadata_config { + mode = "GCE_METADATA" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func testAccContainerCluster_withSandboxConfig(clusterName, networkName, subnetworkName string) 
string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_sandbox_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of gvisor + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + + image_type = "COS_CONTAINERD" + + sandbox_config { + sandbox_type = "gvisor" + } + + labels = { + "test.terraform.io/gke-sandbox" = "true" + } + + taint { + key = "test.terraform.io/gke-sandbox" + value = "true" + effect = "NO_SCHEDULE" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withSandboxConfig_changeLabels(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_sandbox_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of gvisor + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + + image_type = "COS_CONTAINERD" + + sandbox_config { + sandbox_type = "gvisor" + } + + labels = { + "test.terraform.io/gke-sandbox" = "true" + "test.terraform.io/gke-sandbox-amended" = "also-true" + } + + taint { + key = "test.terraform.io/gke-sandbox" + value = "true" + effect = "NO_SCHEDULE" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) 
+} +{{- end }} + +func testAccContainerCluster_withBootDiskKmsKey(clusterName, kmsKeyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_boot_disk_kms_key" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + release_channel { + channel = "RAPID" + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + + image_type = "COS_CONTAINERD" + + boot_disk_kms_key = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, kmsKeyName, networkName, subnetworkName) +} + +func testAccContainerCluster_networkRef(cluster, network string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = true +} + +resource "google_container_cluster" "with_net_ref_by_url" { + name = "%s-url" + location = "us-central1-a" + initial_node_count = 1 + + network = google_compute_network.container_network.self_link + deletion_protection = false +} + +resource "google_container_cluster" "with_net_ref_by_name" { + name = "%s-name" + location = "us-central1-a" + initial_node_count = 1 + + network = google_compute_network.container_network.name + deletion_protection = false +} +`, network, cluster, cluster) +} + +func testAccContainerCluster_autoprovisioningDefaultsManagement(clusterName, networkName, subnetworkName string, autoUpgrade, autoRepair bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_autoprovisioning_management" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + cluster_autoscaling { + enabled = true + + resource_limits { + resource_type = "cpu" + maximum = 2 + } + + resource_limits { + resource_type = "memory" + maximum = 2048 + } + + auto_provisioning_defaults { + management { + auto_upgrade = %t + auto_repair = %t + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, 
clusterName, autoUpgrade, autoRepair, networkName, subnetworkName) +} + +func testAccContainerCluster_backendRef(cluster, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "my-backend-service" { + name = "%s-backend" + port_name = "http" + protocol = "HTTP" + + backend { + group = element(google_container_cluster.primary.node_pool[0].managed_instance_group_urls, 1) + } + + health_checks = [google_compute_http_health_check.default.self_link] +} + +resource "google_compute_http_health_check" "default" { + name = "%s-hc" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + + node_locations = [ + "us-central1-b", + "us-central1-c", + ] + + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, cluster, cluster, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolBasic(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1-a" + deletion_protection = false + + node_pool { + name = "%s" + initial_node_count = 2 + } + + network = "%s" + subnetwork = "%s" +} +`, cluster, nodePool, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolLowerVersion(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1-a" + + min_master_version = 
data.google_container_engine_versions.central1a.latest_master_version + + node_pool { + name = "%s" + initial_node_count = 2 + version = data.google_container_engine_versions.central1a.valid_node_versions[2] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, nodePool, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolUpdateVersion(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1-a" + + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + + node_pool { + name = "%s" + initial_node_count = 2 + version = data.google_container_engine_versions.central1a.valid_node_versions[1] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, nodePool, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolNodeLocations(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1-a" + + node_locations = [ + "us-central1-b", + "us-central1-c", + ] + + node_pool { + name = "%s" + node_count = 2 + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, nodePool, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolResize(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1-a" + + node_locations = [ + "us-central1-b", + "us-central1-c", + ] + + node_pool { + name = "%s" + node_count = 3 + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, nodePool, networkName, subnetworkName) +} + 
+func testAccContainerCluster_withAutoscalingProfile(cluster, autoscalingProfile, networkName, subnetworkName string) string { + config := fmt.Sprintf(` +resource "google_container_cluster" "autoscaling_with_profile" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + cluster_autoscaling { + enabled = false + autoscaling_profile = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, autoscalingProfile, networkName, subnetworkName) + return config +} + +func testAccContainerCluster_autoprovisioning(cluster, networkName, subnetworkName string, autoprovisioning, withNetworkTag bool) string { + config := fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_autoprovisioning" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +`, cluster, networkName, subnetworkName) + if autoprovisioning { + config += ` + cluster_autoscaling { + enabled = true + resource_limits { + resource_type = "cpu" + maximum = 2 + } + resource_limits { + resource_type = "memory" + maximum = 2048 + } + }` + } else { + config += ` + cluster_autoscaling { + enabled = false + }` + } + if withNetworkTag { + config += ` + node_pool_auto_config { + network_tags { + tags = ["test-network-tag"] + } + }` + } + config += ` +}` + return config +} + +func testAccContainerCluster_autoprovisioningDefaults(cluster, networkName, subnetworkName string, monitoringWrite bool) string { + config := fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_autoprovisioning" { + name = "%s" + location = "us-central1-a" + min_master_version = 
data.google_container_engine_versions.central1a.latest_master_version + initial_node_count = 1 + deletion_protection = false + + network = "%s" + subnetwork = "%s" + + logging_service = "none" + monitoring_service = "none" + + cluster_autoscaling { + enabled = true + resource_limits { + resource_type = "cpu" + maximum = 2 + } + resource_limits { + resource_type = "memory" + maximum = 2048 + } + + auto_provisioning_defaults { + oauth_scopes = [ + "https://www.googleapis.com/auth/pubsub", + "https://www.googleapis.com/auth/devstorage.read_only",`, + cluster, networkName, subnetworkName) + +if monitoringWrite { + config += ` + "https://www.googleapis.com/auth/monitoring.write", +` + } + config += ` + ] + } + } +}` + return config +} + +func testAccContainerCluster_autoprovisioningDefaultsMinCpuPlatform(cluster, networkName, subnetworkName string, includeMinCpuPlatform bool) string { + minCpuPlatformCfg := "" + if includeMinCpuPlatform { + minCpuPlatformCfg = `min_cpu_platform = "Intel Haswell"` + } + + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_autoprovisioning" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + network = "%s" + subnetwork = "%s" + + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + + cluster_autoscaling { + enabled = true + + resource_limits { + resource_type = "cpu" + maximum = 2 + } + resource_limits { + resource_type = "memory" + maximum = 2048 + } + + auto_provisioning_defaults { + %s + } + } + deletion_protection = false +} +`, cluster, networkName, subnetworkName, minCpuPlatformCfg) +} + +func testAccContainerCluster_autoprovisioningDefaultsUpgradeSettings(clusterName, networkName, subnetworkName string, maxSurge, maxUnavailable int, strategy string) string { + blueGreenSettings := "" + if strategy == "BLUE_GREEN" { + blueGreenSettings = ` + blue_green_settings { + 
node_pool_soak_duration = "3.500s" + standard_rollout_policy { + batch_percentage = 0.5 + batch_soak_duration = "3.500s" + } + } + ` + } + + return fmt.Sprintf(` + resource "google_container_cluster" "with_autoprovisioning_upgrade_settings" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + cluster_autoscaling { + enabled = true + + resource_limits { + resource_type = "cpu" + maximum = 2 + } + + resource_limits { + resource_type = "memory" + maximum = 2048 + } + + auto_provisioning_defaults { + upgrade_settings { + max_surge = %d + max_unavailable = %d + strategy = "%s" + %s + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" + } + `, clusterName, maxSurge, maxUnavailable, strategy, blueGreenSettings, networkName, subnetworkName) +} + +func testAccContainerCluster_autoprovisioningDefaultsUpgradeSettingsWithBlueGreenStrategy(clusterName, networkName, subnetworkName string, duration, strategy string) string { + return fmt.Sprintf(` + resource "google_container_cluster" "with_autoprovisioning_upgrade_settings" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + + cluster_autoscaling { + enabled = true + + resource_limits { + resource_type = "cpu" + maximum = 2 + } + + resource_limits { + resource_type = "memory" + maximum = 2048 + } + + auto_provisioning_defaults { + upgrade_settings { + strategy = "%s" + blue_green_settings { + node_pool_soak_duration = "%s" + standard_rollout_policy { + batch_percentage = 0.5 + batch_soak_duration = "%s" + } + } + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" + } + `, clusterName, strategy, duration, duration, networkName, subnetworkName) +} + +func testAccContainerCluster_autoprovisioningDefaultsDiskSizeGb(cluster, networkName, subnetworkName string, includeDiskSizeGb bool) string { + DiskSizeGbCfg := "" + if includeDiskSizeGb { + DiskSizeGbCfg = `disk_size = 120` + } + + return fmt.Sprintf(` +data "google_container_engine_versions" 
"central1a" { + location = "us-central1-a" +} +resource "google_container_cluster" "with_autoprovisioning" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + cluster_autoscaling { + enabled = true + resource_limits { + resource_type = "cpu" + maximum = 2 + } + resource_limits { + resource_type = "memory" + maximum = 2048 + } + auto_provisioning_defaults { + %s + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, DiskSizeGbCfg, networkName, subnetworkName) +} + +func testAccContainerCluster_autoprovisioningDefaultsDiskType(cluster, networkName, subnetworkName string, includeDiskType bool) string { + DiskTypeCfg := "" + if includeDiskType { + DiskTypeCfg = `disk_type = "pd-balanced"` + } + + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} +resource "google_container_cluster" "with_autoprovisioning" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + cluster_autoscaling { + enabled = true + resource_limits { + resource_type = "cpu" + maximum = 2 + } + resource_limits { + resource_type = "memory" + maximum = 2048 + } + auto_provisioning_defaults { + %s + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, DiskTypeCfg, networkName, subnetworkName) +} + +func testAccContainerCluster_autoprovisioningDefaultsImageType(cluster, networkName, subnetworkName string, includeImageType bool) string { + imageTypeCfg := "" + if includeImageType { + imageTypeCfg = `image_type = "COS_CONTAINERD"` + } + + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} +resource "google_container_cluster" "with_autoprovisioning" { + name = "%s" + location = "us-central1-a" + 
initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + cluster_autoscaling { + enabled = true + resource_limits { + resource_type = "cpu" + maximum = 2 + } + resource_limits { + resource_type = "memory" + maximum = 2048 + } + auto_provisioning_defaults { + %s + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, imageTypeCfg, networkName, subnetworkName) +} + +func testAccContainerCluster_autoprovisioningDefaultsBootDiskKmsKey(clusterName, kmsKeyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "nap_boot_disk_kms_key" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + release_channel { + channel = "RAPID" + } + cluster_autoscaling { + enabled = true + resource_limits { + resource_type = "cpu" + maximum = 2 + } + resource_limits { + resource_type = "memory" + maximum = 2048 + } + auto_provisioning_defaults { + boot_disk_kms_key = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, kmsKeyName, networkName, subnetworkName) +} + +func testAccContainerCluster_autoprovisioningDefaultsShieldedInstance(cluster, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} +resource "google_container_cluster" "nap_shielded_instance" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + cluster_autoscaling { + enabled = true + resource_limits { + resource_type = "cpu" + maximum = 2 + } + resource_limits { + resource_type = "memory" + maximum = 2048 + } + auto_provisioning_defaults { + shielded_instance_config { + enable_integrity_monitoring = true + enable_secure_boot = true + } + } + } + deletion_protection = false + network = "%s" + subnetwork = 
"%s" +} +`, cluster, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolAutoscaling(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1-a" + + node_pool { + name = "%s" + initial_node_count = 2 + autoscaling { + min_node_count = 1 + max_node_count = 3 + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, np, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolUpdateAutoscaling(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1-a" + + node_pool { + name = "%s" + initial_node_count = 2 + autoscaling { + min_node_count = 1 + max_node_count = 5 + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, np, networkName, subnetworkName) +} + +func testAccContainerRegionalCluster_withNodePoolCIA(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + + node_pool { + name = "%s" + initial_node_count = 2 + autoscaling { + total_min_node_count = 3 + total_max_node_count = 21 + location_policy = "BALANCED" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, np, networkName, subnetworkName) +} + +func testAccContainerRegionalClusterUpdate_withNodePoolCIA(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource 
"google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + + node_pool { + name = "%s" + initial_node_count = 2 + autoscaling { + total_min_node_count = 4 + total_max_node_count = 32 + location_policy = "ANY" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, np, networkName, subnetworkName) +} + +func testAccContainerRegionalCluster_withNodePoolBasic(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_node_pool" { + name = "%s" + location = "us-central1" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + + node_pool { + name = "%s" + initial_node_count = 2 + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, nodePool, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolNamePrefix(cluster, npPrefix, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_name_prefix" { + name = "%s" + location = "us-central1-a" + + node_pool { + name_prefix = "%s" + node_count = 2 + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, npPrefix, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolMultiple(cluster, npPrefix, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_multiple" { + name = "%s" + location = "us-central1-a" + + node_pool { + name = "%s-one" + node_count = 2 + } + + node_pool { + name = "%s-two" + node_count = 3 + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, npPrefix, 
npPrefix, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolConflictingNameFields(cluster, npPrefix string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_multiple" { + name = "%s" + location = "us-central1-a" + + node_pool { + # ERROR: name and name_prefix cannot be both specified + name = "%s-notok" + name_prefix = "%s" + node_count = 1 + } + deletion_protection = false +} +`, cluster, npPrefix, npPrefix) +} + +func testAccContainerCluster_withNodePoolNodeConfig(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_node_pool_node_config" { + name = "%s" + location = "us-central1-a" + node_pool { + name = "%s" + node_count = 2 + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + local_ssd_count = 1 + oauth_scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + service_account = "default" + metadata = { + foo = "bar" + disable-legacy-endpoints = "true" + } + image_type = "COS_CONTAINERD" + labels = { + foo = "bar" + } + tags = ["foo", "bar"] + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, np, networkName, subnetworkName) +} + +func testAccContainerCluster_withMaintenanceWindow(clusterName, startTime, networkName, subnetworkName string) string { + maintenancePolicy := "" + if len(startTime) > 0 { + maintenancePolicy = fmt.Sprintf(` + maintenance_policy { + daily_maintenance_window { + start_time = "%s" + } + }`, startTime) + } + + return fmt.Sprintf(` +resource "google_container_cluster" "with_maintenance_window" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + %s + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, 
maintenancePolicy, networkName, subnetworkName) +} + +func testAccContainerCluster_withRecurringMaintenanceWindow(clusterName, startTime, endTime, networkName, subnetworkName string) string { + maintenancePolicy := "" + if len(startTime) > 0 { + maintenancePolicy = fmt.Sprintf(` + maintenance_policy { + recurring_window { + start_time = "%s" + end_time = "%s" + recurrence = "FREQ=DAILY" + } + }`, startTime, endTime) + } + + return fmt.Sprintf(` +resource "google_container_cluster" "with_recurring_maintenance_window" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + %s + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, maintenancePolicy, networkName, subnetworkName) + +} + +func testAccContainerCluster_withExclusion_RecurringMaintenanceWindow(clusterName string, w1startTime, w1endTime, w2startTime, w2endTime, networkName, subnetworkName string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_maintenance_exclusion_window" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + maintenance_policy { + recurring_window { + start_time = "%s" + end_time = "%s" + recurrence = "FREQ=DAILY" + } + maintenance_exclusion { + exclusion_name = "batch job" + start_time = "%s" + end_time = "%s" + } + maintenance_exclusion { + exclusion_name = "holiday data load" + start_time = "%s" + end_time = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, w1startTime, w1endTime, w1startTime, w1endTime, w2startTime, w2endTime, networkName, subnetworkName) +} + +func testAccContainerCluster_withExclusionOptions_RecurringMaintenanceWindow(cclusterName, w1startTime, w1endTime, w2startTime, w2endTime, scope1, scope2, networkName, subnetworkName string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_maintenance_exclusion_options" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + 
maintenance_policy { + recurring_window { + start_time = "%s" + end_time = "%s" + recurrence = "FREQ=DAILY" + } + maintenance_exclusion { + exclusion_name = "batch job" + start_time = "%s" + end_time = "%s" + exclusion_options { + scope = "%s" + } + } + maintenance_exclusion { + exclusion_name = "holiday data load" + start_time = "%s" + end_time = "%s" + exclusion_options { + scope = "%s" + } + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cclusterName, w1startTime, w1endTime, w1startTime, w1endTime, scope1, w2startTime, w2endTime, scope2, networkName, subnetworkName) +} + +func testAccContainerCluster_NoExclusionOptions_RecurringMaintenanceWindow(cclusterName, w1startTime, w1endTime, w2startTime, w2endTime, networkName, subnetworkName string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_maintenance_exclusion_options" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + maintenance_policy { + recurring_window { + start_time = "%s" + end_time = "%s" + recurrence = "FREQ=DAILY" + } + maintenance_exclusion { + exclusion_name = "batch job" + start_time = "%s" + end_time = "%s" + } + maintenance_exclusion { + exclusion_name = "holiday data load" + start_time = "%s" + end_time = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cclusterName, w1startTime, w1endTime, w1startTime, w1endTime, w2startTime, w2endTime, networkName, subnetworkName) +} + +func testAccContainerCluster_updateExclusionOptions_RecurringMaintenanceWindow(cclusterName, w1startTime, w1endTime, w2startTime, w2endTime, scope1, scope2, networkName, subnetworkName string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_maintenance_exclusion_options" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + + maintenance_policy { + recurring_window { + start_time = "%s" + end_time = "%s" + recurrence = "FREQ=DAILY" 
+ } + maintenance_exclusion { + exclusion_name = "batch job" + start_time = "%s" + end_time = "%s" + exclusion_options { + scope = "%s" + } + } + maintenance_exclusion { + exclusion_name = "holiday data load" + start_time = "%s" + end_time = "%s" + exclusion_options { + scope = "%s" + } + } + } + network = "%s" + subnetwork = "%s" +} +`, cclusterName, w1startTime, w1endTime, w1startTime, w1endTime, scope1, w2startTime, w2endTime, scope2, networkName, subnetworkName) +} + +func testAccContainerCluster_withExclusion_NoMaintenanceWindow(clusterName string, w1startTime, w1endTime, networkName, subnetworkName string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_maintenance_exclusion_window" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + maintenance_policy { + recurring_window { + start_time = "%s" + end_time = "%s" + recurrence = "FREQ=DAILY" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, w1startTime, w1endTime, networkName, subnetworkName) +} + +func testAccContainerCluster_withExclusion_DailyMaintenanceWindow(clusterName, w1startTime, w1endTime, networkName, subnetworkName string) string { + + return fmt.Sprintf(` +resource "google_container_cluster" "with_maintenance_exclusion_window" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + maintenance_policy { + daily_maintenance_window { + start_time = "03:00" + } + maintenance_exclusion { + exclusion_name = "batch job" + start_time = "%s" + end_time = "%s" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, w1startTime, w1endTime, networkName, subnetworkName) +} + +func testAccContainerCluster_withIPAllocationPolicy_existingSecondaryRanges(containerNetName string, clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource 
"google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + region = "us-central1" + + ip_cidr_range = "10.0.0.0/24" + + secondary_ip_range { + range_name = "pods" + ip_cidr_range = "10.1.0.0/16" + } + secondary_ip_range { + range_name = "services" + ip_cidr_range = "10.2.0.0/20" + } +} + +resource "google_container_cluster" "with_ip_allocation_policy" { + name = "%s" + location = "us-central1-a" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + networking_mode = "VPC_NATIVE" + initial_node_count = 1 + ip_allocation_policy { + cluster_secondary_range_name = "pods" + services_secondary_range_name = "services" + } + deletion_protection = false +} +`, containerNetName, clusterName) +} + +func testAccContainerCluster_withIPAllocationPolicy_specificIPRanges(containerNetName string, clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + region = "us-central1" + + ip_cidr_range = "10.2.0.0/16" +} + +resource "google_container_cluster" "with_ip_allocation_policy" { + name = "%s" + location = "us-central1-a" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + services_ipv4_cidr_block = "10.1.0.0/16" + } + deletion_protection = false +} +`, containerNetName, clusterName) +} + +func testAccContainerCluster_withIPAllocationPolicy_specificSizes(containerNetName string, clusterName string) string { + return 
fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + region = "us-central1" + + ip_cidr_range = "10.2.0.0/16" +} + +resource "google_container_cluster" "with_ip_allocation_policy" { + name = "%s" + location = "us-central1-a" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + ip_allocation_policy { + cluster_ipv4_cidr_block = "/16" + services_ipv4_cidr_block = "/22" + } + deletion_protection = false +} +`, containerNetName, clusterName) +} + +func testAccContainerCluster_stackType_withDualStack(containerNetName string, clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + + resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + region = "us-central1" + + ip_cidr_range = "10.2.0.0/16" + stack_type = "IPV4_IPV6" + ipv6_access_type = "EXTERNAL" +} + +resource "google_container_cluster" "with_stack_type" { + name = "%s" + location = "us-central1-a" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + initial_node_count = 1 + datapath_provider = "ADVANCED_DATAPATH" + enable_l4_ilb_subsetting = true + + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + services_ipv4_cidr_block = "10.1.0.0/16" + stack_type = "IPV4_IPV6" + } + deletion_protection = false +} +`, containerNetName, clusterName) +} + +func testAccContainerCluster_stackType_withSingleStack(containerNetName string, 
clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + + resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + region = "us-central1" + + ip_cidr_range = "10.2.0.0/16" +} + +resource "google_container_cluster" "with_stack_type" { + name = "%s" + location = "us-central1-a" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + initial_node_count = 1 + enable_l4_ilb_subsetting = true + + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + services_ipv4_cidr_block = "10.1.0.0/16" + stack_type = "IPV4" + } + deletion_protection = false +} +`, containerNetName, clusterName) +} + +func testAccContainerCluster_with_PodCIDROverprovisionDisabled(containerNetName string, clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + + resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + region = "us-central1" + + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_container_cluster" "with_pco_disabled" { + name = "%s" + location = "us-central1-a" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + min_master_version = "1.27" + initial_node_count = 1 + datapath_provider = "ADVANCED_DATAPATH" + + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.1.0.0/16" + services_ipv4_cidr_block = "10.2.0.0/16" + pod_cidr_overprovision_config { + disabled = true + } + } + deletion_protection = false +} +`, containerNetName, clusterName) +} + +func 
testAccContainerCluster_withResourceUsageExportConfig(clusterName, datasetId, enableMetering, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + alias = "user-project-override" + user_project_override = true +} +resource "google_bigquery_dataset" "default" { + dataset_id = "%s" + description = "gke resource usage dataset tests" + delete_contents_on_destroy = true +} + +resource "google_container_cluster" "with_resource_usage_export_config" { + provider = google.user-project-override + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + resource_usage_export_config { + enable_network_egress_metering = true + enable_resource_consumption_metering = %s + bigquery_destination { + dataset_id = google_bigquery_dataset.default.dataset_id + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, datasetId, clusterName, enableMetering, networkName, subnetworkName) +} + +func testAccContainerCluster_withResourceUsageExportConfigNoConfig(clusterName, datasetId, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_bigquery_dataset" "default" { + dataset_id = "%s" + description = "gke resource usage dataset tests" + delete_contents_on_destroy = true +} + +resource "google_container_cluster" "with_resource_usage_export_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, datasetId, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withPrivateClusterConfigMissingCidrBlock(containerNetName, clusterName, location string, autopilotEnabled bool) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = 
google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "with_private_cluster" { + name = "%s" + location = "%s" + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + } + + enable_autopilot = %t + + master_authorized_networks_config {} + + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + deletion_protection = false +} +`, containerNetName, clusterName, location, autopilotEnabled) +} + +func testAccContainerCluster_withPrivateClusterConfig(containerNetName string, clusterName string, masterGlobalAccessEnabled bool) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "with_private_cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + 
default_snat_status { + disabled = true + } + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + master_global_access_config { + enabled = %t + } + } + master_authorized_networks_config { + } + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + deletion_protection = false +} +`, containerNetName, clusterName, masterGlobalAccessEnabled) +} + +func testAccContainerCluster_withPrivateClusterConfigGlobalAccessEnabledOnly(clusterName, networkName, subnetworkName string, masterGlobalAccessEnabled bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_private_cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + private_cluster_config { + enable_private_endpoint = false + master_global_access_config { + enabled = %t + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, masterGlobalAccessEnabled, networkName, subnetworkName) +} + +func testAccContainerCluster_withShieldedNodes(clusterName, networkName, subnetworkName string, enabled bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_shielded_nodes" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + enable_shielded_nodes = %v + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, enabled, networkName, subnetworkName) +} + +func testAccContainerCluster_withWorkloadIdentityConfigEnabled(projectID, clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource 
"google_container_cluster" "with_workload_identity_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + remove_default_node_pool = true + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, projectID, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withWorkloadIdentityConfigEnabledAutopilot(projectID string, clusterName string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_container_cluster" "with_workload_identity_config" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + enable_autopilot = true + deletion_protection = false +} +`, projectID, clusterName) +} + + +func testAccContainerCluster_updateWorkloadIdentityConfig(projectID, clusterName, networkName, subnetworkName string, enable bool) string { + workloadIdentityConfig := "" + if enable { + workloadIdentityConfig = ` + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + }` + } else { + workloadIdentityConfig = ` + workload_identity_config { + workload_pool = "" + }` + } + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_container_cluster" "with_workload_identity_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + remove_default_node_pool = true + %s + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, projectID, clusterName, workloadIdentityConfig, networkName, subnetworkName) +} + + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func testAccContainerCluster_sharedVpc(org, billingId, projectName, name string, suffix string) string { + return fmt.Sprintf(` +resource 
"google_project" "host_project" { + name = "Test Project XPN Host" + project_id = "%s-host" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "host_project" { + project = google_project.host_project.project_id + service = "container.googleapis.com" +} + +resource "google_compute_shared_vpc_host_project" "host_project" { + project = google_project_service.host_project.project +} + +resource "google_project" "service_project" { + name = "Test Project XPN Service" + project_id = "%s-service" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "service_project" { + project = google_project.service_project.project_id + service = "container.googleapis.com" +} + +resource "google_compute_shared_vpc_service_project" "service_project" { + host_project = google_compute_shared_vpc_host_project.host_project.project + service_project = google_project_service.service_project.project +} + +resource "google_project_iam_member" "host_service_agent" { + project = google_project_service.host_project.project + role = "roles/container.hostServiceAgentUser" + member = "serviceAccount:service-${google_project.service_project.number}@container-engine-robot.iam.gserviceaccount.com" + + depends_on = [google_project_service.service_project] +} + +resource "google_compute_subnetwork_iam_member" "service_network_cloud_services" { + project = google_compute_shared_vpc_host_project.host_project.project + subnetwork = google_compute_subnetwork.shared_subnetwork.name + role = "roles/compute.networkUser" + member = "serviceAccount:${google_project.service_project.number}@cloudservices.gserviceaccount.com" +} + +resource "google_compute_subnetwork_iam_member" "service_network_gke_user" { + project = google_compute_shared_vpc_host_project.host_project.project + subnetwork = google_compute_subnetwork.shared_subnetwork.name + role = "roles/compute.networkUser" + member = 
"serviceAccount:service-${google_project.service_project.number}@container-engine-robot.iam.gserviceaccount.com" +} + +resource "google_compute_network" "shared_network" { + name = "test-%s" + project = google_compute_shared_vpc_host_project.host_project.project + + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "shared_subnetwork" { + name = "test-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = google_compute_network.shared_network.self_link + project = google_compute_shared_vpc_host_project.host_project.project + + secondary_ip_range { + range_name = "pods" + ip_cidr_range = "10.1.0.0/16" + } + + secondary_ip_range { + range_name = "services" + ip_cidr_range = "10.2.0.0/20" + } +} + +resource "google_container_cluster" "shared_vpc_cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + project = google_compute_shared_vpc_service_project.service_project.service_project + + networking_mode = "VPC_NATIVE" + network = google_compute_network.shared_network.self_link + subnetwork = google_compute_subnetwork.shared_subnetwork.self_link + + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.shared_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.shared_subnetwork.secondary_ip_range[1].range_name + } + + depends_on = [ + google_project_iam_member.host_service_agent, + google_compute_subnetwork_iam_member.service_network_cloud_services, + google_compute_subnetwork_iam_member.service_network_gke_user, + ] + deletion_protection = false +} +`, projectName, org, billingId, projectName, org, billingId, suffix, suffix, name) +} + +func testAccContainerCluster_withBinaryAuthorizationEnabledBool(clusterName, networkName, subnetworkName string, enabled bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_binary_authorization_enabled_bool" { + name = "%s" + location = "us-central1-a" + 
initial_node_count = 1 + + binary_authorization { + enabled = %v + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, enabled, networkName, subnetworkName) +} + +func testAccContainerCluster_withBinaryAuthorizationEvaluationMode(clusterName string, autopilot_enabled bool, evaluation_mode, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_binary_authorization_evaluation_mode" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + ip_allocation_policy { + } + enable_autopilot = %v + + binary_authorization { + evaluation_mode = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, autopilot_enabled, evaluation_mode, networkName, subnetworkName) +} + +func testAccContainerCluster_withFlexiblePodCIDR(containerNetName string, clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.35.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.1.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.2.0.0/22" + } +} + +resource "google_container_cluster" "with_flexible_cidr" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + + networking_mode = "VPC_NATIVE" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } + + master_authorized_networks_config { + } + + ip_allocation_policy { + 
cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + default_max_pods_per_node = 100 + deletion_protection = false +} +`, containerNetName, clusterName) +} +{{- end }} + +func testAccContainerCluster_withInitialCIDR(containerNetName string, clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.128.0.0/9" +} + +resource "google_container_cluster" "cidr_error_preempt" { + name = "%s" + location = "us-central1-a" + + networking_mode = "VPC_NATIVE" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + initial_node_count = 1 + + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + services_ipv4_cidr_block = "10.1.0.0/16" + } + deletion_protection = false +} +`, containerNetName, clusterName) +} + +func testAccContainerCluster_withCIDROverlap(initConfig, secondCluster string) string { + return fmt.Sprintf(` +%s + +resource "google_container_cluster" "cidr_error_overlap" { + name = "%s" + location = "us-central1-a" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + services_ipv4_cidr_block = "10.1.0.0/16" + } + deletion_protection = false +} +`, initConfig, secondCluster) +} + + +func testAccContainerCluster_withCIDROverlapWithTimeout(initConfig, secondCluster, createTimeout string) string { + return 
fmt.Sprintf(` +%s + +resource "google_container_cluster" "cidr_error_overlap" { + name = "%s" + location = "us-central1-a" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + ip_allocation_policy { + cluster_ipv4_cidr_block = "10.0.0.0/16" + services_ipv4_cidr_block = "10.1.0.0/16" + } + deletion_protection = false + timeouts { + create = "%s" + } +} +`, initConfig, secondCluster, createTimeout) +} + +func testAccContainerCluster_withInvalidLocation(location string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_resource_labels" { + name = "invalid-gke-cluster" + location = "%s" + initial_node_count = 1 + deletion_protection = false +} +`, location) +} + +func testAccContainerCluster_withExternalIpsConfig(projectID, clusterName, networkName, subnetworkName string, enabled bool) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_container_cluster" "with_external_ips_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + service_external_ips_config { + enabled = %v + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, projectID, clusterName, enabled, networkName, subnetworkName) +} + +func testAccContainerCluster_withMeshCertificatesConfigEnabled(projectID, clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_container_cluster" "with_mesh_certificates_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + remove_default_node_pool = true + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + mesh_certificates { + enable_certificates = true + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, 
projectID, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_updateMeshCertificatesConfig(projectID, clusterName, networkName, subnetworkName string, enabled bool) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_container_cluster" "with_mesh_certificates_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + remove_default_node_pool = true + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + mesh_certificates { + enable_certificates = %v + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, projectID, clusterName, enabled, networkName, subnetworkName) +} + +func testAccContainerCluster_updateCostManagementConfig(projectID, clusterName, networkName, subnetworkName string, enabled bool) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_container_cluster" "with_cost_management_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + cost_management_config { + enabled = %v + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, projectID, clusterName, enabled, networkName, subnetworkName) +} + +func testAccContainerCluster_withDatabaseEncryption(clusterName string, kmsData acctest.BootstrappedKMS, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "project" { +} + +resource "google_kms_key_ring_iam_member" "test_key_ring_iam_policy" { + key_ring_id = "%[1]s" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" +} + +data "google_kms_key_ring_iam_policy" "test_key_ring_iam_policy" { + key_ring_id = "%[1]s" +} + +resource "google_container_cluster" "primary" { + name = "%[3]s" + location = "us-central1-a" + 
initial_node_count = 1 + + database_encryption { + state = "ENCRYPTED" + key_name = "%[2]s" + } + deletion_protection = false + network = "%[4]s" + subnetwork = "%[5]s" +} +`, kmsData.KeyRing.Name, kmsData.CryptoKey.Name, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withDatapathProvider(clusterName, datapathProvider, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + ip_allocation_policy { + } + + datapath_provider = "%s" + + release_channel { + channel = "RAPID" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, datapathProvider, networkName, subnetworkName) +} + +func testAccContainerCluster_enableCiliumPolicies(clusterName, networkName, subnetworkName string, enableCilium bool) string { + ciliumPolicies := "" + if enableCilium { + ciliumPolicies = "enable_cilium_clusterwide_network_policy = true" + } else { + ciliumPolicies = "enable_cilium_clusterwide_network_policy = false" + } + + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + ip_allocation_policy { + } + + datapath_provider = "ADVANCED_DATAPATH" + %s + + release_channel { + channel = "RAPID" + } + + network = "%s" + subnetwork = "%s" + + deletion_protection = false +} +`, clusterName, ciliumPolicies, networkName, subnetworkName) +} + +func testAccContainerCluster_enableCiliumPolicies_withAutopilot(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%[2]s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = "%[3]s" + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true 
+ + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "with_autopilot" { + name = "%[1]s" + location = "us-central1" + enable_autopilot = true + + release_channel { + channel = "RAPID" + } + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + addons_config { + horizontal_pod_autoscaling { + disabled = false + } + } + + vertical_pod_autoscaling { + enabled = true + } + + datapath_provider = "ADVANCED_DATAPATH" + + deletion_protection = false + + timeouts { + create = "30m" + update = "40m" + } +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_enableCiliumPolicies_withAutopilotUpdate(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%[2]s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = "%[3]s" + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "with_autopilot" { + name = "%[1]s" + location = "us-central1" + enable_autopilot = true + + release_channel { + channel = "RAPID" + } + + network = google_compute_network.container_network.name + subnetwork = 
google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + addons_config { + horizontal_pod_autoscaling { + disabled = false + } + } + + vertical_pod_autoscaling { + enabled = true + } + + datapath_provider = "ADVANCED_DATAPATH" + enable_cilium_clusterwide_network_policy = true + + deletion_protection = false + + timeouts { + create = "30m" + update = "40m" + } +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withMasterAuthorizedNetworksDisabled(containerNetName string, clusterName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "with_private_cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + + private_cluster_config { + enable_private_endpoint = false + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } + + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = 
google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + deletion_protection = false +} +`, containerNetName, clusterName) +} + +func testAccContainerCluster_withEnableKubernetesAlpha(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + enable_kubernetes_alpha = true + + node_pool { + name = "%s" + initial_node_count = 1 + management { + auto_repair = false + auto_upgrade = false + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, cluster, np, networkName, subnetworkName) +} + +func testAccContainerCluster_withoutEnableKubernetesBetaAPIs(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.central1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withEnableKubernetesBetaAPIs(cluster, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + deletion_protection = false + + # This feature has been available since GKE 1.27, and currently the only + # supported Beta API is authentication.k8s.io/v1beta1/selfsubjectreviews. + # However, in the future, more Beta APIs will be supported, such as the + # resource.k8s.io group. 
At the same time, some existing Beta APIs will be + # deprecated as the feature will be GAed, and the Beta API will be eventually + # removed. In the case of the SelfSubjectReview API, it is planned to be GAed + # in Kubernetes as of 1.28. And, the Beta API of SelfSubjectReview will be removed + # after at least 3 minor version bumps, so it will be removed as of Kubernetes 1.31 + # or later. + # https://pr.k8s.io/117713 + # https://kubernetes.io/docs/reference/using-api/deprecation-guide/ + # + # The new Beta APIs will be available since GKE 1.28 + # - admissionregistration.k8s.io/v1beta1/validatingadmissionpolicies + # - admissionregistration.k8s.io/v1beta1/validatingadmissionpolicybindings + # https://pr.k8s.io/118644 + # + # Removing the Beta API from Kubernetes will break the test. + # TODO: Replace the Beta API with one available on the version of GKE + # if the test is broken. + enable_k8s_beta_apis { + enabled_apis = ["authentication.k8s.io/v1beta1/selfsubjectreviews"] + } + network = "%s" + subnetwork = "%s" +} +`, cluster, networkName, subnetworkName) +} + +func testAccContainerCluster_withIPv4Error(name string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = false + master_ipv4_cidr_block = "10.42.0.0/28" + } + deletion_protection = false +} +`, name) +} + +func testAccContainerCluster_withAutopilot(projectID string, containerNetName string, clusterName string, location string, enabled bool, withNetworkTag bool, serviceAccount string) string { + config := "" + clusterAutoscaling := "" + if serviceAccount != "" { + config += fmt.Sprintf(` +resource "google_service_account" "service_account" { + account_id = "%[1]s" + project = "%[2]s" + display_name = "Service Account" +} + +resource "google_project_iam_member" "project" { + project = "%[2]s" + role = 
"roles/container.nodeServiceAccount" + member = "serviceAccount:%[1]s@%[2]s.iam.gserviceaccount.com" +}`, serviceAccount, projectID) + + clusterAutoscaling = fmt.Sprintf(` + cluster_autoscaling { + auto_provisioning_defaults { + service_account = "%s@%s.iam.gserviceaccount.com" + oauth_scopes = ["https://www.googleapis.com/auth/cloud-platform"] + } + }`, serviceAccount, projectID) + } + + config += fmt.Sprintf(` + +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_autopilot" { + name = "%s" + location = "%s" + enable_autopilot = %v + deletion_protection = false + min_master_version = "latest" + release_channel { + channel = "RAPID" + } + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + addons_config { + horizontal_pod_autoscaling { + disabled = false + } + } + %s + vertical_pod_autoscaling { + enabled = true + }`, containerNetName, clusterName, location, enabled, clusterAutoscaling) + if withNetworkTag { + config += ` + node_pool_auto_config { + network_tags { + tags = ["test-network-tag"] + } + }` + } + config += ` +}` + 
return config +} + +func testAccContainerCluster_withDNSConfig(clusterName, clusterDns, clusterDnsDomain, clusterDnsScope, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + dns_config { + cluster_dns = "%s" + cluster_dns_domain = "%s" + cluster_dns_scope = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, clusterDns, clusterDnsDomain, clusterDnsScope, networkName, subnetworkName) +} + +func testAccContainerCluster_withGatewayApiConfig(clusterName, gatewayApiChannel, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + gateway_api_config { + channel = "%s" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, gatewayApiChannel, networkName, subnetworkName) +} + +func testAccContainerCluster_withIdentityServiceConfigEnabled(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + identity_service_config { + enabled = true + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withIdentityServiceConfigUpdated(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + identity_service_config { + enabled = false + } + deletion_protection = false + network = "%s" + 
subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withSecretManagerConfigEnabled(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + secret_manager_config { + enabled = true + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withSecretManagerConfigUpdated(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + secret_manager_config { + enabled = false + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} +{{- end }} + +func testAccContainerCluster_withLoggingConfigEnabled(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + logging_config { + enable_components = [ "SYSTEM_COMPONENTS" ] + } + monitoring_config { + enable_components = [ "SYSTEM_COMPONENTS" ] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withLoggingConfigDisabled(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + logging_config { + enable_components = [] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withLoggingConfigUpdated(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource 
"google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + logging_config { + enable_components = [ "SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER"] + } + monitoring_config { + enable_components = [ "SYSTEM_COMPONENTS" ] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withMonitoringConfigEnabled(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + monitoring_config { + enable_components = [ "SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER" ] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withMonitoringConfigDisabled(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + monitoring_config { + enable_components = [] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withMonitoringConfigUpdated(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + monitoring_config { + enable_components = [ "SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER" ] + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withMonitoringConfigPrometheusUpdated(name, networkName, subnetworkName string) string { + return 
fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + monitoring_config { + enable_components = [ "SYSTEM_COMPONENTS", "APISERVER", "CONTROLLER_MANAGER", "SCHEDULER" ] + managed_prometheus { + enabled = true + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withMonitoringConfigPrometheusOnly(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + monitoring_config { + enable_components = [] + managed_prometheus { + enabled = true + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withMonitoringConfigPrometheusOnly2(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + monitoring_config { + managed_prometheus { + enabled = true + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabled(name string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s-nw" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "services-range" + ip_cidr_range = "192.168.1.0/24" + } + + secondary_ip_range { + range_name = "pod-ranges" + ip_cidr_range 
= "192.168.64.0/22" + } +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + datapath_provider = "ADVANCED_DATAPATH" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + monitoring_config { + enable_components = [] + advanced_datapath_observability_config { + enable_metrics = true + enable_relay = true + } + } + deletion_protection = false +} +`, name, name) +} + +func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigEnabledOld(name string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s-nw" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "services-range" + ip_cidr_range = "192.168.1.0/24" + } + + secondary_ip_range { + range_name = "pod-ranges" + ip_cidr_range = "192.168.64.0/22" + } +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + datapath_provider = "ADVANCED_DATAPATH" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = 
google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + monitoring_config { + enable_components = [] + advanced_datapath_observability_config { + enable_metrics = true + relay_mode = "INTERNAL_VPC_LB" + } + } + deletion_protection = false +} +`, name, name) +} + +func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabled(name string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s-nw" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "services-range" + ip_cidr_range = "192.168.1.0/24" + } + + secondary_ip_range { + range_name = "pod-ranges" + ip_cidr_range = "192.168.64.0/22" + } +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + datapath_provider = "ADVANCED_DATAPATH" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + monitoring_config { + enable_components = [] + advanced_datapath_observability_config { + enable_metrics = false + enable_relay = false + } + } + deletion_protection = false +} +`, name, name) +} + +func testAccContainerCluster_withMonitoringConfigAdvancedDatapathObservabilityConfigDisabledOld(name string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s-nw" + auto_create_subnetworks = 
false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "services-range" + ip_cidr_range = "192.168.1.0/24" + } + + secondary_ip_range { + range_name = "pod-ranges" + ip_cidr_range = "192.168.64.0/22" + } +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + datapath_provider = "ADVANCED_DATAPATH" + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + monitoring_config { + enable_components = [] + advanced_datapath_observability_config { + enable_metrics = false + relay_mode = "DISABLED" + } + } + deletion_protection = false +} +`, name, name) +} + +func testAccContainerCluster_withSoleTenantGroup(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_compute_node_template" "soletenant-tmpl" { + name = "%s" + region = "us-central1" + node_type = "n1-node-96-624" +} + +resource "google_compute_node_group" "group" { + name = "%s" + zone = "us-central1-f" + description = "example google_compute_node_group for Terraform Google Provider" + + initial_size = 1 + node_template = google_compute_node_template.soletenant-tmpl.id +} + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + disk_type = "pd-ssd" + node_group 
= google_compute_node_group.group.name + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, name, name, networkName, subnetworkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withTPUConfig(network, cluster string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "with_tpu_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + + tpu_config { + enabled = true + use_service_networking = true + } + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + networking_mode = "VPC_NATIVE" + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } + master_authorized_networks_config { + } + + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + deletion_protection = false +} +`, network, cluster) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withProtectConfig(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 
1 + + protect_config { + workload_config { + audit_mode = "BASIC" + } + workload_vulnerability_mode = "BASIC" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} + +func testAccContainerCluster_withProtectConfigUpdated(name, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + protect_config { + workload_config { + audit_mode = "DISABLED" + } + workload_vulnerability_mode = "DISABLED" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, networkName, subnetworkName) +} +{{- end }} + + +func testAccContainerCluster_autopilot_minimal(name string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1" + enable_autopilot = true + deletion_protection = false +}`, name) +} + +func testAccContainerCluster_autopilot_net_admin(name, networkName, subnetworkName string, enabled bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1" + enable_autopilot = true + allow_net_admin = %t + min_master_version = 1.27 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, name, enabled, networkName, subnetworkName) +} + + +func TestAccContainerCluster_customPlacementPolicy(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + policy := fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_customPlacementPolicy(cluster, np, policy, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_cluster.cluster", "node_pool.0.placement_policy.0.type", "COMPACT"), + resource.TestCheckResourceAttr("google_container_cluster.cluster", "node_pool.0.placement_policy.0.policy_name", policy), + resource.TestCheckResourceAttr("google_container_cluster.cluster", "node_pool.0.node_config.0.machine_type", "c2-standard-4"), + ), + }, + { + ResourceName: "google_container_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_customPlacementPolicy(cluster, np, policyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` + +resource "google_compute_resource_policy" "policy" { + name = "%s" + region = "us-central1" + group_placement_policy { + collocation = "COLLOCATED" + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + + node_pool { + name = "%s" + initial_node_count = 2 + + node_config { + machine_type = "c2-standard-4" + } + + placement_policy { + type = "COMPACT" + policy_name = google_compute_resource_policy.policy.name + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, policyName, cluster, np, networkName, subnetworkName) +} + +func testAccContainerCluster_additional_pod_ranges_config(name string, nameCount int) string { + var podRangeNamesStr string + names := []string{"\"gke-autopilot-pods-add\",", "\"gke-autopilot-pods-add-2\""} + for i := 0; i < nameCount; i++ { + podRangeNamesStr += names[i] + } + var aprc string + if len(podRangeNamesStr) > 0 { + aprc = fmt.Sprintf(` + additional_pod_ranges_config { + 
pod_range_names = [%s] + } + `, podRangeNamesStr) + } + + return fmt.Sprintf(` + resource "google_compute_network" "main" { + name = "%s" + auto_create_subnetworks = false + } + resource "google_compute_subnetwork" "main" { + ip_cidr_range = "10.10.0.0/16" + name = "%s" + network = google_compute_network.main.self_link + region = "us-central1" + + secondary_ip_range { + range_name = "gke-autopilot-services" + ip_cidr_range = "10.11.0.0/20" + } + + secondary_ip_range { + range_name = "gke-autopilot-pods" + ip_cidr_range = "10.12.0.0/16" + } + + secondary_ip_range { + range_name = "gke-autopilot-pods-add" + ip_cidr_range = "10.100.0.0/16" + } + secondary_ip_range { + range_name = "gke-autopilot-pods-add-2" + ip_cidr_range = "100.0.0.0/16" + } + } + resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1" + + enable_autopilot = true + + release_channel { + channel = "REGULAR" + } + + network = google_compute_network.main.name + subnetwork = google_compute_subnetwork.main.name + + private_cluster_config { + enable_private_endpoint = false + enable_private_nodes = true + master_ipv4_cidr_block = "172.16.0.0/28" + } + + # supresses permadiff + dns_config { + cluster_dns = "CLOUD_DNS" + cluster_dns_domain = "cluster.local" + cluster_dns_scope = "CLUSTER_SCOPE" + } + + ip_allocation_policy { + cluster_secondary_range_name = "gke-autopilot-pods" + services_secondary_range_name = "gke-autopilot-services" + %s + } + deletion_protection = false + } + `, name, name, name, aprc) +} + +func TestAccContainerCluster_withConfidentialBootDisk(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + npName := fmt.Sprintf("tf-test-node-pool-%s", acctest.RandString(t, 10)) + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + if 
acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withConfidentialBootDisk(clusterName, npName, kms.CryptoKey.Name, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_confidential_boot_disk", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_withConfidentialBootDisk(clusterName, npName, kmsKeyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_confidential_boot_disk" { + name = "%s" + location = "us-central1-a" + release_channel { + channel = "RAPID" +} + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + image_type = "COS_CONTAINERD" + boot_disk_kms_key = "%s" + machine_type = "n2-standard-2" + enable_confidential_storage = true + disk_type = "hyperdisk-balanced" + } +} + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, npName, kmsKeyName, networkName, subnetworkName) +} + +func TestAccContainerCluster_withConfidentialBootDiskNodeConfig(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + if acctest.BootstrapPSARole(t, "service-", "compute-system", 
"roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withConfidentialBootDiskNodeConfig(clusterName, kms.CryptoKey.Name, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.with_confidential_boot_disk_node_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_withConfidentialBootDiskNodeConfig(clusterName, kmsKeyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_confidential_boot_disk_node_config" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + release_channel { + channel = "RAPID" + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + image_type = "COS_CONTAINERD" + boot_disk_kms_key = "%s" + machine_type = "n2-standard-2" + enable_confidential_storage = true + disk_type = "hyperdisk-balanced" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, kmsKeyName, networkName, subnetworkName) +} + +func TestAccContainerCluster_withoutConfidentialBootDisk(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + npName := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerCluster_withoutConfidentialBootDisk(clusterName, npName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.without_confidential_boot_disk", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} +func testAccContainerCluster_withoutConfidentialBootDisk(clusterName, npName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "without_confidential_boot_disk" { + name = "%s" + location = "us-central1-a" + release_channel { + channel = "RAPID" + } + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + image_type = "COS_CONTAINERD" + machine_type = "n2-standard-2" + enable_confidential_storage = false + disk_type = "pd-balanced" + } + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, clusterName, npName, networkName, subnetworkName) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccContainerCluster_withWorkloadALTSConfig(projectID, name, networkName, subnetworkName string, enable bool) string { + return fmt.Sprintf(` + data "google_project" "project" { + provider = google-beta + project_id = "%s" + } + resource "google_compute_network" "network" { + provider = google-beta + name = "%s" + auto_create_subnetworks = false + enable_ula_internal_ipv6 = true + } + resource "google_compute_subnetwork" "subnet" { + provider = google-beta + name = "%s" + network = google_compute_network.network.id + ip_cidr_range = "9.12.22.0/24" + region = "us-central1" + } + resource "google_container_cluster" "with_workload_alts_config" { + provider = google-beta + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + network = 
google_compute_network.network.name + subnetwork = google_compute_subnetwork.subnet.name + workload_alts_config { + enable_alts = %v + } + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + deletion_protection = false + } +`, projectID, networkName, subnetworkName, name, enable) +} + +func testAccContainerCluster_withWorkloadALTSConfigAutopilot(projectID, name string, enable bool) string { + return fmt.Sprintf(` + data "google_project" "project" { + provider = google-beta + project_id = "%s" + } + resource "google_container_cluster" "with_workload_alts_config" { + provider = google-beta + name = "%s" + location = "us-central1" + initial_node_count = 1 + workload_alts_config { + enable_alts = %v + } + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + enable_autopilot = true + deletion_protection = false + } +`, projectID, name, enable) +} + +{{ end }} + +func testAccContainerCluster_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_member" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" +} + +resource "google_project_iam_member" "tagUser1" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "google_project_iam_member" "tagUser2" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com" + + depends_on = 
[google_project_iam_member.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_member.tagHoldAdmin, + google_project_iam_member.tagUser1, + google_project_iam_member.tagUser2, + ] +} + +resource "google_tags_tag_key" "key" { + parent = "projects/%[1]s" + short_name = "foobarbaz-%[2]s" + description = "For foo/bar resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } +} + +resource "google_tags_tag_value" "value" { + parent = "tagKeys/${google_tags_tag_key.key.name}" + short_name = "foo-%[2]s" + description = "For foo resources" +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%[3]s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key.name}" = "tagValues/${google_tags_tag_value.value.name}" + } + } + + deletion_protection = false + network = "%[4]s" + subnetwork = "%[5]s" + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withAutopilotResourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_member" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" +} + +resource "google_project_iam_member" "tagUser1" { + 
project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "google_project_iam_member" "tagUser2" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_member.tagHoldAdmin, + google_project_iam_member.tagUser1, + google_project_iam_member.tagUser2, + ] +} + +resource "google_tags_tag_key" "key1" { + parent = "projects/%[1]s" + short_name = "foobarbaz1-%[2]s" + description = "For foo/bar1 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [google_compute_network.container_network] +} + +resource "google_tags_tag_value" "value1" { + parent = "tagKeys/${google_tags_tag_key.key1.name}" + short_name = "foo1-%[2]s" + description = "For foo1 resources" +} + +resource "google_tags_tag_key" "key2" { + parent = "projects/%[1]s" + short_name = "foobarbaz2-%[2]s" + description = "For foo/bar2 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [ + google_compute_network.container_network, + google_tags_tag_key.key1 + ] +} + +resource "google_tags_tag_value" "value2" { + parent = "tagKeys/${google_tags_tag_key.key2.name}" + short_name = "foo2-%[2]s" + description = "For foo2 resources" +} + +resource "google_compute_network" "container_network" { + name = "%[4]s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = "%[5]s" + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + 
private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_autopilot" { + name = "%[3]s" + location = "us-central1" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + enable_autopilot = true + + deletion_protection = false + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + node_pool_auto_config { + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key1.name}" = "tagValues/${google_tags_tag_value.value1.name}" + } + } + + addons_config { + horizontal_pod_autoscaling { + disabled = false + } + } + vertical_pod_autoscaling { + enabled = true + } + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withAutopilotResourceManagerTagsUpdate1(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_member" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" +} + +resource "google_project_iam_member" "tagUser1" { + project = "%[1]s" + role = 
"roles/resourcemanager.tagUser" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "google_project_iam_member" "tagUser2" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_member.tagHoldAdmin, + google_project_iam_member.tagUser1, + google_project_iam_member.tagUser2, + ] +} + +resource "google_tags_tag_key" "key1" { + parent = "projects/%[1]s" + short_name = "foobarbaz1-%[2]s" + description = "For foo/bar1 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [google_compute_network.container_network] +} + +resource "google_tags_tag_value" "value1" { + parent = "tagKeys/${google_tags_tag_key.key1.name}" + short_name = "foo1-%[2]s" + description = "For foo1 resources" +} + +resource "google_tags_tag_key" "key2" { + parent = "projects/%[1]s" + short_name = "foobarbaz2-%[2]s" + description = "For foo/bar2 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [ + google_compute_network.container_network, + google_tags_tag_key.key1 + ] +} + +resource "google_tags_tag_value" "value2" { + parent = "tagKeys/${google_tags_tag_key.key2.name}" + short_name = "foo2-%[2]s" + description = "For foo2 resources" +} + +resource "google_compute_network" "container_network" { + name = "%[4]s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = "%[5]s" + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + 
secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_autopilot" { + name = "%[3]s" + location = "us-central1" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + enable_autopilot = true + + deletion_protection = false + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + node_pool_auto_config { + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key1.name}" = "tagValues/${google_tags_tag_value.value1.name}" + "tagKeys/${google_tags_tag_key.key2.name}" = "tagValues/${google_tags_tag_value.value2.name}" + } + } + + addons_config { + horizontal_pod_autoscaling { + disabled = false + } + } + vertical_pod_autoscaling { + enabled = true + } + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withAutopilotResourceManagerTagsUpdate2(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_member" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" +} + +resource "google_project_iam_member" 
"tagUser1" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "google_project_iam_member" "tagUser2" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_member.tagHoldAdmin, + google_project_iam_member.tagUser1, + google_project_iam_member.tagUser2, + ] +} + +resource "google_tags_tag_key" "key1" { + parent = "projects/%[1]s" + short_name = "foobarbaz1-%[2]s" + description = "For foo/bar1 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [google_compute_network.container_network] +} + +resource "google_tags_tag_value" "value1" { + parent = "tagKeys/${google_tags_tag_key.key1.name}" + short_name = "foo1-%[2]s" + description = "For foo1 resources" +} + +resource "google_tags_tag_key" "key2" { + parent = "projects/%[1]s" + short_name = "foobarbaz2-%[2]s" + description = "For foo/bar2 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [ + google_compute_network.container_network, + google_tags_tag_key.key1 + ] +} + +resource "google_tags_tag_value" "value2" { + parent = "tagKeys/${google_tags_tag_key.key2.name}" + short_name = "foo2-%[2]s" + description = "For foo2 resources" +} + +resource "google_compute_network" "container_network" { + name = "%[4]s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = "%[5]s" + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = 
"us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "with_autopilot" { + name = "%[3]s" + location = "us-central1" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + enable_autopilot = true + + deletion_protection = false + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + addons_config { + horizontal_pod_autoscaling { + disabled = false + } + } + vertical_pod_autoscaling { + enabled = true + } + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} + +func TestAccContainerCluster_privateRegistry(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePoolName := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + secretID := fmt.Sprintf("tf-test-secret-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerCluster_privateRegistryEnabled(secretID, clusterName, networkName, subnetworkName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_pool_defaults.0.node_config_defaults.0.containerd_config.0.private_registry_access_config.0.enabled", + "true", + ), + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_pool_defaults.0.node_config_defaults.0.containerd_config.0.private_registry_access_config.0.certificate_authority_domain_config.#", + "2", + ), + // First CA config + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_pool_defaults.0.node_config_defaults.0.containerd_config.0.private_registry_access_config.0.certificate_authority_domain_config.0.fqdns.0", + "my.custom.domain", + ), + // Second CA config + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_pool_defaults.0.node_config_defaults.0.containerd_config.0.private_registry_access_config.0.certificate_authority_domain_config.1.fqdns.0", + "10.1.2.32", + ), + ), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_privateRegistryDisabled(clusterName, networkName, subnetworkName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_pool_defaults.0.node_config_defaults.0.containerd_config.0.private_registry_access_config.0.enabled", + "false", + ), + resource.TestCheckResourceAttr( + "google_container_cluster.primary", + "node_pool_defaults.0.node_config_defaults.0.containerd_config.0.private_registry_access_config.0.certificate_authority_domain_config.#", + "0", + ), + ), + }, + { + Config: testAccContainerCluster_withNodePoolPrivateRegistry(secretID, clusterName, nodePoolName, networkName, subnetworkName), + 
}, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + { + Config: testAccContainerCluster_withNodeConfigPrivateRegistry(secretID, clusterName, networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerCluster_privateRegistryEnabled(secretID, clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "test_project" { + } + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } +} + +resource "google_secret_manager_secret_version" "secret-version-basic" { + secret = google_secret_manager_secret.secret-basic.id + secret_data = "dummypassword" + } + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret-basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret-version-basic] + } + +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + + node_pool_defaults { + node_config_defaults { + containerd_config { + private_registry_access_config { + enabled = true + certificate_authority_domain_config { + fqdns = [ "my.custom.domain" ] + gcp_secret_manager_certificate_config { + secret_uri = google_secret_manager_secret_version.secret-version-basic.name + } + } + certificate_authority_domain_config { + fqdns = [ "10.1.2.32" ] + gcp_secret_manager_certificate_config { + secret_uri = 
google_secret_manager_secret_version.secret-version-basic.name + } + } + } + } + } + } +} +`, secretID, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_privateRegistryDisabled(clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + + node_pool_defaults { + node_config_defaults { + containerd_config { + private_registry_access_config { + enabled = false + } + } + } + } +} +`, clusterName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodePoolPrivateRegistry(secretID, clusterName, nodePoolName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "test_project" { + } + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } +} +resource "google_secret_manager_secret_version" "secret-version-basic" { + secret = google_secret_manager_secret.secret-basic.id + secret_data = "dummypassword" + } + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret-basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret-version-basic] + } +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + + node_pool { + name = "%s" + initial_node_count = 1 + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + machine_type = "n1-standard-8" + image_type = "COS_CONTAINERD" + containerd_config { + private_registry_access_config { + enabled = true + certificate_authority_domain_config { + fqdns = [ 
"my.custom.domain", "10.0.0.127:8888" ] + gcp_secret_manager_certificate_config { + secret_uri = google_secret_manager_secret_version.secret-version-basic.name + } + } + } + } +} +} + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, secretID, clusterName, nodePoolName, networkName, subnetworkName) +} + +func testAccContainerCluster_withNodeConfigPrivateRegistry(secretID, clusterName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "test_project" { + } + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } +} +resource "google_secret_manager_secret_version" "secret-version-basic" { + secret = google_secret_manager_secret.secret-basic.id + secret_data = "dummypassword" + } + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret-basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret-version-basic] + } +resource "google_container_cluster" "primary" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + machine_type = "n1-standard-8" + image_type = "COS_CONTAINERD" + containerd_config { + private_registry_access_config { + enabled = true + certificate_authority_domain_config { + fqdns = [ "my.custom.domain", "10.0.0.127:8888" ] + gcp_secret_manager_certificate_config { + secret_uri = google_secret_manager_secret_version.secret-version-basic.name + } + } + } + } +} + deletion_protection = false + network = "%s" + subnetwork = "%s" +} +`, secretID, clusterName, networkName, subnetworkName) +} diff --git 
a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool.go.tmpl new file mode 100644 index 000000000000..b729954623c3 --- /dev/null +++ b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool.go.tmpl @@ -0,0 +1,2176 @@ +package container + +import ( + "fmt" + "log" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/container/v1" +{{- else }} + container "google.golang.org/api/container/v1beta1" +{{- end }} +) + +var clusterIdRegex = regexp.MustCompile("projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)") + +func ResourceContainerNodePool() *schema.Resource { + return &schema.Resource{ + Create: resourceContainerNodePoolCreate, + Read: resourceContainerNodePoolRead, + Update: resourceContainerNodePoolUpdate, + Delete: resourceContainerNodePoolDelete, + Exists: resourceContainerNodePoolExists, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + SchemaVersion: 1, + MigrateState: resourceContainerNodePoolMigrateState, + + Importer: &schema.ResourceImporter{ + State: resourceContainerNodePoolStateImporter, + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + 
resourceNodeConfigEmptyGuestAccelerator, + ), + + UseJSONNumber: true, + + Schema: tpgresource.MergeSchemas( + schemaNodePool, + map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which to create the node pool. If blank, the provider-configured project will be used.`, + }, + "cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The cluster to create the node pool for. Cluster must be present in location provided for zonal clusters.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The location (region or zone) of the cluster.`, + }, + "operation": { + Type: schema.TypeString, + Computed: true, + }, + }), + } +} + +var schemaBlueGreenSettings = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "standard_rollout_policy": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: `Standard rollout policy is the default policy for blue-green.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "batch_percentage": { + Type: schema.TypeFloat, + Optional: true, + Computed: true, + Description: `Percentage of the blue pool nodes to drain in a batch.`, + ValidateFunc: validation.FloatBetween(0.0, 1.0), + }, + "batch_node_count": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + Description: `Number of blue nodes to drain in a batch.`, + }, + "batch_soak_duration": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Soak time after each batch gets drained.`, + }, + }, + }, + }, + "node_pool_soak_duration": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Time needed after draining entire blue pool. 
After this period, blue pool will be cleaned up.`, + }, + }, + }, + Description: `Settings for BlueGreen node pool upgrade.`, +} + +var schemaNodePool = map[string]*schema.Schema{ + "autoscaling": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Configuration required by cluster autoscaler to adjust the size of the node pool to the current cluster usage.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_node_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Minimum number of nodes per zone in the node pool. Must be >=0 and <= max_node_count. Cannot be used with total limits.`, + }, + + "max_node_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Maximum number of nodes per zone in the node pool. Must be >= min_node_count. Cannot be used with total limits.`, + }, + + "total_min_node_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Minimum number of all nodes in the node pool. Must be >=0 and <= total_max_node_count. Cannot be used with per zone limits.`, + }, + + "total_max_node_count": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `Maximum number of all nodes in the node pool. Must be >= total_min_node_count. Cannot be used with per zone limits.`, + }, + + "location_policy": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"BALANCED", "ANY"}, false), + Description: `Location policy specifies the algorithm used when scaling-up the node pool. "BALANCED" - Is a best effort policy that aims to balance the sizes of available zones. 
"ANY" - Instructs the cluster autoscaler to prioritize utilization of unused reservations, and reduces preemption risk for Spot VMs.`, + }, + }, + }, + }, + + "placement_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies the node placement policy`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + Description: `Type defines the type of placement policy`, + }, + "policy_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `If set, refers to the name of a custom resource policy supplied by the user. The resource policy must be in the same project and region as the node pool. If not found, InvalidArgument error is returned.`, + }, + "tpu_topology": { + Type: schema.TypeString, + Optional: true, + Description: `TPU placement topology for pod slice node pool. https://cloud.google.com/tpu/docs/types-topologies#tpu_topologies`, + }, + }, + }, + }, + + "queued_provisioning": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Specifies the configuration of queued provisioning`, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + ForceNew: true, + Description: `Whether nodes in this node pool are obtainable solely through the ProvisioningRequest API`, + }, + }, + }, + }, + + "max_pods_per_node": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The maximum number of pods per node in this node pool. 
Note that this does not work on node pools which are "route-based" - that is, node pools belonging to clusters that do not have IP Aliasing enabled.`, + }, + + "node_locations": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The list of zones in which the node pool's nodes should be located. Nodes must be in the region of their regional cluster or in the same region as their cluster's zone for zonal clusters. If unspecified, the cluster-level node_locations will be used.`, + }, + + "upgrade_settings": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Specify node upgrade settings to change how many nodes GKE attempts to upgrade at once. The number of nodes upgraded simultaneously is the sum of max_surge and max_unavailable. The maximum number of nodes upgraded simultaneously is limited to 20.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_surge": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The number of additional nodes that can be added to the node pool during an upgrade. Increasing max_surge raises the number of nodes that can be upgraded simultaneously. Can be set to 0 or greater.`, + }, + + "max_unavailable": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The number of nodes that can be simultaneously unavailable during an upgrade. Increasing max_unavailable raises the number of nodes that can be upgraded in parallel. 
Can be set to 0 or greater.`, + }, + + "strategy": { + Type: schema.TypeString, + Optional: true, + Default: "SURGE", + ValidateFunc: validation.StringInSlice([]string{"SURGE", "BLUE_GREEN"}, false), + Description: `Update strategy for the given nodepool.`, + }, + + "blue_green_settings": schemaBlueGreenSettings, + }, + }, + }, + + "initial_node_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The initial number of nodes for the pool. In regional or multi-zonal clusters, this is the number of nodes per zone. Changing this will force recreation of the resource.`, + }, + + "instance_group_urls": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `The resource URLs of the managed instance groups associated with this node pool.`, + }, + + "managed_instance_group_urls": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `List of instance group URLs which have been assigned to this node pool.`, + }, + + "management": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Node management configuration, wherein auto-repair and auto-upgrade is configured.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_repair": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Whether the nodes will be automatically repaired. Enabled by default.`, + }, + + "auto_upgrade": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Whether the nodes will be automatically upgraded. Enabled by default.`, + }, + }, + }, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The name of the node pool. 
If left blank, Terraform will auto-generate a unique name.`, + }, + + "name_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Creates a unique name for the node pool beginning with the specified prefix. Conflicts with name.`, + }, + + "node_config": schemaNodeConfig(), + + "node_count": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntAtLeast(0), + Description: `The number of nodes per instance group. This field can be used to update the number of nodes per instance group but should not be used alongside autoscaling.`, + }, + + "version": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The Kubernetes version for the nodes in this pool. Note that if this field and auto_upgrade are both specified, they will fight each other for what the node version should be, so setting both is highly discouraged. While a fuzzy version can be specified, it's recommended that you specify explicit versions as Terraform will see spurious diffs when fuzzy versions are used. See the google_container_engine_versions data source's version_prefix field to approximate fuzzy versions in a Terraform-compatible way.`, + }, + + "network_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `Networking configuration for this NodePool. If specified, it overrides the cluster-level defaults.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "create_pod_range": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to create a new range for pod IPs in this node pool. 
Defaults are provided for pod_range and pod_ipv4_cidr_block if they are not specified.`, + }, + "enable_private_nodes": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Whether nodes have internal IP addresses only.`, + }, + "pod_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The ID of the secondary range for pod IPs. If create_pod_range is true, this ID is used for the new range. If create_pod_range is false, uses an existing secondary range with this ID.`, + }, + "pod_ipv4_cidr_block": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: verify.ValidateIpCidrRange, + Description: `The IP address range for pod IPs in this node pool. Only applicable if create_pod_range is true. Set to blank to have a range chosen with the default size. Set to /netmask (e.g. /14) to have a range chosen with a specific netmask. Set to a CIDR notation (e.g. 10.96.0.0/14) to pick a specific range to use.`, + }, +{{- if ne $.TargetVersionName "ga" }} + "additional_node_network_configs": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `We specify the additional node networks for this node pool using this list. Each node network corresponds to an additional interface`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the VPC where the additional interface belongs.`, + }, + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the subnetwork where the additional interface belongs.`, + }, + }, + }, + }, + "additional_pod_network_configs": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `We specify the additional pod networks for this node pool using this list. 
Each pod network corresponds to an additional alias IP range for the node`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `Name of the subnetwork where the additional pod network belongs.`, + }, + "secondary_pod_range": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of the secondary range on the subnet which provides IP address for this pod range.`, + }, + "max_pods_per_node": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The maximum number of pods per node which use this pod network.`, + }, + }, + }, + }, +{{- end }} + "pod_cidr_overprovision_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + ForceNew: true, + MaxItems: 1, + Description: `Configuration for node-pool level pod cidr overprovision. If not set, the cluster level setting will be inherited`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "disabled": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, + "network_performance_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Network bandwidth tier configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_egress_bandwidth_tier": { + Type: schema.TypeString, + Required: true, + Description: `Specifies the total network bandwidth tier for the NodePool.`, + }, + }, + }, + }, + }, + }, + }, + +} + +type NodePoolInformation struct { + project string + location string + cluster string +} + +func (nodePoolInformation *NodePoolInformation) fullyQualifiedName(nodeName string) string { + return fmt.Sprintf( + "projects/%s/locations/%s/clusters/%s/nodePools/%s", + nodePoolInformation.project, + nodePoolInformation.location, + nodePoolInformation.cluster, + nodeName, + ) +} + +func (nodePoolInformation *NodePoolInformation) parent() string { + 
return fmt.Sprintf( + "projects/%s/locations/%s/clusters/%s", + nodePoolInformation.project, + nodePoolInformation.location, + nodePoolInformation.cluster, + ) +} + +func (nodePoolInformation *NodePoolInformation) clusterLockKey() string { + return containerClusterMutexKey(nodePoolInformation.project, + nodePoolInformation.location, nodePoolInformation.cluster) +} + +func (nodePoolInformation *NodePoolInformation) nodePoolLockKey(nodePoolName string) string { + return fmt.Sprintf( + "projects/%s/locations/%s/clusters/%s/nodePools/%s", + nodePoolInformation.project, + nodePoolInformation.location, + nodePoolInformation.cluster, + nodePoolName, + ) +} + +func extractNodePoolInformation(d *schema.ResourceData, config *transport_tpg.Config) (*NodePoolInformation, error) { + cluster := d.Get("cluster").(string) + + if fieldValues := clusterIdRegex.FindStringSubmatch(cluster); fieldValues != nil { + log.Printf("[DEBUG] matching parent cluster %s to regex %s", cluster, clusterIdRegex.String()) + return &NodePoolInformation{ + project: fieldValues[1], + location: fieldValues[2], + cluster: fieldValues[3], + }, nil + } + log.Printf("[DEBUG] parent cluster %s does not match regex %s", cluster, clusterIdRegex.String()) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + location, err := tpgresource.GetLocation(d, config) + if err != nil { + return nil, err + } + + return &NodePoolInformation{ + project: project, + location: location, + cluster: cluster, + }, nil +} + +func resourceContainerNodePoolCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return err + } + + nodePool, err := expandNodePool(d, "") + if err != nil { + return err + } + + // Acquire read-lock on cluster. 
+ clusterLockKey := nodePoolInfo.clusterLockKey() + transport_tpg.MutexStore.RLock(clusterLockKey) + defer transport_tpg.MutexStore.RUnlock(clusterLockKey) + + // Acquire write-lock on nodepool. + npLockKey := nodePoolInfo.nodePoolLockKey(nodePool.Name) + transport_tpg.MutexStore.Lock(npLockKey) + defer transport_tpg.MutexStore.Unlock(npLockKey) + + req := &container.CreateNodePoolRequest{ + NodePool: nodePool, + } + + timeout := d.Timeout(schema.TimeoutCreate) + startTime := time.Now() + + // we attempt to prefetch the node pool to make sure it doesn't exist before creation + var id = fmt.Sprintf("projects/%s/locations/%s/clusters/%s/nodePools/%s", nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, nodePool.Name) + name := getNodePoolName(id) + clusterNodePoolsGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)) + if config.UserProjectOverride { + clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + _, err = clusterNodePoolsGetCall.Do() + if err != nil && transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + // Set the ID before we attempt to create if the resource doesn't exist. That + // way, if we receive an error but the resource is created anyway, it will be + // refreshed on the next call to apply. 
+ d.SetId(id) + } else if err == nil { + return fmt.Errorf("resource - %s - already exists", id) + } + + var operation *container.Operation + err = retry.Retry(timeout, func() *retry.RetryError { + clusterNodePoolsCreateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Create(nodePoolInfo.parent(), req) + if config.UserProjectOverride { + clusterNodePoolsCreateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + operation, err = clusterNodePoolsCreateCall.Do() + + if err != nil { + if tpgresource.IsFailedPreconditionError(err) || tpgresource.IsQuotaError(err) { + // We get failed precondition errors if the cluster is updating + // while we try to add the node pool. + // We get quota errors if the number of running concurrent + // operations reaches the quota. + return retry.RetryableError(err) + } + return retry.NonRetryableError(err) + } + return nil + }) + if err != nil { + return fmt.Errorf("error creating NodePool: %s", err) + } + timeout -= time.Since(startTime) + + waitErr := ContainerOperationWait(config, + operation, nodePoolInfo.project, + nodePoolInfo.location, "creating GKE NodePool", userAgent, timeout) + + if waitErr != nil { + // Check if the create operation failed because Terraform was prematurely terminated. If it was we can persist the + // operation id to state so that a subsequent refresh of this resource will wait until the operation has terminated + // before attempting to Read the state of the cluster. This allows a graceful resumption of a Create that was killed + // by the upstream Terraform process exiting early such as a sigterm. 
+ select { + case <-config.Context.Done(): + log.Printf("[DEBUG] Persisting %s so this operation can be resumed \n", operation.Name) + if err := d.Set("operation", operation.Name); err != nil { + return fmt.Errorf("Error setting operation: %s", err) + } + return nil + default: + // leaving default case to ensure this is non blocking + } + // Check if resource was created but apply timed out. + // Common cause for that is GCE_STOCKOUT which will wait for resources and return error after timeout, + // but in fact nodepool will be created so we have to capture that in state. + _, err = clusterNodePoolsGetCall.Do() + if err != nil { + d.SetId("") + return waitErr + } + } + + log.Printf("[INFO] GKE NodePool %s has been created", nodePool.Name) + + if err = resourceContainerNodePoolRead(d, meta); err != nil { + return err + } + + state, err := containerNodePoolAwaitRestingState(config, d.Id(), nodePoolInfo.project, userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + if containerNodePoolRestingStates[state] == ErrorState { + return fmt.Errorf("NodePool %s was created in the error state %q", nodePool.Name, state) + } + + return nil +} + +func resourceContainerNodePoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return err + } + + operation := d.Get("operation").(string) + if operation != "" { + log.Printf("[DEBUG] in progress operation detected at %v, attempting to resume", operation) + op := &container.Operation{ + Name: operation, + } + if err := d.Set("operation", ""); err != nil { + return fmt.Errorf("Error setting operation: %s", err) + } + waitErr := ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "resuming GKE node pool", userAgent, d.Timeout(schema.TimeoutRead)) 
+ if waitErr != nil { + return waitErr + } + } + + name := getNodePoolName(d.Id()) + + clusterNodePoolsGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)) + if config.UserProjectOverride { + clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + nodePool, err := clusterNodePoolsGetCall.Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("NodePool %q from cluster %q", name, nodePoolInfo.cluster)) + } + + npMap, err := flattenNodePool(d, config, nodePool, "") + if err != nil { + return err + } + + for k, v := range npMap { + if err := d.Set(k, v); err != nil { + return fmt.Errorf("Error setting %s: %s", k, err) + } + } + + if err := d.Set("location", nodePoolInfo.location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("project", nodePoolInfo.project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + return nil +} + +func resourceContainerNodePoolUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return err + } + name := getNodePoolName(d.Id()) + + _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), nodePoolInfo.project, userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + d.Partial(true) + if err := nodePoolUpdate(d, meta, nodePoolInfo, "", d.Timeout(schema.TimeoutUpdate)); err != nil { + return err + } + d.Partial(false) + + //Check cluster is in running state + _, err = containerClusterAwaitRestingState(config, nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return 
err + } + _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), nodePoolInfo.project, userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + return resourceContainerNodePoolRead(d, meta) +} + +func resourceContainerNodePoolDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return err + } + + name := getNodePoolName(d.Id()) + + _, err = containerNodePoolAwaitRestingState(config, nodePoolInfo.fullyQualifiedName(name), nodePoolInfo.project, userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + // If the node pool doesn't get created and then we try to delete it, we get an error, + // but I don't think we need an error during delete if it doesn't exist + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + log.Printf("node pool %q not found, doesn't need to be cleaned up", name) + return nil + } else { + return err + } + } + + // Acquire read-lock on cluster. + clusterLockKey := nodePoolInfo.clusterLockKey() + transport_tpg.MutexStore.RLock(clusterLockKey) + defer transport_tpg.MutexStore.RUnlock(clusterLockKey) + + // Acquire write-lock on nodepool. 
+ npLockKey := nodePoolInfo.nodePoolLockKey(name) + transport_tpg.MutexStore.Lock(npLockKey) + defer transport_tpg.MutexStore.Unlock(npLockKey) + + timeout := d.Timeout(schema.TimeoutDelete) + startTime := time.Now() + + var operation *container.Operation + err = retry.Retry(timeout, func() *retry.RetryError { + clusterNodePoolsDeleteCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Delete(nodePoolInfo.fullyQualifiedName(name)) + if config.UserProjectOverride { + clusterNodePoolsDeleteCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + operation, err = clusterNodePoolsDeleteCall.Do() + + if err != nil { + if tpgresource.IsFailedPreconditionError(err) || tpgresource.IsQuotaError(err) { + // We get failed precondition errors if the cluster is updating + // while we try to delete the node pool. + // We get quota errors if there the number of running concurrent + // operations reaches the quota. + return retry.RetryableError(err) + } + return retry.NonRetryableError(err) + } + + return nil + }) + + if err != nil { + return fmt.Errorf("Error deleting NodePool: %s", err) + } + + timeout -= time.Since(startTime) + + // Wait until it's deleted + waitErr := ContainerOperationWait(config, operation, nodePoolInfo.project, nodePoolInfo.location, "deleting GKE NodePool", userAgent, timeout) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] GKE NodePool %s has been deleted", d.Id()) + + d.SetId("") + + return nil +} + +func resourceContainerNodePoolExists(d *schema.ResourceData, meta interface{}) (bool, error) { + config := meta.(*transport_tpg.Config) + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return false, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return false, err + } + + name := getNodePoolName(d.Id()) + clusterNodePoolsGetCall := 
config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(nodePoolInfo.fullyQualifiedName(name)) + if config.UserProjectOverride { + clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + _, err = clusterNodePoolsGetCall.Do() + + if err != nil { + if err = transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Container NodePool %s", name)); err == nil { + return false, nil + } + // There was some other error in reading the resource + return true, err + } + return true, nil +} + +func resourceContainerNodePoolStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + if err := tpgresource.ParseImportId([]string{"projects/(?P[^/]+)/locations/(?P[^/]+)/clusters/(?P[^/]+)/nodePools/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/clusters/{{"{{"}}cluster{{"}}"}}/nodePools/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, err + } + + d.SetId(id) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + nodePoolInfo, err := extractNodePoolInformation(d, config) + if err != nil { + return nil, err + } + + //Check cluster is in running state + _, err = containerClusterAwaitRestingState(config, nodePoolInfo.project, nodePoolInfo.location, nodePoolInfo.cluster, userAgent, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return nil, err + } + + if _, err := containerNodePoolAwaitRestingState(config, d.Id(), project, userAgent, d.Timeout(schema.TimeoutCreate)); err != nil { + return nil, err + } + + return []*schema.ResourceData{d}, nil +} + +func expandNodePool(d 
*schema.ResourceData, prefix string) (*container.NodePool, error) { + var name string + if v, ok := d.GetOk(prefix + "name"); ok { + if _, ok := d.GetOk(prefix + "name_prefix"); ok { + return nil, fmt.Errorf("Cannot specify both name and name_prefix for a node_pool") + } + name = v.(string) + } else if v, ok := d.GetOk(prefix + "name_prefix"); ok { + name = id.PrefixedUniqueId(v.(string)) + } else { + name = id.UniqueId() + } + + nodeCount := 0 + if initialNodeCount, ok := d.GetOk(prefix + "initial_node_count"); ok { + nodeCount = initialNodeCount.(int) + } + if nc, ok := d.GetOk(prefix + "node_count"); ok { + if nodeCount != 0 { + return nil, fmt.Errorf("Cannot set both initial_node_count and node_count on node pool %s", name) + } + nodeCount = nc.(int) + } + + var locations []string + if v, ok := d.GetOk("node_locations"); ok && v.(*schema.Set).Len() > 0 { + locations = tpgresource.ConvertStringSet(v.(*schema.Set)) + } + + np := &container.NodePool{ + Name: name, + InitialNodeCount: int64(nodeCount), + Config: expandNodeConfig(d.Get(prefix + "node_config")), + Locations: locations, + Version: d.Get(prefix + "version").(string), + NetworkConfig: expandNodeNetworkConfig(d.Get(prefix + "network_config")), + } + + if v, ok := d.GetOk(prefix + "autoscaling"); ok { + if autoscaling, ok := v.([]interface{})[0].(map[string]interface{}); ok { + np.Autoscaling = &container.NodePoolAutoscaling{ + Enabled: true, + MinNodeCount: int64(autoscaling["min_node_count"].(int)), + MaxNodeCount: int64(autoscaling["max_node_count"].(int)), + TotalMinNodeCount: int64(autoscaling["total_min_node_count"].(int)), + TotalMaxNodeCount: int64(autoscaling["total_max_node_count"].(int)), + LocationPolicy: autoscaling["location_policy"].(string), + ForceSendFields: []string{"MinNodeCount", "MaxNodeCount", "TotalMinNodeCount", "TotalMaxNodeCount"}, + } + } + } + + if v, ok := d.GetOk(prefix + "placement_policy"); ok { + if v.([]interface{}) != nil && v.([]interface{})[0] != nil { + 
placement_policy := v.([]interface{})[0].(map[string]interface{}) + np.PlacementPolicy = &container.PlacementPolicy{ + Type: placement_policy["type"].(string), + PolicyName: placement_policy["policy_name"].(string), + TpuTopology: placement_policy["tpu_topology"].(string), + } + } + } + + if v, ok := d.GetOk(prefix + "queued_provisioning"); ok { + if v.([]interface{}) != nil && v.([]interface{})[0] != nil { + queued_provisioning := v.([]interface{})[0].(map[string]interface{}) + np.QueuedProvisioning = &container.QueuedProvisioning{ + Enabled: queued_provisioning["enabled"].(bool), + } + } + } + + if v, ok := d.GetOk(prefix + "max_pods_per_node"); ok { + np.MaxPodsConstraint = &container.MaxPodsConstraint{ + MaxPodsPerNode: int64(v.(int)), + } + } + + if v, ok := d.GetOk(prefix + "management"); ok { + managementConfig := v.([]interface{})[0].(map[string]interface{}) + np.Management = &container.NodeManagement{} + + if v, ok := managementConfig["auto_repair"]; ok { + np.Management.AutoRepair = v.(bool) + } + + if v, ok := managementConfig["auto_upgrade"]; ok { + np.Management.AutoUpgrade = v.(bool) + } + } + + if v, ok := d.GetOk(prefix + "upgrade_settings"); ok { + upgradeSettingsConfig := v.([]interface{})[0].(map[string]interface{}) + np.UpgradeSettings = &container.UpgradeSettings{} + + if v, ok := upgradeSettingsConfig["strategy"]; ok { + np.UpgradeSettings.Strategy = v.(string) + } + + if d.HasChange(prefix + "upgrade_settings.0.max_surge") { + if np.UpgradeSettings.Strategy != "SURGE" { + return nil, fmt.Errorf("Surge upgrade settings may not be changed when surge strategy is not enabled") + } + if v, ok := upgradeSettingsConfig["max_surge"]; ok { + np.UpgradeSettings.MaxSurge = int64(v.(int)) + } + } + + if d.HasChange(prefix + "upgrade_settings.0.max_unavailable") { + if np.UpgradeSettings.Strategy != "SURGE" { + return nil, fmt.Errorf("Surge upgrade settings may not be changed when surge strategy is not enabled") + } + if v, ok := 
upgradeSettingsConfig["max_unavailable"]; ok { + np.UpgradeSettings.MaxUnavailable = int64(v.(int)) + } + } + + if v, ok := upgradeSettingsConfig["blue_green_settings"]; ok && len(v.([]interface{})) > 0 { + blueGreenSettingsConfig := v.([]interface{})[0].(map[string]interface{}) + np.UpgradeSettings.BlueGreenSettings = &container.BlueGreenSettings{} + + if np.UpgradeSettings.Strategy != "BLUE_GREEN" { + return nil, fmt.Errorf("Blue-green upgrade settings may not be changed when blue-green strategy is not enabled") + } + + if v, ok := blueGreenSettingsConfig["node_pool_soak_duration"]; ok { + np.UpgradeSettings.BlueGreenSettings.NodePoolSoakDuration = v.(string) + } + + if v, ok := blueGreenSettingsConfig["standard_rollout_policy"]; ok && len(v.([]interface{})) > 0 { + standardRolloutPolicyConfig := v.([]interface{})[0].(map[string]interface{}) + standardRolloutPolicy := &container.StandardRolloutPolicy{} + + if v, ok := standardRolloutPolicyConfig["batch_soak_duration"]; ok { + standardRolloutPolicy.BatchSoakDuration = v.(string) + } + if v, ok := standardRolloutPolicyConfig["batch_node_count"]; ok { + standardRolloutPolicy.BatchNodeCount = int64(v.(int)) + } + if v, ok := standardRolloutPolicyConfig["batch_percentage"]; ok { + standardRolloutPolicy.BatchPercentage = v.(float64) + } + + np.UpgradeSettings.BlueGreenSettings.StandardRolloutPolicy = standardRolloutPolicy + } + } + } + + return np, nil +} + +func flattenNodePoolStandardRolloutPolicy(rp *container.StandardRolloutPolicy) []map[string]interface{} { + if rp == nil { + return nil + } + + return []map[string]interface{}{ + { + "batch_node_count": rp.BatchNodeCount, + "batch_percentage": rp.BatchPercentage, + "batch_soak_duration": rp.BatchSoakDuration, + }, + } +} + +func flattenNodePoolBlueGreenSettings(bg *container.BlueGreenSettings) []map[string]interface{} { + if bg == nil { + return nil + } + return []map[string]interface{}{ + { + "node_pool_soak_duration": bg.NodePoolSoakDuration, + 
"standard_rollout_policy": flattenNodePoolStandardRolloutPolicy(bg.StandardRolloutPolicy), + }, + } +} + +func flattenNodePoolUpgradeSettings(us *container.UpgradeSettings) []map[string]interface{} { + if us == nil { + return nil + } + + upgradeSettings := make(map[string]interface{}) + + upgradeSettings["blue_green_settings"] = flattenNodePoolBlueGreenSettings(us.BlueGreenSettings) + upgradeSettings["max_surge"] = us.MaxSurge + upgradeSettings["max_unavailable"] = us.MaxUnavailable + + upgradeSettings["strategy"] = us.Strategy + return []map[string]interface{}{upgradeSettings} +} + +func flattenNodePool(d *schema.ResourceData, config *transport_tpg.Config, np *container.NodePool, prefix string) (map[string]interface{}, error) { + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + // Node pools don't expose the current node count in their API, so read the + // instance groups instead. They should all have the same size, but in case a resize + // failed or something else strange happened, we'll just use the average size. 
+ size := 0 + igmUrls := []string{} + managedIgmUrls := []string{} + for _, url := range np.InstanceGroupUrls { + // retrieve instance group manager (InstanceGroupUrls are actually URLs for InstanceGroupManagers) + matches := instanceGroupManagerURL.FindStringSubmatch(url) + if len(matches) < 4 { + return nil, fmt.Errorf("Error reading instance group manage URL '%q'", url) + } + igm, err := config.NewComputeClient(userAgent).InstanceGroupManagers.Get(matches[1], matches[2], matches[3]).Do() + if transport_tpg.IsGoogleApiErrorWithCode(err, 404) { + // The IGM URL is stale; don't include it + continue + } + if err != nil { + return nil, fmt.Errorf("Error reading instance group manager returned as an instance group URL: %q", err) + } + size += int(igm.TargetSize) + igmUrls = append(igmUrls, url) + managedIgmUrls = append(managedIgmUrls, igm.InstanceGroup) + } + nodeCount := 0 + if len(igmUrls) > 0 { + nodeCount = size / len(igmUrls) + } + nodePool := map[string]interface{}{ + "name": np.Name, + "name_prefix": d.Get(prefix + "name_prefix"), + "initial_node_count": np.InitialNodeCount, + "node_locations": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(np.Locations)), + "node_count": nodeCount, + "node_config": flattenNodeConfig(np.Config, d.Get(prefix + "node_config")), + "instance_group_urls": igmUrls, + "managed_instance_group_urls": managedIgmUrls, + "version": np.Version, + "network_config": flattenNodeNetworkConfig(np.NetworkConfig, d, prefix), + } + + if np.Autoscaling != nil { + if np.Autoscaling.Enabled { + nodePool["autoscaling"] = []map[string]interface{}{ + { + "min_node_count": np.Autoscaling.MinNodeCount, + "max_node_count": np.Autoscaling.MaxNodeCount, + "total_min_node_count": np.Autoscaling.TotalMinNodeCount, + "total_max_node_count": np.Autoscaling.TotalMaxNodeCount, + "location_policy": np.Autoscaling.LocationPolicy, + }, + } + } else { + nodePool["autoscaling"] = []map[string]interface{}{} + } + } + + if np.PlacementPolicy
!= nil { + nodePool["placement_policy"] = []map[string]interface{}{ + { + "type": np.PlacementPolicy.Type, + "policy_name": np.PlacementPolicy.PolicyName, + "tpu_topology": np.PlacementPolicy.TpuTopology, + }, + } + } + + if np.QueuedProvisioning != nil { + nodePool["queued_provisioning"] = []map[string]interface{}{ + { + "enabled": np.QueuedProvisioning.Enabled, + }, + } + } + + if np.MaxPodsConstraint != nil { + nodePool["max_pods_per_node"] = np.MaxPodsConstraint.MaxPodsPerNode + } + + nodePool["management"] = []map[string]interface{}{ + { + "auto_repair": np.Management.AutoRepair, + "auto_upgrade": np.Management.AutoUpgrade, + }, + } + + if np.UpgradeSettings != nil { + nodePool["upgrade_settings"] = flattenNodePoolUpgradeSettings(np.UpgradeSettings) + } else { + delete(nodePool, "upgrade_settings") + } + + return nodePool, nil +} + +func flattenNodeNetworkConfig(c *container.NodeNetworkConfig, d *schema.ResourceData, prefix string) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "create_pod_range": d.Get(prefix + "network_config.0.create_pod_range"), // API doesn't return this value so we set the old one. 
Field is ForceNew + Required + "pod_ipv4_cidr_block": c.PodIpv4CidrBlock, + "pod_range": c.PodRange, + "enable_private_nodes": c.EnablePrivateNodes, + "pod_cidr_overprovision_config": flattenPodCidrOverprovisionConfig(c.PodCidrOverprovisionConfig), + "network_performance_config": flattenNodeNetworkPerformanceConfig(c.NetworkPerformanceConfig), +{{- if ne $.TargetVersionName "ga" }} + "additional_node_network_configs": flattenAdditionalNodeNetworkConfig(c.AdditionalNodeNetworkConfigs), + "additional_pod_network_configs": flattenAdditionalPodNetworkConfig(c.AdditionalPodNetworkConfigs), +{{- end }} + }) + } + return result +} + +func flattenNodeNetworkPerformanceConfig(c *container.NetworkPerformanceConfig) []map[string]interface{} { + result := []map[string]interface{}{} + if c != nil { + result = append(result, map[string]interface{}{ + "total_egress_bandwidth_tier": c.TotalEgressBandwidthTier, + }) + } + return result +} + +{{ if ne $.TargetVersionName `ga` -}} +func flattenAdditionalNodeNetworkConfig(c []*container.AdditionalNodeNetworkConfig) []map[string]interface{} { + if c == nil { + return nil + } + + result := []map[string]interface{}{} + for _, nodeNetworkConfig := range c { + result = append(result, map[string]interface{}{ + "network": nodeNetworkConfig.Network, + "subnetwork": nodeNetworkConfig.Subnetwork, + }) + } + return result +} + +func flattenAdditionalPodNetworkConfig(c []*container.AdditionalPodNetworkConfig) []map[string]interface{} { + if c == nil { + return nil + } + + result := []map[string]interface{}{} + for _, podNetworkConfig := range c { + result = append(result, map[string]interface{}{ + "subnetwork": podNetworkConfig.Subnetwork, + "secondary_pod_range": podNetworkConfig.SecondaryPodRange, + "max_pods_per_node": podNetworkConfig.MaxPodsPerNode.MaxPodsPerNode, + }) + } + return result +} +{{- end }} + +func expandNodeNetworkConfig(v interface{}) *container.NodeNetworkConfig { + networkNodeConfigs := v.([]interface{}) + + nnc := 
&container.NodeNetworkConfig{} + + if len(networkNodeConfigs) == 0 { + return nnc + } + + networkNodeConfig := networkNodeConfigs[0].(map[string]interface{}) + + if v, ok := networkNodeConfig["create_pod_range"]; ok { + nnc.CreatePodRange = v.(bool) + } + + if v, ok := networkNodeConfig["pod_range"]; ok { + nnc.PodRange = v.(string) + } + + if v, ok := networkNodeConfig["pod_ipv4_cidr_block"]; ok { + nnc.PodIpv4CidrBlock = v.(string) + } + + if v, ok := networkNodeConfig["enable_private_nodes"]; ok { + nnc.EnablePrivateNodes = v.(bool) + nnc.ForceSendFields = []string{"EnablePrivateNodes"} + } + +{{ if ne $.TargetVersionName `ga` -}} + if v, ok := networkNodeConfig["additional_node_network_configs"]; ok && len(v.([]interface{})) > 0 { + node_network_configs := v.([]interface{}) + nodeNetworkConfigs := make([]*container.AdditionalNodeNetworkConfig, 0, len(node_network_configs)) + for _, raw := range node_network_configs { + data := raw.(map[string]interface{}) + networkConfig := &container.AdditionalNodeNetworkConfig{ + Network: data["network"].(string), + Subnetwork: data["subnetwork"].(string), + } + nodeNetworkConfigs = append(nodeNetworkConfigs, networkConfig) + } + nnc.AdditionalNodeNetworkConfigs = nodeNetworkConfigs + } + + if v, ok := networkNodeConfig["additional_pod_network_configs"]; ok && len(v.([]interface{})) > 0 { + pod_network_configs := v.([]interface{}) + podNetworkConfigs := make([]*container.AdditionalPodNetworkConfig, 0, len(pod_network_configs)) + for _, raw := range pod_network_configs { + data := raw.(map[string]interface{}) + podnetworkConfig := &container.AdditionalPodNetworkConfig{ + Subnetwork: data["subnetwork"].(string), + SecondaryPodRange: data["secondary_pod_range"].(string), + MaxPodsPerNode: &container.MaxPodsConstraint{ + MaxPodsPerNode: int64(data["max_pods_per_node"].(int)), + }, + } + podNetworkConfigs = append(podNetworkConfigs, podnetworkConfig) + } + nnc.AdditionalPodNetworkConfigs = podNetworkConfigs + } +{{- end }} + + 
nnc.PodCidrOverprovisionConfig = expandPodCidrOverprovisionConfig(networkNodeConfig["pod_cidr_overprovision_config"]) + + if v, ok := networkNodeConfig["network_performance_config"]; ok && len(v.([]interface{})) > 0 { + nnc.NetworkPerformanceConfig = &container.NetworkPerformanceConfig{} + network_performance_config := v.([]interface{})[0].(map[string]interface{}) + if total_egress_bandwidth_tier, ok := network_performance_config["total_egress_bandwidth_tier"]; ok { + nnc.NetworkPerformanceConfig.TotalEgressBandwidthTier = total_egress_bandwidth_tier.(string) + } + } + + return nnc +} + + +func nodePoolUpdate(d *schema.ResourceData, meta interface{}, nodePoolInfo *NodePoolInformation, prefix string, timeout time.Duration) error { + config := meta.(*transport_tpg.Config) + name := d.Get(prefix + "name").(string) + + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Acquire read-lock on cluster. + clusterLockKey := nodePoolInfo.clusterLockKey() + transport_tpg.MutexStore.RLock(clusterLockKey) + defer transport_tpg.MutexStore.RUnlock(clusterLockKey) + + // Nodepool write-lock will be acquired when update function is called. 
+ npLockKey := nodePoolInfo.nodePoolLockKey(name) + + if d.HasChange(prefix + "autoscaling") { + update := &container.ClusterUpdate{ + DesiredNodePoolId: name, + } + if v, ok := d.GetOk(prefix + "autoscaling"); ok { + autoscaling := v.([]interface{})[0].(map[string]interface{}) + update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{ + Enabled: true, + MinNodeCount: int64(autoscaling["min_node_count"].(int)), + MaxNodeCount: int64(autoscaling["max_node_count"].(int)), + TotalMinNodeCount: int64(autoscaling["total_min_node_count"].(int)), + TotalMaxNodeCount: int64(autoscaling["total_max_node_count"].(int)), + LocationPolicy: autoscaling["location_policy"].(string), + ForceSendFields: []string{"MinNodeCount", "TotalMinNodeCount"}, + } + } else { + update.DesiredNodePoolAutoscaling = &container.NodePoolAutoscaling{ + Enabled: false, + } + } + + req := &container.UpdateClusterRequest{ + Update: update, + } + + updateF := func() error { + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated autoscaling in Node Pool %s", d.Id()) + } + + if d.HasChange(prefix + "node_config") { + + if d.HasChange(prefix + "node_config.0.logging_variant") { + if v, ok := d.GetOk(prefix + "node_config.0.logging_variant"); ok { + loggingVariant := v.(string) + req := &container.UpdateNodePoolRequest{ + Name: name, + LoggingConfig: &container.NodePoolLoggingConfig{ + VariantConfig: &container.LoggingVariantConfig{ + 
Variant: loggingVariant, + }, + }, + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool logging_variant", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated logging_variant for node pool %s", name) + } + } + + if d.HasChange("node_config.0.disk_size_gb") || + d.HasChange("node_config.0.disk_type") || + d.HasChange("node_config.0.machine_type") { + req := &container.UpdateNodePoolRequest{ + Name: name, + DiskSizeGb: int64(d.Get("node_config.0.disk_size_gb").(int)), + DiskType: d.Get("node_config.0.disk_type").(string), + MachineType: d.Get("node_config.0.machine_type").(string), + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool disk_size_gb/disk_type/machine_type", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated disk disk_size_gb/disk_type/machine_type for Node Pool %s", 
d.Id()) + } + + if d.HasChange(prefix + "node_config.0.taint") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + if v, ok := d.GetOk(prefix + "node_config.0.taint"); ok { + taintsList := v.([]interface{}) + taints := make([]*container.NodeTaint, 0, len(taintsList)) + for _, v := range taintsList { + if v != nil { + data := v.(map[string]interface{}) + taint := &container.NodeTaint{ + Key: data["key"].(string), + Value: data["value"].(string), + Effect: data["effect"].(string), + } + taints = append(taints, taint) + } + } + ntaints := &container.NodeTaints{ + Taints: taints, + } + req.Taints = ntaints + } + + if req.Taints == nil { + taints := make([]*container.NodeTaint, 0, 0) + ntaints := &container.NodeTaints{ + Taints: taints, + } + req.Taints = ntaints + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool taints", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated taints for Node Pool %s", d.Id()) + } + + if d.HasChange(prefix + "node_config.0.tags") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + if v, ok := d.GetOk(prefix + "node_config.0.tags"); ok { + tagsList := v.([]interface{}) + tags := []string{} + for _, v := range tagsList { + if v != nil { + tags = append(tags, v.(string)) + } + } + ntags := &container.NetworkTags{ + Tags: tags, + } + req.Tags = ntags + } + + // sets tags to the empty list when user removes a previously 
defined list of tags entirely + // aka the node pool goes from having tags to no longer having any + if req.Tags == nil { + tags := []string{} + ntags := &container.NetworkTags{ + Tags: tags, + } + req.Tags = ntags + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool tags", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated tags for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.resource_manager_tags") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + if v, ok := d.GetOk(prefix + "node_config.0.resource_manager_tags"); ok { + req.ResourceManagerTags = expandResourceManagerTags(v) + } + + // sets resource manager tags to the empty list when user removes a previously defined list of tags entirely + // aka the node pool goes from having tags to no longer having any + if req.ResourceManagerTags == nil { + tags := make(map[string]string) + rmTags := &container.ResourceManagerTags{ + Tags: tags, + } + req.ResourceManagerTags = rmTags + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until
it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool resource manager tags", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated resource manager tags for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.resource_labels") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + + if v, ok := d.GetOk(prefix + "node_config.0.resource_labels"); ok { + resourceLabels := v.(map[string]interface{}) + req.ResourceLabels = &container.ResourceLabels{ + Labels: tpgresource.ConvertStringMap(resourceLabels), + } + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool resource labels", userAgent, + timeout) + } + + // Call update serially. 
+ if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated resource labels for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.labels") { + req := &container.UpdateNodePoolRequest{ + Name: name, + } + + if v, ok := d.GetOk(prefix + "node_config.0.labels"); ok { + labels := v.(map[string]interface{}) + req.Labels = &container.NodeLabels{ + Labels: tpgresource.ConvertStringMap(labels), + } + } + + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name), req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool labels", userAgent, + timeout) + } + + // Call update serially. 
+ if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated labels for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.image_type") { + req := &container.UpdateClusterRequest{ + Update: &container.ClusterUpdate{ + DesiredNodePoolId: name, + DesiredImageType: d.Get(prefix + "node_config.0.image_type").(string), + }, + } + + updateF := func() error { + clusterUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.Update(nodePoolInfo.parent(), req) + if config.UserProjectOverride { + clusterUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated image type in Node Pool %s", d.Id()) + } + + if d.HasChange(prefix + "node_config.0.workload_metadata_config") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + WorkloadMetadataConfig: expandWorkloadMetadataConfig( + d.Get(prefix + "node_config.0.workload_metadata_config")), + } + if req.WorkloadMetadataConfig == nil { + req.ForceSendFields = []string{"WorkloadMetadataConfig"} + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE 
node pool workload_metadata_config", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated workload_metadata_config for node pool %s", name) + } + + if d.HasChange(prefix + "node_config.0.kubelet_config") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + KubeletConfig: expandKubeletConfig( + d.Get(prefix + "node_config.0.kubelet_config")), + } + if req.KubeletConfig == nil { + req.ForceSendFields = []string{"KubeletConfig"} + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool kubelet_config", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated kubelet_config for node pool %s", name) + } + if d.HasChange(prefix + "node_config.0.linux_node_config") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + LinuxNodeConfig: expandLinuxNodeConfig( + d.Get(prefix + "node_config.0.linux_node_config")), + } + if req.LinuxNodeConfig == nil { + req.ForceSendFields = []string{"LinuxNodeConfig"} + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err 
!= nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool linux_node_config", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated linux_node_config for node pool %s", name) + } + if d.HasChange(prefix + "node_config.0.fast_socket") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + FastSocket: &container.FastSocket{}, + } + if v, ok := d.GetOk(prefix + "node_config.0.fast_socket"); ok { + fastSocket := v.([]interface{})[0].(map[string]interface{}) + req.FastSocket = &container.FastSocket{ + Enabled: fastSocket["enabled"].(bool), + } + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool fast_socket", userAgent, + timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated fast_socket for node pool %s", name) + } + } + + if d.HasChange(prefix + "node_count") { + newSize := int64(d.Get(prefix + "node_count").(int)) + req := &container.SetNodePoolSizeRequest{ + NodeCount: newSize, + } + updateF := func() error { + clusterNodePoolsSetSizeCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.SetSize(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + 
clusterNodePoolsSetSizeCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsSetSizeCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool size", userAgent, + timeout) + } + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] GKE node pool %s size has been updated to %d", name, newSize) + } + + if d.HasChange(prefix + "management") { + management := &container.NodeManagement{} + if v, ok := d.GetOk(prefix + "management"); ok { + managementConfig := v.([]interface{})[0].(map[string]interface{}) + management.AutoRepair = managementConfig["auto_repair"].(bool) + management.AutoUpgrade = managementConfig["auto_upgrade"].(bool) + management.ForceSendFields = []string{"AutoRepair", "AutoUpgrade"} + } + req := &container.SetNodePoolManagementRequest{ + Management: management, + } + + updateF := func() error { + clusterNodePoolsSetManagementCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.SetManagement(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + clusterNodePoolsSetManagementCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsSetManagementCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool management", userAgent, timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated management in Node Pool %s", name) + } + + if d.HasChange(prefix + "version") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + NodeVersion: d.Get(prefix + "version").(string), + } + updateF := func() 
error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, "updating GKE node pool version", userAgent, timeout) + } + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated version in Node Pool %s", name) + } + + if d.HasChange(prefix + "node_locations") { + req := &container.UpdateNodePoolRequest{ + Locations: tpgresource.ConvertStringSet(d.Get(prefix + "node_locations").(*schema.Set)), + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool node locations", userAgent, timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated node locations in Node Pool %s", name) + } + + if d.HasChange(prefix + "upgrade_settings") { + upgradeSettings := &container.UpgradeSettings{} + if v, ok := d.GetOk(prefix + "upgrade_settings"); ok { + upgradeSettingsConfig := v.([]interface{})[0].(map[string]interface{}) + upgradeSettings.Strategy = upgradeSettingsConfig["strategy"].(string) + + if d.HasChange(prefix + 
"upgrade_settings.0.max_surge") { + if upgradeSettings.Strategy != "SURGE" { + return fmt.Errorf("Surge upgrade settings may not be changed when surge strategy is not enabled") + } + if v, ok := upgradeSettingsConfig["max_surge"]; ok { + upgradeSettings.MaxSurge = int64(v.(int)) + } + // max_unavailable not be preserved if only max_surge is updated + if v, ok := upgradeSettingsConfig["max_unavailable"]; ok { + upgradeSettings.MaxUnavailable = int64(v.(int)) + } + } + + if d.HasChange(prefix + "upgrade_settings.0.max_unavailable") { + if upgradeSettings.Strategy != "SURGE" { + return fmt.Errorf("Surge upgrade settings may not be changed when surge strategy is not enabled") + } + if v, ok := upgradeSettingsConfig["max_unavailable"]; ok { + upgradeSettings.MaxUnavailable = int64(v.(int)) + } + // max_surge not be preserved if only max_unavailable is updated + if v, ok := upgradeSettingsConfig["max_surge"]; ok { + upgradeSettings.MaxSurge = int64(v.(int)) + } + } + + if d.HasChange(prefix + "upgrade_settings.0.blue_green_settings") { + if upgradeSettings.Strategy != "BLUE_GREEN" { + return fmt.Errorf("Blue-green upgrade settings may not be changed when blue-green strategy is not enabled") + } + + blueGreenSettings := &container.BlueGreenSettings{} + blueGreenSettingsConfig := upgradeSettingsConfig["blue_green_settings"].([]interface{})[0].(map[string]interface{}) + blueGreenSettings.NodePoolSoakDuration = blueGreenSettingsConfig["node_pool_soak_duration"].(string) + + if v, ok := blueGreenSettingsConfig["standard_rollout_policy"]; ok && len(v.([]interface{})) > 0 { + standardRolloutPolicy := &container.StandardRolloutPolicy{} + if standardRolloutPolicyConfig, ok := v.([]interface{})[0].(map[string]interface{}); ok { + standardRolloutPolicy.BatchSoakDuration = standardRolloutPolicyConfig["batch_soak_duration"].(string) + if v, ok := standardRolloutPolicyConfig["batch_node_count"]; ok { + standardRolloutPolicy.BatchNodeCount = int64(v.(int)) + } + if v, ok := 
standardRolloutPolicyConfig["batch_percentage"]; ok { + standardRolloutPolicy.BatchPercentage = v.(float64) + } + } + blueGreenSettings.StandardRolloutPolicy = standardRolloutPolicy + } + upgradeSettings.BlueGreenSettings = blueGreenSettings + } + } + req := &container.UpdateNodePoolRequest{ + UpgradeSettings: upgradeSettings, + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, nodePoolInfo.project, nodePoolInfo.location, "updating GKE node pool upgrade settings", userAgent, timeout) + } + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + log.Printf("[INFO] Updated upgrade settings in Node Pool %s", name) + } + + if d.HasChange(prefix + "network_config") { + if d.HasChange(prefix + "network_config.0.enable_private_nodes") || d.HasChange(prefix + "network_config.0.network_performance_config") { + req := &container.UpdateNodePoolRequest{ + NodePoolId: name, + NodeNetworkConfig: expandNodeNetworkConfig(d.Get(prefix + "network_config")), + } + updateF := func() error { + clusterNodePoolsUpdateCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Update(nodePoolInfo.fullyQualifiedName(name),req) + if config.UserProjectOverride { + clusterNodePoolsUpdateCall.Header().Add("X-Goog-User-Project", nodePoolInfo.project) + } + op, err := clusterNodePoolsUpdateCall.Do() + + if err != nil { + return err + } + + // Wait until it's updated + return ContainerOperationWait(config, op, + nodePoolInfo.project, + nodePoolInfo.location, + "updating GKE node pool network_config", userAgent, + 
timeout) + } + + if err := retryWhileIncompatibleOperation(timeout, npLockKey, updateF); err != nil { + return err + } + + log.Printf("[INFO] Updated network_config for node pool %s", name) + } + } + + return nil +} + +func getNodePoolName(id string) string { + // name can be specified with name, name_prefix, or neither, so read it from the id. + splits := strings.Split(id, "/") + return splits[len(splits)-1] +} + +var containerNodePoolRestingStates = RestingStates{ + "RUNNING": ReadyState, + "RUNNING_WITH_ERROR": ErrorState, + "ERROR": ErrorState, +} + +// takes in a config object, full node pool name, project name and the current CRUD action timeout +// returns a state with no error if the state is a resting state, and the last state with an error otherwise +func containerNodePoolAwaitRestingState(config *transport_tpg.Config, name, project, userAgent string, timeout time.Duration) (state string, err error) { + err = retry.Retry(timeout, func() *retry.RetryError { + clusterNodePoolsGetCall := config.NewContainerClient(userAgent).Projects.Locations.Clusters.NodePools.Get(name) + if config.UserProjectOverride { + clusterNodePoolsGetCall.Header().Add("X-Goog-User-Project", project) + } + nodePool, gErr := clusterNodePoolsGetCall.Do() + if gErr != nil { + return retry.NonRetryableError(gErr) + } + + state = nodePool.Status + switch stateType := containerNodePoolRestingStates[state]; stateType { + case ReadyState: + log.Printf("[DEBUG] NodePool %q has status %q with message %q.", name, state, nodePool.StatusMessage) + return nil + case ErrorState: + log.Printf("[DEBUG] NodePool %q has error state %q with message %q.", name, state, nodePool.StatusMessage) + return nil + default: + return retry.RetryableError(fmt.Errorf("NodePool %q has state %q with message %q", name, state, nodePool.StatusMessage)) + } + }) + + return state, err +} + +// Retries an operation while the canonical error code is FAILED_PRECONDTION +// or RESOURCE_EXHAUSTED which indicates there is an 
incompatible operation +// already running on the cluster or there are the number of allowed +// concurrent operations running on the cluster. These errors can be safely +// retried until the incompatible operation completes, and the newly +// requested operation can begin. +func retryWhileIncompatibleOperation(timeout time.Duration, lockKey string, f func() error) error { + return retry.Retry(timeout, func() *retry.RetryError { + if err := transport_tpg.LockedCall(lockKey, f); err != nil { + if tpgresource.IsFailedPreconditionError(err) || tpgresource.IsQuotaError(err) { + return retry.RetryableError(err) + } + return retry.NonRetryableError(err) + } + return nil + }) +} diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl new file mode 100644 index 000000000000..352156d401a7 --- /dev/null +++ b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl @@ -0,0 +1,4874 @@ +package container_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccContainerNodePool_basic(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_resourceManagerTags(t *testing.T) { + t.Parallel() + pid := envvar.GetTestProjectFromEnv() + + randomSuffix := acctest.RandString(t, 10) + clusterName := fmt.Sprintf("tf-test-cluster-%s", randomSuffix) + + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_resourceManagerTags(pid, clusterName, networkName, subnetworkName, randomSuffix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_node_pool.primary_nodes", "node_config.0.resource_manager_tags.%"), + ), + }, + { + ResourceName: "google_container_node_pool.primary_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "cluster"}, + }, + { + Config: testAccContainerNodePool_resourceManagerTagsUpdate1(pid, clusterName, networkName, subnetworkName, randomSuffix), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_container_node_pool.primary_nodes", "node_config.0.resource_manager_tags.%"), + ), + }, + { + ResourceName: "google_container_node_pool.primary_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "cluster"}, + }, + { + Config: testAccContainerNodePool_resourceManagerTagsUpdate2(pid, clusterName, 
networkName, subnetworkName, randomSuffix), + }, + { + ResourceName: "google_container_node_pool.primary_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version", "cluster"}, + }, + }, + }) +} + +func TestAccContainerNodePool_basicWithClusterId(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_basicWithClusterId(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster"}, + }, + }, + }) +} + +func TestAccContainerNodePool_nodeLocations(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_nodeLocations(cluster, np, network), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_maxPodsPerNode(t *testing.T) { + t.Parallel() + + cluster := 
fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_maxPodsPerNode(cluster, np, network), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_namePrefix(t *testing.T) { + // Randomness + acctest.SkipIfVcr(t) + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_namePrefix(cluster, "tf-np-", networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name_prefix"}, + }, + }, + }) +} + +func TestAccContainerNodePool_noName(t *testing.T) { + // Randomness + acctest.SkipIfVcr(t) + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_noName(cluster, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withLoggingVariantUpdates(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "DEFAULT", networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_logging_variant", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "MAX_THROUGHPUT", networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_logging_variant", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withLoggingVariant(cluster, nodePool, "DEFAULT", networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_logging_variant", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withNodeConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := 
acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withNodeConfig(cluster, nodePool, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np_with_node_config", + ImportState: true, + ImportStateVerify: true, + // autoscaling.# = 0 is equivalent to no autoscaling at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint"}, + }, + { + Config: testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np_with_node_config", + ImportState: true, + ImportStateVerify: true, + // autoscaling.# = 0 is equivalent to no autoscaling at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint"}, + }, + }, + }) +} + +func TestAccContainerNodePool_withTaintsUpdate(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_basic(cluster, nodePool, networkName, subnetworkName), + }, + { + ResourceName: 
"google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withTaintsUpdate(cluster, nodePool, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + // autoscaling.# = 0 is equivalent to no autoscaling at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint"}, + }, + }, + }) +} + +func TestAccContainerNodePool_withMachineAndDiskUpdate(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_basic(cluster, nodePool, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withMachineAndDiskUpdate(cluster, nodePool, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + // autoscaling.# = 0 is equivalent to no autoscaling at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"autoscaling.#", "node_config.0.taint"}, + }, + }, + }) +} + +func TestAccContainerNodePool_withReservationAffinity(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName 
:= acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withReservationAffinity(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.with_reservation_affinity", + "node_config.0.reservation_affinity.#", "1"), + resource.TestCheckResourceAttr("google_container_node_pool.with_reservation_affinity", + "node_config.0.reservation_affinity.0.consume_reservation_type", "ANY_RESERVATION"), + ), + }, + { + ResourceName: "google_container_node_pool.with_reservation_affinity", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withReservationAffinitySpecific(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + reservation := fmt.Sprintf("tf-test-reservation-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withReservationAffinitySpecific(cluster, reservation, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.with_reservation_affinity", + 
"node_config.0.reservation_affinity.#", "1"), + resource.TestCheckResourceAttr("google_container_node_pool.with_reservation_affinity", + "node_config.0.reservation_affinity.0.consume_reservation_type", "SPECIFIC_RESERVATION"), + resource.TestCheckResourceAttr("google_container_node_pool.with_reservation_affinity", + "node_config.0.reservation_affinity.0.key", "compute.googleapis.com/reservation-name"), + resource.TestCheckResourceAttr("google_container_node_pool.with_reservation_affinity", + "node_config.0.reservation_affinity.0.values.#", "1"), + resource.TestCheckResourceAttr("google_container_node_pool.with_reservation_affinity", + "node_config.0.reservation_affinity.0.values.0", reservation), + ), + }, + { + ResourceName: "google_container_node_pool.with_reservation_affinity", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withWorkloadIdentityConfig(t *testing.T) { + t.Parallel() + + pid := envvar.GetTestProjectFromEnv() + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withWorkloadMetadataConfig(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.with_workload_metadata_config", + "node_config.0.workload_metadata_config.0.mode", "GCE_METADATA"), + ), + }, + { + ResourceName: "google_container_node_pool.with_workload_metadata_config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: 
testAccContainerNodePool_withWorkloadMetadataConfig_gkeMetadata(pid, cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.with_workload_metadata_config", + "node_config.0.workload_metadata_config.0.mode", "GKE_METADATA"), + ), + }, + { + ResourceName: "google_container_node_pool.with_workload_metadata_config", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func TestAccContainerNodePool_withSandboxConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withSandboxConfig(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.with_sandbox_config", + "node_config.0.sandbox_config.0.sandbox_type", "gvisor"), + ), + }, + { + ResourceName: "google_container_node_pool.with_sandbox_config", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccContainerNodePool_withKubeletConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "static", "100ms", networkName, subnetworkName, true, 2048), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", + "node_config.0.kubelet_config.0.cpu_cfs_quota", "true"), + resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", + "node_config.0.kubelet_config.0.pod_pids_limit", "2048"), + ), + }, + { + ResourceName: "google_container_node_pool.with_kubelet_config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withKubeletConfig(cluster, np, "", "", networkName, subnetworkName, false, 1024), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.with_kubelet_config", + "node_config.0.kubelet_config.0.cpu_cfs_quota", "false"), + ), + }, + { + ResourceName: "google_container_node_pool.with_kubelet_config", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withInvalidKubeletCpuManagerPolicy(t *testing.T) { + t.Parallel() + // Unit test, no interactions + acctest.SkipIfVcr(t) + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerNodePool_withKubeletConfig(cluster, np, "dontexist", "100us", networkName, subnetworkName, true, 1024), + ExpectError: regexp.MustCompile(`.*to be one of \["?static"? "?none"? "?"?\].*`), + }, + }, + }) +} + +func TestAccContainerNodePool_withLinuxNodeConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + // Create a node pool with empty `linux_node_config.sysctls`. + { + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "", networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_linux_node_config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 100000", networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_linux_node_config", + ImportState: true, + ImportStateVerify: true, + }, + // Perform an update. 
+ { + Config: testAccContainerNodePool_withLinuxNodeConfig(cluster, np, "1000 20000 200000", networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_linux_node_config", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withCgroupMode(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withCgroupMode(cluster, np, "CGROUP_MODE_V2", networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + // Perform an update. 
+ { + Config: testAccContainerNodePool_withCgroupMode(cluster, np, "CGROUP_MODE_UNSPECIFIED", networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withNetworkConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withNetworkConfig(cluster, np, network, "TIER_1"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_node_pool.with_pco_disabled", "network_config.0.pod_cidr_overprovision_config.0.disabled", "true"), + resource.TestCheckResourceAttr("google_container_node_pool.with_tier1_net", "network_config.0.network_performance_config.#", "1"), + resource.TestCheckResourceAttr("google_container_node_pool.with_tier1_net", "network_config.0.network_performance_config.0.total_egress_bandwidth_tier", "TIER_1"), + ), + }, + { + ResourceName: "google_container_node_pool.with_manual_pod_cidr", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_config.0.create_pod_range"}, + }, + { + ResourceName: "google_container_node_pool.with_auto_pod_cidr", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_config.0.create_pod_range"}, + }, + // edit the updateable network config + { + Config: testAccContainerNodePool_withNetworkConfig(cluster, np, network, "TIER_UNSPECIFIED"), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr("google_container_node_pool.with_tier1_net", "network_config.0.network_performance_config.#", "1"), + resource.TestCheckResourceAttr("google_container_node_pool.with_tier1_net", "network_config.0.network_performance_config.0.total_egress_bandwidth_tier", "TIER_UNSPECIFIED"), + ), + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerNodePool_withMultiNicNetworkConfig(t *testing.T) { + t.Parallel() + + randstr := acctest.RandString(t, 10) + cluster := fmt.Sprintf("tf-test-cluster-%s", randstr) + np := fmt.Sprintf("tf-test-np-%s", randstr) + network := fmt.Sprintf("tf-test-net-%s", randstr) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withMultiNicNetworkConfig(cluster, np, network), + }, + { + ResourceName: "google_container_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"network_config.0.create_pod_range", "deletion_protection"}, + }, + }, + }) +} +{{- end }} + +func TestAccContainerNodePool_withEnablePrivateNodesToggle(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + network := fmt.Sprintf("tf-test-net-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withEnablePrivateNodesToggle(cluster, np, network, "true"), + }, + { + ResourceName: "google_container_node_pool.with_enable_private_nodes", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + { + Config: testAccContainerNodePool_withEnablePrivateNodesToggle(cluster, np, network, "false"), + }, + { + ResourceName: "google_container_node_pool.with_enable_private_nodes", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"min_master_version"}, + }, + }, + }) +} + + +func testAccContainerNodePool_withEnablePrivateNodesToggle(cluster, np, network, flag string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + min_master_version = "1.27" + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + deletion_protection = false +} + +resource "google_container_node_pool" "with_enable_private_nodes" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + node_count = 1 + network_config { + create_pod_range = false + enable_private_nodes = %s + pod_range = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + } + 
node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} +`, network, cluster, np, flag) +} + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func TestAccContainerNodePool_withBootDiskKmsKey(t *testing.T) { + // Uses generated time-based rotation time + acctest.SkipIfVcr(t) + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withBootDiskKmsKey(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_boot_disk_kms_key", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccContainerNodePool_withUpgradeSettings(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 2, 3, "SURGE", "", 0, 0.0, ""), + }, + { + ResourceName: "google_container_node_pool.with_upgrade_settings", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 2, 1, "SURGE", "", 0, 0.0, ""), + }, + { + ResourceName: "google_container_node_pool.with_upgrade_settings", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 1, 1, "SURGE", "", 0, 0.0, ""), + }, + { + ResourceName: "google_container_node_pool.with_upgrade_settings", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 0, 0, "BLUE_GREEN", "100s", 1, 0.0, "0s"), + }, + { + ResourceName: "google_container_node_pool.with_upgrade_settings", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withUpgradeSettings(cluster, np, networkName, subnetworkName, 0, 0, "BLUE_GREEN", "100s", 0, 0.5, "1s"), + }, + { + ResourceName: "google_container_node_pool.with_upgrade_settings", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withGPU(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccContainerNodePool_withGPU(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np_with_gpu", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withManagement(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodePool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + management := ` + management { + auto_repair = "false" + auto_upgrade = "false" + }` + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withManagement(cluster, nodePool, "", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_node_pool.np_with_management", "management.#", "1"), + resource.TestCheckResourceAttr( + "google_container_node_pool.np_with_management", "management.0.auto_repair", "true"), + resource.TestCheckResourceAttr( + "google_container_node_pool.np_with_management", "management.0.auto_upgrade", "true"), + ), + }, + { + ResourceName: "google_container_node_pool.np_with_management", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withManagement(cluster, nodePool, management, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_node_pool.np_with_management", "management.#", "1"), + resource.TestCheckResourceAttr( + "google_container_node_pool.np_with_management", "management.0.auto_repair", "false"), + resource.TestCheckResourceAttr( + 
"google_container_node_pool.np_with_management", "management.0.auto_upgrade", "false"), + ), + }, + { + ResourceName: "google_container_node_pool.np_with_management", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withNodeConfigScopeAlias(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withNodeConfigScopeAlias(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np_with_node_config_scope_alias", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// This test exists to validate a regional node pool *and* and update to it. 
+func TestAccContainerNodePool_regionalAutoscaling(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_regionalAutoscaling(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "1"), + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "3"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_updateAutoscaling(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "0"), + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "5"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"), + resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + 
// autoscaling.# = 0 is equivalent to no autoscaling at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"autoscaling.#"}, + }, + }, + }) +} + +// This test exists to validate a node pool with total size *and* and update to it. +func TestAccContainerNodePool_totalSize(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_totalSize(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_min_node_count", "4"), + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_max_node_count", "12"), + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.location_policy", "BALANCED"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_updateTotalSize(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_min_node_count", "2"), + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.total_max_node_count", "22"), + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.location_policy", "ANY"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + 
ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_basicTotalSize(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"), + resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + // autoscaling.# = 0 is equivalent to no autoscaling at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"autoscaling.#"}, + }, + }, + }) +} + +func TestAccContainerNodePool_autoscaling(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_autoscaling(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "1"), + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "3"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_updateAutoscaling(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count", "0"), 
+ resource.TestCheckResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count", "5"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.min_node_count"), + resource.TestCheckNoResourceAttr("google_container_node_pool.np", "autoscaling.0.max_node_count"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + // autoscaling.# = 0 is equivalent to no autoscaling at all, + // but will still cause an import diff + ImportStateVerifyIgnore: []string{"autoscaling.#"}, + }, + }, + }) +} + +func TestAccContainerNodePool_resize(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_additionalZones(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "node_count", "2"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_resize(cluster, np, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "node_count", 
"3"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + +func TestAccContainerNodePool_version(t *testing.T) { + t.Parallel() + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_version(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_updateVersion(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_version(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_regionalClusters(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + 
{ + Config: testAccContainerNodePool_regionalClusters(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_012_ConfigModeAttr(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_012_ConfigModeAttr1(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_012_ConfigModeAttr2(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_EmptyGuestAccelerator(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Test alternative way to specify an empty node pool + Config: 
testAccContainerNodePool_EmptyGuestAccelerator(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + // Test alternative way to specify an empty node pool + Config: testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np, networkName, subnetworkName, 1), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + // Assert that changes in count from 1 result in a diff + Config: testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np, networkName, subnetworkName, 2), + ExpectNonEmptyPlan: true, + PlanOnly: true, + }, + { + // Assert that adding another accelerator block will also result in a diff + Config: testAccContainerNodePool_PartialEmptyGuestAccelerator2(cluster, np, networkName, subnetworkName), + ExpectNonEmptyPlan: true, + PlanOnly: true, + }, + }, + }) +} + +func TestAccContainerNodePool_shieldedInstanceConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_shieldedInstanceConfig(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"max_pods_per_node"}, + }, + }, + }) +} + +func TestAccContainerNodePool_concurrent(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", 
acctest.RandString(t, 10)) + np1 := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + np2 := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_concurrentCreate(cluster, np1, np2, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np1", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_container_node_pool.np2", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_concurrentUpdate(cluster, np1, np2, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np1", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_container_node_pool.np2", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerNodePool_withSoleTenantConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withSoleTenantConfig(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: 
"google_container_node_pool.with_sole_tenant_config", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerNodePool_ephemeralStorageConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_ephemeralStorageConfig(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_ephemeralStorageConfig(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-1" + ephemeral_storage_config { + local_ssd_count = 1 + } + } +} +`, cluster, networkName, subnetworkName, np) +} +{{- end }} + +func TestAccContainerNodePool_ephemeralStorageLocalSsdConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + 
subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_ephemeralStorageLocalSsdConfig(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_ephemeralStorageLocalSsdConfig(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" + // this feature became available in 1.25.3-gke.1800, not sure if theres a better way to do + version_prefix = "1.25" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-1" + ephemeral_storage_local_ssd_config { + local_ssd_count = 1 + } + } +} +`, cluster, networkName, subnetworkName, np) +} + +func TestAccContainerNodePool_localNvmeSsdBlockConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_localNvmeSsdBlockConfig(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_localNvmeSsdBlockConfig(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" + // this feature became available in 1.25.3-gke.1800, not sure if theres a better way to do + version_prefix = "1.25" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-1" + local_nvme_ssd_block_config { + local_ssd_count = 1 + } + } +} +`, cluster, networkName, subnetworkName, np) +} + +func TestAccContainerNodePool_secondaryBootDisks(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccContainerNodePool_secondaryBootDisks(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_container_node_pool.np-no-mode", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_secondaryBootDisks(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + min_master_version = "1.28" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-8" + image_type = "COS_CONTAINERD" + gcfs_config { + enabled = true + } + secondary_boot_disks { + disk_image = "" + mode = "CONTAINER_IMAGE_CACHE" + } + } +} + +resource "google_container_node_pool" "np-no-mode" { + name = "%s-no-mode" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-8" + image_type = "COS_CONTAINERD" + gcfs_config { + enabled = true + } + secondary_boot_disks { + disk_image = "" + } + } +} +`, cluster, networkName, subnetworkName, np, np) +} + +func TestAccContainerNodePool_gcfsConfig(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_gcfsConfig(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-8" + image_type = "COS_CONTAINERD" + gcfs_config { + enabled = true + } + } +} +`, cluster, networkName, subnetworkName, np) +} + +func TestAccContainerNodePool_gvnic(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_gvnic(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_gvnic(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + 
name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-8" + image_type = "COS_CONTAINERD" + gvnic { + enabled = true + } + } +} +`, cluster, networkName, subnetworkName, np) +} + +func TestAccContainerNodePool_fastSocket(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_fastSocket(cluster, np, networkName, subnetworkName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", + "node_config.0.fast_socket.0.enabled", "true"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_fastSocket(cluster, np, networkName, subnetworkName, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", + "node_config.0.fast_socket.0.enabled", "false"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_fastSocket(cluster, np, networkName, subnetworkName string, enabled bool) string { + return fmt.Sprintf(` +resource 
"google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-f" + initial_node_count = 1 + min_master_version = "1.28" + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-f" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-8" + image_type = "COS_CONTAINERD" + guest_accelerator { + type = "nvidia-tesla-t4" + count = 1 + } + gvnic { + enabled = true + } + fast_socket { + enabled = %t + } + } +} +`, cluster, networkName, subnetworkName, np, enabled) +} + +func TestAccContainerNodePool_compactPlacement(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_compactPlacement(cluster, np, "COMPACT", networkName, subnetworkName), + }, + { + ResourceName: "google_container_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerNodePool_compactPlacement(cluster, np, placementType, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + 
cluster = google_container_cluster.cluster.name + initial_node_count = 2 + + node_config { + machine_type = "c2-standard-4" + } + placement_policy { + type = "%s" + } +} +`, cluster, networkName, subnetworkName, np, placementType) +} + +func TestAccContainerNodePool_customPlacementPolicy(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + policy := fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_customPlacementPolicy(cluster, np, policy, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.machine_type", "c2-standard-4"), + resource.TestCheckResourceAttr("google_container_node_pool.np", "placement_policy.0.policy_name", policy), + resource.TestCheckResourceAttr("google_container_node_pool.np", "placement_policy.0.type", "COMPACT"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_customPlacementPolicy(cluster, np, policyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_compute_resource_policy" "policy" { + name = "%s" + region = "us-central1" + group_placement_policy 
{ + collocation = "COLLOCATED" + } +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + autoscaling {} + + node_config { + machine_type = "c2-standard-4" + } + placement_policy { + type = "COMPACT" + policy_name = google_compute_resource_policy.policy.name + } +} +`, cluster, networkName, subnetworkName, policyName, np) +} + +func TestAccContainerNodePool_enableQueuedProvisioning(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_enableQueuedProvisioning(cluster, np, networkName, subnetworkName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.np", "node_config.0.machine_type", "n1-standard-2"), + resource.TestCheckResourceAttr("google_container_node_pool.np", + "node_config.0.reservation_affinity.0.consume_reservation_type", "NO_RESERVATION"), + resource.TestCheckResourceAttr("google_container_node_pool.np", "queued_provisioning.0.enabled", "true"), + ), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_enableQueuedProvisioning(cluster, np, networkName, subnetworkName string, enabled bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 
1 + min_master_version = "1.28" + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + autoscaling { + total_min_node_count = 0 + total_max_node_count = 1 + } + + node_config { + machine_type = "n1-standard-2" + guest_accelerator { + type = "nvidia-tesla-t4" + count = 1 + gpu_driver_installation_config { + gpu_driver_version = "LATEST" + } + } + reservation_affinity { + consume_reservation_type = "NO_RESERVATION" + } + } + queued_provisioning { + enabled = %t + } +} +`, cluster, networkName, subnetworkName, np, enabled) +} + +func TestAccContainerNodePool_threadsPerCore(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_threadsPerCore(cluster, np, networkName, subnetworkName, 1), + }, + { + ResourceName: "google_container_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerNodePool_threadsPerCore(cluster, np, networkName, subnetworkName string, threadsPerCore int) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + + node_config { + machine_type = "c2-standard-4" + 
advanced_machine_features { + threads_per_core = "%v" + } + } +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + + node_config { + machine_type = "c2-standard-4" + advanced_machine_features { + threads_per_core = "%v" + } + } +} +`, cluster, networkName, subnetworkName, threadsPerCore, np, threadsPerCore) +} + +func TestAccContainerNodePool_nestedVirtualization(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_nestedVirtualization(cluster, np, networkName, subnetworkName, true), + }, + { + ResourceName: "google_container_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_protection"}, + }, + }, + }) +} + +func testAccContainerNodePool_nestedVirtualization(cluster, np, networkName, subnetworkName string, enableNV bool) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" + + node_config { + machine_type = "c2-standard-4" + advanced_machine_features { + threads_per_core = 1 + enable_nested_virtualization = "%t" + } + } +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + 
initial_node_count = 2 + + node_config { + machine_type = "c2-standard-4" + advanced_machine_features { + threads_per_core = 1 + enable_nested_virtualization = "%t" + } + } +} +`, cluster, networkName, subnetworkName, enableNV, np, enableNV) +} + + +func testAccCheckContainerNodePoolDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_container_node_pool" { + continue + } + + attributes := rs.Primary.Attributes + location := attributes["location"] + + var err error + if location != "" { + _, err = config.NewContainerClient(config.UserAgent).Projects.Zones.Clusters.NodePools.Get( + config.Project, attributes["location"], attributes["cluster"], attributes["name"]).Do() + } else { + name := fmt.Sprintf( + "projects/%s/locations/%s/clusters/%s/nodePools/%s", + config.Project, + attributes["location"], + attributes["cluster"], + attributes["name"], + ) + _, err = config.NewContainerClient(config.UserAgent).Projects.Locations.Clusters.NodePools.Get(name).Do() + } + + if err == nil { + return fmt.Errorf("NodePool still exists") + } + } + + return nil + } +} + +func testAccContainerNodePool_basic(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + alias = "user-project-override" + user_project_override = true +} +resource "google_container_cluster" "cluster" { + provider = google.user-project-override + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + provider = google.user-project-override + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_withLoggingVariant(cluster, np, 
loggingVariant, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "with_logging_variant" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_logging_variant" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.with_logging_variant.name + initial_node_count = 1 + node_config { + logging_variant = "%s" + } +} +`, cluster, networkName, subnetworkName, np, loggingVariant) +} + +func testAccContainerNodePool_basicWithClusterId(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + alias = "user-project-override" + user_project_override = true +} +resource "google_container_cluster" "cluster" { + provider = google.user-project-override + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + provider = google.user-project-override + name = "%s" + cluster = google_container_cluster.cluster.id + initial_node_count = 2 +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_nodeLocations(cluster, np, network string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + 
location = "us-central1" + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } + + master_authorized_networks_config { + } + deletion_protection = false +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1" + cluster = google_container_cluster.cluster.name + + initial_node_count = 1 + node_locations = ["us-central1-a", "us-central1-c"] +} +`, network, cluster, np) +} + +func testAccContainerNodePool_maxPodsPerNode(cluster, np, network string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = 
google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } + + master_authorized_networks_config { + } + deletion_protection = false +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + max_pods_per_node = 30 + initial_node_count = 2 +} +`, network, cluster, np) +} + +func testAccContainerNodePool_regionalClusters(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + cluster = google_container_cluster.cluster.name + location = "us-central1" + initial_node_count = 2 +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_namePrefix(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name_prefix = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_noName(cluster, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + location = "us-central1-a" + cluster = 
google_container_cluster.cluster.name + initial_node_count = 2 +} +`, cluster, networkName, subnetworkName) +} + +func testAccContainerNodePool_regionalAutoscaling(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + autoscaling { + min_node_count = 1 + max_node_count = 3 + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_totalSize(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1" + initial_node_count = 3 + min_master_version = "1.27" + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + autoscaling { + total_min_node_count = 4 + total_max_node_count = 12 + location_policy = "BALANCED" + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_updateTotalSize(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1" + initial_node_count = 3 + min_master_version = "1.27" + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + autoscaling { + total_min_node_count = 2 + total_max_node_count = 22 + location_policy = "ANY" + } +} +`, cluster, 
networkName, subnetworkName, np) +} + + +func testAccContainerNodePool_basicTotalSize(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + alias = "user-project-override" + user_project_override = true +} +resource "google_container_cluster" "cluster" { + provider = google.user-project-override + name = "%s" + location = "us-central1" + initial_node_count = 3 + min_master_version = "1.27" + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + provider = google.user-project-override + name = "%s" + location = "us-central1" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_autoscaling(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + autoscaling { + min_node_count = 1 + max_node_count = 3 + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_updateAutoscaling(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + autoscaling { + min_node_count = 0 + max_node_count = 5 + } +} +`, cluster, networkName, subnetworkName, np) +} + +func 
testAccContainerNodePool_additionalZones(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + node_locations = [ + "us-central1-b", + "us-central1-c", + ] + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + node_count = 2 +} +`, cluster, networkName, subnetworkName, nodePool) +} + +func testAccContainerNodePool_resize(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + + node_locations = [ + "us-central1-b", + "us-central1-c", + ] + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + node_count = 3 +} +`, cluster, networkName, subnetworkName, nodePool) +} + +func testAccContainerNodePool_withManagement(cluster, nodePool, management, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + release_channel { + channel = "UNSPECIFIED" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np_with_management" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + %s + + node_config { + machine_type = "g1-small" + disk_size_gb = 10 + oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"] + } +} +`, cluster, networkName, subnetworkName, nodePool, management) +} + +func 
testAccContainerNodePool_withNodeConfig(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np_with_node_config" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "g1-small" + disk_size_gb = 10 + oauth_scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + preemptible = true + min_cpu_platform = "Intel Broadwell" + + taint { + key = "taint_key" + value = "taint_value" + effect = "PREFER_NO_SCHEDULE" + } + + taint { + key = "taint_key2" + value = "taint_value2" + effect = "NO_EXECUTE" + } + + // Updatable fields + image_type = "COS_CONTAINERD" + + tags = ["foo"] + + labels = { + "test.terraform.io/key1" = "foo" + } + + resource_labels = { + "key1" = "foo" + } + } +} +`, cluster, networkName, subnetworkName, nodePool) +} + +func testAccContainerNodePool_withNodeConfigUpdate(cluster, nodePool, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np_with_node_config" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "g1-small" + disk_size_gb = 10 + oauth_scopes = [ + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/devstorage.read_only", + 
"https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + preemptible = true + min_cpu_platform = "Intel Broadwell" + + taint { + key = "taint_key" + value = "taint_value" + effect = "PREFER_NO_SCHEDULE" + } + + taint { + key = "taint_key2" + value = "taint_value2" + effect = "NO_EXECUTE" + } + + // Updatable fields + image_type = "UBUNTU_CONTAINERD" + + tags = ["bar", "foobar"] + + labels = { + "test.terraform.io/key1" = "bar" + "test.terraform.io/key2" = "foo" + } + + resource_labels = { + "key1" = "bar" + "key2" = "foo" + } + } +} +`, cluster, networkName, subnetworkName, nodePool) +} + +func testAccContainerNodePool_withTaintsUpdate(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + alias = "user-project-override" + user_project_override = true +} +resource "google_container_cluster" "cluster" { + provider = google.user-project-override + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + provider = google.user-project-override + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + + node_config { + taint { + key = "taint_key" + value = "taint_value" + effect = "PREFER_NO_SCHEDULE" + } + } + + +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_withMachineAndDiskUpdate(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +provider "google" { + alias = "user-project-override" + user_project_override = true +} +resource "google_container_cluster" "cluster" { + provider = google.user-project-override + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + provider = 
google.user-project-override + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + disk_type = "pd-ssd" + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_withReservationAffinity(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_reservation_affinity" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "n1-standard-1" + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + reservation_affinity { + consume_reservation_type = "ANY_RESERVATION" + } + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_withReservationAffinitySpecific(cluster, reservation, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_compute_reservation" "gce_reservation" { + name = "%s" + zone = "us-central1-a" + + specific_reservation { + count = 1 
+ instance_properties { + machine_type = "n1-standard-1" + } + } + + specific_reservation_required = true +} + +resource "google_container_node_pool" "with_reservation_affinity" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "n1-standard-1" + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + reservation_affinity { + consume_reservation_type = "SPECIFIC_RESERVATION" + key = "compute.googleapis.com/reservation-name" + values = [ + google_compute_reservation.gce_reservation.name + ] + } + } +} +`, cluster, networkName, subnetworkName, reservation, np) +} + + +func testAccContainerNodePool_withWorkloadMetadataConfig(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_workload_metadata_config" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + spot = true + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + + workload_metadata_config { + mode = "GCE_METADATA" + } + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_withWorkloadMetadataConfig_gkeMetadata(projectID, cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +data "google_container_engine_versions" "central1a" { + location = 
"us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_workload_metadata_config" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + + workload_metadata_config { + mode = "GKE_METADATA" + } + } +} +`, projectID, cluster, networkName, subnetworkName, np) +} + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func testAccContainerNodePool_withSandboxConfig(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_sandbox_config" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "n1-standard-1" // can't be e2 because of gvisor + image_type = "COS_CONTAINERD" + + sandbox_config { + sandbox_type = "gvisor" + } + + labels = { + "test.terraform.io/gke-sandbox" = "true" + } + + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + } +} +`, cluster, 
networkName, subnetworkName, np) +} +{{- end }} + +func testAccContainerNodePool_withKubeletConfig(cluster, np, policy, period, networkName, subnetworkName string, quota bool, podPidsLimit int) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +# cpu_manager_policy & cpu_cfs_quota_period cannot be blank if cpu_cfs_quota is set to true +# cpu_manager_policy & cpu_cfs_quota_period must not set if cpu_cfs_quota is set to false +resource "google_container_node_pool" "with_kubelet_config" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + image_type = "COS_CONTAINERD" + kubelet_config { + cpu_manager_policy = %q + cpu_cfs_quota = %v + cpu_cfs_quota_period = %q + pod_pids_limit = %d + } + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + logging_variant = "DEFAULT" + } +} +`, cluster, networkName, subnetworkName, np, policy, quota, period, podPidsLimit) +} + +func testAccContainerNodePool_withLinuxNodeConfig(cluster, np, tcpMem, networkName, subnetworkName string) string { + linuxNodeConfig := ` + linux_node_config { + sysctls = {} + } +` + if len(tcpMem) != 0 { + linuxNodeConfig = fmt.Sprintf(` + linux_node_config { + sysctls = { + "net.core.netdev_max_backlog" = "10000" + "net.core.rmem_max" = 10000 + "net.core.wmem_default" = 10000 + "net.core.wmem_max" = 20000 + "net.core.optmem_max" = 10000 + "net.core.somaxconn" = 12800 + "net.ipv4.tcp_rmem" = "%s" + "net.ipv4.tcp_wmem" = "%s" + "net.ipv4.tcp_tw_reuse" = 1 + } + } +`, tcpMem, tcpMem) + } + + return fmt.Sprintf(` 
+data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_linux_node_config" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + image_type = "COS_CONTAINERD" + %s + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + } +} +`, cluster, networkName, subnetworkName, np, linuxNodeConfig) +} + +func testAccContainerNodePool_withCgroupMode(cluster, np, mode, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + image_type = "COS_CONTAINERD" + linux_node_config { + cgroup_mode = "%s" + } + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + } +} +`, cluster, networkName, subnetworkName, np, mode) +} + +func testAccContainerNodePool_withNetworkConfig(cluster, np, network, netTier string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s" + auto_create_subnetworks = false +} + +resource 
"google_compute_subnetwork" "container_subnetwork" { + name = google_compute_network.container_network.name + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } + + secondary_ip_range { + range_name = "another-pod" + ip_cidr_range = "10.1.32.0/22" + } + + lifecycle { + ignore_changes = [ + # The auto nodepool creates a secondary range which diffs this resource. + secondary_ip_range, + ] + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + release_channel { + channel = "RAPID" + } + deletion_protection = false +} + +resource "google_container_node_pool" "with_manual_pod_cidr" { + name = "%s-manual" + location = "us-central1" + cluster = google_container_cluster.cluster.name + node_count = 1 + network_config { + create_pod_range = false + pod_range = google_compute_subnetwork.container_subnetwork.secondary_ip_range[2].range_name + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +resource "google_container_node_pool" "with_auto_pod_cidr" { + name = "%s-auto" + location = "us-central1" + cluster = google_container_cluster.cluster.name + node_count = 1 + network_config { + create_pod_range = true + pod_range = "auto-pod-range" + pod_ipv4_cidr_block = "10.2.0.0/20" + } + node_config { + oauth_scopes = [ + 
"https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +resource "google_container_node_pool" "with_pco_disabled" { + name = "%s-pco" + location = "us-central1" + cluster = google_container_cluster.cluster.name + node_count = 1 + network_config { + pod_cidr_overprovision_config { + disabled = true + } + } + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +resource "google_container_node_pool" "with_tier1_net" { + name = "%s-tier1" + location = "us-central1" + cluster = google_container_cluster.cluster.name + node_count = 1 + node_locations = [ + "us-central1-a", + ] + network_config { + network_performance_config { + total_egress_bandwidth_tier = "%s" + } + } + node_config { + machine_type = "n2-standard-32" + gvnic { + enabled = true + } + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + } +} + +`, network, cluster, np, np, np, np, netTier) +} + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func testAccContainerNodePool_withMultiNicNetworkConfig(cluster, np, network string) string { + return fmt.Sprintf(` +resource "google_compute_network" "container_network" { + name = "%s-1" + auto_create_subnetworks = false +} + +resource "google_compute_network" "addn_net_1" { + name = "%s-2" + auto_create_subnetworks = false +} + +resource "google_compute_network" "addn_net_2" { + name = "%s-3" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "container_subnetwork" { + name = "%s-subnet-1" + network = google_compute_network.container_network.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } + + lifecycle { + ignore_changes = [ + # The auto nodepool creates a secondary range which diffs this resource. 
+ secondary_ip_range, + ] + } +} + +resource "google_compute_subnetwork" "subnet1" { + name = "%s-subnet-2" + network = google_compute_network.addn_net_1.name + ip_cidr_range = "10.0.37.0/24" + region = "us-central1" +} + +resource "google_compute_subnetwork" "subnet2" { + name = "%s-subnet-3" + network = google_compute_network.addn_net_2.name + ip_cidr_range = "10.0.38.0/24" + region = "us-central1" + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.64.0/19" + } +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + + network = google_compute_network.container_network.name + subnetwork = google_compute_subnetwork.container_subnetwork.name + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.container_subnetwork.secondary_ip_range[1].range_name + } + private_cluster_config { + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + } + release_channel { + channel = "RAPID" + } + enable_multi_networking = true + datapath_provider = "ADVANCED_DATAPATH" + deletion_protection = false +} + +resource "google_container_node_pool" "with_multi_nic" { + name = "%s-mutli-nic" + location = "us-central1" + cluster = google_container_cluster.cluster.name + node_count = 1 + network_config { + create_pod_range = false + enable_private_nodes = true + pod_range = google_compute_subnetwork.container_subnetwork.secondary_ip_range[0].range_name + additional_node_network_configs { + network = google_compute_network.addn_net_1.name + subnetwork = google_compute_subnetwork.subnet1.name + } + additional_node_network_configs { + network = google_compute_network.addn_net_2.name + subnetwork = google_compute_subnetwork.subnet2.name + } + additional_pod_network_configs { + subnetwork = google_compute_subnetwork.subnet2.name + secondary_pod_range = 
"pod" + max_pods_per_node = 32 + } + } + node_config { + machine_type = "n2-standard-8" + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + image_type = "COS_CONTAINERD" + } +} + +`, network, network, network, network, network, network, cluster, np) +} +{{- end }} + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func testAccContainerNodePool_withBootDiskKmsKey(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_kms_key_ring" "keyring" { + name = "%s-kms-key-ring" + location = "us-central1" +} + +resource "google_kms_crypto_key" "example-key" { + name = "%s-kms-key" + key_ring = google_kms_key_ring.keyring.id + rotation_period = "100000s" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_boot_disk_kms_key" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + image_type = "COS_CONTAINERD" + boot_disk_kms_key = google_kms_crypto_key.example-key.id + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + } +} +`, cluster, cluster, cluster, networkName, subnetworkName, np) +} +{{- end }} + +func makeUpgradeSettings(maxSurge int, maxUnavailable int, strategy string, nodePoolSoakDuration string, batchNodeCount int, batchPercentage float64, batchSoakDuration string) string { + if strategy == "BLUE_GREEN" { + return fmt.Sprintf(` +upgrade_settings { + strategy = "%s" + blue_green_settings { + node_pool_soak_duration = "%s" + standard_rollout_policy { + 
batch_node_count = %d + batch_percentage = %f + batch_soak_duration = "%s" + } + } +} +`, strategy, nodePoolSoakDuration, batchNodeCount, batchPercentage, batchSoakDuration) + } + return fmt.Sprintf(` +upgrade_settings { + max_surge = %d + max_unavailable = %d + strategy = "%s" +} +`, maxSurge, maxUnavailable, strategy) +} + +func testAccContainerNodePool_withUpgradeSettings(clusterName, nodePoolName, networkName, subnetworkName string, maxSurge int, maxUnavailable int, strategy string, nodePoolSoakDuration string, batchNodeCount int, batchPercentage float64, batchSoakDuration string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1" { + location = "us-central1" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1" + initial_node_count = 1 + min_master_version = "${data.google_container_engine_versions.central1.latest_master_version}" + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_upgrade_settings" { + name = "%s" + location = "us-central1" + cluster = "${google_container_cluster.cluster.name}" + initial_node_count = 1 + %s +} +`, clusterName, networkName, subnetworkName, nodePoolName, makeUpgradeSettings(maxSurge, maxUnavailable, strategy, nodePoolSoakDuration, batchNodeCount, batchPercentage, batchSoakDuration)) +} + +func testAccContainerNodePool_withGPU(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1c" { + location = "us-central1-c" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-c" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1c.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np_with_gpu" { + name = "%s" + location = "us-central1-c" + cluster = 
google_container_cluster.cluster.name + + initial_node_count = 1 + + node_config { + machine_type = "a2-highgpu-1g" // can't be e2 because of accelerator + disk_size_gb = 32 + + oauth_scopes = [ + "https://www.googleapis.com/auth/devstorage.read_only", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/service.management.readonly", + "https://www.googleapis.com/auth/servicecontrol", + "https://www.googleapis.com/auth/trace.append", + ] + + preemptible = true + service_account = "default" + image_type = "COS_CONTAINERD" + + guest_accelerator { + type = "nvidia-tesla-a100" + gpu_partition_size = "1g.5gb" + count = 1 + gpu_driver_installation_config { + gpu_driver_version = "LATEST" + } + gpu_sharing_config { + gpu_sharing_strategy = "TIME_SHARING" + max_shared_clients_per_gpu = 2 + } + } + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_withNodeConfigScopeAlias(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np_with_node_config_scope_alias" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "g1-small" + disk_size_gb = 10 + oauth_scopes = ["compute-rw", "storage-ro", "logging-write", "monitoring"] + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_version(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + 
min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + version = data.google_container_engine_versions.central1a.valid_node_versions[1] +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_updateVersion(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + version = data.google_container_engine_versions.central1a.valid_node_versions[0] +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_012_ConfigModeAttr1(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-f" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-f" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + guest_accelerator { + count = 1 + type = "nvidia-tesla-t4" + } + machine_type = "n1-highmem-4" + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_012_ConfigModeAttr2(cluster, np, 
networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-f" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-f" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + guest_accelerator = [] + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_EmptyGuestAccelerator(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-f" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-f" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + guest_accelerator { + count = 0 + type = "nvidia-tesla-p100" + } + machine_type = "n1-highmem-4" + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_PartialEmptyGuestAccelerator(cluster, np, networkName, subnetworkName string, count int) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-f" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-f" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + guest_accelerator { + count = 0 + type = "nvidia-tesla-p100" + } + + guest_accelerator { + count = %d + type = "nvidia-tesla-p100" + } + machine_type = "n1-highmem-4" + } +} +`, cluster, networkName, subnetworkName, np, count) +} + +func 
testAccContainerNodePool_PartialEmptyGuestAccelerator2(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-f" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-f" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + guest_accelerator { + count = 0 + type = "nvidia-tesla-p100" + } + + guest_accelerator { + count = 1 + type = "nvidia-tesla-p100" + } + + guest_accelerator { + count = 1 + type = "nvidia-tesla-p9000" + } + machine_type = "n1-highmem-4" + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_shieldedInstanceConfig(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + node_config { + shielded_instance_config { + enable_integrity_monitoring = true + enable_secure_boot = true + } + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_concurrentCreate(cluster, np1, np2, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np1" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 +} + +resource "google_container_node_pool" 
"np2" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + } +`, cluster, networkName, subnetworkName, np1, np2) +} + +func testAccContainerNodePool_concurrentUpdate(cluster, np1, np2, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 3 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np1" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + version = "1.27.3-gke.1700" +} + +resource "google_container_node_pool" "np2" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + version = "1.27.3-gke.1700" +} +`, cluster, networkName, subnetworkName, np1, np2) +} + +func testAccContainerNodePool_withSoleTenantConfig(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_compute_node_template" "soletenant-tmpl" { + name = "tf-test-soletenant-tmpl" + region = "us-central1" + node_type = "n1-node-96-624" +} + +resource "google_compute_node_group" "nodes" { + name = "tf-test-soletenant-group" + zone = "us-central1-a" + initial_size = 1 + node_template = google_compute_node_template.soletenant-tmpl.id +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_sole_tenant_config" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + 
initial_node_count = 1 + node_config { + machine_type = "n1-standard-2" + sole_tenant_config { + node_affinity { + key = "compute.googleapis.com/node-group-name" + operator = "IN" + values = [google_compute_node_group.nodes.name] + } + } + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + } +} +`, cluster, networkName, subnetworkName, np) +} + +func TestAccContainerNodePool_withConfidentialNodes(t *testing.T) { + t.Parallel() + + clusterName := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-cluster-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withConfidentialNodes(clusterName, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_disableConfidentialNodes(clusterName, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerNodePool_withConfidentialNodes(clusterName, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_withConfidentialNodes(clusterName, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + 
node_config { + confidential_nodes { + enabled = false + } + machine_type = "n2-standard-2" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "n2d-standard-2" // can't be e2 because Confidential Nodes require AMD CPUs + confidential_nodes { + enabled = true + } + } +} +`, clusterName, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_disableConfidentialNodes(clusterName, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + node_config { + confidential_nodes { + enabled = false + } + machine_type = "n2-standard-2" + } + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + machine_type = "n2d-standard-2" // can't be e2 because Confidential Nodes require AMD CPUs + confidential_nodes { + enabled = false + } + } +} +`, clusterName, networkName, subnetworkName, np) +} + +func TestAccContainerNodePool_tpuTopology(t *testing.T) { + t.Parallel() + t.Skip("https://github.com/hashicorp/terraform-provider-google/issues/15254#issuecomment-1646277473") + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np1 := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + np2 := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_tpuTopology(cluster, np1, np2, "2x2x2", networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_container_node_pool.regular_pool", "node_config.0.machine_type", "n1-standard-4"), + resource.TestCheckResourceAttr("google_container_node_pool.with_tpu_topology", "node_config.0.machine_type", "ct4p-hightpu-4t"), + resource.TestCheckResourceAttr("google_container_node_pool.with_tpu_topology", "placement_policy.0.tpu_topology", "2x2x2"), + resource.TestCheckResourceAttr("google_container_node_pool.with_tpu_topology", "placement_policy.0.type", "COMPACT"), + ), + }, + { + ResourceName: "google_container_node_pool.with_tpu_topology", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_tpuTopology(cluster, np1, np2, tpuTopology, networkName, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "regular_pool" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + machine_type = "n1-standard-4" + } +} + +resource "google_container_node_pool" "with_tpu_topology" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 2 + + node_config { + machine_type = "ct4p-hightpu-4t" + + } + placement_policy { + type = "COMPACT" + tpu_topology = "%s" + } +} +`, cluster, networkName, subnetworkName, np1, np2, tpuTopology) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccContainerNodePool_withHostMaintenancePolicy(t 
*testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withHostMaintenancePolicy(cluster, np), + }, + { + ResourceName: "google_container_node_pool.np", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_withHostMaintenancePolicy(cluster, np string) string { + return fmt.Sprintf(` +resource "google_container_cluster" "cluster" { + name = "%s" + location = "asia-east1-c" + initial_node_count = 1 + node_config { + host_maintenance_policy { + maintenance_interval = "PERIODIC" + } + machine_type = "n2-standard-2" + } + deletion_protection = false +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "asia-east1-c" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + node_config { + host_maintenance_policy { + maintenance_interval = "PERIODIC" + } + machine_type = "n2-standard-2" + } +} +`, cluster, np) +} +{{- end }} + +func TestAccContainerNodePool_withConfidentialBootDisk(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withConfidentialBootDisk(cluster, np, kms.CryptoKey.Name, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.with_confidential_boot_disk", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_withConfidentialBootDisk(cluster, np string, kmsKeyName, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "with_confidential_boot_disk" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + + node_config { + image_type = "COS_CONTAINERD" + boot_disk_kms_key = "%s" + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + enable_confidential_storage = true + machine_type = "n2-standard-2" + disk_type = "hyperdisk-balanced" + } +} +`, cluster, networkName, subnetworkName, np, kmsKeyName) +} + +func TestAccContainerNodePool_withoutConfidentialBootDisk(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + np := fmt.Sprintf("tf-test-np-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_withoutConfidentialBootDisk(cluster, np, networkName, subnetworkName), + }, + { + ResourceName: "google_container_node_pool.without_confidential_boot_disk", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerNodePool_withoutConfidentialBootDisk(cluster, np, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_container_engine_versions" "central1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + min_master_version = data.google_container_engine_versions.central1a.latest_master_version + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "without_confidential_boot_disk" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + + node_config { + image_type = "COS_CONTAINERD" + oauth_scopes = [ + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ] + enable_confidential_storage = false + machine_type = "n2-standard-2" + disk_type = "pd-balanced" + } +} +`, cluster, networkName, subnetworkName, np) +} + +func testAccContainerNodePool_resourceManagerTags(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_member" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" +} + +resource "google_project_iam_member" "tagUser1" { + project = "%[1]s" + 
role = "roles/resourcemanager.tagUser" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "google_project_iam_member" "tagUser2" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_member.tagHoldAdmin, + google_project_iam_member.tagUser1, + google_project_iam_member.tagUser2, + ] +} + +resource "google_tags_tag_key" "key1" { + parent = "projects/%[1]s" + short_name = "foobarbaz1-%[2]s" + description = "For foo/bar1 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } +} + +resource "google_tags_tag_value" "value1" { + parent = "tagKeys/${google_tags_tag_key.key1.name}" + short_name = "foo1-%[2]s" + description = "For foo1 resources" +} + +resource "google_tags_tag_key" "key2" { + parent = "projects/%[1]s" + short_name = "foobarbaz2-%[2]s" + description = "For foo/bar2 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [google_tags_tag_key.key1] +} + +resource "google_tags_tag_value" "value2" { + parent = "tagKeys/${google_tags_tag_key.key2.name}" + short_name = "foo2-%[2]s" + description = "For foo2 resources" +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%[3]s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. 
So we create the smallest possible default + # node pool and immediately delete it. + remove_default_node_pool = true + initial_node_count = 1 + + deletion_protection = false + network = "%[4]s" + subnetwork = "%[5]s" + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} + +# Separately Managed Node Pool +resource "google_container_node_pool" "primary_nodes" { + name = google_container_cluster.primary.name + location = "us-central1-a" + cluster = google_container_cluster.primary.name + + version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key1.name}" = "tagValues/${google_tags_tag_value.value1.name}" + } + } +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} + +func testAccContainerNodePool_resourceManagerTagsUpdate1(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_member" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" +} + +resource "google_project_iam_member" "tagUser1" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "google_project_iam_member" "tagUser2" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com" + + depends_on = 
[google_project_iam_member.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_member.tagHoldAdmin, + google_project_iam_member.tagUser1, + google_project_iam_member.tagUser2, + ] +} + +resource "google_tags_tag_key" "key1" { + parent = "projects/%[1]s" + short_name = "foobarbaz1-%[2]s" + description = "For foo/bar1 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } +} + +resource "google_tags_tag_value" "value1" { + parent = "tagKeys/${google_tags_tag_key.key1.name}" + short_name = "foo1-%[2]s" + description = "For foo1 resources" +} + +resource "google_tags_tag_key" "key2" { + parent = "projects/%[1]s" + short_name = "foobarbaz2-%[2]s" + description = "For foo/bar2 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [google_tags_tag_key.key1] +} + +resource "google_tags_tag_value" "value2" { + parent = "tagKeys/${google_tags_tag_key.key2.name}" + short_name = "foo2-%[2]s" + description = "For foo2 resources" +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%[3]s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
+ remove_default_node_pool = true + initial_node_count = 1 + + deletion_protection = false + network = "%[4]s" + subnetwork = "%[5]s" + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} + +# Separately Managed Node Pool +resource "google_container_node_pool" "primary_nodes" { + name = google_container_cluster.primary.name + location = "us-central1-a" + cluster = google_container_cluster.primary.name + + version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + + resource_manager_tags = { + "tagKeys/${google_tags_tag_key.key1.name}" = "tagValues/${google_tags_tag_value.value1.name}" + "tagKeys/${google_tags_tag_key.key2.name}" = "tagValues/${google_tags_tag_value.value2.name}" + } + } +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} + +func testAccContainerNodePool_resourceManagerTagsUpdate2(projectID, clusterName, networkName, subnetworkName, randomSuffix string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%[1]s" +} + +resource "google_project_iam_member" "tagHoldAdmin" { + project = "%[1]s" + role = "roles/resourcemanager.tagHoldAdmin" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" +} + +resource "google_project_iam_member" "tagUser1" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:service-${data.google_project.project.number}@container-engine-robot.iam.gserviceaccount.com" + + depends_on = [google_project_iam_member.tagHoldAdmin] +} + +resource "google_project_iam_member" "tagUser2" { + project = "%[1]s" + role = "roles/resourcemanager.tagUser" + member = "serviceAccount:${data.google_project.project.number}@cloudservices.gserviceaccount.com" + + depends_on = 
[google_project_iam_member.tagHoldAdmin] +} + +resource "time_sleep" "wait_120_seconds" { + create_duration = "120s" + + depends_on = [ + google_project_iam_member.tagHoldAdmin, + google_project_iam_member.tagUser1, + google_project_iam_member.tagUser2, + ] +} + +resource "google_tags_tag_key" "key1" { + parent = "projects/%[1]s" + short_name = "foobarbaz1-%[2]s" + description = "For foo/bar1 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } +} + +resource "google_tags_tag_value" "value1" { + parent = "tagKeys/${google_tags_tag_key.key1.name}" + short_name = "foo1-%[2]s" + description = "For foo1 resources" +} + +resource "google_tags_tag_key" "key2" { + parent = "projects/%[1]s" + short_name = "foobarbaz2-%[2]s" + description = "For foo/bar2 resources" + purpose = "GCE_FIREWALL" + purpose_data = { + network = "%[1]s/%[4]s" + } + + depends_on = [google_tags_tag_key.key1] +} + +resource "google_tags_tag_value" "value2" { + parent = "tagKeys/${google_tags_tag_key.key2.name}" + short_name = "foo2-%[2]s" + description = "For foo2 resources" +} + +data "google_container_engine_versions" "uscentral1a" { + location = "us-central1-a" +} + +resource "google_container_cluster" "primary" { + name = "%[3]s" + location = "us-central1-a" + min_master_version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + + # We can't create a cluster with no node pool defined, but we want to only use + # separately managed node pools. So we create the smallest possible default + # node pool and immediately delete it. 
+ remove_default_node_pool = true + initial_node_count = 1 + + deletion_protection = false + network = "%[4]s" + subnetwork = "%[5]s" + + timeouts { + create = "30m" + update = "40m" + } + + depends_on = [time_sleep.wait_120_seconds] +} + +# Separately Managed Node Pool +resource "google_container_node_pool" "primary_nodes" { + name = google_container_cluster.primary.name + location = "us-central1-a" + cluster = google_container_cluster.primary.name + + version = data.google_container_engine_versions.uscentral1a.release_channel_latest_version["STABLE"] + node_count = 1 + + node_config { + machine_type = "n1-standard-1" // can't be e2 because of local-ssd + disk_size_gb = 15 + } +} +`, projectID, randomSuffix, clusterName, networkName, subnetworkName) +} + +func TestAccContainerNodePool_privateRegistry(t *testing.T) { + t.Parallel() + + cluster := fmt.Sprintf("tf-test-cluster-%s", acctest.RandString(t, 10)) + nodepool := fmt.Sprintf("tf-test-nodepool-%s", acctest.RandString(t, 10)) + secretID := fmt.Sprintf("tf-test-secret-%s", acctest.RandString(t, 10)) + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerNodePool_privateRegistryEnabled(secretID, cluster, nodepool, networkName, subnetworkName), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr( + "google_container_node_pool.np", + "node_config.0.containerd_config.0.private_registry_access_config.0.enabled", + "true", + ), + resource.TestCheckResourceAttr( + "google_container_node_pool.np", + "node_config.0.containerd_config.0.private_registry_access_config.0.certificate_authority_domain_config.#", + "2", + ), + // 
First CA config + resource.TestCheckResourceAttr( + "google_container_node_pool.np", + "node_config.0.containerd_config.0.private_registry_access_config.0.certificate_authority_domain_config.0.fqdns.0", + "my.custom.domain", + ), + // Second CA config + resource.TestCheckResourceAttr( + "google_container_node_pool.np", + "node_config.0.containerd_config.0.private_registry_access_config.0.certificate_authority_domain_config.1.fqdns.0", + "10.1.2.32", + ), + ), + }, + }, + }) +} + +func testAccContainerNodePool_privateRegistryEnabled(secretID, cluster, nodepool, network, subnetwork string) string { + return fmt.Sprintf(` +data "google_project" "test_project" { + } + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "%s" + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } +} + +resource "google_secret_manager_secret_version" "secret-version-basic" { + secret = google_secret_manager_secret.secret-basic.id + secret_data = "dummypassword" + } + +resource "google_secret_manager_secret_iam_member" "secret_iam" { + secret_id = google_secret_manager_secret.secret-basic.id + role = "roles/secretmanager.admin" + member = "serviceAccount:${data.google_project.test_project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret_version.secret-version-basic] + } + +resource "google_container_cluster" "cluster" { + name = "%s" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + network = "%s" + subnetwork = "%s" +} + +resource "google_container_node_pool" "np" { + name = "%s" + location = "us-central1-a" + cluster = google_container_cluster.cluster.name + initial_node_count = 1 + + node_config { + oauth_scopes = [ + "https://www.googleapis.com/auth/cloud-platform", + ] + machine_type = "n1-standard-8" + image_type = "COS_CONTAINERD" + containerd_config { + private_registry_access_config { + enabled = true + certificate_authority_domain_config { + fqdns = [ 
"my.custom.domain", "10.0.0.127:8888" ] + gcp_secret_manager_certificate_config { + secret_uri = google_secret_manager_secret_version.secret-version-basic.name + } + } + certificate_authority_domain_config { + fqdns = [ "10.1.2.32" ] + gcp_secret_manager_certificate_config { + secret_uri = google_secret_manager_secret_version.secret-version-basic.name + } + } + } + } + } +} +`, secretID, cluster, network, subnetwork, nodepool) +} diff --git a/mmv1/third_party/terraform/services/containeranalysis/go/resource_container_analysis_note_test.go.tmpl b/mmv1/third_party/terraform/services/containeranalysis/go/resource_container_analysis_note_test.go.tmpl new file mode 100644 index 000000000000..41c61ecbc477 --- /dev/null +++ b/mmv1/third_party/terraform/services/containeranalysis/go/resource_container_analysis_note_test.go.tmpl @@ -0,0 +1,79 @@ +package containeranalysis_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccContainerAnalysisNote_basic(t *testing.T) { + t.Parallel() + + name := acctest.RandString(t, 10) + readableName := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAnalysisNoteDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAnalysisNoteBasic(name, readableName), + }, + { + ResourceName: "google_container_analysis_note.note", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccContainerAnalysisNote_update(t *testing.T) { + t.Parallel() + + name := acctest.RandString(t, 10) + readableName := acctest.RandString(t, 10) + readableName2 := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckContainerAnalysisNoteDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccContainerAnalysisNoteBasic(name, readableName), + }, + { + ResourceName: "google_container_analysis_note.note", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccContainerAnalysisNoteBasic(name, readableName2), + }, + { + ResourceName: "google_container_analysis_note.note", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccContainerAnalysisNoteBasic(name, readableName string) string { + return fmt.Sprintf(` +resource "google_container_analysis_note" "note" { + name = "tf-test-%s" + attestation_authority { + hint { + human_readable_name = "My Attestor %s" + } + } +} +`, name, readableName) +} +{{- else }} +// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. +{{- end }} diff --git a/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job.go.tmpl b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job.go.tmpl new file mode 100644 index 000000000000..3a77c6601e69 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job.go.tmpl @@ -0,0 +1,843 @@ +package dataflow +{{- if ne $.TargetVersionName "ga" }} + +import ( + "context" + "fmt" + "log" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/googleapi" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + dataflow "google.golang.org/api/dataflow/v1b3" +) + +// NOTE: 
resource_dataflow_flex_template currently does not support updating existing jobs. +// Changing any non-computed field will result in the job being deleted (according to its +// on_delete policy) and recreated with the updated parameters. + +// ResourceDataflowFlexTemplateJob defines the schema for Dataflow FlexTemplate jobs. +func ResourceDataflowFlexTemplateJob() *schema.Resource { + return &schema.Resource{ + Create: resourceDataflowFlexTemplateJobCreate, + Read: resourceDataflowFlexTemplateJobRead, + Update: resourceDataflowFlexTemplateJobUpdate, + Delete: resourceDataflowFlexTemplateJobDelete, + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + resourceDataflowFlexJobTypeCustomizeDiff, + ), + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceDataflowFlexTemplateJobResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceDataflowFlexTemplateJobStateUpgradeV0, + Version: 0, + }, + }, + Schema: map[string]*schema.Schema{ + + "container_spec_gcs_path": { + Type: schema.TypeString, + Required: true, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The region in which the created job should run.`, + }, + + "transform_name_mapping": { + Type: schema.TypeMap, + Optional: true, + Description: `Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job.`, + }, + + "on_delete": { + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"cancel", "drain"}, false), + Optional: true, + Default: "cancel", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User labels to be specified for the job. 
Keys and values should follow the restrictions specified in the labeling restrictions page. NOTE: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "parameters": { + Type: schema.TypeMap, + Optional: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "job_id": { + Type: schema.TypeString, + Computed: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "type": { + Type: schema.TypeString, + Computed: true, + Description: `The type of this job, selected from the JobType enum.`, + }, + + "num_workers": { + Type: schema.TypeInt, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Computed: true, + Description: `The initial number of Google Compute Engine instances for the job.`, + }, + + "max_workers": { + Type: schema.TypeInt, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Computed: true, + Description: `The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.`, + }, + + "service_account_email": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The Service Account email used to create the job.`, + }, + + "temp_location": { + 
Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.`, + }, + + "staging_location": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The Cloud Storage path to use for staging files. Must be a valid Cloud Storage URL, beginning with gs://.`, + }, + + "sdk_container_image": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Docker registry location of container image to use for the worker harness. Default is the container for the version of the SDK. Note this field is only valid for portable pipelines.`, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The network to which VMs will be assigned. If it is not provided, "default" will be used.`, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + Computed: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".`, + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The machine type to use for the job.`, + }, + + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`, + }, + + "ip_configuration": { + Type: schema.TypeString, + Optional: true, + Description: `The configuration for VM IPs. 
Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".`, + ValidateFunc: validation.StringInSlice([]string{"WORKER_IP_PUBLIC", "WORKER_IP_PRIVATE"}, false), + }, + + "additional_experiments": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Description: `List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "enable_streaming_engine": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates if the job should use the streaming engine feature.`, + }, + + "autoscaling_algorithm": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The algorithm to use for autoscaling`, + }, + + "launcher_machine_type": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The machine type to use for launching the job. The default is n1-standard-1.`, + }, + + "skip_wait_on_job_termination": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing from terraform state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are different, e.g. by embedding a release ID or by using a random_id.`, + }, + }, + UseJSONNumber: true, + } +} + +// resourceDataflowFlexTemplateJobCreate creates a Flex Template Job from TF code. 
+func resourceDataflowFlexTemplateJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + env, updatedParameters, err := resourceDataflowFlexJobSetupEnv(d, config) + if err != nil { + return err + } + + request := dataflow.LaunchFlexTemplateRequest{ + LaunchParameter: &dataflow.LaunchFlexTemplateParameter{ + ContainerSpecGcsPath: d.Get("container_spec_gcs_path").(string), + JobName: d.Get("name").(string), + Parameters: updatedParameters, + Environment: &env, + }, + } + + response, err := config.NewDataflowClient(userAgent).Projects.Locations.FlexTemplates.Launch(project, region, &request).Do() + if err != nil { + return err + } + + job := response.Job + + //adding wait time for setting all the parameters into state file + err = waitForDataflowJobState(d, config, job.Id, userAgent, d.Timeout(schema.TimeoutUpdate), "JOB_STATE_RUNNING") + if err != nil { + return fmt.Errorf("Error waiting for job with job ID %q to be running: %s", job.Id, err) + } + + d.SetId(job.Id) + if err := d.Set("job_id", job.Id); err != nil { + return fmt.Errorf("Error setting job_id: %s", err) + } + + return resourceDataflowFlexTemplateJobRead(d, meta) +} + +func resourceDataflowFlexJobSetupEnv(d *schema.ResourceData, config *transport_tpg.Config) (dataflow.FlexTemplateRuntimeEnvironment, map[string]string, error) { + + updatedParameters := tpgresource.ExpandStringMap(d, "parameters") + + additionalExperiments := tpgresource.ConvertStringSet(d.Get("additional_experiments").(*schema.Set)) + + var autoscalingAlgorithm string + autoscalingAlgorithm, updatedParameters = dataflowFlexJobTypeTransferVar("autoscaling_algorithm", "autoscalingAlgorithm", 
updatedParameters, d) + + var numWorkers int + if p, ok := d.GetOk("parameters.numWorkers"); ok { + number, err := strconv.Atoi(p.(string)) + if err != nil { + return dataflow.FlexTemplateRuntimeEnvironment{}, updatedParameters, fmt.Errorf("parameters.numWorkers must have a valid integer assigned to it, current value is %s", p.(string)) + } + delete(updatedParameters, "numWorkers") + numWorkers = number + } else { + if v, ok := d.GetOk("num_workers"); ok { + numWorkers = v.(int) + } + } + + var maxNumWorkers int + if p, ok := d.GetOk("parameters.maxNumWorkers"); ok { + number, err := strconv.Atoi(p.(string)) + if err != nil { + return dataflow.FlexTemplateRuntimeEnvironment{}, updatedParameters, fmt.Errorf("parameters.maxNumWorkers must have a valid integer assigned to it, current value is %s", p.(string)) + } + delete(updatedParameters, "maxNumWorkers") + maxNumWorkers = number + } else { + if v, ok := d.GetOk("max_workers"); ok { + maxNumWorkers = v.(int) + } + } + + network, updatedParameters := dataflowFlexJobTypeTransferVar("network", "network", updatedParameters, d) + + serviceAccountEmail, updatedParameters := dataflowFlexJobTypeTransferVar("service_account_email", "serviceAccountEmail", updatedParameters, d) + + subnetwork, updatedParameters := dataflowFlexJobTypeTransferVar("subnetwork", "subnetwork", updatedParameters, d) + + tempLocation, updatedParameters := dataflowFlexJobTypeTransferVar("temp_location", "tempLocation", updatedParameters, d) + + stagingLocation, updatedParameters := dataflowFlexJobTypeTransferVar("staging_location", "stagingLocation", updatedParameters, d) + + machineType, updatedParameters := dataflowFlexJobTypeTransferVar("machine_type", "workerMachineType", updatedParameters, d) + + kmsKeyName, updatedParameters := dataflowFlexJobTypeTransferVar("kms_key_name", "kmsKeyName", updatedParameters, d) + + ipConfiguration, updatedParameters := dataflowFlexJobTypeTransferVar("ip_configuration", "ipConfiguration", updatedParameters, d) + + 
var enableStreamingEngine bool + if p, ok := d.GetOk("parameters.enableStreamingEngine"); ok { + delete(updatedParameters, "enableStreamingEngine") + e := strings.ToLower(p.(string)) + switch e { + case "true": + enableStreamingEngine = true + case "false": + enableStreamingEngine = false + default: + return dataflow.FlexTemplateRuntimeEnvironment{}, nil, fmt.Errorf("error when handling parameters.enableStreamingEngine value: expected value to be true or false but got value `%s`", e) + } + } else { + if v, ok := d.GetOk("enable_streaming_engine"); ok { + enableStreamingEngine = v.(bool) + } + } + + sdkContainerImage, updatedParameters := dataflowFlexJobTypeTransferVar("sdk_container_image", "sdkContainerImage", updatedParameters, d) + + launcherMachineType, updatedParameters := dataflowFlexJobTypeTransferVar("launcher_machine_type", "launcherMachineType", updatedParameters, d) + + env := dataflow.FlexTemplateRuntimeEnvironment{ + AdditionalUserLabels: tpgresource.ExpandStringMap(d, "effective_labels"), + AutoscalingAlgorithm: autoscalingAlgorithm, + NumWorkers: int64(numWorkers), + MaxWorkers: int64(maxNumWorkers), + Network: network, + ServiceAccountEmail: serviceAccountEmail, + Subnetwork: subnetwork, + TempLocation: tempLocation, + StagingLocation: stagingLocation, + MachineType: machineType, + KmsKeyName: kmsKeyName, + IpConfiguration: ipConfiguration, + EnableStreamingEngine: enableStreamingEngine, + AdditionalExperiments: additionalExperiments, + SdkContainerImage: sdkContainerImage, + LauncherMachineType: launcherMachineType, + } + return env, updatedParameters, nil +} + +// resourceDataflowFlexTemplateJobRead reads a Flex Template Job resource. 
+func resourceDataflowFlexTemplateJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + jobId := d.Id() + + job, err := resourceDataflowJobGetJob(config, project, region, userAgent, jobId) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Dataflow job %s", jobId)) + } + + if err := d.Set("job_id", job.Id); err != nil { + return fmt.Errorf("Error setting job_id: %s", err) + } + if err := d.Set("state", job.CurrentState); err != nil { + return fmt.Errorf("Error setting state: %s", err) + } + if err := d.Set("name", job.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("type", job.Type); err != nil { + return fmt.Errorf("Error setting type: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := tpgresource.SetLabels(job.Labels, d, "labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := tpgresource.SetLabels(job.Labels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", job.Labels); err != nil { + return fmt.Errorf("Error setting effective_labels: %s", err) + } + if err := d.Set("kms_key_name", job.Environment.ServiceKmsKeyName); err != nil { + return fmt.Errorf("Error setting kms_key_name: %s", err) + } + if err := d.Set("service_account_email", job.Environment.ServiceAccountEmail); err != nil { + return fmt.Errorf("Error setting service_account_email: %s", err) + } + + sdkPipelineOptions, err := tpgresource.ConvertToMap(job.Environment.SdkPipelineOptions) + 
if err != nil { + return err + } + optionsMap := sdkPipelineOptions["options"].(map[string]interface{}) + + if err := d.Set("temp_location", optionsMap["tempLocation"]); err != nil { + return fmt.Errorf("Error setting temp_gcs_location: %s", err) + } + if err := d.Set("network", optionsMap["network"]); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("num_workers", optionsMap["numWorkers"]); err != nil { + return fmt.Errorf("Error setting num_workers: %s", err) + } + if err := d.Set("max_workers", optionsMap["maxNumWorkers"]); err != nil { + return fmt.Errorf("Error setting max_workers: %s", err) + } + if err := d.Set("staging_location", optionsMap["stagingLocation"]); err != nil { + return fmt.Errorf("Error setting staging_location: %s", err) + } + if err := d.Set("sdk_container_image", optionsMap["sdkContainerImage"]); err != nil { + return fmt.Errorf("Error setting sdk_container_image: %s", err) + } + if err := d.Set("network", optionsMap["network"]); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("subnetwork", optionsMap["subnetwork"]); err != nil { + return fmt.Errorf("Error setting subnetwork: %s", err) + } + if err := d.Set("machine_type", optionsMap["workerMachineType"]); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + + if ok := shouldStopDataflowJobDeleteQuery(job.CurrentState, d.Get("skip_wait_on_job_termination").(bool)); ok { + log.Printf("[DEBUG] Removing resource '%s' because it is in state %s.\n", job.Name, job.CurrentState) + d.SetId("") + return nil + } + + return nil +} + +func waitForDataflowJobState(d *schema.ResourceData, config *transport_tpg.Config, jobID, userAgent string, timeout time.Duration, targetState string) error { + return retry.Retry(timeout, func() *retry.RetryError { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return retry.NonRetryableError(err) + } + + region, err := 
tpgresource.GetRegion(d, config) + if err != nil { + return retry.NonRetryableError(err) + } + + job, err := resourceDataflowJobGetJob(config, project, region, userAgent, jobID) + if err != nil { + if transport_tpg.IsRetryableError(err, nil, nil) { + return retry.RetryableError(err) + } + return retry.NonRetryableError(err) + } + + state := job.CurrentState + if state == targetState { + log.Printf("[DEBUG] the job with ID %q has state %q.", jobID, state) + return nil + } + _, terminating := DataflowTerminatingStatesMap[state] + if terminating && targetState == "JOB_STATE_RUNNING" { + return retry.NonRetryableError(fmt.Errorf("the job with ID %q is terminating with state %q and cannot reach expected state %q", jobID, state, targetState)) + } + if _, terminated := DataflowTerminalStatesMap[state]; terminated { + return retry.NonRetryableError(fmt.Errorf("the job with ID %q has terminated with state %q instead of expected state %q", jobID, state, targetState)) + } else { + log.Printf("[DEBUG] the job with ID %q has state %q.", jobID, state) + return retry.RetryableError(fmt.Errorf("the job with ID %q has state %q, waiting for %q", jobID, state, targetState)) + } + }) +} + +// resourceDataflowFlexTemplateJobUpdate updates a Flex Template Job resource. 
+func resourceDataflowFlexTemplateJobUpdate(d *schema.ResourceData, meta interface{}) error { + // Don't send an update request if only virtual fields have changes + if resourceDataflowJobIsVirtualUpdate(d, ResourceDataflowFlexTemplateJob().Schema) { + return nil + } + + if jobHasUpdate(d, ResourceDataflowFlexTemplateJob().Schema) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + tnamemapping := tpgresource.ExpandStringMap(d, "transform_name_mapping") + + env, updatedParameters, err := resourceDataflowFlexJobSetupEnv(d, config) + if err != nil { + return err + } + + // wait until current job is running or terminated + err = waitForDataflowJobState(d, config, d.Id(), userAgent, d.Timeout(schema.TimeoutUpdate), "JOB_STATE_RUNNING") + if err != nil { + return fmt.Errorf("Error waiting for job with job ID %q to be running: %s", d.Id(), err) + } + + request := dataflow.LaunchFlexTemplateRequest{ + LaunchParameter: &dataflow.LaunchFlexTemplateParameter{ + + ContainerSpecGcsPath: d.Get("container_spec_gcs_path").(string), + JobName: d.Get("name").(string), + Parameters: updatedParameters, + TransformNameMappings: tnamemapping, + Environment: &env, + Update: true, + }, + } + + response, err := config.NewDataflowClient(userAgent).Projects.Locations.FlexTemplates.Launch(project, region, &request).Do() + if err != nil { + return err + } + + // don't set id until new job is successfully running + job := response.Job + err = waitForDataflowJobState(d, config, job.Id, userAgent, d.Timeout(schema.TimeoutUpdate), "JOB_STATE_RUNNING") + if err != nil { + // the default behavior is to overwrite the resource's state with the state of the "new" job, even though we are returning an error here. 
this call to Partial prevents this behavior + d.Partial(true) + return fmt.Errorf("Error waiting for Job with job ID %q to be updated: %s", job.Id, err) + } + + d.SetId(job.Id) + if err := d.Set("job_id", job.Id); err != nil { + return fmt.Errorf("Error setting job_id: %s", err) + } + } + return resourceDataflowFlexTemplateJobRead(d, meta) +} + +func resourceDataflowFlexTemplateJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + id := d.Id() + + requestedState, err := resourceDataflowJobMapRequestedState(d.Get("on_delete").(string)) + if err != nil { + return err + } + + // Retry updating the state while the job is not ready to be canceled/drained. + err = retry.Retry(time.Minute*time.Duration(15), func() *retry.RetryError { + // To terminate a dataflow job, we update the job with a requested + // terminal state. + job := &dataflow.Job{ + RequestedState: requestedState, + } + + _, updateErr := resourceDataflowJobUpdateJob(config, project, region, userAgent, id, job) + if updateErr != nil { + gerr, isGoogleErr := updateErr.(*googleapi.Error) + if !isGoogleErr { + // If we have an error and it's not a google-specific error, we should go ahead and return. + return retry.NonRetryableError(updateErr) + } + + if strings.Contains(gerr.Message, "not yet ready for canceling") { + // Retry cancelling job if it's not ready. + // Sleep to avoid hitting update quota with repeated attempts. + time.Sleep(5 * time.Second) + return retry.RetryableError(updateErr) + } + + if strings.Contains(gerr.Message, "Job has terminated") { + // Job has already been terminated, skip. 
+ return nil + } + } + + return nil + }) + if err != nil { + return err + } + + // Wait for state to reach terminal state (canceled/drained/done plus cancelling/draining if skipWait) + skipWait := d.Get("skip_wait_on_job_termination").(bool) + var ok bool + ok = shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait) + for !ok { + log.Printf("[DEBUG] Waiting for job with job state %q to terminate...", d.Get("state").(string)) + time.Sleep(5 * time.Second) + + err = resourceDataflowFlexTemplateJobRead(d, meta) + if err != nil { + return fmt.Errorf("Error while reading job to see if it was properly terminated: %v", err) + } + ok = shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait) + } + + // Only remove the job from state if it's actually successfully hit a final state. + if ok = shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait); ok { + log.Printf("[DEBUG] Removing dataflow job with final state %q", d.Get("state").(string)) + d.SetId("") + return nil + } + return fmt.Errorf("Unable to cancel the dataflow job '%s' - final state was %q.", d.Id(), d.Get("state").(string)) +} + +func resourceDataflowFlexJobTypeCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + + err := dataflowFlexJobTypeParameterOverride("autoscaling_algorithm", "autoscalingAlgorithm", d) + if err != nil { + return err + } + + if p, ok := d.GetOk("parameters.numWorkers"); ok { + if d.HasChange("num_workers") { + e := d.Get("num_workers") + return fmt.Errorf("Error setting num_workers, value is supplied twice: num_workers=%d, parameters.numWorkers=%d", e.(int), p.(int)) + } else { + p := d.Get("parameters.numWorkers") + number, err := strconv.Atoi(p.(string)) + if err != nil { + return fmt.Errorf("parameters.maxNumWorkers must have a valid integer assigned to it, current value is %s", p.(string)) + } + d.SetNew("num_workers", number) + } + } + + if p, ok := d.GetOk("parameters.maxNumWorkers"); ok { + if 
d.HasChange("max_workers") { + e := d.Get("max_workers") + return fmt.Errorf("Error setting max_workers, value is supplied twice: max_workers=%d, parameters.maxNumWorkers=%d", e.(int), p.(int)) + } else { + p := d.Get("parameters.maxNumWorkers") + number, err := strconv.Atoi(p.(string)) + if err != nil { + return fmt.Errorf("parameters.maxNumWorkers must have a valid integer assigned to it, current value is %s", p.(string)) + } + d.SetNew("max_workers", number) + } + } + + err = dataflowFlexJobTypeParameterOverride("network", "network", d) + if err != nil { + return err + } + + err = dataflowFlexJobTypeParameterOverride("service_account_email", "serviceAccountEmail", d) + if err != nil { + return err + } + + err = dataflowFlexJobTypeParameterOverride("subnetwork", "subnetwork", d) + if err != nil { + return err + } + + err = dataflowFlexJobTypeParameterOverride("temp_location", "tempLocation", d) + if err != nil { + return err + } + + err = dataflowFlexJobTypeParameterOverride("staging_location", "stagingLocation", d) + if err != nil { + return err + } + + err = dataflowFlexJobTypeParameterOverride("machine_type", "workerMachineType", d) + if err != nil { + return err + } + + err = dataflowFlexJobTypeParameterOverride("kms_key_name", "kmsKeyName", d) + if err != nil { + return err + } + + err = dataflowFlexJobTypeParameterOverride("ip_configuration", "ipConfiguration", d) + if err != nil { + return err + } + + if p, ok := d.GetOk("parameters.enableStreamingEngine"); ok { + if d.HasChange("enable_streaming_engine") { + e := d.Get("enable_streaming_engine") + return fmt.Errorf("Error setting enable_streaming_engine, value is supplied twice: enable_streaming_engine=%t, parameters.enableStreamingEngine=%t", e.(bool), p.(bool)) + } else { + p := d.Get("parameters.enableStreamingEngine") + d.SetNew("enable_streaming_engine", p.(string)) + } + } + + err = dataflowFlexJobTypeParameterOverride("sdk_container_image", "sdkContainerImage", d) + if err != nil { + return err + } 
+ + err = dataflowFlexJobTypeParameterOverride("launcher_machine_type", "launcherMachineType", d) + if err != nil { + return err + } + + // All non-virtual fields are ForceNew for batch jobs + if d.Get("type") == "JOB_TYPE_BATCH" { + resourceSchema := ResourceDataflowFlexTemplateJob().Schema + for field := range resourceSchema { + if field == "on_delete" { + continue + } + + if field != "terraform_labels" && d.HasChange(field) { + if err := d.ForceNew(field); err != nil { + return err + } + } + } + } + + return nil +} + +func dataflowFlexJobTypeTransferVar(ename, pname string, updatedParameters map[string]string, d *schema.ResourceData) (string, map[string]string) { + + pstring := fmt.Sprintf("parameters.%s", pname) + + if p, ok := d.GetOk(pstring); ok { + delete(updatedParameters, pname) + return p.(string), updatedParameters + } else { + if v, ok := d.GetOk(ename); ok { + return v.(string), updatedParameters + } else { + return "", updatedParameters + } + } +} + +func dataflowFlexJobTypeParameterOverride(ename, pname string, d *schema.ResourceDiff) error { + + pstring := fmt.Sprintf("parameters.%s", pname) + + if p, ok := d.GetOk(pstring); ok { + if d.HasChange(ename) { + e := d.Get(ename) + return fmt.Errorf("Error setting %s, value is supplied twice: %s=\"%s\", %s=\"%s\"", ename, ename, e.(string), pstring, p.(string)) + } else { + p := d.Get(pstring) + d.SetNew(ename, p.(string)) + } + } + return nil +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_migrate.go.tmpl b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_migrate.go.tmpl new file mode 100644 index 000000000000..dd6edcbb8e4e --- /dev/null +++ b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_migrate.go.tmpl @@ -0,0 +1,204 @@ +package dataflow +{{- if ne $.TargetVersionName "ga" }} + +import ( + "context" + + 
"github.com/hashicorp/terraform-provider-google/google/tpgresource" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func resourceDataflowFlexTemplateJobResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + + "container_spec_gcs_path": { + Type: schema.TypeString, + Required: true, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + Description: `The region in which the created job should run.`, + }, + + "transform_name_mapping": { + Type: schema.TypeMap, + Optional: true, + Description: `Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job.`, + }, + + "on_delete": { + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"cancel", "drain"}, false), + Optional: true, + Default: "cancel", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + DiffSuppressFunc: resourceDataflowJobLabelDiffSuppress, + Description: `User labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. NOTE: Google-provided Dataflow templates often provide default labels that begin with goog-dataflow-provided. 
Unless explicitly set in config, these labels will be ignored to prevent diffs on re-apply.`, + }, + + "parameters": { + Type: schema.TypeMap, + Optional: true, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "job_id": { + Type: schema.TypeString, + Computed: true, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + }, + + "type": { + Type: schema.TypeString, + Computed: true, + Description: `The type of this job, selected from the JobType enum.`, + }, + + "num_workers": { + Type: schema.TypeInt, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The initial number of Google Compute Engine instances for the job.`, + }, + + "max_workers": { + Type: schema.TypeInt, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The maximum number of Google Compute Engine instances to be made available to your pipeline during execution, from 1 to 1000.`, + }, + + "service_account_email": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The Service Account email used to create the job.`, + }, + + "temp_location": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The Cloud Storage path to use for temporary files. Must be a valid Cloud Storage URL, beginning with gs://.`, + }, + + "staging_location": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The Cloud Storage path to use for staging files. Must be a valid Cloud Storage URL, beginning with gs://.`, + }, + + "sdk_container_image": { + Type: schema.TypeString, + Optional: true, + Description: `Docker registry location of container image to use for the worker harness. Default is the container for the version of the SDK. 
Note this field is only valid for portable pipelines.`, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The network to which VMs will be assigned. If it is not provided, "default" will be used.`, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".`, + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Description: `The machine type to use for the job.`, + }, + + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `The name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`, + }, + + "ip_configuration": { + Type: schema.TypeString, + Optional: true, + Description: `The configuration for VM IPs. Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".`, + ValidateFunc: validation.StringInSlice([]string{"WORKER_IP_PUBLIC", "WORKER_IP_PRIVATE"}, false), + }, + + "additional_experiments": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Description: `List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "enable_streaming_engine": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Indicates if the job should use the streaming engine feature.`, + }, + + "autoscaling_algorithm": { + Type: schema.TypeString, + Optional: true, + Description: `The algorithm to use for autoscaling`, + }, + + "launcher_machine_type": { + Type: schema.TypeString, + Optional: true, + Description: `The machine type to use for launching the job. 
The default is n1-standard-1.`, + }, + + "skip_wait_on_job_termination": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing from terraform state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are different, e.g. by embedding a release ID or by using a random_id.`, + }, + }, + UseJSONNumber: true, + } +} + +func ResourceDataflowFlexTemplateJobStateUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + return tpgresource.LabelsStateUpgrade(rawState, resourceDataflowJobGoogleLabelPrefix) +} + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl new file mode 100644 index 000000000000..43dae3c6e829 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_flex_template_job_test.go.tmpl @@ -0,0 +1,1894 @@ +package dataflow_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "regexp" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + compute "google.golang.org/api/compute/v0.beta" +) + +func TestAccDataflowFlexTemplateJob_basic(t *testing.T) { + // This resource uses custom retry logic that cannot be sped up without + // modifying the actual resource + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + bucket := "tf-test-dataflow-bucket-" + randStr + topic := 
"tf-test-topic" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_basic(job, bucket, topic), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job", false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowFlexTemplateJob_streamUpdate(t *testing.T) { + // This resource uses custom retry logic that cannot be sped up without + // modifying the actual resource + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + topic2 := "tf-test-topic-2" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_basic(job, bucket, topic), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job", false), + ), + }, + { + Config: testAccDataflowFlexTemplateJob_basic(job, bucket, topic2), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job", true), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "transform_name_mapping", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowFlexTemplateJob_streamFailUpdate(t *testing.T) { + // This resource uses custom retry logic that cannot be sped up without + // modifying the actual resource + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_basic(job, bucket, topic), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job", false), + ), + }, + { + Config: testAccDataflowFlexTemplateJob_basicfail(job, bucket), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobHasOption(t, "google_dataflow_flex_template_job.flex_job", "topic", "projects/myproject/topics/tf-test-topic"+randStr, true), + ), + ExpectError: regexp.MustCompile(`Error waiting for Job with job ID "[^"]+" to be updated: the job with ID "[^"]+" has terminated with state "JOB_STATE_FAILED" instead of expected state "JOB_STATE_RUNNING"`), + }, + }, + }) +} + +func TestAccDataflowFlexTemplateJob_FullUpdate(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_dataflowFlexTemplateJobFull(job, bucket, topic, randStr), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_fullupdate", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowFlexTemplateJob_dataflowFlexTemplateJobFullUpdate(job, bucket, topic, randStr), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_fullupdate", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowFlexTemplateJob_withNetwork(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + network1 := "tf-test-dataflow-net" + randStr + network2 := "tf-test-dataflow-net2" + randStr + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_network(job, network1, bucket, topic), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_network", false), + 
testAccDataflowFlexTemplateJobHasNetwork(t, "google_dataflow_flex_template_job.flex_job_network", network1, false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_network", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowFlexTemplateJob_networkUpdate(job, network1, network2, bucket, topic), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_network", true), + testAccDataflowFlexTemplateJobHasNetwork(t, "google_dataflow_flex_template_job.flex_job_network", network2, true), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_network", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowFlexTemplateJob_withSubNetwork(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + network := "tf-test-dataflow-net" + randStr + subnetwork1 := "tf-test-dataflow-subnetwork" + randStr + subnetwork2 := "tf-test-dataflow-subnetwork2" + randStr + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_subnetwork(job, network, subnetwork1, bucket, topic), + Check: 
resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_subnetwork", false), + testAccDataflowFlexTemplateJobHasSubNetwork(t, "google_dataflow_flex_template_job.flex_job_subnetwork", subnetwork1, false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_subnetwork", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowFlexTemplateJob_subnetworkUpdate(job, network, subnetwork1, subnetwork2, bucket, topic), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_subnetwork", true), + testAccDataflowFlexTemplateJobHasSubNetwork(t, "google_dataflow_flex_template_job.flex_job_subnetwork", subnetwork2, true), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_subnetwork", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowFlexTemplateJob_withIpConfig(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + network := "tf-test-dataflow-net" + randStr + subnetwork := "tf-test-dataflow-subnetwork" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + 
{ + Config: testAccDataflowFlexTemplateJob_ipConfig(job, network, subnetwork, bucket, topic), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_ipconfig", false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_ipconfig", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "ip_configuration", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowFlexTemplateJob_withKmsKey(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + cryptoKey := kms.CryptoKey.Name + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + if acctest.BootstrapPSARole(t, "service-", "dataflow-service-producer-prod", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_kms(job, cryptoKey, bucket, topic), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_kms", false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_kms", + ImportState: 
true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowFlexTemplateJob_withAdditionalExperiments(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + additionalExperiments := []string{"enable_stackdriver_agent_metrics", "use_runner_v2"} + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_additionalExperiments(job, bucket, topic, additionalExperiments), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_experiments", false), + testAccDataflowFlexTemplateJobHasAdditionalExperiments(t, "google_dataflow_flex_template_job.flex_job_experiments", additionalExperiments, false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_experiments", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "additional_experiments", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowFlexTemplateJob_withProviderDefaultLabels(t *testing.T) { + // This resource uses custom retry logic that cannot be sped up without + // modifying the actual resource + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" 
+ randStr + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_withProviderDefaultLabels(job, bucket, topic, randStr), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_fullupdate", false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_fullupdate", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + { + Config: testAccComputeAddress_resourceLabelsOverridesProviderDefaultLabels(job, bucket, topic, randStr), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_fullupdate", false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_fullupdate", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + { + Config: testAccComputeAddress_moveResourceLabelToProviderDefaultLabels(job, bucket, topic, randStr), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_fullupdate", false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_fullupdate", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + { + Config: 
testAccComputeAddress_resourceLabelsOverridesProviderDefaultLabels(job, bucket, topic, randStr), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_fullupdate", false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_fullupdate", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowFlexTemplateJob_dataflowFlexTemplateJobFull(job, bucket, topic, randStr), + Check: resource.ComposeTestCheckFunc( + testAccDataflowFlexJobExists(t, "google_dataflow_flex_template_job.flex_job_fullupdate", false), + ), + }, + { + ResourceName: "google_dataflow_flex_template_job.flex_job_fullupdate", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "container_spec_gcs_path", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowJob_withAttributionLabelCreationOnly(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + add := "true" + strategy := "CREATION_ONLY" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_attributionLabelCreate(bucket, job, add, strategy), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.%", "1"), + 
resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.user_label", "foo"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.goog-terraform-provisioned", "true"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.user_label", "foo"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "effective_labels.%", "5"), // Includes 3 server generated labels + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowJob_attributionLabelUpdate(bucket, job, add, strategy), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.%", "1"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.user_label", "bar"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.goog-terraform-provisioned", "true"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.user_label", "bar"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "effective_labels.%", "5"), // Includes 3 server generated labels + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowJob_withAttributionLabelProactive(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison 
infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + strategy := "PROACTIVE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_attributionLabelCreate(bucket, job, "false", strategy), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.%", "1"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.user_label", "foo"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.%", "1"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.user_label", "foo"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "effective_labels.%", "4"), // Includes 3 server generated labels + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowJob_attributionLabelUpdate(bucket, job, "true", strategy), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.%", "1"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.user_label", "bar"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.%", "2"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.goog-terraform-provisioned", "true"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.user_label", "bar"), + + 
resource.TestCheckResourceAttr("google_dataflow_job.big_data", "effective_labels.%", "5"), // Includes 3 server generated labels + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "labels", "terraform_labels"}, + }, + }, + }) +} + +// Test implementation of enabling streaming engine via parameters or via argument in resource block +// NOTE: these fields are immutable, so the resource is being recreated between both test steps. +func TestAccDataflowFlexTemplateJob_enableStreamingEngine(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + job := "tf-test-dataflow-job-" + randStr + bucket := "tf-test-dataflow-bucket-" + randStr + topic := "tf-test-topic" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowFlexTemplateJob_enableStreamingEngine_param(job, bucket, topic), + Check: resource.ComposeTestCheckFunc( + // Is set + resource.TestCheckResourceAttr("google_dataflow_flex_template_job.flex_job", "parameters.enableStreamingEngine", "true"), + // Is not set + resource.TestCheckNoResourceAttr("google_dataflow_flex_template_job.flex_job", "enable_streaming_engine"), + ), + }, + { + Config: testAccDataflowFlexTemplateJob_enableStreamingEngine_field(job, bucket, topic), + Check: resource.ComposeTestCheckFunc( + // Now is unset + resource.TestCheckNoResourceAttr("google_dataflow_flex_template_job.flex_job", "parameters.enableStreamingEngine"), + // Now is set + 
resource.TestCheckResourceAttr("google_dataflow_flex_template_job.flex_job", "enable_streaming_engine", "true"), + ), + }, + }, + }) +} + +func testAccDataflowFlexTemplateJobHasNetwork(t *testing.T, res, expected string, wait bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + instanceTmpl, err := testAccDataflowFlexTemplateGetGeneratedInstanceTemplate(t, s, res) + if err != nil { + return fmt.Errorf("Error getting dataflow job instance template: %s", err) + } + if len(instanceTmpl.Properties.NetworkInterfaces) == 0 { + return fmt.Errorf("no network interfaces in template properties: %+v", instanceTmpl.Properties) + } + actual := instanceTmpl.Properties.NetworkInterfaces[0].Network + if tpgresource.GetResourceNameFromSelfLink(actual) != tpgresource.GetResourceNameFromSelfLink(expected) { + return fmt.Errorf("network mismatch: %s != %s", actual, expected) + } + return nil + } +} + +func testAccDataflowFlexTemplateJobHasSubNetwork(t *testing.T, res, expected string, wait bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + instanceTmpl, err := testAccDataflowFlexTemplateGetGeneratedInstanceTemplate(t, s, res) + if err != nil { + return fmt.Errorf("Error getting dataflow job instance template: %s", err) + } + if len(instanceTmpl.Properties.NetworkInterfaces) == 0 { + return fmt.Errorf("no network interfaces in template properties: %+v", instanceTmpl.Properties) + } + actual := instanceTmpl.Properties.NetworkInterfaces[0].Subnetwork + if tpgresource.GetResourceNameFromSelfLink(actual) != tpgresource.GetResourceNameFromSelfLink(expected) { + return fmt.Errorf("subnetwork mismatch: %s != %s", actual, expected) + } + return nil + } +} + +func testAccDataflowFlexTemplateJobHasAdditionalExperiments(t *testing.T, res string, experiments []string, wait bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[res] + if !ok { + return fmt.Errorf("resource %q not found in state", 
res) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := acctest.GoogleProviderConfig(t) + + job, err := config.NewDataflowClient(config.UserAgent).Projects.Jobs.Get(config.Project, rs.Primary.ID).View("JOB_VIEW_ALL").Do() + if err != nil { + return fmt.Errorf("dataflow job does not exist") + } + + for _, expectedExperiment := range experiments { + var contains = false + for _, actualExperiment := range job.Environment.Experiments { + if actualExperiment == expectedExperiment { + contains = true + } + } + if contains != true { + return fmt.Errorf("Expected experiment '%s' not found in experiments", expectedExperiment) + } + } + + return nil + } +} + +func testAccDataflowFlexTemplateGetGeneratedInstanceTemplate(t *testing.T, s *terraform.State, res string) (*compute.InstanceTemplate, error) { + rs, ok := s.RootModule().Resources[res] + if !ok { + return nil, fmt.Errorf("resource %q not in state", res) + } + if rs.Primary.ID == "" { + return nil, fmt.Errorf("resource %q does not have an ID set", res) + } + filter := fmt.Sprintf("properties.labels.dataflow_job_id = %s", rs.Primary.ID) + + config := acctest.GoogleProviderConfig(t) + + var instanceTemplate *compute.InstanceTemplate + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + instanceTemplates, rerr := config.NewComputeClient(config.UserAgent).RegionInstanceTemplates. + List(config.Project, config.Region). + Filter(filter). + MaxResults(2). 
+ Fields("items/properties").Do() + if rerr != nil { + return resource.NonRetryableError(rerr) + } + if len(instanceTemplates.Items) == 0 { + return resource.RetryableError(fmt.Errorf("no instance template found for dataflow job %q", rs.Primary.ID)) + } + if len(instanceTemplates.Items) > 1 { + return resource.NonRetryableError(fmt.Errorf("Wrong number of matching instance templates for dataflow job: %s, %d", rs.Primary.ID, len(instanceTemplates.Items))) + } + instanceTemplate = instanceTemplates.Items[0] + if instanceTemplate == nil || instanceTemplate.Properties == nil { + return resource.NonRetryableError(fmt.Errorf("invalid instance template has no properties")) + } + return nil + }) + if err != nil { + return nil, err + } + return instanceTemplate, nil +} + +func testAccDataflowFlexTemplateJob_basic(job, bucket, topicName string) string { + return fmt.Sprintf(` + +resource "google_pubsub_topic" "example" { + name = "%s" +} + +data "google_storage_bucket_object" "flex_template" { + name = "latest/flex/Streaming_Data_Generator" + bucket = "dataflow-templates" +} + +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US-CENTRAL1" + force_destroy = true + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "schema" { + name = "schema.json" + bucket = google_storage_bucket.bucket.name + content = < "" + if strings.HasPrefix(k, resourceDataflowJobGoogleProvidedLabelPrefix) && new == "" { + // Suppress diff if field is a Google Dataflow-provided label key and has no explicitly set value in Config. + return true + } + + // Let diff be determined by labels (above) + if strings.HasPrefix(k, "labels.%") { + return true + } + + // For other keys, don't suppress diff. 
+ return false +} + +func ResourceDataflowJob() *schema.Resource { + return &schema.Resource{ + Create: resourceDataflowJobCreate, + Read: resourceDataflowJobRead, + Update: resourceDataflowJobUpdateByReplacement, + Delete: resourceDataflowJobDelete, + Timeouts: &schema.ResourceTimeout{ + Update: schema.DefaultTimeout(10 * time.Minute), + }, + CustomizeDiff: customdiff.All( + tpgresource.SetLabelsDiff, + resourceDataflowJobTypeCustomizeDiff, + ), + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceDataflowJobResourceV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceDataflowJobStateUpgradeV0, + Version: 0, + }, + }, + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `A unique name for the resource, required by Dataflow.`, + }, + + "template_gcs_path": { + Type: schema.TypeString, + Required: true, + Description: `The Google Cloud Storage path to the Dataflow job template.`, + }, + + "temp_gcs_location": { + Type: schema.TypeString, + Required: true, + Description: `A writeable location on Google Cloud Storage for the Dataflow job to dump its temporary data.`, + }, + + "zone": { + Type: schema.TypeString, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The zone in which the created job should run. If it is not provided, the provider zone is used.`, + }, + + "region": { + Type: schema.TypeString, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The region in which the created job should run.`, + }, + + "max_workers": { + Type: schema.TypeInt, + Optional: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The number of workers permitted to work on the job. 
More workers may improve processing speed at additional cost.`, + }, + + "parameters": { + Type: schema.TypeMap, + Optional: true, + Description: `Key/Value pairs to be passed to the Dataflow job (as used in the template).`, + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `User labels to be specified for the job. Keys and values should follow the restrictions specified in the labeling restrictions page. NOTE: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "transform_name_mapping": { + Type: schema.TypeMap, + Optional: true, + Description: `Only applicable when updating a pipeline. Map of transform name prefixes of the job to be replaced with the corresponding name prefixes of the new job.`, + }, + + "on_delete": { + Type: schema.TypeString, + ValidateFunc: validation.StringInSlice([]string{"cancel", "drain"}, false), + Optional: true, + Default: "drain", + Description: `One of "drain" or "cancel". 
Specifies behavior of deletion during terraform destroy.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + // ForceNew applies to both stream and batch jobs + ForceNew: true, + Description: `The project in which the resource belongs.`, + }, + + "state": { + Type: schema.TypeString, + Computed: true, + Description: `The current state of the resource, selected from the JobState enum.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `The type of this job, selected from the JobType enum.`, + }, + "service_account_email": { + Type: schema.TypeString, + Optional: true, + Description: `The Service Account email used to create the job.`, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The network to which VMs will be assigned. If it is not provided, "default" will be used.`, + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: `The subnetwork to which VMs will be assigned. Should be of the form "regions/REGION/subnetworks/SUBNETWORK".`, + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + Description: `The machine type to use for the job.`, + }, + + "kms_key_name": { + Type: schema.TypeString, + Optional: true, + Description: `The name for the Cloud KMS key for the job. Key format is: projects/PROJECT_ID/locations/LOCATION/keyRings/KEY_RING/cryptoKeys/KEY`, + }, + + "ip_configuration": { + Type: schema.TypeString, + Optional: true, + Description: `The configuration for VM IPs. 
Options are "WORKER_IP_PUBLIC" or "WORKER_IP_PRIVATE".`, + ValidateFunc: validation.StringInSlice([]string{"WORKER_IP_PUBLIC", "WORKER_IP_PRIVATE", ""}, false), + }, + + "additional_experiments": { + Type: schema.TypeSet, + Optional: true, + Computed: true, + Description: `List of experiments that should be used by the job. An example value is ["enable_stackdriver_agent_metrics"].`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "job_id": { + Type: schema.TypeString, + Computed: true, + Description: `The unique ID of this job.`, + }, + + "enable_streaming_engine": { + Type: schema.TypeBool, + Optional: true, + Description: `Indicates if the job should use the streaming engine feature.`, + }, + + "skip_wait_on_job_termination": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If true, treat DRAINING and CANCELLING as terminal job states and do not wait for further changes before removing from terraform state and moving on. WARNING: this will lead to job name conflicts if you do not ensure that the job names are different, e.g. 
by embedding a release ID or by using a random_id.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceDataflowJobTypeCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error { + // All non-virtual fields are ForceNew for batch jobs + if d.Get("type") == "JOB_TYPE_BATCH" { + resourceSchema := ResourceDataflowJob().Schema + for field := range resourceSchema { + if field == "on_delete" { + continue + } + + if field != "terraform_labels" && d.HasChange(field) { + if err := d.ForceNew(field); err != nil { + return err + } + } + } + } + + return nil +} + +// return true if a job is in a terminal state, OR if a job is in a +// terminating state and skipWait is true +func shouldStopDataflowJobDeleteQuery(state string, skipWait bool) bool { + _, stopQuery := DataflowTerminalStatesMap[state] + if !stopQuery && skipWait { + _, stopQuery = DataflowTerminatingStatesMap[state] + } + return stopQuery +} + +func resourceDataflowJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + params := tpgresource.ExpandStringMap(d, "parameters") + + env, err := resourceDataflowJobSetupEnv(d, config) + if err != nil { + return err + } + + request := dataflow.CreateJobFromTemplateRequest{ + JobName: d.Get("name").(string), + GcsPath: d.Get("template_gcs_path").(string), + Parameters: params, + Environment: &env, + } + + job, err := resourceDataflowJobCreateJob(config, project, region, userAgent, &request) + if err != nil { + return err + } + d.SetId(job.Id) + + return resourceDataflowJobRead(d, meta) +} + +func resourceDataflowJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + 
userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + id := d.Id() + + job, err := resourceDataflowJobGetJob(config, project, region, userAgent, id) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Dataflow job %s", id)) + } + + if err := d.Set("job_id", job.Id); err != nil { + return fmt.Errorf("Error setting job_id: %s", err) + } + if err := d.Set("state", job.CurrentState); err != nil { + return fmt.Errorf("Error setting state: %s", err) + } + if err := d.Set("name", job.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("type", job.Type); err != nil { + return fmt.Errorf("Error setting type: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := tpgresource.SetLabels(job.Labels, d, "labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := tpgresource.SetLabels(job.Labels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", job.Labels); err != nil { + return fmt.Errorf("Error setting effective_labels: %s", err) + } + if job.Environment == nil { + return fmt.Errorf("Error accessing Environment proto: proto is nil") + } + if err := d.Set("kms_key_name", job.Environment.ServiceKmsKeyName); err != nil { + return fmt.Errorf("Error setting kms_key_name: %s", err) + } + + // This map isn't provided on all responses. It's not clear why but we only want to set these fields + // if the API returns. Otherwise this execution will crash for the user. 
+ // https://github.com/hashicorp/terraform-provider-google/issues/7449 + sdkPipelineOptions, err := tpgresource.ConvertToMap(job.Environment.SdkPipelineOptions) + if err == nil { + optionsMap := sdkPipelineOptions["options"].(map[string]interface{}) + if err := d.Set("template_gcs_path", optionsMap["templateLocation"]); err != nil { + return fmt.Errorf("Error setting template_gcs_path: %s", err) + } + if err := d.Set("temp_gcs_location", optionsMap["tempLocation"]); err != nil { + return fmt.Errorf("Error setting temp_gcs_location: %s", err) + } + if err := d.Set("machine_type", optionsMap["machineType"]); err != nil { + return fmt.Errorf("Error setting machine_type: %s", err) + } + if err := d.Set("network", optionsMap["network"]); err != nil { + return fmt.Errorf("Error setting network: %s", err) + } + if err := d.Set("service_account_email", optionsMap["serviceAccountEmail"]); err != nil { + return fmt.Errorf("Error setting service_account_email: %s", err) + } + } + + if ok := shouldStopDataflowJobDeleteQuery(job.CurrentState, d.Get("skip_wait_on_job_termination").(bool)); ok { + log.Printf("[DEBUG] Removing resource '%s' because it is in state %s.\n", job.Name, job.CurrentState) + d.SetId("") + return nil + } + d.SetId(job.Id) + + return nil +} + +// Stream update method. 
Batch job changes should have been set to ForceNew via custom diff +func resourceDataflowJobUpdateByReplacement(d *schema.ResourceData, meta interface{}) error { + // Don't send an update request if only virtual fields have changes + if resourceDataflowJobIsVirtualUpdate(d, ResourceDataflowJob().Schema) { + return nil + } + + if jobHasUpdate(d, ResourceDataflowJob().Schema) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + params := tpgresource.ExpandStringMap(d, "parameters") + tnamemapping := tpgresource.ExpandStringMap(d, "transform_name_mapping") + + env, err := resourceDataflowJobSetupEnv(d, config) + if err != nil { + return err + } + + request := dataflow.LaunchTemplateParameters{ + JobName: d.Get("name").(string), + Parameters: params, + TransformNameMapping: tnamemapping, + Environment: &env, + Update: true, + } + + var response *dataflow.LaunchTemplateResponse + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (updateErr error) { + response, updateErr = resourceDataflowJobLaunchTemplate(config, project, region, userAgent, d.Get("template_gcs_path").(string), &request) + return updateErr + }, + Timeout: time.Minute*time.Duration(5), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsDataflowJobUpdateRetryableError}, + }) + if err != nil { + return err + } + + if err := waitForDataflowJobToBeUpdated(d, config, response.Job.Id, userAgent, d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("Error updating job with job ID %q: %v", d.Id(), err) + } + + d.SetId(response.Job.Id) + } + return resourceDataflowJobRead(d, meta) +} + +func resourceDataflowJobDelete(d *schema.ResourceData, meta interface{}) error { + 
config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return err + } + + id := d.Id() + + requestedState, err := resourceDataflowJobMapRequestedState(d.Get("on_delete").(string)) + if err != nil { + return err + } + + // Retry updating the state while the job is not ready to be canceled/drained. + err = retry.Retry(time.Minute*time.Duration(15), func() *retry.RetryError { + // To terminate a dataflow job, we update the job with a requested + // terminal state. + job := &dataflow.Job{ + RequestedState: requestedState, + } + + _, updateErr := resourceDataflowJobUpdateJob(config, project, region, userAgent, id, job) + if updateErr != nil { + gerr, isGoogleErr := updateErr.(*googleapi.Error) + if !isGoogleErr { + // If we have an error and it's not a google-specific error, we should go ahead and return. + return retry.NonRetryableError(updateErr) + } + + if strings.Contains(gerr.Message, "not yet ready for canceling") { + // Retry cancelling job if it's not ready. + // Sleep to avoid hitting update quota with repeated attempts. + time.Sleep(5 * time.Second) + return retry.RetryableError(updateErr) + } + + if strings.Contains(gerr.Message, "Job has terminated") { + // Job has already been terminated, skip. 
+ return nil + } + } + + return nil + }) + if err != nil { + return err + } + + // Wait for state to reach terminal state (canceled/drained/done plus cancelling/draining if skipWait) + skipWait := d.Get("skip_wait_on_job_termination").(bool) + ok := shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait) + for !ok { + log.Printf("[DEBUG] Waiting for job with job state %q to terminate...", d.Get("state").(string)) + time.Sleep(5 * time.Second) + + err = resourceDataflowJobRead(d, meta) + if err != nil { + return fmt.Errorf("Error while reading job to see if it was properly terminated: %v", err) + } + ok = shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait) + } + + // Only remove the job from state if it's actually successfully hit a final state. + if ok = shouldStopDataflowJobDeleteQuery(d.Get("state").(string), skipWait); ok { + log.Printf("[DEBUG] Removing dataflow job with final state %q", d.Get("state").(string)) + d.SetId("") + return nil + } + return fmt.Errorf("Unable to cancel the dataflow job '%s' - final state was %q.", d.Id(), d.Get("state").(string)) +} + +func resourceDataflowJobMapRequestedState(policy string) (string, error) { + switch policy { + case "cancel": + return "JOB_STATE_CANCELLED", nil + case "drain": + return "JOB_STATE_DRAINING", nil + default: + return "", fmt.Errorf("Invalid `on_delete` policy: %s", policy) + } +} + +func resourceDataflowJobCreateJob(config *transport_tpg.Config, project, region, userAgent string, request *dataflow.CreateJobFromTemplateRequest) (*dataflow.Job, error) { + if region == "" { + return config.NewDataflowClient(userAgent).Projects.Templates.Create(project, request).Do() + } + return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Create(project, region, request).Do() +} + +func resourceDataflowJobGetJob(config *transport_tpg.Config, project, region, userAgent string, id string) (*dataflow.Job, error) { + if region == "" { + return 
config.NewDataflowClient(userAgent).Projects.Jobs.Get(project, id).View("JOB_VIEW_ALL").Do() + } + return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Get(project, region, id).View("JOB_VIEW_ALL").Do() +} + +func resourceDataflowJobUpdateJob(config *transport_tpg.Config, project, region, userAgent string, id string, job *dataflow.Job) (*dataflow.Job, error) { + if region == "" { + return config.NewDataflowClient(userAgent).Projects.Jobs.Update(project, id, job).Do() + } + return config.NewDataflowClient(userAgent).Projects.Locations.Jobs.Update(project, region, id, job).Do() +} + +func resourceDataflowJobLaunchTemplate(config *transport_tpg.Config, project, region, userAgent string, gcsPath string, request *dataflow.LaunchTemplateParameters) (*dataflow.LaunchTemplateResponse, error) { + if region == "" { + return config.NewDataflowClient(userAgent).Projects.Templates.Launch(project, request).GcsPath(gcsPath).Do() + } + return config.NewDataflowClient(userAgent).Projects.Locations.Templates.Launch(project, region, request).GcsPath(gcsPath).Do() +} + +func resourceDataflowJobSetupEnv(d *schema.ResourceData, config *transport_tpg.Config) (dataflow.RuntimeEnvironment, error) { + zone, _ := tpgresource.GetZone(d, config) + + labels := tpgresource.ExpandStringMap(d, "effective_labels") + + additionalExperiments := tpgresource.ConvertStringSet(d.Get("additional_experiments").(*schema.Set)) + + env := dataflow.RuntimeEnvironment{ + MaxWorkers: int64(d.Get("max_workers").(int)), + Network: d.Get("network").(string), + ServiceAccountEmail: d.Get("service_account_email").(string), + Subnetwork: d.Get("subnetwork").(string), + TempLocation: d.Get("temp_gcs_location").(string), + MachineType: d.Get("machine_type").(string), + KmsKeyName: d.Get("kms_key_name").(string), + IpConfiguration: d.Get("ip_configuration").(string), + EnableStreamingEngine: d.Get("enable_streaming_engine").(bool), + AdditionalUserLabels: labels, + Zone: zone, + AdditionalExperiments: 
additionalExperiments, + } + return env, nil +} + +func resourceDataflowJobIterateMapForceNew(mapKey string, d *schema.ResourceDiff) error { + obj := d.Get(mapKey).(map[string]interface{}) + for k := range obj { + entrySchemaKey := mapKey + "." + k + if d.HasChange(entrySchemaKey) { + // ForceNew must be called on the parent map to trigger + if err := d.ForceNew(mapKey); err != nil { + return err + } + break + } + } + return nil +} + +func resourceDataflowJobIterateMapHasChange(mapKey string, d *schema.ResourceData) bool { + obj := d.Get(mapKey).(map[string]interface{}) + for k := range obj { + entrySchemaKey := mapKey + "." + k + if d.HasChange(entrySchemaKey) { + return true + } + } + return false +} + +func resourceDataflowJobIsVirtualUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Schema) bool { + // on_delete is the only virtual field + if d.HasChange("on_delete") { + for field := range resourceSchema { + if field == "on_delete" { + continue + } + + if d.HasChange(field) { + return false + } + } + // on_delete is changing, but nothing else + return true + } + + return false +} + +// If only fields on_delete, terraform_labels are changing, no update request is needed +func jobHasUpdate(d *schema.ResourceData, resourceSchema map[string]*schema.Schema) bool { + if d.HasChange("on_delete") || d.HasChange("labels") || d.HasChange("terraform_labels") { + for field := range resourceSchema { + if field == "on_delete" || field == "labels" || field == "terraform_labels" { + continue + } + + if d.HasChange(field) { + return true + } + } + // on_delete, or terraform_labels are changing, but nothing else + return false + } + + return true +} + +func waitForDataflowJobToBeUpdated(d *schema.ResourceData, config *transport_tpg.Config, replacementJobID, userAgent string, timeout time.Duration) error { + return retry.Retry(timeout, func() *retry.RetryError { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return 
retry.NonRetryableError(err) + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return retry.NonRetryableError(err) + } + + replacementJob, err := resourceDataflowJobGetJob(config, project, region, userAgent, replacementJobID) + if err != nil { + if transport_tpg.IsRetryableError(err, nil, nil) { + return retry.RetryableError(err) + } + return retry.NonRetryableError(err) + } + + state := replacementJob.CurrentState + switch state { + case "", "JOB_STATE_PENDING": + return retry.RetryableError(fmt.Errorf("the replacement job with ID %q has pending state %q.", replacementJobID, state)) + case "JOB_STATE_FAILED": + return retry.NonRetryableError(fmt.Errorf("the replacement job with ID %q failed with state %q.", replacementJobID, state)) + default: + log.Printf("[DEBUG] the replacement job with ID %q has state %q.", replacementJobID, state) + return nil + } + }) +} diff --git a/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_job_test.go.tmpl b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_job_test.go.tmpl new file mode 100644 index 000000000000..3f041d25d374 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataflow/go/resource_dataflow_job_test.go.tmpl @@ -0,0 +1,1316 @@ +package dataflow_test + +import ( + "fmt" + "strconv" + "strings" + "testing" + "time" + + "github.com/hashicorp/terraform-provider-google/google/services/dataflow" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + +{{ if eq $.TargetVersionName `ga` }} + "google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +const ( + testDataflowJobTemplateWordCountUrl = "gs://dataflow-templates/latest/Word_Count" + testDataflowJobSampleFileUrl = 
"gs://dataflow-samples/shakespeare/various.txt" + testDataflowJobTemplateTextToPubsub = "gs://dataflow-templates/latest/Stream_GCS_Text_to_Cloud_PubSub" +) + +func TestAccDataflowJob_basic(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + zone := "us-east5-b" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_zone(bucket, job, zone), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.big_data"), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "zone", "state"}, + }, + }, + }) +} + +func TestAccDataflowJobSkipWait_basic(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + zone := "us-east5-b" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJobSkipWait_zone(bucket, job, zone), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.big_data"), + ), + }, + { + ResourceName: 
"google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "zone", "state"}, + }, + }, + }) +} + +func TestAccDataflowJob_withRegion(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobRegionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_region(bucket, job), + Check: resource.ComposeTestCheckFunc( + testAccRegionalDataflowJobExists(t, "google_dataflow_job.big_data", "us-central1"), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "region", "state"}, + }, + }, + }) +} + +func TestAccDataflowJob_withServiceAccount(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + accountId := "tf-test-dataflow-sa" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_serviceAccount(bucket, job, accountId), + Check: resource.ComposeTestCheckFunc( + 
testAccDataflowJobExists(t, "google_dataflow_job.big_data"), + testAccDataflowJobHasServiceAccount(t, "google_dataflow_job.big_data", accountId), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state"}, + }, + }, + }) +} + +func TestAccDataflowJob_withNetwork(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + network := "tf-test-dataflow-net" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_network(bucket, job, network), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.big_data"), + testAccDataflowJobHasNetwork(t, "google_dataflow_job.big_data", network), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state"}, + }, + }, + }) +} + +func TestAccDataflowJob_withSubnetwork(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + network := "tf-test-dataflow-net" + randStr + subnetwork := "tf-test-dataflow-subnet" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_subnetwork(bucket, job, network, subnetwork), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.big_data"), + testAccDataflowJobHasSubnetwork(t, "google_dataflow_job.big_data", subnetwork), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "subnetwork", "state"}, + }, + }, + }) +} + +func TestAccDataflowJob_withLabels(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + key := "my-label" + value := "my-value" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_labels(bucket, job, key, value), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.big_data"), + testAccDataflowJobHasLabels(t, "google_dataflow_job.big_data", key), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDataflowJob_withProviderDefaultLabels(t *testing.T) { + // The test failed if VCR testing is enabled, because the cached provider config is used. 
+ // With the cached provider config, any changes in the provider default labels will not be applied. + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + zone := "us-east5-b" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_withProviderDefaultLabels(bucket, job), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.%", "2"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.env", "foo"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.%", "3"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.default_key1", "default_value1"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.env", "foo"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "effective_labels.%", "6"), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowJob_resourceLabelsOverridesProviderDefaultLabels(bucket, job), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.%", "3"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.env", 
"foo"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.default_expiration_ms", "3600000"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.default_key1", "value1"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.%", "3"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.default_key1", "value1"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.env", "foo"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "effective_labels.%", "6"), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + // The labels field in the state is decided by the configuration. + // During importing, the configuration is unavailable, so the labels field in the state after importing is empty. 
+ ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowJob_moveResourceLabelToProviderDefaultLabels(bucket, job), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.%", "2"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.default_expiration_ms", "3600000"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.default_key1", "value1"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.%", "3"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.default_key1", "value1"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.env", "foo"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "effective_labels.%", "6"), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowJob_resourceLabelsOverridesProviderDefaultLabels(bucket, job), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.%", "3"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.env", "foo"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.default_expiration_ms", "3600000"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "labels.default_key1", "value1"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.%", "3"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", 
"terraform_labels.default_key1", "value1"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.env", "foo"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "terraform_labels.default_expiration_ms", "3600000"), + + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "effective_labels.%", "6"), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "labels", "terraform_labels"}, + }, + { + Config: testAccDataflowJob_zone(bucket, job, zone), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckNoResourceAttr("google_dataflow_job.big_data", "labels.%"), + resource.TestCheckResourceAttr("google_dataflow_job.big_data", "effective_labels.%", "3"), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "zone", "state"}, + }, + }, + }) +} + +func TestAccDataflowJob_withIpConfig(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_ipConfig(bucket, job), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.big_data"), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "ip_configuration", "state"}, + }, + }, + }) +} + +func TestAccDataflowJob_withKmsKey(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + key_ring := "tf-test-dataflow-kms-ring-" + randStr + crypto_key := "tf-test-dataflow-kms-key-" + randStr + bucket := "tf-test-dataflow-gcs-" + randStr + job := "tf-test-dataflow-job-" + randStr + zone := "us-east5-b" + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + if acctest.BootstrapPSARole(t, "service-", "dataflow-service-producer-prod", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_kms(key_ring, crypto_key, bucket, job, zone), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.big_data"), + ), + }, + { + ResourceName: "google_dataflow_job.big_data", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "zone", "state"}, + }, + }, + }) +} +func TestAccDataflowJobWithAdditionalExperiments(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + randStr := acctest.RandString(t, 10) + bucket := "tf-test-dataflow-gcs-" + randStr + job := 
"tf-test-dataflow-job-" + randStr + additionalExperiments := []string{"enable_stackdriver_agent_metrics", "shuffle_mode=service"} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_additionalExperiments(bucket, job, additionalExperiments), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.with_additional_experiments"), + testAccDataflowJobHasExperiments(t, "google_dataflow_job.with_additional_experiments", additionalExperiments), + ), + }, + { + ResourceName: "google_dataflow_job.with_additional_experiments", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state", "additional_experiments"}, + }, + }, + }) +} + +func TestAccDataflowJob_streamUpdate(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + suffix := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_updateStream(suffix, "google_storage_bucket.bucket1.url"), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.pubsub_stream"), + ), + }, + { + Config: testAccDataflowJob_updateStream(suffix, "google_storage_bucket.bucket2.url"), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobHasTempLocation(t, "google_dataflow_job.pubsub_stream", "gs://tf-test-bucket2-"+suffix), + ), + }, + { + ResourceName: "google_dataflow_job.pubsub_stream", + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "transform_name_mapping", "state"}, + }, + }, + }) +} + +func TestAccDataflowJob_virtualUpdate(t *testing.T) { + // Dataflow responses include serialized java classes and bash commands + // This makes body comparison infeasible + acctest.SkipIfVcr(t) + t.Parallel() + + suffix := acctest.RandString(t, 10) + + // If the update is virtual-only, the ID should remain the same after updating. + var id string + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataflowJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataflowJob_virtualUpdate(suffix, "drain"), + Check: resource.ComposeTestCheckFunc( + testAccDataflowJobExists(t, "google_dataflow_job.pubsub_stream"), + testAccDataflowSetId(t, "google_dataflow_job.pubsub_stream", &id), + ), + }, + { + Config: testAccDataflowJob_virtualUpdate(suffix, "cancel"), + Check: resource.ComposeTestCheckFunc( + testAccDataflowCheckId(t, "google_dataflow_job.pubsub_stream", &id), + resource.TestCheckResourceAttr("google_dataflow_job.pubsub_stream", "on_delete", "cancel"), + ), + }, + { + ResourceName: "google_dataflow_job.pubsub_stream", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"on_delete", "parameters", "skip_wait_on_job_termination", "state"}, + }, + }, + }) +} + +func testAccCheckDataflowJobDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_dataflow_job" { + continue + } + + config := acctest.GoogleProviderConfig(t) + job, err := config.NewDataflowClient(config.UserAgent).Projects.Jobs.Get(config.Project, rs.Primary.ID).Do() + if job != nil { + var ok bool + skipWait, err := 
strconv.ParseBool(rs.Primary.Attributes["skip_wait_on_job_termination"]) + if err != nil { + return fmt.Errorf("could not parse attribute: %v", err) + } + _, ok = dataflow.DataflowTerminalStatesMap[job.CurrentState] + if !ok && skipWait { + _, ok = dataflow.DataflowTerminatingStatesMap[job.CurrentState] + } + if !ok { + return fmt.Errorf("Job still present") + } + } else if err != nil { + return err + } + } + + return nil + } +} + +func testAccCheckDataflowJobRegionDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_dataflow_job" { + continue + } + config := acctest.GoogleProviderConfig(t) + job, err := config.NewDataflowClient(config.UserAgent).Projects.Locations.Jobs.Get(config.Project, "us-central1", rs.Primary.ID).Do() + if job != nil { + var ok bool + skipWait, err := strconv.ParseBool(rs.Primary.Attributes["skip_wait_on_job_termination"]) + if err != nil { + return fmt.Errorf("could not parse attribute: %v", err) + } + _, ok = dataflow.DataflowTerminalStatesMap[job.CurrentState] + if !ok && skipWait { + _, ok = dataflow.DataflowTerminatingStatesMap[job.CurrentState] + } + if !ok { + return fmt.Errorf("Job still present") + } + } else if err != nil { + return err + } + } + + return nil + } +} + +func testAccDataflowJobExists(t *testing.T, resource string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resource] + if !ok { + return fmt.Errorf("resource %q not in state", resource) + } + if rs.Primary.ID == "" { + return fmt.Errorf("no ID is set") + } + + config := acctest.GoogleProviderConfig(t) + _, err := config.NewDataflowClient(config.UserAgent).Projects.Jobs.Get(config.Project, rs.Primary.ID).Do() + if err != nil { + return fmt.Errorf("could not confirm Dataflow Job %q exists: %v", rs.Primary.ID, err) + } + + return nil + } +} + +func testAccDataflowSetId(t *testing.T, resource 
string, id *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resource] + if !ok { + return fmt.Errorf("resource %q not in state", resource) + } + + *id = rs.Primary.ID + return nil + } +} + +func testAccDataflowCheckId(t *testing.T, resource string, id *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resource] + if !ok { + return fmt.Errorf("resource %q not in state", resource) + } + + if rs.Primary.ID != *id { + return fmt.Errorf("ID did not match. Expected %s, received %s", *id, rs.Primary.ID) + } + return nil + } +} + +func testAccDataflowJobHasNetwork(t *testing.T, res, expected string) resource.TestCheckFunc { + return func(s *terraform.State) error { + instanceTmpl, err := testAccDataflowJobGetGeneratedInstanceTemplate(t, s, res) + if err != nil { + return fmt.Errorf("Error getting dataflow job instance template: %s", err) + } + if len(instanceTmpl.Properties.NetworkInterfaces) == 0 { + return fmt.Errorf("no network interfaces in template properties: %+v", instanceTmpl.Properties) + } + actual := instanceTmpl.Properties.NetworkInterfaces[0].Network + if tpgresource.GetResourceNameFromSelfLink(actual) != tpgresource.GetResourceNameFromSelfLink(expected) { + return fmt.Errorf("network mismatch: %s != %s", actual, expected) + } + return nil + } +} + +func testAccDataflowJobHasSubnetwork(t *testing.T, res, expected string) resource.TestCheckFunc { + return func(s *terraform.State) error { + instanceTmpl, err := testAccDataflowJobGetGeneratedInstanceTemplate(t, s, res) + if err != nil { + return fmt.Errorf("Error getting dataflow job instance template: %s", err) + } + if len(instanceTmpl.Properties.NetworkInterfaces) == 0 { + return fmt.Errorf("no network interfaces in template properties: %+v", instanceTmpl.Properties) + } + actual := instanceTmpl.Properties.NetworkInterfaces[0].Subnetwork + if 
tpgresource.GetResourceNameFromSelfLink(actual) != tpgresource.GetResourceNameFromSelfLink(expected) { + return fmt.Errorf("subnetwork mismatch: %s != %s", actual, expected) + } + return nil + } +} + +func testAccDataflowJobHasServiceAccount(t *testing.T, res, expectedId string) resource.TestCheckFunc { + return func(s *terraform.State) error { + instanceTmpl, err := testAccDataflowJobGetGeneratedInstanceTemplate(t, s, res) + if err != nil { + return fmt.Errorf("Error getting dataflow job instance template: %s", err) + } + accounts := instanceTmpl.Properties.ServiceAccounts + if len(accounts) != 1 { + return fmt.Errorf("Found multiple service accounts (%d) for dataflow job %q, expected 1", len(accounts), res) + } + actualId := strings.Split(accounts[0].Email, "@")[0] + if expectedId != actualId { + return fmt.Errorf("service account mismatch, expected account ID = %q, actual email = %q", expectedId, accounts[0].Email) + } + return nil + } +} + +func testAccDataflowJobGetGeneratedInstanceTemplate(t *testing.T, s *terraform.State, res string) (*compute.InstanceTemplate, error) { + rs, ok := s.RootModule().Resources[res] + if !ok { + return nil, fmt.Errorf("resource %q not in state", res) + } + if rs.Primary.ID == "" { + return nil, fmt.Errorf("resource %q does not have an ID set", res) + } + filter := fmt.Sprintf("properties.labels.dataflow_job_id = %s", rs.Primary.ID) + + config := acctest.GoogleProviderConfig(t) + + var instanceTemplate *compute.InstanceTemplate + + err := resource.Retry(1*time.Minute, func() *resource.RetryError { + instanceTemplates, rerr := config.NewComputeClient(config.UserAgent).RegionInstanceTemplates. + List(config.Project, config.Region). + Filter(filter). + MaxResults(2). 
+ Fields("items/properties").Do() + if rerr != nil { + return resource.NonRetryableError(rerr) + } + if len(instanceTemplates.Items) == 0 { + return resource.RetryableError(fmt.Errorf("no instance template found for dataflow job %q", rs.Primary.ID)) + } + if len(instanceTemplates.Items) > 1 { + return resource.NonRetryableError(fmt.Errorf("Wrong number of matching instance templates for dataflow job: %s, %d", rs.Primary.ID, len(instanceTemplates.Items))) + } + instanceTemplate = instanceTemplates.Items[0] + if instanceTemplate == nil || instanceTemplate.Properties == nil { + return resource.NonRetryableError(fmt.Errorf("invalid instance template has no properties")) + } + return nil + }) + if err != nil { + return nil, err + } + return instanceTemplate, nil +} + +func testAccRegionalDataflowJobExists(t *testing.T, res, region string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[res] + if !ok { + return fmt.Errorf("resource %q not found in state", res) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := acctest.GoogleProviderConfig(t) + _, err := config.NewDataflowClient(config.UserAgent).Projects.Locations.Jobs.Get(config.Project, region, rs.Primary.ID).Do() + if err != nil { + return fmt.Errorf("Job does not exist") + } + + return nil + } +} + +func testAccDataflowJobHasLabels(t *testing.T, res, key string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[res] + if !ok { + return fmt.Errorf("resource %q not found in state", res) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := acctest.GoogleProviderConfig(t) + + job, err := config.NewDataflowClient(config.UserAgent).Projects.Jobs.Get(config.Project, rs.Primary.ID).Do() + if err != nil { + return fmt.Errorf("dataflow job does not exist") + } + + if job.Labels[key] != rs.Primary.Attributes["labels."+key] { + return fmt.Errorf("Labels do 
not match what is stored in state.") + } + + return nil + } +} + +func testAccDataflowJobHasExperiments(t *testing.T, res string, experiments []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[res] + if !ok { + return fmt.Errorf("resource %q not found in state", res) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := acctest.GoogleProviderConfig(t) + + job, err := config.NewDataflowClient(config.UserAgent).Projects.Jobs.Get(config.Project, rs.Primary.ID).View("JOB_VIEW_ALL").Do() + if err != nil { + return fmt.Errorf("dataflow job does not exist") + } + + for _, expectedExperiment := range experiments { + var contains = false + for _, actualExperiment := range job.Environment.Experiments { + if actualExperiment == expectedExperiment { + contains = true + } + } + if contains != true { + return fmt.Errorf("Expected experiment '%s' not found in experiments", expectedExperiment) + } + } + + return nil + } +} + +func testAccDataflowJobHasTempLocation(t *testing.T, res, targetLocation string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[res] + if !ok { + return fmt.Errorf("resource %q not found in state", res) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + config := acctest.GoogleProviderConfig(t) + + job, err := config.NewDataflowClient(config.UserAgent).Projects.Jobs.Get(config.Project, rs.Primary.ID).View("JOB_VIEW_ALL").Do() + if err != nil { + return fmt.Errorf("dataflow job does not exist") + } + sdkPipelineOptions, err := tpgresource.ConvertToMap(job.Environment.SdkPipelineOptions) + if err != nil { + return err + } + optionsMap := sdkPipelineOptions["options"].(map[string]interface{}) + + if optionsMap["tempLocation"] != targetLocation { + return fmt.Errorf("Temp locations do not match. 
Got %s while expecting %s", optionsMap["tempLocation"], targetLocation) + } + + return nil + } +} + +func testAccDataflowJob_zone(bucket, job, zone string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + zone = "%s" + + machine_type = "e2-standard-2" + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, bucket, job, zone, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJobSkipWait_zone(bucket, job, zone string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + zone = "%s" + + machine_type = "e2-standard-2" + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" + skip_wait_on_job_termination = true +} +`, bucket, job, zone, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_region(bucket, job string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + region = "us-central1" + + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + + on_delete = "cancel" +} +`, bucket, job, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_network(bucket, job, network string) string { + return fmt.Sprintf(` 
+resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_compute_network" "net" { + name = "%s" + auto_create_subnetworks = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + network = google_compute_network.net.name + + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, bucket, network, job, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_subnetwork(bucket, job, network, subnet string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_compute_network" "net" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnet" { + name = "%s" + ip_cidr_range = "10.2.0.0/16" + network = google_compute_network.net.self_link +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + subnetwork = google_compute_subnetwork.subnet.self_link + + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, bucket, network, subnet, job, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_serviceAccount(bucket, job, accountId string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_service_account" "dataflow-sa" { + account_id = "%s" + display_name = "DataFlow Service Account" +} + +resource "google_storage_bucket_iam_member" "dataflow-gcs" { + bucket = google_storage_bucket.temp.name + role = "roles/storage.objectAdmin" + member = 
"serviceAccount:${google_service_account.dataflow-sa.email}" +} + +resource "google_project_iam_member" "dataflow-worker" { + project = data.google_project.project.project_id + role = "roles/dataflow.worker" + member = "serviceAccount:${google_service_account.dataflow-sa.email}" +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + depends_on = [ + google_storage_bucket_iam_member.dataflow-gcs, + google_project_iam_member.dataflow-worker + ] + + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + + service_account_email = google_service_account.dataflow-sa.email +} +`, bucket, accountId, job, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_ipConfig(bucket, job string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + ip_configuration = "WORKER_IP_PRIVATE" + + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, bucket, job, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_labels(bucket, job, labelKey, labelVal string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + labels = { + "%s" = "%s" + } + + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, bucket, job, labelKey, labelVal, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + 
+func testAccDataflowJob_withProviderDefaultLabels(bucket, job string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + } +} + +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + labels = { + env = "foo" + default_expiration_ms = 3600000 + } + + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, bucket, job, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_resourceLabelsOverridesProviderDefaultLabels(bucket, job string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + } +} + +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + labels = { + env = "foo" + default_expiration_ms = 3600000 + default_key1 = "value1" + } + + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, bucket, job, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_moveResourceLabelToProviderDefaultLabels(bucket, job string) string { + return fmt.Sprintf(` +provider "google" { + default_labels = { + default_key1 = "default_value1" + env = "foo" + } +} + +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + labels = { + default_expiration_ms = 3600000 + default_key1 = "value1" + } + + template_gcs_path = "%s" + temp_gcs_location = 
google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, bucket, job, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_kms(key_ring, crypto_key, bucket, job, zone string) string { + return fmt.Sprintf(` +resource "google_kms_key_ring" "keyring" { + name = "%s" + location = "global" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.keyring.id + rotation_period = "100000s" +} + +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "big_data" { + name = "%s" + + zone = "%s" + + machine_type = "e2-standard-2" + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + kms_key_name = google_kms_crypto_key.crypto_key.id + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, key_ring, crypto_key, bucket, job, zone, testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_additionalExperiments(bucket string, job string, experiments []string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "temp" { + name = "%s" + location = "US" + force_destroy = true +} + +resource "google_dataflow_job" "with_additional_experiments" { + name = "%s" + + additional_experiments = ["%s"] + + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.temp.url + parameters = { + inputFile = "%s" + output = "${google_storage_bucket.temp.url}/output" + } + on_delete = "cancel" +} +`, bucket, job, strings.Join(experiments, `", "`), testDataflowJobTemplateWordCountUrl, testDataflowJobSampleFileUrl) +} + +func testAccDataflowJob_updateStream(suffix, tempLocation string) string { + return fmt.Sprintf(` +resource "google_pubsub_topic" "topic" { + name = 
"tf-test-dataflow-job-%s" +} +resource "google_storage_bucket" "bucket1" { + name = "tf-test-bucket1-%s" + location = "US" + force_destroy = true +} +resource "google_storage_bucket" "bucket2" { + name = "tf-test-bucket2-%s" + location = "US" + force_destroy = true +} +resource "google_dataflow_job" "pubsub_stream" { + name = "tf-test-dataflow-job-%s" + template_gcs_path = "%s" + temp_gcs_location = %s + parameters = { + inputFilePattern = "${google_storage_bucket.bucket1.url}/*.json" + outputTopic = google_pubsub_topic.topic.id + } + transform_name_mapping = { + name = "test_job" + env = "test" + } + on_delete = "cancel" +} + `, suffix, suffix, suffix, suffix, testDataflowJobTemplateTextToPubsub, tempLocation) +} + +func testAccDataflowJob_virtualUpdate(suffix, onDelete string) string { + return fmt.Sprintf(` +resource "google_pubsub_topic" "topic" { + name = "tf-test-dataflow-job-%s" +} +resource "google_storage_bucket" "bucket" { + name = "tf-test-bucket-%s" + location = "US" + force_destroy = true +} +resource "google_dataflow_job" "pubsub_stream" { + name = "tf-test-dataflow-job-%s" + template_gcs_path = "%s" + temp_gcs_location = google_storage_bucket.bucket.url + parameters = { + inputFilePattern = "${google_storage_bucket.bucket.url}/*.json" + outputTopic = google_pubsub_topic.topic.id + } + on_delete = "%s" +} + `, suffix, suffix, suffix, testDataflowJobTemplateTextToPubsub, onDelete) +} diff --git a/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl b/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl new file mode 100644 index 000000000000..642456e8efd6 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataform/go/resource_dataform_repository_test.go.tmpl @@ -0,0 +1,128 @@ +package dataform_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataformRepository_updated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckDataformRepositoryDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataformRepository_basic(context), + }, + { + ResourceName: "google_dataform_repository.dataform_repository", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + { + Config: testAccDataformRepository_updated(context), + }, + { + ResourceName: "google_dataform_repository.dataform_repository", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} + +func testAccDataformRepository_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_sourcerepo_repository" "git_repository" { + provider = google-beta + name = "my/repository%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret" { + provider = google-beta + secret_id = "secret" + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "secret_version" { + provider = google-beta + secret = google_secret_manager_secret.secret.id + + secret_data = "tf-test-secret-data%{random_suffix}" +} + +resource "google_dataform_repository" "dataform_repository" { + provider = google-beta + name = "tf_test_dataform_repository%{random_suffix}" + + git_remote_settings { + url = google_sourcerepo_repository.git_repository.url + default_branch = "main" + authentication_token_secret_version = google_secret_manager_secret_version.secret_version.id + } + + workspace_compilation_overrides { + default_database = "database" + schema_suffix = "_suffix" + 
table_prefix = "prefix_" + } +} +`, context) +} + +func testAccDataformRepository_updated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_sourcerepo_repository" "git_repository" { + provider = google-beta + name = "my/repository%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret" { + provider = google-beta + secret_id = "secret" + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "secret_version" { + provider = google-beta + secret = google_secret_manager_secret.secret.id + + secret_data = "tf-test-secret-data%{random_suffix}" +} + +resource "google_dataform_repository" "dataform_repository" { + provider = google-beta + name = "tf_test_dataform_repository%{random_suffix}" + + git_remote_settings { + url = google_sourcerepo_repository.git_repository.url + default_branch = "main" + authentication_token_secret_version = google_secret_manager_secret_version.secret_version.id + } + + workspace_compilation_overrides { + schema_suffix = "_suffix_v2" + table_prefix = "prefix_v2_" + } +} +`, context) +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/dataproc/go/iam_dataproc_cluster.go.tmpl b/mmv1/third_party/terraform/services/dataproc/go/iam_dataproc_cluster.go.tmpl new file mode 100644 index 000000000000..d68c915afec8 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/go/iam_dataproc_cluster.go.tmpl @@ -0,0 +1,142 @@ +package dataproc + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/dataproc/v1" +) + +var IamDataprocClusterSchema = map[string]*schema.Schema{ + 
"cluster": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +type DataprocClusterIamUpdater struct { + project string + region string + cluster string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewDataprocClusterUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return nil, err + } + + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + + return &DataprocClusterIamUpdater{ + project: project, + region: region, + cluster: d.Get("cluster").(string), + d: d, + Config: config, + }, nil +} + +func DataprocClusterIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseRegionalFieldValue("clusters", d.Id(), "project", "region", "zone", d, config, true) + if err != nil { + return err + } + + if err := d.Set("project", fv.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", fv.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + if err := d.Set("cluster", fv.Name); err != nil { + return fmt.Errorf("Error setting cluster: %s", err) + } + + // Explicitly set the id so imported resources have the same ID format as non-imported ones. 
+ d.SetId(fv.RelativeLink()) + return nil +} + +func (u *DataprocClusterIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + req := &dataproc.GetIamPolicyRequest{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewDataprocClient(userAgent).Projects.Regions.Clusters.GetIamPolicy(u.GetResourceId(), req).Do() + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := dataprocToResourceManagerPolicy(p) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *DataprocClusterIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + dataprocPolicy, err := resourceManagerToDataprocPolicy(policy) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + req := &dataproc.SetIamPolicyRequest{Policy: dataprocPolicy} + _, err = u.Config.NewDataprocClient(userAgent).Projects.Regions.Clusters.SetIamPolicy(u.GetResourceId(), req).Do() + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *DataprocClusterIamUpdater) GetResourceId() string { + return fmt.Sprintf("projects/%s/regions/%s/clusters/%s", u.project, u.region, u.cluster) +} + +func (u *DataprocClusterIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-dataproc-cluster-%s-%s-%s", u.project, u.region, u.cluster) +} + +func (u *DataprocClusterIamUpdater) DescribeResource() string { + return fmt.Sprintf("Dataproc 
Cluster %s/%s/%s", u.project, u.region, u.cluster) +} diff --git a/mmv1/third_party/terraform/services/dataproc/go/iam_dataproc_job.go.tmpl b/mmv1/third_party/terraform/services/dataproc/go/iam_dataproc_job.go.tmpl new file mode 100644 index 000000000000..234c87bc05be --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/go/iam_dataproc_job.go.tmpl @@ -0,0 +1,161 @@ +package dataproc + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/dataproc/v1" +) + +var IamDataprocJobSchema = map[string]*schema.Schema{ + "job_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, +} + +type DataprocJobIamUpdater struct { + project string + region string + jobId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewDataprocJobUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + project, err := tpgresource.GetProject(d, config) + if err != nil { + return nil, err + } + + region, err := tpgresource.GetRegion(d, config) + if err != nil { + return nil, err + } + + if err := d.Set("project", project); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", region); err != nil { + return nil, fmt.Errorf("Error setting region: %s", err) + } + + return &DataprocJobIamUpdater{ + project: project, + region: region, + jobId: d.Get("job_id").(string), + 
d: d, + Config: config, + }, nil +} + +func DataprocJobIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + fv, err := tpgresource.ParseRegionalFieldValue("jobs", d.Id(), "project", "region", "zone", d, config, true) + if err != nil { + return err + } + + if err := d.Set("job_id", fv.Name); err != nil { + return fmt.Errorf("Error setting job_id: %s", err) + } + if err := d.Set("project", fv.Project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("region", fv.Region); err != nil { + return fmt.Errorf("Error setting region: %s", err) + } + + // Explicitly set the id so imported resources have the same ID format as non-imported ones. + d.SetId(fv.RelativeLink()) + return nil +} + +func (u *DataprocJobIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + req := &dataproc.GetIamPolicyRequest{} + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + +{{/* GetIamPolicy signature changes between beta and GA clients */}} + p, err := u.Config.NewDataprocClient(userAgent).Projects.Regions.Jobs.GetIamPolicy(u.GetResourceId(), req).Do() + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := dataprocToResourceManagerPolicy(p) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *DataprocJobIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + dataprocPolicy, err := resourceManagerToDataprocPolicy(policy) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + 
 return err
+	}
+
+	req := &dataproc.SetIamPolicyRequest{Policy: dataprocPolicy}
+	_, err = u.Config.NewDataprocClient(userAgent).Projects.Regions.Jobs.SetIamPolicy(u.GetResourceId(), req).Do()
+	if err != nil {
+		return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err)
+	}
+
+	return nil
+}
+
+func (u *DataprocJobIamUpdater) GetResourceId() string {
+	return fmt.Sprintf("projects/%s/regions/%s/jobs/%s", u.project, u.region, u.jobId)
+}
+
+func (u *DataprocJobIamUpdater) GetMutexKey() string {
+	return fmt.Sprintf("iam-dataproc-job-%s-%s-%s", u.project, u.region, u.jobId)
+}
+
+func (u *DataprocJobIamUpdater) DescribeResource() string {
+	return fmt.Sprintf("Dataproc Job %s/%s/%s", u.project, u.region, u.jobId)
+}
+
+func resourceManagerToDataprocPolicy(p *cloudresourcemanager.Policy) (*dataproc.Policy, error) {
+	out := &dataproc.Policy{}
+	err := tpgresource.Convert(p, out)
+	if err != nil {
+		return nil, errwrap.Wrapf("Cannot convert a cloudresourcemanager policy to a dataproc policy: {{"{{"}}err{{"}}"}}", err)
+	}
+	return out, nil
+}
+
+func dataprocToResourceManagerPolicy(p *dataproc.Policy) (*cloudresourcemanager.Policy, error) {
+	out := &cloudresourcemanager.Policy{}
+	err := tpgresource.Convert(p, out)
+	if err != nil {
+		return nil, errwrap.Wrapf("Cannot convert a dataproc policy to a cloudresourcemanager policy: {{"{{"}}err{{"}}"}}", err)
+	}
+	return out, nil
+}
diff --git a/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go
new file mode 100644
index 000000000000..0be63ab32fb5
--- /dev/null
+++ b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_cluster_test.go
@@ -0,0 +1,2561 @@
+package dataproc_test
+
+import (
+	"fmt"
+	"net/http"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "google.golang.org/api/googleapi" + + "google.golang.org/api/dataproc/v1" +) + +func TestAccDataprocCluster_missingZoneGlobalRegion1(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCheckDataproc_missingZoneGlobalRegion1(rnd), + ExpectError: regexp.MustCompile("zone is mandatory when region is set to 'global'"), + }, + }, + }) +} + +func TestAccDataprocCluster_missingZoneGlobalRegion2(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccCheckDataproc_missingZoneGlobalRegion2(rnd), + ExpectError: regexp.MustCompile("zone is mandatory when region is set to 'global'"), + }, + }, + }) +} + +func TestAccDataprocCluster_basic(t *testing.T) { + t.Parallel() + + var cluster dataproc.Cluster + rnd := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_basic(rnd), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.basic", &cluster), 
+ + // Default behaviour is for Dataproc to autogen or autodiscover a config bucket + resource.TestCheckResourceAttrSet("google_dataproc_cluster.basic", "cluster_config.0.bucket"), + + // Default behavior is for Dataproc to not use only internal IP addresses + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.internal_ip_only", "false"), + + // Expect 1 master instances with computed values + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.master_config.#", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.master_config.0.num_instances", "1"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.basic", "cluster_config.0.master_config.0.disk_config.0.boot_disk_size_gb"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.basic", "cluster_config.0.master_config.0.disk_config.0.num_local_ssds"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.basic", "cluster_config.0.master_config.0.disk_config.0.boot_disk_type"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.basic", "cluster_config.0.master_config.0.machine_type"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.master_config.0.instance_names.#", "1"), + + // Expect 2 worker instances with computed values + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.worker_config.#", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.worker_config.0.num_instances", "2"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.basic", "cluster_config.0.worker_config.0.disk_config.0.boot_disk_size_gb"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.basic", "cluster_config.0.worker_config.0.disk_config.0.num_local_ssds"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.basic", 
"cluster_config.0.worker_config.0.disk_config.0.boot_disk_type"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.basic", "cluster_config.0.worker_config.0.machine_type"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.worker_config.0.instance_names.#", "2"), + + // Expect 0 preemptible worker instances + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.preemptible_worker_config.#", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.preemptible_worker_config.0.num_instances", "0"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.preemptible_worker_config.0.instance_names.#", "0"), + ), + }, + }, + }) +} + +func TestAccDataprocVirtualCluster_basic(t *testing.T) { + t.Parallel() + + var cluster dataproc.Cluster + rnd := acctest.RandString(t, 10) + pid := envvar.GetTestProjectFromEnv() + version := "3.1-dataproc-7" + networkName := acctest.BootstrapSharedTestNetwork(t, "gke-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "gke-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocVirtualCluster_basic(pid, rnd, networkName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.virtual_cluster", &cluster), + + // Expect 1 dataproc on gke instances with computed values + resource.TestCheckResourceAttr("google_dataproc_cluster.virtual_cluster", "virtual_cluster_config.#", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.virtual_cluster", "virtual_cluster_config.0.kubernetes_cluster_config.#", "1"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.virtual_cluster", 
"virtual_cluster_config.0.kubernetes_cluster_config.0.kubernetes_namespace"), + resource.TestCheckResourceAttr("google_dataproc_cluster.virtual_cluster", "virtual_cluster_config.0.kubernetes_cluster_config.0.kubernetes_software_config.#", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.virtual_cluster", "virtual_cluster_config.0.kubernetes_cluster_config.0.kubernetes_software_config.0.component_version.SPARK", version), + + resource.TestCheckResourceAttr("google_dataproc_cluster.virtual_cluster", "virtual_cluster_config.0.kubernetes_cluster_config.0.gke_cluster_config.#", "1"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.virtual_cluster", "virtual_cluster_config.0.kubernetes_cluster_config.0.gke_cluster_config.0.gke_cluster_target"), + resource.TestCheckResourceAttr("google_dataproc_cluster.virtual_cluster", "virtual_cluster_config.0.kubernetes_cluster_config.0.gke_cluster_config.0.node_pool_target.#", "1"), + resource.TestCheckResourceAttrSet("google_dataproc_cluster.virtual_cluster", "virtual_cluster_config.0.kubernetes_cluster_config.0.gke_cluster_config.0.node_pool_target.0.node_pool"), + resource.TestCheckResourceAttr("google_dataproc_cluster.virtual_cluster", "virtual_cluster_config.0.kubernetes_cluster_config.0.gke_cluster_config.0.node_pool_target.0.roles.#", "1"), + testAccCheckDataprocGkeClusterNodePoolsHaveRoles(&cluster, "DEFAULT"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withAccelerators(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + var cluster dataproc.Cluster + + project := envvar.GetTestProjectFromEnv() + acceleratorType := "nvidia-tesla-t4" + zone := "us-central1-c" + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withAccelerators(rnd, acceleratorType, zone, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.accelerated_cluster", &cluster), + testAccCheckDataprocClusterAccelerator(&cluster, project, 1, 1), + ), + }, + }, + }) +} + +func testAccCheckDataprocAuxiliaryNodeGroupAccelerator(cluster *dataproc.Cluster, project string) resource.TestCheckFunc { + return func(s *terraform.State) error { + expectedUri := fmt.Sprintf("projects/%s/zones/.*/acceleratorTypes/nvidia-tesla-t4", project) + r := regexp.MustCompile(expectedUri) + + nodeGroup := cluster.Config.AuxiliaryNodeGroups[0].NodeGroup.NodeGroupConfig.Accelerators + if len(nodeGroup) != 1 { + return fmt.Errorf("Saw %d nodeGroup accelerator types instead of 1", len(nodeGroup)) + } + + matches := r.FindStringSubmatch(nodeGroup[0].AcceleratorTypeUri) + if len(matches) != 1 { + return fmt.Errorf("Saw %s master accelerator type instead of %s", nodeGroup[0].AcceleratorTypeUri, expectedUri) + } + return nil + } +} + +func testAccCheckDataprocClusterAccelerator(cluster *dataproc.Cluster, project string, masterCount int, workerCount int) resource.TestCheckFunc { + return func(s *terraform.State) error { + expectedUri := fmt.Sprintf("projects/%s/zones/.*/acceleratorTypes/nvidia-tesla-t4", project) + r := regexp.MustCompile(expectedUri) + + master := cluster.Config.MasterConfig.Accelerators + if len(master) != 1 { + return fmt.Errorf("Saw %d master accelerator types instead of 1", len(master)) + } + + if int(master[0].AcceleratorCount) != masterCount { + return fmt.Errorf("Saw %d master accelerators instead of %d", int(master[0].AcceleratorCount), masterCount) + } + + matches := r.FindStringSubmatch(master[0].AcceleratorTypeUri) + if len(matches) != 1 
{ + return fmt.Errorf("Saw %s master accelerator type instead of %s", master[0].AcceleratorTypeUri, expectedUri) + } + + worker := cluster.Config.WorkerConfig.Accelerators + if len(worker) != 1 { + return fmt.Errorf("Saw %d worker accelerator types instead of 1", len(worker)) + } + + if int(worker[0].AcceleratorCount) != workerCount { + return fmt.Errorf("Saw %d worker accelerators instead of %d", int(worker[0].AcceleratorCount), workerCount) + } + + matches = r.FindStringSubmatch(worker[0].AcceleratorTypeUri) + if len(matches) != 1 { + return fmt.Errorf("Saw %s worker accelerator type instead of %s", worker[0].AcceleratorTypeUri, expectedUri) + } + + return nil + } +} + +func TestAccDataprocCluster_withInternalIpOnlyTrueAndShieldedConfig(t *testing.T) { + t.Parallel() + + var cluster dataproc.Cluster + rnd := acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withInternalIpOnlyTrueAndShieldedConfig(rnd), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.basic", &cluster), + + // Testing behavior for Dataproc to use only internal IP addresses + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.internal_ip_only", "true"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.shielded_instance_config.0.enable_integrity_monitoring", "true"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.shielded_instance_config.0.enable_secure_boot", "true"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.shielded_instance_config.0.enable_vtpm", "true"), + ), + }, + 
}, + }) +} + +func TestAccDataprocCluster_withMetadataAndTags(t *testing.T) { + t.Parallel() + + var cluster dataproc.Cluster + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withMetadataAndTags(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.basic", &cluster), + + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.metadata.foo", "bar"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.metadata.baz", "qux"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.tags.#", "4"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withMinNumInstances(t *testing.T) { + t.Parallel() + + var cluster dataproc.Cluster + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withMinNumInstances(rnd, subnetworkName), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_min_num_instances", &cluster), + + resource.TestCheckResourceAttr("google_dataproc_cluster.with_min_num_instances", "cluster_config.0.worker_config.0.min_num_instances", "2"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withReservationAffinity(t *testing.T) { + t.Parallel() + + var cluster dataproc.Cluster + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withReservationAffinity(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.basic", &cluster), + + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.reservation_affinity.0.consume_reservation_type", "SPECIFIC_RESERVATION"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.reservation_affinity.0.key", "compute.googleapis.com/reservation-name"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.reservation_affinity.0.values.#", "1"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withDataprocMetricConfig(t *testing.T) { + t.Parallel() + + var cluster dataproc.Cluster + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + 
acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withDataprocMetricConfig(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.basic", &cluster), + + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.dataproc_metric_config.0.metrics.#", "2"), + + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.dataproc_metric_config.0.metrics.0.metric_source", "HDFS"), + resource.TestCheckResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.dataproc_metric_config.0.metrics.0.metric_overrides.#", "1"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withNodeGroupAffinity(t *testing.T) { + t.Parallel() + + var cluster dataproc.Cluster + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withNodeGroupAffinity(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.basic", &cluster), + + resource.TestMatchResourceAttr("google_dataproc_cluster.basic", "cluster_config.0.gce_cluster_config.0.node_group_affinity.0.node_group_uri", 
regexp.MustCompile("test-nodegroup")), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_singleNodeCluster(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + var cluster dataproc.Cluster + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_singleNodeCluster(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.single_node_cluster", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.single_node_cluster", "cluster_config.0.master_config.0.num_instances", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.single_node_cluster", "cluster_config.0.worker_config.0.num_instances", "0"), + + // We set the "dataproc:dataproc.allow.zero.workers" override property. 
+ // GCP should populate the 'properties' value with this value, as well as many others + resource.TestCheckResourceAttrSet("google_dataproc_cluster.single_node_cluster", "cluster_config.0.software_config.0.properties.%"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_updatable(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + var cluster dataproc.Cluster + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_updatable(rnd, 2, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.updatable", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.updatable", "cluster_config.0.master_config.0.num_instances", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.updatable", "cluster_config.0.worker_config.0.num_instances", "2"), + resource.TestCheckResourceAttr("google_dataproc_cluster.updatable", "cluster_config.0.preemptible_worker_config.0.num_instances", "1")), + }, + { + Config: testAccDataprocCluster_updatable(rnd, 2, 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.updatable", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.updatable", "cluster_config.0.master_config.0.num_instances", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.updatable", "cluster_config.0.worker_config.0.num_instances", "2"), + resource.TestCheckResourceAttr("google_dataproc_cluster.updatable", "cluster_config.0.preemptible_worker_config.0.num_instances", "0")), + }, + { + Config: testAccDataprocCluster_updatable(rnd, 3, 2), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_dataproc_cluster.updatable", 
"cluster_config.0.master_config.0.num_instances", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.updatable", "cluster_config.0.worker_config.0.num_instances", "3"), + resource.TestCheckResourceAttr("google_dataproc_cluster.updatable", "cluster_config.0.preemptible_worker_config.0.num_instances", "2")), + }, + }, + }) +} + +func TestAccDataprocCluster_nonPreemptibleSecondary(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + var cluster dataproc.Cluster + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_nonPreemptibleSecondary(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.non_preemptible_secondary", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.non_preemptible_secondary", "cluster_config.0.preemptible_worker_config.0.preemptibility", "NON_PREEMPTIBLE"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_spotSecondary(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + var cluster dataproc.Cluster + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + 
Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_spotSecondary(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.spot_secondary", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.spot_secondary", "cluster_config.0.preemptible_worker_config.0.preemptibility", "SPOT"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_spotWithInstanceFlexibilityPolicy(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + var cluster dataproc.Cluster + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_spotWithInstanceFlexibilityPolicy(rnd), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.spot_with_instance_flexibility_policy", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.spot_with_instance_flexibility_policy", "cluster_config.0.preemptible_worker_config.0.preemptibility", "SPOT"), + resource.TestCheckResourceAttr("google_dataproc_cluster.spot_with_instance_flexibility_policy", "cluster_config.0.preemptible_worker_config.0.instance_flexibility_policy.0.instance_selection_list.0.machine_types.0", "n2d-standard-2"), + resource.TestCheckResourceAttr("google_dataproc_cluster.spot_with_instance_flexibility_policy", "cluster_config.0.preemptible_worker_config.0.instance_flexibility_policy.0.instance_selection_list.0.rank", "3"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_spotWithAuxiliaryNodeGroups(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + rnd := acctest.RandString(t, 10) + var cluster dataproc.Cluster + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withAuxiliaryNodeGroups(rnd), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_auxiliary_node_groups", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group.0.roles.0", "DRIVER"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group.0.node_group_config.0.num_instances", "2"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group.0.node_group_config.0.machine_type", "n1-standard-2"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group.0.node_group_config.0.min_cpu_platform", "Intel Haswell"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group.0.node_group_config.0.disk_config.0.boot_disk_size_gb", "35"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group.0.node_group_config.0.disk_config.0.boot_disk_type", "pd-standard"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group.0.node_group_config.0.disk_config.0.num_local_ssds", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group.0.node_group_config.0.disk_config.0.local_ssd_interface", "nvme"), + 
resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group.0.node_group_config.0.accelerators.0.accelerator_count", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_auxiliary_node_groups", "cluster_config.0.auxiliary_node_groups.0.node_group_id", "node-group-id"), + testAccCheckDataprocAuxiliaryNodeGroupAccelerator(&cluster, project), + + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withStagingBucket(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + var cluster dataproc.Cluster + clusterName := fmt.Sprintf("tf-test-dproc-%s", rnd) + bucketName := fmt.Sprintf("%s-bucket", clusterName) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withStagingBucketAndCluster(clusterName, bucketName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_bucket", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_bucket", "cluster_config.0.staging_bucket", bucketName), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_bucket", "cluster_config.0.bucket", bucketName)), + }, + { + // Simulate destroy of cluster by removing it from definition, + // but leaving the storage bucket (should not be auto deleted) + Config: testAccDataprocCluster_withStagingBucketOnly(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocStagingBucketExists(t, bucketName), + ), + }, + }, + }) +} 
+ +func TestAccDataprocCluster_withTempBucket(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + var cluster dataproc.Cluster + clusterName := fmt.Sprintf("tf-test-dproc-%s", rnd) + bucketName := fmt.Sprintf("%s-temp-bucket", clusterName) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withTempBucketAndCluster(clusterName, bucketName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_bucket", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_bucket", "cluster_config.0.temp_bucket", bucketName)), + }, + { + // Simulate destroy of cluster by removing it from definition, + // but leaving the temp bucket (should not be auto deleted) + Config: testAccDataprocCluster_withTempBucketOnly(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocTempBucketExists(t, bucketName), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withInitAction(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + var cluster dataproc.Cluster + bucketName := fmt.Sprintf("tf-test-dproc-%s-init-bucket", rnd) + objectName := "msg.txt" + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withInitAction(rnd, bucketName, objectName, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_init_action", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_init_action", "cluster_config.0.initialization_action.#", "2"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_init_action", "cluster_config.0.initialization_action.0.timeout_sec", "500"), + testAccCheckDataprocClusterInitActionSucceeded(t, bucketName, objectName), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withConfigOverrides(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + var cluster dataproc.Cluster + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withConfigOverrides(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_config_overrides", &cluster), + validateDataprocCluster_withConfigOverrides("google_dataproc_cluster.with_config_overrides", &cluster), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withServiceAcc(t *testing.T) { + t.Parallel() + + sa := "a" + acctest.RandString(t, 10) + saEmail := fmt.Sprintf("%s@%s.iam.gserviceaccount.com", sa, envvar.GetTestProjectFromEnv()) + rnd := acctest.RandString(t, 10) + networkName := 
acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + var cluster dataproc.Cluster + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withServiceAcc(sa, rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists( + t, "google_dataproc_cluster.with_service_account", &cluster), + testAccCheckDataprocClusterHasServiceScopes(t, &cluster, + "https://www.googleapis.com/auth/cloud.useraccounts.readonly", + "https://www.googleapis.com/auth/devstorage.read_write", + "https://www.googleapis.com/auth/logging.write", + "https://www.googleapis.com/auth/monitoring", + ), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_service_account", "cluster_config.0.gce_cluster_config.0.service_account", saEmail), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withImageVersion(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + version := "2.0.35-debian10" + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + var cluster dataproc.Cluster + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withImageVersion(rnd, version, 
subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_image_version", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_image_version", "cluster_config.0.software_config.0.image_version", version), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withOptionalComponents(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + var cluster dataproc.Cluster + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withOptionalComponents(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_opt_components", &cluster), + testAccCheckDataprocClusterHasOptionalComponents(&cluster, "ZOOKEEPER", "DOCKER"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withLifecycleConfigIdleDeleteTtl(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + var cluster dataproc.Cluster + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: 
testAccDataprocCluster_withLifecycleConfigIdleDeleteTtl(rnd, "600s", subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_lifecycle_config", &cluster), + ), + }, + { + Config: testAccDataprocCluster_withLifecycleConfigIdleDeleteTtl(rnd, "610s", subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_lifecycle_config", &cluster), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withLifecycleConfigAutoDeletion(t *testing.T) { + // Uses time.Now + acctest.SkipIfVcr(t) + t.Parallel() + + rnd := acctest.RandString(t, 10) + now := time.Now() + fmtString := "2006-01-02T15:04:05.072Z" + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + var cluster dataproc.Cluster + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withLifecycleConfigAutoDeletionTime(rnd, now.Add(time.Hour * 10).Format(fmtString), subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_lifecycle_config", &cluster), + ), + }, + { + Config: testAccDataprocCluster_withLifecycleConfigAutoDeletionTime(rnd, now.Add(time.Hour * 20).Format(fmtString), subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_lifecycle_config", &cluster), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withLabels(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + networkName := 
acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + var cluster dataproc.Cluster + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withoutLabels(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_labels", &cluster), + + resource.TestCheckNoResourceAttr("google_dataproc_cluster.with_labels", "labels.%"), + // We don't provide any, but GCP adds three and goog-dataproc-autozone is added internally, so expect 4. + resource.TestCheckResourceAttr("google_dataproc_cluster.with_labels", "effective_labels.%", "4"), + ), + }, + { + Config: testAccDataprocCluster_withLabels(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_labels", &cluster), + + resource.TestCheckResourceAttr("google_dataproc_cluster.with_labels", "labels.%", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_labels", "labels.key1", "value1"), + // We only provide one, but GCP adds three and goog-dataproc-autozone is added internally, so expect 5. + resource.TestCheckResourceAttr("google_dataproc_cluster.with_labels", "effective_labels.%", "5"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_labels", "effective_labels.key1", "value1"), + ), + }, + { + Config: testAccDataprocCluster_withLabelsUpdate(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_labels", &cluster), + + // We only provide two, so expect 2. 
+ resource.TestCheckResourceAttr("google_dataproc_cluster.with_labels", "labels.%", "1"), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_labels", "labels.key2", "value2"), + ), + }, + { + Config: testAccDataprocCluster_withoutLabels(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_labels", &cluster), + + resource.TestCheckNoResourceAttr("google_dataproc_cluster.with_labels", "labels.%"), + // We don't provide any, but GCP adds three and goog-dataproc-autozone is added internally, so expect 4. + resource.TestCheckResourceAttr("google_dataproc_cluster.with_labels", "effective_labels.%", "4"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withNetworkRefs(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + var c1, c2 dataproc.Cluster + rnd := acctest.RandString(t, 10) + netName := fmt.Sprintf(`dproc-cluster-test-%s-net`, rnd) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withNetworkRefs(rnd, netName), + Check: resource.ComposeTestCheckFunc( + // successful creation of the clusters is good enough to assess it worked + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_net_ref_by_url", &c1), + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_net_ref_by_name", &c2), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withEndpointConfig(t *testing.T) { + t.Parallel() + + var cluster dataproc.Cluster + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", 
networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withEndpointConfig(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_endpoint_config", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_endpoint_config", "cluster_config.0.endpoint_config.0.enable_http_port_access", "true"), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_KMS(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + kms := acctest.BootstrapKMSKey(t) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + if acctest.BootstrapPSARole(t, "service-", "compute-system", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + var cluster dataproc.Cluster + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_KMS(rnd, kms.CryptoKey.Name, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.kms", &cluster), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withKerberos(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + kms := acctest.BootstrapKMSKey(t) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, 
"dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + var cluster dataproc.Cluster + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withKerberos(rnd, kms.CryptoKey.Name, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.kerb", &cluster), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withAutoscalingPolicy(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + var cluster dataproc.Cluster + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withAutoscalingPolicy(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.basic", &cluster), + testAccCheckDataprocClusterAutoscaling(t, &cluster, true), + ), + }, + { + Config: testAccDataprocCluster_removeAutoscalingPolicy(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.basic", &cluster), + testAccCheckDataprocClusterAutoscaling(t, &cluster, false), + ), + }, + }, + }) +} + +func TestAccDataprocCluster_withMetastoreConfig(t *testing.T) { + t.Parallel() + + pid := envvar.GetTestProjectFromEnv() + basicServiceId := 
"tf-test-metastore-srv-" + acctest.RandString(t, 10) + updateServiceId := "tf-test-metastore-srv-update-" + acctest.RandString(t, 10) + msName_basic := fmt.Sprintf("projects/%s/locations/us-central1/services/%s", pid, basicServiceId) + msName_update := fmt.Sprintf("projects/%s/locations/us-central1/services/%s", pid, updateServiceId) + + var cluster dataproc.Cluster + clusterName := "tf-test-" + acctest.RandString(t, 10) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocClusterDestroy(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocCluster_withMetastoreConfig(clusterName, basicServiceId), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_metastore_config", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_metastore_config", "cluster_config.0.metastore_config.0.dataproc_metastore_service",msName_basic), + + ), + }, + { + Config: testAccDataprocCluster_withMetastoreConfig_update(clusterName, updateServiceId), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocClusterExists(t, "google_dataproc_cluster.with_metastore_config", &cluster), + resource.TestCheckResourceAttr("google_dataproc_cluster.with_metastore_config", "cluster_config.0.metastore_config.0.dataproc_metastore_service", msName_update), + + ), + }, + }, + }) +} + +func testAccCheckDataprocClusterDestroy(t *testing.T) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_dataproc_cluster" { + continue + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Unable to verify delete of dataproc cluster, ID is empty") + } + + attributes := rs.Primary.Attributes + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + 
return err + } + + parts := strings.Split(rs.Primary.ID, "/") + clusterId := parts[len(parts)-1] + _, err = config.NewDataprocClient(config.UserAgent).Projects.Regions.Clusters.Get( + project, attributes["region"], clusterId).Do() + + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound { + return nil + } else if ok { + return fmt.Errorf("Error validating cluster deleted. Code: %d. Message: %s", gerr.Code, gerr.Message) + } + return fmt.Errorf("Error validating cluster deleted. %s", err.Error()) + } + return fmt.Errorf("Dataproc cluster still exists") + } + + return nil + } +} + +func testAccCheckDataprocClusterHasServiceScopes(t *testing.T, cluster *dataproc.Cluster, scopes ...string) func(s *terraform.State) error { + return func(s *terraform.State) error { + + if !reflect.DeepEqual(scopes, cluster.Config.GceClusterConfig.ServiceAccountScopes) { + return fmt.Errorf("Cluster does not contain expected set of service account scopes : %v : instead %v", + scopes, cluster.Config.GceClusterConfig.ServiceAccountScopes) + } + return nil + } +} + +func testAccCheckDataprocClusterAutoscaling(t *testing.T, cluster *dataproc.Cluster, expectAutoscaling bool) func(s *terraform.State) error { + return func(s *terraform.State) error { + if cluster.Config.AutoscalingConfig == nil && expectAutoscaling { + return fmt.Errorf("Cluster does not contain AutoscalingConfig, expected it would") + } else if cluster.Config.AutoscalingConfig != nil && !expectAutoscaling { + return fmt.Errorf("Cluster contains AutoscalingConfig, expected it not to") + } + + return nil + } +} + +func validateBucketExists(bucket string, config *transport_tpg.Config) (bool, error) { + _, err := config.NewStorageClient(config.UserAgent).Buckets.Get(bucket).Do() + + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotFound { + return false, nil + } else if ok { + return false, fmt.Errorf("Error validating bucket exists: http code error 
: %d, http message error: %s", gerr.Code, gerr.Message) + } + return false, fmt.Errorf("Error validating bucket exists: %s", err.Error()) + } + return true, nil +} + +func testAccCheckDataprocStagingBucketExists(t *testing.T, bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + config := acctest.GoogleProviderConfig(t) + + exists, err := validateBucketExists(bucketName, config) + if err != nil { + return err + } + if !exists { + return fmt.Errorf("Staging Bucket %s does not exist", bucketName) + } + return nil + } +} + +func testAccCheckDataprocTempBucketExists(t *testing.T, bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + config := acctest.GoogleProviderConfig(t) + + exists, err := validateBucketExists(bucketName, config) + if err != nil { + return err + } + if !exists { + return fmt.Errorf("Temp Bucket %s does not exist", bucketName) + } + return nil + } +} + +func testAccCheckDataprocClusterHasOptionalComponents(cluster *dataproc.Cluster, components ...string) func(s *terraform.State) error { + return func(s *terraform.State) error { + + if !reflect.DeepEqual(components, cluster.Config.SoftwareConfig.OptionalComponents) { + return fmt.Errorf("Cluster does not contain expected optional components : %v : instead %v", + components, cluster.Config.SoftwareConfig.OptionalComponents) + } + return nil + } +} + +func testAccCheckDataprocClusterInitActionSucceeded(t *testing.T, bucket, object string) resource.TestCheckFunc { + + // The init script will have created an object in the specified bucket. 
+ // Ensure it exists + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + _, err := config.NewStorageClient(config.UserAgent).Objects.Get(bucket, object).Do() + if err != nil { + return fmt.Errorf("Unable to verify init action success: Error reading object %s in bucket %s: %v", object, bucket, err) + } + + return nil + } +} + +func validateDataprocCluster_withConfigOverrides(n string, cluster *dataproc.Cluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + + type tfAndGCPTestField struct { + tfAttr string + expectedVal string + actualGCPVal string + } + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Terraform resource Not found: %s", n) + } + + if cluster.Config.MasterConfig == nil || cluster.Config.WorkerConfig == nil || cluster.Config.SecondaryWorkerConfig == nil { + return fmt.Errorf("Master/Worker/SecondaryConfig values not set in GCP, expecting values") + } + + clusterTests := []tfAndGCPTestField{ + {"cluster_config.0.master_config.0.num_instances", "3", strconv.Itoa(int(cluster.Config.MasterConfig.NumInstances))}, + {"cluster_config.0.master_config.0.disk_config.0.boot_disk_size_gb", "35", strconv.Itoa(int(cluster.Config.MasterConfig.DiskConfig.BootDiskSizeGb))}, + {"cluster_config.0.master_config.0.disk_config.0.num_local_ssds", "0", strconv.Itoa(int(cluster.Config.MasterConfig.DiskConfig.NumLocalSsds))}, + {"cluster_config.0.master_config.0.disk_config.0.boot_disk_type", "pd-ssd", cluster.Config.MasterConfig.DiskConfig.BootDiskType}, + {"cluster_config.0.master_config.0.disk_config.0.local_ssd_interface", "nvme", cluster.Config.MasterConfig.DiskConfig.LocalSsdInterface}, + {"cluster_config.0.master_config.0.machine_type", "n1-standard-2", tpgresource.GetResourceNameFromSelfLink(cluster.Config.MasterConfig.MachineTypeUri)}, + {"cluster_config.0.master_config.0.instance_names.#", "3", strconv.Itoa(len(cluster.Config.MasterConfig.InstanceNames))}, + 
{"cluster_config.0.master_config.0.min_cpu_platform", "Intel Skylake", cluster.Config.MasterConfig.MinCpuPlatform}, + + {"cluster_config.0.worker_config.0.num_instances", "3", strconv.Itoa(int(cluster.Config.WorkerConfig.NumInstances))}, + {"cluster_config.0.worker_config.0.disk_config.0.boot_disk_size_gb", "35", strconv.Itoa(int(cluster.Config.WorkerConfig.DiskConfig.BootDiskSizeGb))}, + {"cluster_config.0.worker_config.0.disk_config.0.num_local_ssds", "1", strconv.Itoa(int(cluster.Config.WorkerConfig.DiskConfig.NumLocalSsds))}, + {"cluster_config.0.worker_config.0.disk_config.0.boot_disk_type", "pd-standard", cluster.Config.WorkerConfig.DiskConfig.BootDiskType}, + {"cluster_config.0.worker_config.0.disk_config.0.local_ssd_interface", "scsi", cluster.Config.WorkerConfig.DiskConfig.LocalSsdInterface}, + {"cluster_config.0.worker_config.0.machine_type", "n1-standard-2", tpgresource.GetResourceNameFromSelfLink(cluster.Config.WorkerConfig.MachineTypeUri)}, + {"cluster_config.0.worker_config.0.instance_names.#", "3", strconv.Itoa(len(cluster.Config.WorkerConfig.InstanceNames))}, + {"cluster_config.0.worker_config.0.min_cpu_platform", "Intel Broadwell", cluster.Config.WorkerConfig.MinCpuPlatform}, + + {"cluster_config.0.preemptible_worker_config.0.num_instances", "1", strconv.Itoa(int(cluster.Config.SecondaryWorkerConfig.NumInstances))}, + {"cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_size_gb", "35", strconv.Itoa(int(cluster.Config.SecondaryWorkerConfig.DiskConfig.BootDiskSizeGb))}, + {"cluster_config.0.preemptible_worker_config.0.disk_config.0.num_local_ssds", "1", strconv.Itoa(int(cluster.Config.SecondaryWorkerConfig.DiskConfig.NumLocalSsds))}, + {"cluster_config.0.preemptible_worker_config.0.disk_config.0.boot_disk_type", "pd-ssd", cluster.Config.SecondaryWorkerConfig.DiskConfig.BootDiskType}, + {"cluster_config.0.preemptible_worker_config.0.disk_config.0.local_ssd_interface", "nvme", 
cluster.Config.SecondaryWorkerConfig.DiskConfig.LocalSsdInterface}, + {"cluster_config.0.preemptible_worker_config.0.instance_names.#", "1", strconv.Itoa(len(cluster.Config.SecondaryWorkerConfig.InstanceNames))}, + } + + for _, attrs := range clusterTests { + tfVal := rs.Primary.Attributes[attrs.tfAttr] + if tfVal != attrs.expectedVal { + return fmt.Errorf("%s: Terraform Attribute value '%s' is not as expected '%s' ", attrs.tfAttr, tfVal, attrs.expectedVal) + } + if attrs.actualGCPVal != tfVal { + return fmt.Errorf("%s: Terraform Attribute value '%s' is not aligned with that in GCP '%s' ", attrs.tfAttr, tfVal, attrs.actualGCPVal) + } + } + + return nil + } +} + +func testAccCheckDataprocClusterExists(t *testing.T, n string, cluster *dataproc.Cluster) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Terraform resource Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set for Dataproc cluster") + } + + config := acctest.GoogleProviderConfig(t) + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + parts := strings.Split(rs.Primary.ID, "/") + clusterId := parts[len(parts)-1] + found, err := config.NewDataprocClient(config.UserAgent).Projects.Regions.Clusters.Get( + project, rs.Primary.Attributes["region"], clusterId).Do() + if err != nil { + return err + } + + if found.ClusterName != clusterId { + return fmt.Errorf("Dataproc cluster %s not found, found %s instead", clusterId, cluster.ClusterName) + } + + *cluster = *found + + return nil + } +} + +func testAccCheckDataproc_missingZoneGlobalRegion1(rnd string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dproc-%s" + region = "global" +} +`, rnd) +} + +func testAccCheckDataproc_missingZoneGlobalRegion2(rnd string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "basic" { + name = 
"tf-test-dproc-%s" + region = "global" + + cluster_config { + gce_cluster_config { + network = "default" + } + } +} +`, rnd) +} + +func testAccDataprocCluster_basic(rnd string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dproc-%s" + region = "us-central1" +} +`, rnd) +} + +func testAccDataprocVirtualCluster_basic(projectID, rnd, networkName, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" "project" { + project_id = "%s" +} + +resource "google_container_cluster" "primary" { + name = "tf-test-gke-%s" + location = "us-central1-a" + network = "%s" + subnetwork = "%s" + + initial_node_count = 1 + + workload_identity_config { + workload_pool = "${data.google_project.project.project_id}.svc.id.goog" + } + deletion_protection = false +} + +resource "google_project_iam_binding" "workloadidentity" { + project = "%s" + role = "roles/iam.workloadIdentityUser" + + members = [ + "serviceAccount:${data.google_project.project.project_id}.svc.id.goog[tf-test-dproc-%s/agent]", + "serviceAccount:${data.google_project.project.project_id}.svc.id.goog[tf-test-dproc-%s/spark-driver]", + "serviceAccount:${data.google_project.project.project_id}.svc.id.goog[tf-test-dproc-%s/spark-executor]", + ] +} + +resource "google_dataproc_cluster" "virtual_cluster" { + depends_on = [ + google_project_iam_binding.workloadidentity + ] + + name = "tf-test-dproc-%s" + region = "us-central1" + + virtual_cluster_config { + kubernetes_cluster_config { + kubernetes_namespace = "tf-test-dproc-%s" + kubernetes_software_config { + component_version = { + "SPARK": "3.1-dataproc-7", + } + } + gke_cluster_config { + gke_cluster_target = google_container_cluster.primary.id + node_pool_target { + node_pool = "tf-test-gke-np-%s" + roles = [ + "DEFAULT" + ] + } + } + } + } + } +`, projectID, rnd, networkName, subnetworkName, projectID, rnd, rnd, rnd, rnd, rnd, rnd) +} + +func testAccCheckDataprocGkeClusterNodePoolsHaveRoles(cluster 
*dataproc.Cluster, roles ...string) func(s *terraform.State) error { + return func(s *terraform.State) error { + + for _, nodePool := range cluster.VirtualClusterConfig.KubernetesClusterConfig.GkeClusterConfig.NodePoolTarget { + if reflect.DeepEqual(roles, nodePool.Roles) { + return nil + } + } + + return fmt.Errorf("Cluster NodePools does not contain expected roles : %v", roles) + } +} + +func testAccDataprocCluster_withAccelerators(rnd, acceleratorType, zone, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "accelerated_cluster" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + zone = "%s" + } + + master_config { + accelerators { + accelerator_type = "%s" + accelerator_count = "1" + } + } + + worker_config { + accelerators { + accelerator_type = "%s" + accelerator_count = "1" + } + } + } +} +`, rnd, subnetworkName, zone, acceleratorType, acceleratorType) +} + +func testAccDataprocCluster_withInternalIpOnlyTrueAndShieldedConfig(rnd string) string { + return fmt.Sprintf(` +variable "subnetwork_cidr" { + default = "10.0.0.0/16" +} + +resource "google_compute_network" "dataproc_network" { + name = "tf-test-dproc-net-%s" + auto_create_subnetworks = false +} + +# +# Create a subnet with Private IP Access enabled to test +# deploying a Dataproc cluster with Internal IP Only enabled. +# +resource "google_compute_subnetwork" "dataproc_subnetwork" { + name = "tf-test-dproc-subnet-%s" + ip_cidr_range = var.subnetwork_cidr + network = google_compute_network.dataproc_network.self_link + region = "us-central1" + private_ip_google_access = true +} + +# +# The default network within GCP already comes pre configured with +# certain firewall rules open to allow internal communication. 
As we +# are creating a new one here for this test, we need to additionally +# open up similar rules to allow the nodes to talk to each other +# internally as part of their configuration or this will just hang. +# +resource "google_compute_firewall" "dataproc_network_firewall" { + name = "tf-test-dproc-firewall-%s" + description = "Firewall rules for dataproc Terraform acceptance testing" + network = google_compute_network.dataproc_network.name + + allow { + protocol = "icmp" + } + + allow { + protocol = "tcp" + ports = ["0-65535"] + } + + allow { + protocol = "udp" + ports = ["0-65535"] + } + + source_ranges = [var.subnetwork_cidr] +} + +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dproc-%s" + region = "us-central1" + depends_on = [google_compute_firewall.dataproc_network_firewall] + + cluster_config { + gce_cluster_config { + subnetwork = google_compute_subnetwork.dataproc_subnetwork.name + internal_ip_only = true + shielded_instance_config{ + enable_integrity_monitoring = true + enable_secure_boot = true + enable_vtpm = true + } + } + } +} +`, rnd, rnd, rnd, rnd) +} + +func testAccDataprocCluster_withMetadataAndTags(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + metadata = { + foo = "bar" + baz = "qux" + } + tags = ["my-tag", "your-tag", "our-tag", "their-tag"] + } + } +} +`, rnd, subnetworkName) +} + +func testAccDataprocCluster_withMinNumInstances(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_min_num_instances" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + master_config{ + num_instances=1 + } + worker_config{ + num_instances = 3 + min_num_instances = 2 + } + } +} +`, rnd, subnetworkName) +} + +func 
testAccDataprocCluster_withReservationAffinity(rnd, subnetworkName string) string { + return fmt.Sprintf(` + +resource "google_compute_reservation" "reservation" { + name = "tf-test-dproc-reservation-%s" + zone = "us-central1-f" + + specific_reservation { + count = 10 + instance_properties { + machine_type = "n1-standard-2" + } + } + specific_reservation_required = true +} + +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + master_config { + machine_type = "n1-standard-2" + } + + worker_config { + machine_type = "n1-standard-2" + } + + gce_cluster_config { + subnetwork = "%s" + zone = "us-central1-f" + reservation_affinity { + consume_reservation_type = "SPECIFIC_RESERVATION" + key = "compute.googleapis.com/reservation-name" + values = [google_compute_reservation.reservation.name] + } + } + } +} +`, rnd, rnd, subnetworkName) +} + +func testAccDataprocCluster_withDataprocMetricConfig(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + dataproc_metric_config { + metrics { + metric_source = "HDFS" + metric_overrides = ["yarn:ResourceManager:QueueMetrics:AppsCompleted"] + } + + metrics { + metric_source = "SPARK" + metric_overrides = ["spark:driver:DAGScheduler:job.allJobs"] + } + } + } +} +`, rnd, subnetworkName) +} + +func testAccDataprocCluster_withNodeGroupAffinity(rnd, subnetworkName string) string { + return fmt.Sprintf(` + +resource "google_compute_node_template" "nodetmpl" { + name = "test-nodetmpl-%s" + region = "us-central1" + + node_affinity_labels = { + tfacc = "test" + } + + node_type = "n1-node-96-624" + + cpu_overcommit_type = "ENABLED" +} + +resource "google_compute_node_group" "nodes" { + name = "test-nodegroup-%s" + zone = "us-central1-f" + + initial_size = 3 + node_template = 
google_compute_node_template.nodetmpl.self_link +} + +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + zone = "us-central1-f" + node_group_affinity { + node_group_uri = google_compute_node_group.nodes.name + } + } + } +} +`, rnd, rnd, rnd, subnetworkName) +} + +func testAccDataprocCluster_singleNodeCluster(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "single_node_cluster" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + } +} +`, rnd, subnetworkName) +} + +func testAccDataprocCluster_withConfigOverrides(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_config_overrides" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + master_config { + num_instances = 3 + machine_type = "n1-standard-2" // can't be e2 because of min_cpu_platform + disk_config { + boot_disk_type = "pd-ssd" + boot_disk_size_gb = 35 + local_ssd_interface = "nvme" + } + min_cpu_platform = "Intel Skylake" + } + + worker_config { + num_instances = 3 + machine_type = "n1-standard-2" // can't be e2 because of min_cpu_platform + disk_config { + boot_disk_type = "pd-standard" + boot_disk_size_gb = 35 + num_local_ssds = 1 + local_ssd_interface = "scsi" + } + + min_cpu_platform = "Intel Broadwell" + } + + preemptible_worker_config { + num_instances = 1 + disk_config { + boot_disk_type = "pd-ssd" + boot_disk_size_gb = 35 + num_local_ssds = 1 + local_ssd_interface = "nvme" + } + } + } +} +`, rnd, subnetworkName) +} + +func testAccDataprocCluster_withInitAction(rnd, bucket, objName, 
subnetworkName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "init_bucket" { + name = "%s" + location = "US" + force_destroy = "true" +} + +resource "google_storage_bucket_object" "init_script" { + name = "dproc-cluster-test-%s-init-script.sh" + bucket = google_storage_bucket.init_bucket.name + content = <> /tmp/%s +gsutil cp /tmp/%s ${google_storage_bucket.init_bucket.url} +EOL + +} + +resource "google_dataproc_cluster" "with_init_action" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + + master_config { + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + initialization_action { + script = "${google_storage_bucket.init_bucket.url}/${google_storage_bucket_object.init_script.name}" + timeout_sec = 500 + } + initialization_action { + script = "${google_storage_bucket.init_bucket.url}/${google_storage_bucket_object.init_script.name}" + } + } +} +`, bucket, rnd, objName, objName, rnd, subnetworkName) +} + +func testAccDataprocCluster_updatable(rnd string, w, p int) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "updatable" { + name = "tf-test-dproc-%s" + region = "us-central1" + graceful_decommission_timeout = "0.2s" + + cluster_config { + master_config { + num_instances = "1" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + worker_config { + num_instances = "%d" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + preemptible_worker_config { + num_instances = "%d" + disk_config { + boot_disk_size_gb = 35 + } + } + } +} +`, rnd, w, p) +} + +func testAccDataprocCluster_nonPreemptibleSecondary(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" 
"non_preemptible_secondary" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + master_config { + num_instances = "1" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + worker_config { + num_instances = "2" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + preemptible_worker_config { + num_instances = "1" + preemptibility = "NON_PREEMPTIBLE" + disk_config { + boot_disk_size_gb = 35 + } + } + } +} + `, rnd, subnetworkName) +} + +func testAccDataprocCluster_spotSecondary(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "spot_secondary" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + master_config { + num_instances = "1" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + worker_config { + num_instances = "2" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + preemptible_worker_config { + num_instances = "1" + preemptibility = "SPOT" + disk_config { + boot_disk_size_gb = 35 + } + } + } +} + `, rnd, subnetworkName) +} + +func testAccDataprocCluster_spotWithInstanceFlexibilityPolicy(rnd string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "spot_with_instance_flexibility_policy" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + master_config { + num_instances = "1" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + worker_config { + num_instances = "2" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + preemptible_worker_config { + num_instances = "3" + preemptibility = "SPOT" + disk_config { + boot_disk_size_gb = 35 + } + instance_flexibility_policy { + instance_selection_list { + machine_types = ["n2d-standard-2"] + rank = 3 + } + } + } + 
} +} + `, rnd) +} + +func testAccDataprocCluster_withAuxiliaryNodeGroups(rnd string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_auxiliary_node_groups" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + master_config { + num_instances = "1" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + worker_config { + num_instances = "2" + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + auxiliary_node_groups{ + node_group_id="node-group-id" + node_group { + roles = ["DRIVER"] + node_group_config{ + num_instances=2 + machine_type="n1-standard-2" + min_cpu_platform = "Intel Haswell" + disk_config { + boot_disk_size_gb = 35 + boot_disk_type = "pd-standard" + num_local_ssds = 1 + local_ssd_interface = "nvme" + } + accelerators { + accelerator_count = 1 + accelerator_type = "nvidia-tesla-t4" + } + } + } + } + } +} + `, rnd) +} + +func testAccDataprocCluster_withStagingBucketOnly(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = "true" +} +`, bucketName) +} + +func testAccDataprocCluster_withTempBucketOnly(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = "true" +} +`, bucketName) +} + +func testAccDataprocCluster_withStagingBucketAndCluster(clusterName, bucketName, subnetworkName string) string { + return fmt.Sprintf(` +%s + +resource "google_dataproc_cluster" "with_bucket" { + name = "%s" + region = "us-central1" + + cluster_config { + staging_bucket = google_storage_bucket.bucket.name + + gce_cluster_config { + subnetwork = "%s" + } + + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + + master_config { + machine_type = "e2-medium" + disk_config { + 
boot_disk_size_gb = 35 + } + } + } +} +`, testAccDataprocCluster_withStagingBucketOnly(bucketName), clusterName, subnetworkName) +} + +func testAccDataprocCluster_withTempBucketAndCluster(clusterName, bucketName, subnetworkName string) string { + return fmt.Sprintf(` +%s + +resource "google_dataproc_cluster" "with_bucket" { + name = "%s" + region = "us-central1" + + cluster_config { + temp_bucket = google_storage_bucket.bucket.name + + gce_cluster_config { + subnetwork = "%s" + } + + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + + master_config { + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + } +} +`, testAccDataprocCluster_withTempBucketOnly(bucketName), clusterName, subnetworkName) +} + +func testAccDataprocCluster_withLabels(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_labels" { + name = "tf-test-dproc-%s" + region = "us-central1" + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + } + + labels = { + key1 = "value1" + } +} +`, rnd, subnetworkName) +} + +func testAccDataprocCluster_withLabelsUpdate(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_labels" { + name = "tf-test-dproc-%s" + region = "us-central1" + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + } + + labels = { + key2 = "value2" + } +} +`, rnd, subnetworkName) +} + +func testAccDataprocCluster_withoutLabels(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_labels" { + name = "tf-test-dproc-%s" + region = "us-central1" + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + } +} +`, rnd, subnetworkName) +} + +func testAccDataprocCluster_withEndpointConfig(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource 
"google_dataproc_cluster" "with_endpoint_config" { + name = "tf-test-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + endpoint_config { + enable_http_port_access = true + } + } +} +`, rnd, subnetworkName) +} + +func testAccDataprocCluster_withImageVersion(rnd, version, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_image_version" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + software_config { + image_version = "%s" + } + } +} +`, rnd, subnetworkName, version) +} + +func testAccDataprocCluster_withOptionalComponents(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_opt_components" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + software_config { + optional_components = ["DOCKER", "ZOOKEEPER"] + } + } +} +`, rnd, subnetworkName) +} + +func testAccDataprocCluster_withLifecycleConfigIdleDeleteTtl(rnd, tm, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_lifecycle_config" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + lifecycle_config { + idle_delete_ttl = "%s" + } + } +} +`, rnd, subnetworkName, tm) +} + +func testAccDataprocCluster_withLifecycleConfigAutoDeletionTime(rnd, tm, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_lifecycle_config" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + lifecycle_config { + auto_delete_time = "%s" + } + } +} +`, rnd, subnetworkName, tm) +} + +func testAccDataprocCluster_withServiceAcc(sa, rnd, subnetworkName string) string { + return fmt.Sprintf(` +data "google_project" 
"project" {} + +resource "google_service_account" "service_account" { + account_id = "%s" +} + +resource "google_project_iam_member" "service_account" { + project = data.google_project.project.project_id + role = "roles/dataproc.worker" + member = "serviceAccount:${google_service_account.service_account.email}" +} + +# Wait for IAM propagation +resource "time_sleep" "wait_120_seconds" { + depends_on = [google_project_iam_member.service_account] + + create_duration = "120s" +} + +resource "google_dataproc_cluster" "with_service_account" { + name = "dproc-cluster-test-%s" + region = "us-central1" + + cluster_config { + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + + master_config { + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + gce_cluster_config { + subnetwork = "%s" + service_account = google_service_account.service_account.email + service_account_scopes = [ + # User supplied scopes + "https://www.googleapis.com/auth/monitoring", + # The following scopes necessary for the cluster to function properly are + # always added, even if not explicitly specified: + # useraccounts-ro: https://www.googleapis.com/auth/cloud.useraccounts.readonly + # storage-rw: https://www.googleapis.com/auth/devstorage.read_write + # logging-write: https://www.googleapis.com/auth/logging.write + "useraccounts-ro", + "storage-rw", + "logging-write", + ] + } + } + + depends_on = [time_sleep.wait_120_seconds] +} +`, sa, rnd, subnetworkName) +} + +func testAccDataprocCluster_withNetworkRefs(rnd, netName string) string { + return fmt.Sprintf(` +resource "google_compute_network" "dataproc_network" { + name = "%s" + auto_create_subnetworks = true +} + +# +# The default network within GCP already comes pre configured with +# certain firewall rules open to allow internal communication. 
As we +# are creating a new one here for this test, we need to additionally +# open up similar rules to allow the nodes to talk to each other +# internally as part of their configuration or this will just hang. +# +resource "google_compute_firewall" "dataproc_network_firewall" { + name = "tf-test-dproc-%s" + description = "Firewall rules for dataproc Terraform acceptance testing" + network = google_compute_network.dataproc_network.name + source_ranges = ["192.168.0.0/16"] + + allow { + protocol = "icmp" + } + + allow { + protocol = "tcp" + ports = ["0-65535"] + } + + allow { + protocol = "udp" + ports = ["0-65535"] + } +} + +resource "google_dataproc_cluster" "with_net_ref_by_name" { + name = "tf-test-dproc-net-%s" + region = "us-central1" + depends_on = [google_compute_firewall.dataproc_network_firewall] + + cluster_config { + + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + + master_config { + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + gce_cluster_config { + network = google_compute_network.dataproc_network.name + } + } +} + +resource "google_dataproc_cluster" "with_net_ref_by_url" { + name = "tf-test-dproc-url-%s" + region = "us-central1" + depends_on = [google_compute_firewall.dataproc_network_firewall] + + cluster_config { + + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + } + + master_config { + machine_type = "e2-medium" + disk_config { + boot_disk_size_gb = 35 + } + } + + gce_cluster_config { + network = google_compute_network.dataproc_network.self_link + } + } +} +`, netName, rnd, rnd, rnd) +} + +func testAccDataprocCluster_KMS(rnd, kmsKey, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "kms" { + name = "tf-test-dproc-%s" + region = 
"us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + encryption_config { + kms_key_name = "%s" + } + } +} +`, rnd, subnetworkName, kmsKey) +} + +func testAccDataprocCluster_withKerberos(rnd, kmsKey, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-dproc-%s" + location = "US" +} +resource "google_storage_bucket_object" "password" { + name = "dataproc-password-%s" + bucket = google_storage_bucket.bucket.name + content = "hunter2" +} + +resource "google_dataproc_cluster" "kerb" { + name = "tf-test-dproc-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + security_config { + kerberos_config { + root_principal_password_uri = google_storage_bucket_object.password.self_link + kms_key_uri = "%s" + } + } + } +} +`, rnd, rnd, rnd, subnetworkName, kmsKey) +} + +func testAccDataprocCluster_withAutoscalingPolicy(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dataproc-policy-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + autoscaling_config { + policy_uri = google_dataproc_autoscaling_policy.asp.id + } + } +} + +resource "google_dataproc_autoscaling_policy" "asp" { + policy_id = "tf-test-dataproc-policy-%s" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} +`, rnd, subnetworkName, rnd) +} + +func testAccDataprocCluster_removeAutoscalingPolicy(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "basic" { + name = "tf-test-dataproc-policy-%s" + region = "us-central1" + + cluster_config { + gce_cluster_config { + subnetwork = "%s" + } + + autoscaling_config { + policy_uri = "" + } + } +} + +resource 
"google_dataproc_autoscaling_policy" "asp" { + policy_id = "tf-test-dataproc-policy-%s" + location = "us-central1" + + worker_config { + max_instances = 3 + } + + basic_algorithm { + yarn_config { + graceful_decommission_timeout = "30s" + scale_up_factor = 0.5 + scale_down_factor = 0.5 + } + } +} +`, rnd, subnetworkName, rnd) +} + +func testAccDataprocCluster_withMetastoreConfig(clusterName, serviceId string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_metastore_config" { + name = "%s" + region = "us-central1" + + cluster_config { + metastore_config { + dataproc_metastore_service = google_dataproc_metastore_service.ms.name + } + } +} + +resource "google_dataproc_metastore_service" "ms" { + service_id = "%s" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "3.1.2" + } +} +`, clusterName, serviceId) +} + +func testAccDataprocCluster_withMetastoreConfig_update(clusterName, serviceId string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "with_metastore_config" { + name = "%s" + region = "us-central1" + + cluster_config { + metastore_config { + dataproc_metastore_service = google_dataproc_metastore_service.ms.name + } + } +} + +resource "google_dataproc_metastore_service" "ms" { + service_id = "%s" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "3.1.2" + } +} +`, clusterName, serviceId) +} + diff --git a/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_job.go b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_job.go new file mode 100644 index 000000000000..a60aade879d9 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_job.go @@ -0,0 +1,1324 @@ +package dataproc + +import ( + "fmt" + "log" + "strings" + 
"time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "google.golang.org/api/dataproc/v1" +) + +var jobTypes = []string{"pyspark_config", "spark_config", "hadoop_config", "hive_config", "pig_config", "sparksql_config", "presto_config"} + +func ResourceDataprocJob() *schema.Resource { + return &schema.Resource{ + Create: resourceDataprocJobCreate, + Update: resourceDataprocJobUpdate, + Read: resourceDataprocJobRead, + Delete: resourceDataprocJobDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + tpgresource.SetLabelsDiff, + ), + + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The project in which the cluster can be found and jobs subsequently run against. If it is not provided, the provider project is used.`, + }, + + // Ref: https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs#JobReference + "region": { + Type: schema.TypeString, + Optional: true, + Default: "global", + ForceNew: true, + Description: `The Cloud Dataproc region. This essentially determines which clusters are available for this job to be submitted to. If not specified, defaults to global.`, + }, + + // If a job is still running, trying to delete a job will fail. 
Setting + // this flag to true however will force the deletion by first cancelling + // the job and then deleting it + "force_delete": { + Type: schema.TypeBool, + Default: false, + Optional: true, + Description: `By default, you can only delete inactive jobs within Dataproc. Setting this to true, and calling destroy, will ensure that the job is first cancelled before issuing the delete.`, + }, + + "reference": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Description: `The reference of the job`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_id": { + Type: schema.TypeString, + Description: "The job ID, which must be unique within the project. The job ID is generated by the server upon job submission or provided by the user as a means to perform retries without creating duplicate jobs", + Optional: true, + ForceNew: true, + Computed: true, + ValidateFunc: verify.ValidateRegexp("^[a-zA-Z0-9_-]{1,100}$"), + }, + }, + }, + }, + + "placement": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: `The config of job placement.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Description: "The name of the cluster where the job will be submitted", + Required: true, + ForceNew: true, + }, + "cluster_uuid": { + Type: schema.TypeString, + Computed: true, + Description: "Output-only. A cluster UUID generated by the Cloud Dataproc service when the job is submitted", + }, + }, + }, + }, + + "status": { + Type: schema.TypeList, + Computed: true, + Description: `The status of the job.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "state": { + Type: schema.TypeString, + Description: "Output-only. A state message specifying the overall job state", + Computed: true, + }, + "details": { + Type: schema.TypeString, + Description: "Output-only. 
Optional job state details, such as an error description if the state is ERROR", + Computed: true, + }, + "state_start_time": { + Type: schema.TypeString, + Description: "Output-only. The time when this state was entered", + Computed: true, + }, + "substate": { + Type: schema.TypeString, + Description: "Output-only. Additional state information, which includes status reported by the agent", + Computed: true, + }, + }, + }, + }, + + "driver_output_resource_uri": { + Type: schema.TypeString, + Description: "Output-only. A URI pointing to the location of the stdout of the job's driver program", + Computed: true, + }, + + "driver_controls_files_uri": { + Type: schema.TypeString, + Description: "Output-only. If present, the location of miscellaneous control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as driver_output_uri.", + Computed: true, + }, + + "labels": { + Type: schema.TypeMap, + Description: `Optional. The labels to associate with this job. + + **Note**: This field is non-authoritative, and will only manage the labels present in your configuration. + Please refer to the field 'effective_labels' for all of the labels present on the resource.`, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "scheduling": { + Type: schema.TypeList, + Description: "Optional. 
Job scheduling configuration.", + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_failures_per_hour": { + Type: schema.TypeInt, + Description: "Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed.", + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtMost(10), + }, + "max_failures_total": { + Type: schema.TypeInt, + Description: "Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed.", + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtMost(240), + }, + }, + }, + }, + + "pyspark_config": pySparkSchema, + "spark_config": sparkSchema, + "hadoop_config": hadoopSchema, + "hive_config": hiveSchema, + "pig_config": pigSchema, + "sparksql_config": sparkSqlSchema, + "presto_config": prestoSchema, + }, + UseJSONNumber: true, + } +} + +func resourceDataprocJobUpdate(d *schema.ResourceData, meta interface{}) error { + // The only updatable value is currently 'force_delete' which is a local + // only value therefore we don't need to make any GCP calls to update this. 
+ + return resourceDataprocJobRead(d, meta) +} + +func resourceDataprocJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + clusterName := d.Get("placement.0.cluster_name").(string) + region := d.Get("region").(string) + + submitReq := &dataproc.SubmitJobRequest{ + Job: &dataproc.Job{ + Placement: &dataproc.JobPlacement{ + ClusterName: clusterName, + }, + Reference: &dataproc.JobReference{ + ProjectId: project, + }, + }, + } + + if v, ok := d.GetOk("reference.0.job_id"); ok { + submitReq.Job.Reference.JobId = v.(string) + } + + if v, ok := d.GetOk("scheduling"); ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + submitReq.Job.Scheduling = expandJobScheduling(config) + } + + if _, ok := d.GetOk("effective_labels"); ok { + submitReq.Job.Labels = tpgresource.ExpandEffectiveLabels(d) + } + + if v, ok := d.GetOk("pyspark_config"); ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + submitReq.Job.PysparkJob = expandPySparkJob(config) + } + + if v, ok := d.GetOk("spark_config"); ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + submitReq.Job.SparkJob = expandSparkJob(config) + } + + if v, ok := d.GetOk("hadoop_config"); ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + submitReq.Job.HadoopJob = expandHadoopJob(config) + } + + if v, ok := d.GetOk("hive_config"); ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + submitReq.Job.HiveJob = expandHiveJob(config) + } + + if v, ok := d.GetOk("pig_config"); ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + submitReq.Job.PigJob = expandPigJob(config) + } + + if v, ok := d.GetOk("sparksql_config"); ok { + config := 
tpgresource.ExtractFirstMapConfig(v.([]interface{})) + submitReq.Job.SparkSqlJob = expandSparkSqlJob(config) + } + + if v, ok := d.GetOk("presto_config"); ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + submitReq.Job.PrestoJob = expandPrestoJob(config) + } + + // Submit the job + job, err := config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Submit( + project, region, submitReq).Do() + if err != nil { + return err + } + d.SetId(fmt.Sprintf("projects/%s/regions/%s/jobs/%s", project, region, job.Reference.JobId)) + + waitErr := DataprocJobOperationWait(config, region, project, job.Reference.JobId, + "Creating Dataproc job", userAgent, d.Timeout(schema.TimeoutCreate)) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] Dataproc job %s has been submitted", job.Reference.JobId) + return resourceDataprocJobRead(d, meta) +} + +func resourceDataprocJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + region := d.Get("region").(string) + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + parts := strings.Split(d.Id(), "/") + jobId := parts[len(parts)-1] + job, err := config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Get( + project, region, jobId).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Dataproc Job %q", jobId)) + } + + if err := d.Set("force_delete", d.Get("force_delete")); err != nil { + return fmt.Errorf("Error setting force_delete: %s", err) + } + if err := tpgresource.SetLabels(job.Labels, d, "labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := tpgresource.SetLabels(job.Labels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", job.Labels); err != 
nil { + return fmt.Errorf("Error setting effective_labels: %s", err) + } + if err := d.Set("driver_output_resource_uri", job.DriverOutputResourceUri); err != nil { + return fmt.Errorf("Error setting driver_output_resource_uri: %s", err) + } + if err := d.Set("driver_controls_files_uri", job.DriverControlFilesUri); err != nil { + return fmt.Errorf("Error setting driver_controls_files_uri: %s", err) + } + + if err := d.Set("placement", flattenJobPlacement(job.Placement)); err != nil { + return fmt.Errorf("Error setting placement: %s", err) + } + if err := d.Set("status", flattenJobStatus(job.Status)); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + if err := d.Set("reference", flattenJobReference(job.Reference)); err != nil { + return fmt.Errorf("Error setting reference: %s", err) + } + if err := d.Set("scheduling", flattenJobScheduling(job.Scheduling)); err != nil { + return fmt.Errorf("Error setting reference: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + if job.PysparkJob != nil { + if err := d.Set("pyspark_config", flattenPySparkJob(job.PysparkJob)); err != nil { + return fmt.Errorf("Error setting pyspark_config: %s", err) + } + } + if job.SparkJob != nil { + if err := d.Set("spark_config", flattenSparkJob(job.SparkJob)); err != nil { + return fmt.Errorf("Error setting spark_config: %s", err) + } + } + if job.HadoopJob != nil { + if err := d.Set("hadoop_config", flattenHadoopJob(job.HadoopJob)); err != nil { + return fmt.Errorf("Error setting hadoop_config: %s", err) + } + } + if job.HiveJob != nil { + if err := d.Set("hive_config", flattenHiveJob(job.HiveJob)); err != nil { + return fmt.Errorf("Error setting hive_config: %s", err) + } + } + if job.PigJob != nil { + if err := d.Set("pig_config", flattenPigJob(job.PigJob)); err != nil { + return fmt.Errorf("Error setting pig_config: %s", err) + } + } + if job.SparkSqlJob != nil { + if err := 
d.Set("sparksql_config", flattenSparkSqlJob(job.SparkSqlJob)); err != nil { + return fmt.Errorf("Error setting sparksql_config: %s", err) + } + } + + if job.PrestoJob != nil { + if err := d.Set("presto_config", flattenPrestoJob(job.PrestoJob)); err != nil { + return fmt.Errorf("Error setting presto_config: %s", err) + } + } + return nil +} + +func resourceDataprocJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + region := d.Get("region").(string) + forceDelete := d.Get("force_delete").(bool) + + parts := strings.Split(d.Id(), "/") + jobId := parts[len(parts)-1] + if forceDelete { + log.Printf("[DEBUG] Attempting to first cancel Dataproc job %s if it's still running ...", d.Id()) + + // ignore error if we get one - job may be finished already and not need to + // be cancelled. 
We do however wait for the state to be one that is + // at least not active + _, _ = config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Cancel(project, region, jobId, &dataproc.CancelJobRequest{}).Do() + + waitErr := DataprocJobOperationWait(config, region, project, jobId, + "Cancelling Dataproc job", userAgent, d.Timeout(schema.TimeoutDelete)) + if waitErr != nil { + return waitErr + } + + } + + log.Printf("[DEBUG] Deleting Dataproc job %s", d.Id()) + _, err = config.NewDataprocClient(userAgent).Projects.Regions.Jobs.Delete( + project, region, jobId).Do() + if err != nil { + return err + } + + waitErr := DataprocDeleteOperationWait(config, region, project, jobId, + "Deleting Dataproc job", userAgent, d.Timeout(schema.TimeoutDelete)) + if waitErr != nil { + return waitErr + } + + log.Printf("[INFO] Dataproc job %s has been deleted", d.Id()) + d.SetId("") + + return nil +} + +// ---- PySpark Job ---- + +var loggingConfig = &schema.Schema{ + Type: schema.TypeList, + Description: "The runtime logging config of the job", + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Description: "Optional. The per-package log levels for the driver. This may include 'root' package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'.", + Required: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, +} + +var pySparkSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `The config of pySpark job.`, + ExactlyOneOf: jobTypes, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_python_file_uri": { + Type: schema.TypeString, + Description: "Required. The HCFS URI of the main Python file to use as the driver. 
Must be a .py file", + Required: true, + ForceNew: true, + }, + + "args": { + Type: schema.TypeList, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as --conf, that can be set as job properties, since a collision may occur that causes an incorrect job submission", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "python_file_uris": { + Type: schema.TypeList, + Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Description: "Optional. HCFS URIs of files to be copied to the working directory of Python drivers and distributed tasks. Useful for naively parallel tasks", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "archive_uris": { + Type: schema.TypeList, + Description: "Optional. HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. 
Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code", + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenPySparkJob(job *dataproc.PySparkJob) []map[string]interface{} { + return []map[string]interface{}{ + { + "main_python_file_uri": job.MainPythonFileUri, + "args": job.Args, + "python_file_uris": job.PythonFileUris, + "jar_file_uris": job.JarFileUris, + "file_uris": job.FileUris, + "archive_uris": job.ArchiveUris, + "properties": job.Properties, + "logging_config": flattenLoggingConfig(job.LoggingConfig), + }, + } +} + +func expandPySparkJob(config map[string]interface{}) *dataproc.PySparkJob { + job := &dataproc.PySparkJob{} + if v, ok := config["main_python_file_uri"]; ok { + job.MainPythonFileUri = v.(string) + } + if v, ok := config["args"]; ok { + job.Args = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["python_file_uris"]; ok { + job.PythonFileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["file_uris"]; ok { + job.FileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["archive_uris"]; ok { + job.ArchiveUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + if v, ok := config["logging_config"]; ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + job.LoggingConfig = expandLoggingConfig(config) + } + + return job + +} + +func expandJobScheduling(config map[string]interface{}) *dataproc.JobScheduling { + jobScheduling := &dataproc.JobScheduling{} + if v, ok := config["max_failures_per_hour"]; ok { + jobScheduling.MaxFailuresPerHour = int64(v.(int)) + } + if v, ok := 
config["max_failures_total"]; ok { + jobScheduling.MaxFailuresTotal = int64(v.(int)) + } + return jobScheduling +} + +// ---- Spark Job ---- + +var sparkSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `The config of the Spark job.`, + ExactlyOneOf: jobTypes, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main driver: can be only one of the class | jar_file + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The class containing the main method of the driver. Must be in a provided jar or jar that is already on the classpath. Conflicts with main_jar_file_uri`, + ExactlyOneOf: []string{"spark_config.0.main_class", "spark_config.0.main_jar_file_uri"}, + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of jar file containing the driver jar. Conflicts with main_class`, + ExactlyOneOf: []string{"spark_config.0.main_jar_file_uri", "spark_config.0.main_class"}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The arguments to pass to the driver.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. 
Useful for naively parallel tasks.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenSparkJob(job *dataproc.SparkJob) []map[string]interface{} { + return []map[string]interface{}{ + { + "main_class": job.MainClass, + "main_jar_file_uri": job.MainJarFileUri, + "args": job.Args, + "jar_file_uris": job.JarFileUris, + "file_uris": job.FileUris, + "archive_uris": job.ArchiveUris, + "properties": job.Properties, + "logging_config": flattenLoggingConfig(job.LoggingConfig), + }, + } +} + +func expandSparkJob(config map[string]interface{}) *dataproc.SparkJob { + job := &dataproc.SparkJob{} + if v, ok := config["main_class"]; ok { + job.MainClass = v.(string) + } + if v, ok := config["main_jar_file_uri"]; ok { + job.MainJarFileUri = v.(string) + } + + if v, ok := config["args"]; ok { + job.Args = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["file_uris"]; ok { + job.FileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["archive_uris"]; ok { + job.ArchiveUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = 
tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + if v, ok := config["logging_config"]; ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + job.LoggingConfig = expandLoggingConfig(config) + } + + return job + +} + +// ---- Hadoop Job ---- + +var hadoopSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `The config of Hadoop job`, + ExactlyOneOf: jobTypes, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main driver: can be only one of the main_class | main_jar_file_uri + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The class containing the main method of the driver. Must be in a provided jar or jar that is already on the classpath. Conflicts with main_jar_file_uri`, + ExactlyOneOf: []string{"hadoop_config.0.main_jar_file_uri", "hadoop_config.0.main_class"}, + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of jar file containing the driver jar. Conflicts with main_class`, + ExactlyOneOf: []string{"hadoop_config.0.main_jar_file_uri", "hadoop_config.0.main_class"}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The arguments to pass to the driver.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of files to be copied to the working directory of Spark drivers and distributed tasks. 
Useful for naively parallel tasks.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of archives to be extracted in the working directory of .jar, .tar, .tar.gz, .tgz, and .zip.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenHadoopJob(job *dataproc.HadoopJob) []map[string]interface{} { + return []map[string]interface{}{ + { + "main_class": job.MainClass, + "main_jar_file_uri": job.MainJarFileUri, + "args": job.Args, + "jar_file_uris": job.JarFileUris, + "file_uris": job.FileUris, + "archive_uris": job.ArchiveUris, + "properties": job.Properties, + "logging_config": flattenLoggingConfig(job.LoggingConfig), + }, + } +} + +func expandHadoopJob(config map[string]interface{}) *dataproc.HadoopJob { + job := &dataproc.HadoopJob{} + if v, ok := config["main_class"]; ok { + job.MainClass = v.(string) + } + if v, ok := config["main_jar_file_uri"]; ok { + job.MainJarFileUri = v.(string) + } + + if v, ok := config["args"]; ok { + job.Args = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["file_uris"]; ok { + job.FileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["archive_uris"]; ok { + job.ArchiveUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = 
tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + if v, ok := config["logging_config"]; ok { + config := tpgresource.ExtractFirstMapConfig(v.([]interface{})) + job.LoggingConfig = expandLoggingConfig(config) + } + + return job + +} + +// ---- Hive Job ---- + +var hiveSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `The config of hive job`, + ExactlyOneOf: jobTypes, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main query: can be only one of query_list | query_file_uri + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of Hive queries or statements to execute as part of the job. Conflicts with query_file_uri`, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"hive_config.0.query_file_uri", "hive_config.0.query_list"}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `HCFS URI of file containing Hive script to execute as the job. Conflicts with query_list`, + ExactlyOneOf: []string{"hive_config.0.query_file_uri", "hive_config.0.query_list"}, + }, + + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. Defaults to false.`, + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Mapping of query variable names to values (equivalent to the Hive command: SET name="value";).`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. 
Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, +} + +func flattenHiveJob(job *dataproc.HiveJob) []map[string]interface{} { + queries := []string{} + if job.QueryList != nil { + queries = job.QueryList.Queries + } + return []map[string]interface{}{ + { + "query_list": queries, + "query_file_uri": job.QueryFileUri, + "continue_on_failure": job.ContinueOnFailure, + "script_variables": job.ScriptVariables, + "properties": job.Properties, + "jar_file_uris": job.JarFileUris, + }, + } +} + +func expandHiveJob(config map[string]interface{}) *dataproc.HiveJob { + job := &dataproc.HiveJob{} + if v, ok := config["query_file_uri"]; ok { + job.QueryFileUri = v.(string) + } + if v, ok := config["query_list"]; ok { + job.QueryList = &dataproc.QueryList{ + Queries: tpgresource.ConvertStringArr(v.([]interface{})), + } + } + if v, ok := config["continue_on_failure"]; ok { + job.ContinueOnFailure = v.(bool) + } + if v, ok := config["script_variables"]; ok { + job.ScriptVariables = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + + return job +} + +// ---- Pig Job ---- + +var pigSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `The config of pag job.`, + ExactlyOneOf: jobTypes, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main query: 
can be only one of query_list | query_file_uri + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of Hive queries or statements to execute as part of the job. Conflicts with query_file_uri`, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"pig_config.0.query_file_uri", "pig_config.0.query_list"}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `HCFS URI of file containing Hive script to execute as the job. Conflicts with query_list`, + ExactlyOneOf: []string{"pig_config.0.query_file_uri", "pig_config.0.query_list"}, + }, + + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to continue executing queries if a query fails. The default value is false. Setting to true can be useful when executing independent parallel queries. Defaults to false.`, + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Mapping of query variable names to values (equivalent to the Pig command: name=[value]).`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Cloud Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. 
Can contain Pig UDFs.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenPigJob(job *dataproc.PigJob) []map[string]interface{} { + queries := []string{} + if job.QueryList != nil { + queries = job.QueryList.Queries + } + return []map[string]interface{}{ + { + "query_list": queries, + "query_file_uri": job.QueryFileUri, + "continue_on_failure": job.ContinueOnFailure, + "script_variables": job.ScriptVariables, + "properties": job.Properties, + "jar_file_uris": job.JarFileUris, + }, + } +} + +func expandPigJob(config map[string]interface{}) *dataproc.PigJob { + job := &dataproc.PigJob{} + if v, ok := config["query_file_uri"]; ok { + job.QueryFileUri = v.(string) + } + if v, ok := config["query_list"]; ok { + job.QueryList = &dataproc.QueryList{ + Queries: tpgresource.ConvertStringArr(v.([]interface{})), + } + } + if v, ok := config["continue_on_failure"]; ok { + job.ContinueOnFailure = v.(bool) + } + if v, ok := config["script_variables"]; ok { + job.ScriptVariables = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + + return job + +} + +// ---- Spark SQL Job ---- + +var sparkSqlSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `The config of SparkSql job`, + ExactlyOneOf: jobTypes, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + // main query: can be only one of query_list | query_file_uri + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of SQL queries or statements to execute as part of the job. 
Conflicts with query_file_uri`, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"sparksql_config.0.query_file_uri", "sparksql_config.0.query_list"}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the script that contains SQL queries. Conflicts with query_list`, + ExactlyOneOf: []string{"sparksql_config.0.query_file_uri", "sparksql_config.0.query_list"}, + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `Mapping of query variable names to values (equivalent to the Spark SQL command: SET name="value";).`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Cloud Dataproc API may be overwritten.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `HCFS URIs of jar files to be added to the Spark CLASSPATH.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenSparkSqlJob(job *dataproc.SparkSqlJob) []map[string]interface{} { + queries := []string{} + if job.QueryList != nil { + queries = job.QueryList.Queries + } + return []map[string]interface{}{ + { + "query_list": queries, + "query_file_uri": job.QueryFileUri, + "script_variables": job.ScriptVariables, + "properties": job.Properties, + "jar_file_uris": job.JarFileUris, + }, + } +} + +func expandSparkSqlJob(config map[string]interface{}) *dataproc.SparkSqlJob { + job := &dataproc.SparkSqlJob{} + if v, ok := config["query_file_uri"]; ok { + job.QueryFileUri = v.(string) + } + if v, ok := config["query_list"]; ok { + job.QueryList = &dataproc.QueryList{ + Queries: 
tpgresource.ConvertStringArr(v.([]interface{})), + } + } + if v, ok := config["script_variables"]; ok { + job.ScriptVariables = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + if v, ok := config["jar_file_uris"]; ok { + job.JarFileUris = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["properties"]; ok { + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + + return job + +} + +// ---- Presto Job ---- + +var prestoSchema = &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: `The config of presto job`, + ExactlyOneOf: jobTypes, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_tags": { + Type: schema.TypeList, + Description: `Presto client tags to attach to this query.`, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Whether to continue executing queries if a query fails. Setting to true can be useful when executing independent parallel queries. Defaults to false.`, + }, + // main query: can be only one of query_list | query_file_uri + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `The list of SQL queries or statements to execute as part of the job. Conflicts with query_file_uri`, + Elem: &schema.Schema{Type: schema.TypeString}, + ExactlyOneOf: []string{"presto_config.0.query_file_uri", "presto_config.0.query_list"}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The HCFS URI of the script that contains SQL queries. Conflicts with query_list`, + ExactlyOneOf: []string{"presto_config.0.query_file_uri", "presto_config.0.query_list"}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: `A mapping of property names to values. 
Used to set Presto session properties Equivalent to using the --session flag in the Presto CLI.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "output_format": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The format in which query output will be displayed. See the Presto documentation for supported output formats.`, + }, + + "logging_config": loggingConfig, + }, + }, +} + +func flattenPrestoJob(job *dataproc.PrestoJob) []map[string]interface{} { + queries := []string{} + if job.QueryList != nil { + queries = job.QueryList.Queries + } + return []map[string]interface{}{ + { + "client_tags": job.ClientTags, + "continue_on_failure": job.ContinueOnFailure, + "query_list": queries, + "query_file_uri": job.QueryFileUri, + "properties": job.Properties, + "output_format": job.OutputFormat, + }, + } +} + +func expandPrestoJob(config map[string]interface{}) *dataproc.PrestoJob { + job := &dataproc.PrestoJob{} + if v, ok := config["client_tags"]; ok { + job.ClientTags = tpgresource.ConvertStringArr(v.([]interface{})) + } + if v, ok := config["continue_on_failure"]; ok { + job.ContinueOnFailure = v.(bool) + } + if v, ok := config["query_file_uri"]; ok { + job.QueryFileUri = v.(string) + } + if v, ok := config["query_list"]; ok { + job.QueryList = &dataproc.QueryList{ + Queries: tpgresource.ConvertStringArr(v.([]interface{})), + } + } + if v, ok := config["properties"]; ok { + job.Properties = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + if v, ok := config["output_format"]; ok { + job.OutputFormat = v.(string) + } + + return job + +} + +// ---- Other flatten / expand methods ---- + +func expandLoggingConfig(config map[string]interface{}) *dataproc.LoggingConfig { + conf := &dataproc.LoggingConfig{} + if v, ok := config["driver_log_levels"]; ok { + conf.DriverLogLevels = tpgresource.ConvertStringMap(v.(map[string]interface{})) + } + return conf +} + +func flattenLoggingConfig(l *dataproc.LoggingConfig) 
[]map[string]interface{} { + return []map[string]interface{}{ + { + "driver_log_levels": l.DriverLogLevels, + }, + } +} + +func flattenJobReference(r *dataproc.JobReference) []map[string]interface{} { + return []map[string]interface{}{ + { + "job_id": r.JobId, + }, + } +} + +func flattenJobScheduling(r *dataproc.JobScheduling) []map[string]interface{} { + jobScheduling := []map[string]interface{}{} + + if r != nil { + jobScheduling = append(jobScheduling, + map[string]interface{}{ + "max_failures_per_hour": r.MaxFailuresPerHour, + "max_failures_total": r.MaxFailuresTotal, + }) + } + return jobScheduling +} + +func flattenJobStatus(s *dataproc.JobStatus) []map[string]interface{} { + return []map[string]interface{}{ + { + "state": s.State, + "details": s.Details, + "state_start_time": s.StateStartTime, + "substate": s.Substate, + }, + } +} + +func flattenJobPlacement(jp *dataproc.JobPlacement) []map[string]interface{} { + return []map[string]interface{}{ + { + "cluster_name": jp.ClusterName, + "cluster_uuid": jp.ClusterUuid, + }, + } +} diff --git a/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_job_test.go.tmpl b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_job_test.go.tmpl new file mode 100644 index 000000000000..1aba35f13a6d --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_job_test.go.tmpl @@ -0,0 +1,900 @@ +package dataproc_test + +import ( + "fmt" + "io/ioutil" + "log" + "strings" + "testing" + "time" + "strconv" + + // "regexp" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + tpgdataproc "github.com/hashicorp/terraform-provider-google/google/services/dataproc" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + "google.golang.org/api/googleapi" + "google.golang.org/api/dataproc/v1" +) + 
+type jobTestField struct { + tf_attr string + gcp_attr interface{} +} + +// TODO (mbang): Test `ExactlyOneOf` here +// func TestAccDataprocJob_failForMissingJobConfig(t *testing.T) { +// t.Parallel() + +// acctest.VcrTest(t, resource.TestCase{ +// PreCheck: func() { acctest.AccTestPreCheck(t) }, +// ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), +// CheckDestroy: testAccCheckDataprocJobDestroyProducer(t), +// Steps: []resource.TestStep{ +// { +// Config: testAccDataprocJob_missingJobConf(), +// ExpectError: regexp.MustCompile("You must define and configure exactly one xxx_config block"), +// }, +// }, +// }) +// } + +func TestAccDataprocJob_updatable(t *testing.T) { + t.Parallel() + + var job dataproc.Job + rnd := acctest.RandString(t, 10) + jobId := fmt.Sprintf("dproc-update-job-id-%s", rnd) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocJob_updatable(rnd, subnetworkName, jobId, "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocJobExists(t, "google_dataproc_job.updatable", &job), + resource.TestCheckResourceAttr("google_dataproc_job.updatable", "force_delete", "false"), + ), + }, + { + Config: testAccDataprocJob_updatable(rnd, subnetworkName, jobId, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocJobExists(t, "google_dataproc_job.updatable", &job), + resource.TestCheckResourceAttr("google_dataproc_job.updatable", "force_delete", "true"), + ), + }, + }, + }) +} + +func TestAccDataprocJob_PySpark(t *testing.T) { + t.Parallel() + + var job 
dataproc.Job + rnd := acctest.RandString(t, 10) + jobId := fmt.Sprintf("dproc-custom-job-id-%s", rnd) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocJob_pySpark(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + + testAccCheckDataprocJobExists(t, "google_dataproc_job.pyspark", &job), + + // Custom supplied job_id + resource.TestCheckResourceAttr("google_dataproc_job.pyspark", "reference.0.job_id", jobId), + + // Autogenerated / computed values + resource.TestCheckResourceAttrSet("google_dataproc_job.pyspark", "status.0.state"), + resource.TestCheckResourceAttrSet("google_dataproc_job.pyspark", "status.0.state_start_time"), + resource.TestCheckResourceAttr("google_dataproc_job.pyspark", "scheduling.0.max_failures_per_hour", "1"), + resource.TestCheckResourceAttr("google_dataproc_job.pyspark", "scheduling.0.max_failures_total", "20"), + resource.TestCheckResourceAttr("google_dataproc_job.pyspark", "labels.one", "1"), + + // Unique job config + testAccCheckDataprocJobAttrMatch( + "google_dataproc_job.pyspark", "pyspark_config", &job), + + // Wait until job completes successfully + testAccCheckDataprocJobCompletesSuccessfully(t, "google_dataproc_job.pyspark", &job), + ), + }, + }, + }) +} + +func TestAccDataprocJob_Spark(t *testing.T) { + t.Parallel() + + var job dataproc.Job + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + 
acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocJob_spark(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocJobExists(t, "google_dataproc_job.spark", &job), + + // Autogenerated / computed values + resource.TestCheckResourceAttrSet("google_dataproc_job.spark", "reference.0.job_id"), + resource.TestCheckResourceAttrSet("google_dataproc_job.spark", "status.0.state"), + resource.TestCheckResourceAttrSet("google_dataproc_job.spark", "status.0.state_start_time"), + + // Unique job config + testAccCheckDataprocJobAttrMatch( + "google_dataproc_job.spark", "spark_config", &job), + + // Wait until job completes successfully + testAccCheckDataprocJobCompletesSuccessfully(t, "google_dataproc_job.spark", &job), + ), + }, + }, + }) +} + +func TestAccDataprocJob_Hadoop(t *testing.T) { + t.Parallel() + + var job dataproc.Job + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocJob_hadoop(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocJobExists(t, "google_dataproc_job.hadoop", &job), + + // Autogenerated / computed values + resource.TestCheckResourceAttrSet("google_dataproc_job.hadoop", "reference.0.job_id"), + 
resource.TestCheckResourceAttrSet("google_dataproc_job.hadoop", "status.0.state"), + resource.TestCheckResourceAttrSet("google_dataproc_job.hadoop", "status.0.state_start_time"), + + // Unique job config + testAccCheckDataprocJobAttrMatch( + "google_dataproc_job.hadoop", "hadoop_config", &job), + + // Wait until job completes successfully + testAccCheckDataprocJobCompletesSuccessfully(t, "google_dataproc_job.hadoop", &job), + ), + }, + }, + }) +} + +func TestAccDataprocJob_Hive(t *testing.T) { + t.Parallel() + + var job dataproc.Job + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocJob_hive(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocJobExists(t, "google_dataproc_job.hive", &job), + + // Autogenerated / computed values + resource.TestCheckResourceAttrSet("google_dataproc_job.hive", "reference.0.job_id"), + resource.TestCheckResourceAttrSet("google_dataproc_job.hive", "status.0.state"), + resource.TestCheckResourceAttrSet("google_dataproc_job.hive", "status.0.state_start_time"), + + // Unique job config + testAccCheckDataprocJobAttrMatch( + "google_dataproc_job.hive", "hive_config", &job), + + // Wait until job completes successfully + testAccCheckDataprocJobCompletesSuccessfully(t, "google_dataproc_job.hive", &job), + ), + }, + }, + }) +} + +func TestAccDataprocJob_Pig(t *testing.T) { + t.Parallel() + + var job dataproc.Job + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") 
+ subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocJob_pig(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocJobExists(t, "google_dataproc_job.pig", &job), + + // Autogenerated / computed values + resource.TestCheckResourceAttrSet("google_dataproc_job.pig", "reference.0.job_id"), + resource.TestCheckResourceAttrSet("google_dataproc_job.pig", "status.0.state"), + resource.TestCheckResourceAttrSet("google_dataproc_job.pig", "status.0.state_start_time"), + + // Unique job config + testAccCheckDataprocJobAttrMatch( + "google_dataproc_job.pig", "pig_config", &job), + + // Wait until job completes successfully + testAccCheckDataprocJobCompletesSuccessfully(t, "google_dataproc_job.pig", &job), + ), + }, + }, + }) +} + +func TestAccDataprocJob_SparkSql(t *testing.T) { + t.Parallel() + + var job dataproc.Job + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocJob_sparksql(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocJobExists(t, "google_dataproc_job.sparksql", &job), + + // Autogenerated / computed values + 
resource.TestCheckResourceAttrSet("google_dataproc_job.sparksql", "reference.0.job_id"), + resource.TestCheckResourceAttrSet("google_dataproc_job.sparksql", "status.0.state"), + resource.TestCheckResourceAttrSet("google_dataproc_job.sparksql", "status.0.state_start_time"), + + // Unique job config + testAccCheckDataprocJobAttrMatch( + "google_dataproc_job.sparksql", "sparksql_config", &job), + + // Wait until job completes successfully + testAccCheckDataprocJobCompletesSuccessfully(t, "google_dataproc_job.sparksql", &job), + ), + }, + }, + }) +} + +func TestAccDataprocJob_Presto(t *testing.T) { + t.Parallel() + + var job dataproc.Job + rnd := acctest.RandString(t, 10) + networkName := acctest.BootstrapSharedTestNetwork(t, "dataproc-cluster") + subnetworkName := acctest.BootstrapSubnet(t, "dataproc-cluster", networkName) + acctest.BootstrapFirewallForDataprocSharedNetwork(t, "dataproc-cluster", networkName) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocJobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocJob_presto(rnd, subnetworkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckDataprocJobExists(t, "google_dataproc_job.presto", &job), + + // Autogenerated / computed values + resource.TestCheckResourceAttrSet("google_dataproc_job.presto", "reference.0.job_id"), + resource.TestCheckResourceAttrSet("google_dataproc_job.presto", "status.0.state"), + resource.TestCheckResourceAttrSet("google_dataproc_job.presto", "status.0.state_start_time"), + + // Unique job config + testAccCheckDataprocJobAttrMatch( + "google_dataproc_job.presto", "presto_config", &job), + + // Wait until job completes successfully + testAccCheckDataprocJobCompletesSuccessfully(t, "google_dataproc_job.presto", &job), + ), + }, + }, + }) +} + +func testAccCheckDataprocJobDestroyProducer(t *testing.T) func(s 
*terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_dataproc_job" { + continue + } + + if rs.Primary.ID == "" { + return fmt.Errorf("Unable to verify delete of dataproc job ID is empty") + } + attributes := rs.Primary.Attributes + + project, err := acctest.GetTestProject(rs.Primary, config) + if err != nil { + return err + } + + parts := strings.Split(rs.Primary.ID, "/") + job_id := parts[len(parts)-1] + _, err = config.NewDataprocClient(config.UserAgent).Projects.Regions.Jobs.Get( + project, attributes["region"], job_id).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + return nil + } else if ok { + return fmt.Errorf("Error making GCP platform call: http code error : %d, http message error: %s", gerr.Code, gerr.Message) + } + return fmt.Errorf("Error making GCP platform call: %s", err.Error()) + } + return fmt.Errorf("Dataproc job still exists") + } + + return nil + } +} + +func testAccCheckDataprocJobCompletesSuccessfully(t *testing.T, n string, job *dataproc.Job) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + attributes := s.RootModule().Resources[n].Primary.Attributes + region := attributes["region"] + project, err := acctest.GetTestProject(s.RootModule().Resources[n].Primary, config) + if err != nil { + return err + } + + jobCompleteTimeoutMins := 5 * time.Minute + waitErr := tpgdataproc.DataprocJobOperationWait(config, region, project, job.Reference.JobId, + "Awaiting Dataproc job completion", config.UserAgent, jobCompleteTimeoutMins) + if waitErr != nil { + return waitErr + } + + completeJob, err := config.NewDataprocClient(config.UserAgent).Projects.Regions.Jobs.Get( + project, region, job.Reference.JobId).Do() + if err != nil { + return err + } + if completeJob.Status.State == "ERROR" { + if 
!strings.HasPrefix(completeJob.DriverOutputResourceUri, "gs://") { + return fmt.Errorf("Job completed in ERROR state but no valid log URI found") + } + u := strings.SplitN(strings.TrimPrefix(completeJob.DriverOutputResourceUri, "gs://"), "/", 2) + if len(u) != 2 { + return fmt.Errorf("Job completed in ERROR state but no valid log URI found") + } + l, err := config.NewStorageClient(config.UserAgent).Objects.List(u[0]).Prefix(u[1]).Do() + if err != nil { + return errwrap.Wrapf("Job completed in ERROR state, found error when trying to list logs: {{"{{"}}err{{"}}"}}", err) + } + for _, item := range l.Items { + resp, err := config.NewStorageClient(config.UserAgent).Objects.Get(item.Bucket, item.Name).Download() + if err != nil { + return errwrap.Wrapf("Job completed in ERROR state, found error when trying to read logs: {{"{{"}}err{{"}}"}}", err) + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return errwrap.Wrapf("Job completed in ERROR state, found error when trying to read logs: {{"{{"}}err{{"}}"}}", err) + } + log.Printf("[ERROR] Job failed, driver logs:\n%s", body) + } + return fmt.Errorf("Job completed in ERROR state, check logs for details") + } else if completeJob.Status.State != "DONE" && completeJob.Status.State != "RUNNING" { + return fmt.Errorf("Job did not complete successfully, instead status: %s", completeJob.Status.State) + } + + return nil + } +} + +func testAccCheckDataprocJobExists(t *testing.T, n string, job *dataproc.Job) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Terraform resource Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set for Dataproc job") + } + + config := acctest.GoogleProviderConfig(t) + parts := strings.Split(s.RootModule().Resources[n].Primary.ID, "/") + jobId := parts[len(parts)-1] + project, err := acctest.GetTestProject(s.RootModule().Resources[n].Primary, 
config) + if err != nil { + return err + } + + found, err := config.NewDataprocClient(config.UserAgent).Projects.Regions.Jobs.Get( + project, rs.Primary.Attributes["region"], jobId).Do() + if err != nil { + return err + } + + *job = *found + + return nil + } +} + +func testAccCheckDataprocJobAttrMatch(n, jobType string, job *dataproc.Job) resource.TestCheckFunc { + return func(s *terraform.State) error { + attributes, err := tpgresource.GetResourceAttributes(n, s) + if err != nil { + return err + } + + jobTests := []jobTestField{} + if jobType == "pyspark_config" { + jobTests = append(jobTests, jobTestField{"pyspark_config.0.main_python_file_uri", job.PysparkJob.MainPythonFileUri}) + jobTests = append(jobTests, jobTestField{"pyspark_config.0.args", job.PysparkJob.Args}) + jobTests = append(jobTests, jobTestField{"pyspark_config.0.python_file_uris", job.PysparkJob.PythonFileUris}) + jobTests = append(jobTests, jobTestField{"pyspark_config.0.jar_file_uris", job.PysparkJob.JarFileUris}) + jobTests = append(jobTests, jobTestField{"pyspark_config.0.file_uris", job.PysparkJob.FileUris}) + jobTests = append(jobTests, jobTestField{"pyspark_config.0.archive_uris", job.PysparkJob.ArchiveUris}) + jobTests = append(jobTests, jobTestField{"pyspark_config.0.properties", job.PysparkJob.Properties}) + jobTests = append(jobTests, jobTestField{"pyspark_config.0.logging_config.0.driver_log_levels", job.PysparkJob.LoggingConfig.DriverLogLevels}) + } + if jobType == "spark_config" { + jobTests = append(jobTests, jobTestField{"spark_config.0.main_class", job.SparkJob.MainClass}) + jobTests = append(jobTests, jobTestField{"spark_config.0.main_jar_file_uri", job.SparkJob.MainJarFileUri}) + jobTests = append(jobTests, jobTestField{"spark_config.0.args", job.SparkJob.Args}) + jobTests = append(jobTests, jobTestField{"spark_config.0.jar_file_uris", job.SparkJob.JarFileUris}) + jobTests = append(jobTests, jobTestField{"spark_config.0.file_uris", job.SparkJob.FileUris}) + jobTests = 
append(jobTests, jobTestField{"spark_config.0.archive_uris", job.SparkJob.ArchiveUris}) + jobTests = append(jobTests, jobTestField{"spark_config.0.properties", job.SparkJob.Properties}) + jobTests = append(jobTests, jobTestField{"spark_config.0.logging_config.0.driver_log_levels", job.SparkJob.LoggingConfig.DriverLogLevels}) + } + if jobType == "hadoop_config" { + jobTests = append(jobTests, jobTestField{"hadoop_config.0.main_class", job.HadoopJob.MainClass}) + jobTests = append(jobTests, jobTestField{"hadoop_config.0.main_jar_file_uri", job.HadoopJob.MainJarFileUri}) + jobTests = append(jobTests, jobTestField{"hadoop_config.0.args", job.HadoopJob.Args}) + jobTests = append(jobTests, jobTestField{"hadoop_config.0.jar_file_uris", job.HadoopJob.JarFileUris}) + jobTests = append(jobTests, jobTestField{"hadoop_config.0.file_uris", job.HadoopJob.FileUris}) + jobTests = append(jobTests, jobTestField{"hadoop_config.0.archive_uris", job.HadoopJob.ArchiveUris}) + jobTests = append(jobTests, jobTestField{"hadoop_config.0.properties", job.HadoopJob.Properties}) + jobTests = append(jobTests, jobTestField{"hadoop_config.0.logging_config.0.driver_log_levels", job.HadoopJob.LoggingConfig.DriverLogLevels}) + } + if jobType == "hive_config" { + queries := []string{} + if job.HiveJob.QueryList != nil { + queries = job.HiveJob.QueryList.Queries + } + jobTests = append(jobTests, jobTestField{"hive_config.0.query_list", queries}) + jobTests = append(jobTests, jobTestField{"hive_config.0.query_file_uri", job.HiveJob.QueryFileUri}) + jobTests = append(jobTests, jobTestField{"hive_config.0.continue_on_failure", job.HiveJob.ContinueOnFailure}) + jobTests = append(jobTests, jobTestField{"hive_config.0.script_variables", job.HiveJob.ScriptVariables}) + jobTests = append(jobTests, jobTestField{"hive_config.0.properties", job.HiveJob.Properties}) + jobTests = append(jobTests, jobTestField{"hive_config.0.jar_file_uris", job.HiveJob.JarFileUris}) + } + if jobType == "pig_config" { + queries := 
[]string{} + if job.PigJob.QueryList != nil { + queries = job.PigJob.QueryList.Queries + } + jobTests = append(jobTests, jobTestField{"pig_config.0.query_list", queries}) + jobTests = append(jobTests, jobTestField{"pig_config.0.query_file_uri", job.PigJob.QueryFileUri}) + jobTests = append(jobTests, jobTestField{"pig_config.0.continue_on_failure", job.PigJob.ContinueOnFailure}) + jobTests = append(jobTests, jobTestField{"pig_config.0.script_variables", job.PigJob.ScriptVariables}) + jobTests = append(jobTests, jobTestField{"pig_config.0.properties", job.PigJob.Properties}) + jobTests = append(jobTests, jobTestField{"pig_config.0.jar_file_uris", job.PigJob.JarFileUris}) + } + if jobType == "sparksql_config" { + queries := []string{} + if job.SparkSqlJob.QueryList != nil { + queries = job.SparkSqlJob.QueryList.Queries + } + jobTests = append(jobTests, jobTestField{"sparksql_config.0.query_list", queries}) + jobTests = append(jobTests, jobTestField{"sparksql_config.0.query_file_uri", job.SparkSqlJob.QueryFileUri}) + jobTests = append(jobTests, jobTestField{"sparksql_config.0.script_variables", job.SparkSqlJob.ScriptVariables}) + jobTests = append(jobTests, jobTestField{"sparksql_config.0.properties", job.SparkSqlJob.Properties}) + jobTests = append(jobTests, jobTestField{"sparksql_config.0.jar_file_uris", job.SparkSqlJob.JarFileUris}) + } + + for _, attrs := range jobTests { + if c := checkMatch(attributes, attrs.tf_attr, attrs.gcp_attr); c != "" { + return fmt.Errorf(c) + } + } + + return nil + } +} + +func checkMatch(attributes map[string]string, attr string, gcp interface{}) string { + if gcpList, ok := gcp.([]string); ok { + return checkListMatch(attributes, attr, gcpList) + } + if gcpMap, ok := gcp.(map[string]string); ok { + return checkMapMatch(attributes, attr, gcpMap) + } + if gcpBool, ok := gcp.(bool); ok { + return checkBoolMatch(attributes, attr, gcpBool) + } + + tf := attributes[attr] + if tf != gcp { + return matchError(attr, tf, gcp) + } + return "" +} 
+ +func checkListMatch(attributes map[string]string, attr string, gcpList []string) string { + // A bunch of the TestAccDataprocJob_* tests fail without this. It's likely an inaccuracy that happens when shimming the terraform-json + // representation of state back to the old framework's representation of state. So, in the past we would get x.# = 0 whereas now we get x.# = ''. + // It's likely not intentional, however, shouldn't be a big problem - but if we notice it is the sdk team can address it. + if attributes[attr+".#"] == "" { + attributes[attr+".#"] = "0" + } + + num, err := strconv.Atoi(attributes[attr+".#"]) + if err != nil { + return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err) + } + if num != len(gcpList) { + return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpList)) + } + + for i, gcp := range gcpList { + if tf := attributes[fmt.Sprintf("%s.%d", attr, i)]; tf != gcp { + return matchError(fmt.Sprintf("%s[%d]", attr, i), tf, gcp) + } + } + + return "" +} + +func checkMapMatch(attributes map[string]string, attr string, gcpMap map[string]string) string { + // A bunch of the TestAccDataprocJob_* tests fail without this. It's likely an inaccuracy that happens when shimming the terraform-json + // representation of state back to the old framework's representation of state. So, in the past we would get x.# = 0 whereas now we get x.# = ''. + // It's likely not intentional, however, shouldn't be a big problem - but if we notice it is the sdk team can address it. 
+	if attributes[attr+".%"] == "" {
+		attributes[attr+".%"] = "0"
+	}
+
+	num, err := strconv.Atoi(attributes[attr+".%"])
+	if err != nil {
+		return fmt.Sprintf("Error in number conversion for attribute %s: %s", attr, err)
+	}
+	if num != len(gcpMap) {
+		return fmt.Sprintf("Cluster has mismatched %s size.\nTF Size: %d\nGCP Size: %d", attr, num, len(gcpMap))
+	}
+
+	// Maps are flattened as attr.<key>; compare every GCP entry against state.
+	for k, gcp := range gcpMap {
+		if tf := attributes[fmt.Sprintf("%s.%s", attr, k)]; tf != gcp {
+			return matchError(fmt.Sprintf("%s[%s]", attr, k), tf, gcp)
+		}
+	}
+
+	return ""
+}
+
+// checkBoolMatch verifies that the TF bool attribute attr matches gcpBool.
+// An unset ("") state value is treated as false, mirroring the SDK's zero
+// value for unset bools. Returns "" on a match, or a mismatch description.
+func checkBoolMatch(attributes map[string]string, attr string, gcpBool bool) string {
+	// Handle the case where an unset value defaults to false
+	var tf bool
+	var err error
+	if attributes[attr] == "" {
+		tf = false
+	} else {
+		tf, err = strconv.ParseBool(attributes[attr])
+		if err != nil {
+			return fmt.Sprintf("Error converting attribute %s to boolean: value is %s", attr, attributes[attr])
+		}
+	}
+
+	if tf != gcpBool {
+		return matchError(attr, tf, gcpBool)
+	}
+
+	return ""
+}
+
+// matchError formats a TF-state-vs-GCP-state mismatch message for attr.
+func matchError(attr, tf interface{}, gcp interface{}) string {
+	return fmt.Sprintf("Cluster has mismatched %s.\nTF State: %+v\nGCP State: %+v", attr, tf, gcp)
+}
+
+// TODO (mbang): Test `ExactlyOneOf` here
+// func testAccDataprocJob_missingJobConf() string {
+// 	return `
+// resource "google_dataproc_job" "missing_config" {
+// 	placement {
+// 		cluster_name = "na"
+// 	}
+
+// 	force_delete = true
+// }`
+// }
+
+// singleNodeClusterConfig is the shared HCL base for the job tests: a minimal
+// single-node cluster (zero workers) to keep acceptance-test cost down.
+// Format verbs: %s cluster-name suffix, %s subnetwork.
+var singleNodeClusterConfig = `
+resource "google_dataproc_cluster" "basic" {
+	name   = "dproc-job-test-%s"
+	region = "us-central1"
+
+	cluster_config {
+		# Keep the costs down with smallest config we can get away with
+		software_config {
+			override_properties = {
+				"dataproc:dataproc.allow.zero.workers" = "true"
+			}
+		}
+
+		master_config {
+			num_instances = 1
+			machine_type  = "e2-standard-2"
+			disk_config {
+				boot_disk_size_gb = 35
+			}
+		}
+
+		gce_cluster_config {
+			subnetwork = "%s"
+		}
+	}
+}
+`
+
+func
testAccDataprocJob_updatable(rnd, subnetworkName, jobId, del string) string { + return fmt.Sprintf( + singleNodeClusterConfig+` +resource "google_dataproc_job" "updatable" { + placement { + cluster_name = google_dataproc_cluster.basic.name + } + reference { + job_id = "%s" + } + + region = google_dataproc_cluster.basic.region + force_delete = %s + + pyspark_config { + main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" + } +} +`, rnd, subnetworkName, jobId, del) +} + +func testAccDataprocJob_pySpark(rnd, subnetworkName string) string { + return fmt.Sprintf( + singleNodeClusterConfig+` +resource "google_dataproc_job" "pyspark" { + placement { + cluster_name = google_dataproc_cluster.basic.name + } + reference { + job_id = "dproc-custom-job-id-%s" + } + + region = google_dataproc_cluster.basic.region + force_delete = true + + pyspark_config { + main_python_file_uri = "gs://dataproc-examples-2f10d78d114f6aaec76462e3c310f31f/src/pyspark/hello-world/hello-world.py" + properties = { + "spark.logConf" = "true" + } + logging_config { + driver_log_levels = { + "root" = "INFO" + } + } + } + + scheduling { + max_failures_per_hour = 1 + max_failures_total=20 + } + + labels = { + one = "1" + } +} +`, rnd, subnetworkName, rnd) +} + +func testAccDataprocJob_spark(rnd, subnetworkName string) string { + return fmt.Sprintf( + singleNodeClusterConfig+` +resource "google_dataproc_job" "spark" { + region = google_dataproc_cluster.basic.region + force_delete = true + placement { + cluster_name = google_dataproc_cluster.basic.name + } + + spark_config { + main_class = "org.apache.spark.examples.SparkPi" + jar_file_uris = ["file:///usr/lib/spark/examples/jars/spark-examples.jar"] + args = ["1000"] + properties = { + "spark.logConf" = "true" + } + logging_config { + driver_log_levels = { + } + } + } +} +`, rnd, subnetworkName) + +} + +func testAccDataprocJob_hadoop(rnd, subnetworkName string) string { + return fmt.Sprintf( 
+		singleNodeClusterConfig+`
+resource "google_dataproc_job" "hadoop" {
+	region       = google_dataproc_cluster.basic.region
+	force_delete = true
+	placement {
+		cluster_name = google_dataproc_cluster.basic.name
+	}
+
+	hadoop_config {
+		main_jar_file_uri = "file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar"
+		args = [
+			"wordcount",
+			"file:///usr/lib/spark/NOTICE",
+			"gs://${google_dataproc_cluster.basic.cluster_config[0].bucket}/hadoopjob_output_%s",
+		]
+	}
+}
+`, rnd, subnetworkName, rnd)
+
+}
+
+// testAccDataprocJob_hive returns a test config running a hive_config job
+// against the shared single-node cluster.
+func testAccDataprocJob_hive(rnd, subnetworkName string) string {
+	return fmt.Sprintf(
+		singleNodeClusterConfig+`
+resource "google_dataproc_job" "hive" {
+	region       = google_dataproc_cluster.basic.region
+	force_delete = true
+	placement {
+		cluster_name = google_dataproc_cluster.basic.name
+	}
+
+	hive_config {
+		query_list = [
+			"DROP TABLE IF EXISTS dprocjob_test",
+			"CREATE EXTERNAL TABLE dprocjob_test(bar int) LOCATION 'gs://${google_dataproc_cluster.basic.cluster_config[0].bucket}/hive_dprocjob_test/'",
+			"SELECT * FROM dprocjob_test WHERE bar > 2",
+		]
+	}
+}
+`, rnd, subnetworkName)
+
+}
+
+// testAccDataprocJob_pig returns a test config running a pig_config job
+// (word count over Pig's bundled LICENSE file).
+// FIX(review): removed the trailing space inside the LOAD path
+// ('.../LICENSE.txt ' -> '.../LICENSE.txt'); Pig takes the quoted string
+// verbatim, so the space looked like part of the filename and pointed the
+// query at a non-existent file. TODO confirm against a live run.
+func testAccDataprocJob_pig(rnd, subnetworkName string) string {
+	return fmt.Sprintf(
+		singleNodeClusterConfig+`
+resource "google_dataproc_job" "pig" {
+	region       = google_dataproc_cluster.basic.region
+	force_delete = true
+	placement {
+		cluster_name = google_dataproc_cluster.basic.name
+	}
+
+	pig_config {
+		query_list = [
+			"LNS = LOAD 'file:///usr/lib/pig/LICENSE.txt' AS (line)",
+			"WORDS = FOREACH LNS GENERATE FLATTEN(TOKENIZE(line)) AS word",
+			"GROUPS = GROUP WORDS BY word",
+			"WORD_COUNTS = FOREACH GROUPS GENERATE group, COUNT(WORDS)",
+			"DUMP WORD_COUNTS",
+		]
+	}
+}
+`, rnd, subnetworkName)
+
+}
+
+// testAccDataprocJob_sparksql returns a test config running a
+// sparksql_config job against the shared single-node cluster.
+func testAccDataprocJob_sparksql(rnd, subnetworkName string) string {
+	return fmt.Sprintf(
+		singleNodeClusterConfig+`
+resource "google_dataproc_job" "sparksql" {
+	region       = google_dataproc_cluster.basic.region
+	force_delete = true
+	placement {
+
cluster_name = google_dataproc_cluster.basic.name + } + + sparksql_config { + query_list = [ + "DROP TABLE IF EXISTS dprocjob_test", + "CREATE TABLE dprocjob_test(bar int)", + "SELECT * FROM dprocjob_test WHERE bar > 2", + ] + } +} +`, rnd, subnetworkName) + +} + +func testAccDataprocJob_presto(rnd, subnetworkName string) string { + return fmt.Sprintf(` +resource "google_dataproc_cluster" "basic" { + name = "dproc-job-test-%s" + region = "us-central1" + + cluster_config { + # Keep the costs down with smallest config we can get away with + software_config { + override_properties = { + "dataproc:dataproc.allow.zero.workers" = "true" + } + optional_components = ["PRESTO"] + } + + master_config { + num_instances = 1 + machine_type = "e2-standard-2" + disk_config { + boot_disk_size_gb = 35 + } + } + + gce_cluster_config { + subnetwork = "%s" + } + } +} + +resource "google_dataproc_job" "presto" { + region = google_dataproc_cluster.basic.region + force_delete = true + placement { + cluster_name = google_dataproc_cluster.basic.name + } + + presto_config { + query_list = [ + "SELECT * FROM system.metadata.schema_properties" + ] + } +} +`, rnd, subnetworkName) + +} diff --git a/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_workflow_template_migrate.go.tmpl b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_workflow_template_migrate.go.tmpl new file mode 100644 index 000000000000..8dba1611b9fa --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_workflow_template_migrate.go.tmpl @@ -0,0 +1,2125 @@ +package dataproc + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func resourceDataprocWorkflowTemplateResourceV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "jobs": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
The Directed Acyclic Graph of Jobs to submit.", + Elem: DataprocWorkflowTemplateJobsSchemaV0(), + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "The location for the resource", + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Output only. The resource name of the workflow template, as described in https://cloud.google.com/apis/design/resource_names. * For `projects.regions.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` * For `projects.locations.workflowTemplates`, the resource name of the template has the following format: `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`", + }, + + "placement": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. WorkflowTemplate scheduling information.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementSchemaV0(), + }, + + "dag_timeout": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Timeout duration for the DAG of jobs, expressed in seconds (see [JSON representation of duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). The timeout duration must be from 10 minutes (\"600s\") to 24 hours (\"86400s\"). The timer begins when the first job is submitted. 
If the workflow is running at the end of the timeout period, any remaining jobs are cancelled, the workflow is ended, and if the workflow was running on a [managed cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster), the cluster is deleted.", + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: "All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.", + }, + + "parameters": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is instantiated.", + Elem: DataprocWorkflowTemplateParametersSchemaV0(), + }, + + "project": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "The project for the resource", + }, + + "version": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Output only. The current version of this workflow template.", + Deprecated: "version is not useful as a configurable field, and will be removed in the future.", + }, + + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time template was created.", + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. Label **keys** must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). Label **values** may be empty, but, if present, must contain 1 to 63 characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). 
No more than 32 labels can be associated with a template.\n\n**Note**: This field is non-authoritative, and will only manage the labels present in your configuration.\nPlease refer to the field `effective_labels` for all of the labels present on the resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + ForceNew: true, + Description: "The combination of labels configured directly on the resource and default labels configured on the provider.", + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time template was last updated.", + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "step_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The step id. The id must be unique among all jobs within the template. The step id is used as prefix for job id, as job `goog-dataproc-workflow-step-id` label, and in prerequisiteStepIds field from other steps. The id must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). Cannot begin or end with underscore or hyphen. Must consist of between 3 and 50 characters.", + }, + + "hadoop_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Hadoop job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHadoopJobSchemaV0(), + }, + + "hive_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Hive job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHiveJobSchemaV0(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this job. 
Label keys must be between 1 and 63 characters long, and must conform to the following regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given job.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "pig_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Pig job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobSchemaV0(), + }, + + "prerequisite_step_ids": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "presto_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Presto job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobSchemaV0(), + }, + + "pyspark_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a PySpark job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPysparkJobSchemaV0(), + }, + + "scheduling": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job scheduling configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSchedulingSchemaV0(), + }, + + "spark_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a Spark job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkJobSchemaV0(), + }, + + "spark_r_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Job is a SparkR job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkRJobSchemaV0(), + }, + + "spark_sql_job": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Job is a SparkSql job.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHadoopJobSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchemaV0(), + }, + + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the driver's main class. The jar file containing the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the jar file containing the main class. Examples: 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar' 'hdfs:/tmp/test-samples/custom-wordcount.jar' 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHadoopJobLoggingConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHiveJobSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. 
Setting to `true` can be useful when executing independent parallel queries.", + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains Hive queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsHiveJobQueryListSchemaV0(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Hive command: `SET name=\"value\";`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsHiveJobQueryListSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobLoggingConfigSchemaV0(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. 
Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains the Pig queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPigJobQueryListSchemaV0(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Pig command: `name=[value]`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobLoggingConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPigJobQueryListSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_tags": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Presto client tags to attach to this query", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "continue_on_failure": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Whether to continue executing queries if a query fails. The default value is `false`. Setting to `true` can be useful when executing independent parallel queries.", + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchemaV0(), + }, + + "output_format": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values. 
Used to set Presto [session properties](https://prestodb.io/docs/current/sql/set-session.html) Equivalent to using the --session flag in the Presto CLI", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains SQL queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPrestoJobQueryListSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobLoggingConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPrestoJobQueryListSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. 
Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPysparkJobSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_python_file_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file.", + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchemaV0(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "python_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsPysparkJobLoggingConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSchedulingSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_failures_per_hour": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Maximum number of times per hour a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. A job may be reported as thrashing if driver exits with non-zero code 4 times within 10 minute window. Maximum value is 10.", + }, + + "max_failures_total": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. 
Maximum number of times in total a driver may be restarted as a result of driver exiting with non-zero code before job is reported failed. Maximum value is 240.", + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkJobSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchemaV0(), + }, + + "main_class": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The name of the driver's main class. 
The jar file that contains the class must be in the default CLASSPATH or specified in `jar_file_uris`.", + }, + + "main_jar_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the jar file that contains the main class.", + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkJobLoggingConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkRJobSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_r_file_uri": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The HCFS URI of the main R file to use as the driver. Must be a .R file.", + }, + + "archive_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "args": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The arguments to pass to the driver. 
Do not include arguments, such as `--conf`, that can be set as job properties, since a collision may occur that causes an incorrect job submission.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchemaV0(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/spark/conf/spark-defaults.conf and classes in user code.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkRJobLoggingConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "jar_file_uris": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
HCFS URIs of jar files to be added to the Spark CLASSPATH.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "logging_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The runtime log config for job execution.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchemaV0(), + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "query_file_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "The HCFS URI of the script that contains SQL queries.", + }, + + "query_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A list of queries.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchemaV0(), + }, + + "script_variables": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET `name=\"value\";`).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobLoggingConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "driver_log_levels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The per-package log levels for the driver. This may include \"root\" package name to configure rootLogger. 
Examples: 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateJobsSparkSqlJobQueryListSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "queries": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. Here is an example of a Dataproc API snippet that uses a QueryList to specify a HiveJob: \"hiveJob\": { \"queryList\": { \"queries\": [ \"query1\", \"query2\", \"query3;query4\", ] } }", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_selector": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. A selector that chooses target cluster for jobs based on metadata. The selector is evaluated at the time each job is submitted.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementClusterSelectorSchemaV0(), + }, + + "managed_cluster": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "A cluster that is managed by the workflow.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementClusterSelectorSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_labels": { + Type: schema.TypeMap, + Required: true, + ForceNew: true, + Description: "Required. The cluster labels. 
Cluster must have all labels to match.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The zone where workflow process executes. This parameter does not affect the selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. The cluster name prefix. A unique cluster name will be formed by appending a random suffix. The name must contain only lower-case letters (a-z), numbers (0-9), and hyphens (-). Must begin with a letter. Cannot begin or end with hyphen. Must consist of between 2 and 35 characters.", + }, + + "config": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. The cluster configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSchemaV0(), + }, + + "labels": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: p{Ll}p{Lo}{0,62} Label values must be between 1 and 63 characters long, and must conform to the following PCRE regular expression: [p{Ll}p{Lo}p{N}_-]{0,63} No more than 32 labels can be associated with a given cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscaling_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Autoscaling config for the policy associated with the cluster. Cluster does not autoscale if this field is unset.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSchemaV0(), + }, + + "encryption_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Encryption settings for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSchemaV0(), + }, + + "endpoint_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Port/endpoint configuration for this cluster", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSchemaV0(), + }, + + "gce_cluster_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The shared Compute Engine config settings for all instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSchemaV0(), + }, +{{- if ne $.TargetVersionName "ga" }} + "gke_cluster_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. BETA. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes. Setting this is considered mutually exclusive with Compute Engine-based options such as `gce_cluster_config`, `master_config`, `worker_config`, `secondary_worker_config`, and `autoscaling_config`.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSchemaV0(), + }, +{{- end }} + "initialization_actions": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. 
You can test a node's `role` metadata to run an executable on a master or worker node, as shown below using `curl` (you can also use `wget`): ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) if [[ \"${ROLE}\" == 'Master' ]]; then ... master specific actions ... else ... worker specific actions ... fi", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSchemaV0(), + }, + + "lifecycle_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Lifecycle setting for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSchemaV0(), + }, + + "master_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for the master instance in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigSchemaV0(), + }, +{{- if ne $.TargetVersionName "ga" }} + "metastore_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Metastore configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSchemaV0(), + }, +{{- end }} + "secondary_worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for additional worker instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSchemaV0(), + }, + + "security_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Security settings for the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSchemaV0(), + }, + + "software_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The config settings for software inside the cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSchemaV0(), + }, + + "staging_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. A Cloud Storage bucket used to stage job dependencies, config files, and job driver console output. If you do not specify a staging bucket, Cloud Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's staging bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket (see [Dataproc staging bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)). **This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + }, + + "temp_bucket": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs data, such as Spark and MapReduce history files. If you do not specify a temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or EU) for your cluster's temp bucket according to the Compute Engine zone where your cluster is deployed, and then create and manage this project-level, per-location bucket. The default bucket has a TTL of 90 days, but you can use any TTL (or none) if you specify a bucket. 
**This field requires a Cloud Storage bucket name, not a URI to a Cloud Storage bucket.**", + }, + + "worker_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine config settings for worker instances in a cluster.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigAutoscalingConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "policy": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The autoscaling policy used by the cluster. Only resource names including projectid and location (region) are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]` Note that the policy must be in the same project and Dataproc region.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigEncryptionConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "gce_pd_kms_key_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Cloud KMS key name to use for PD disk encryption for all instances in the cluster.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigEndpointConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_http_port_access": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. 
If true, enable http access to specific ports on the cluster from external sources. Defaults to false.", + }, + + "http_ports": { + Type: schema.TypeMap, + Computed: true, + Description: "Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "internal_ip_only": { + Type: schema.TypeBool, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. If true, all instances in the cluster will only have internal IP addresses. By default, clusters are not restricted to internal IP addresses, and will have ephemeral external IP addresses assigned to each instance. This `internal_ip_only` restriction can only be enabled for subnetwork enabled networks, and all off-cluster dependencies must be configured to be accessible without external IP addresses.", + }, + + "metadata": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "The Compute Engine metadata entries to add to all instances (see [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine network to be used for machine communications. Cannot be specified with subnetwork_uri. If neither `network_uri` nor `subnetwork_uri` is specified, the \"default\" network of the project is used, if it exists. Cannot be a \"Custom Subnet Network\" (see [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for more information). 
A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` * `projects/[project_id]/regions/global/default` * `default`", + }, + + "node_group_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Node Group Affinity for sole-tenant clusters.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySchemaV0(), + }, + + "private_ipv6_google_access": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The type of IPv6 access for a cluster. Possible values: PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, INHERIT_FROM_SUBNETWORK, OUTBOUND, BIDIRECTIONAL", + }, + + "reservation_affinity": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Reservation Affinity for consuming Zonal reservation.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySchemaV0(), + }, + + "service_account": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The [Dataproc service account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc) (also see [VM Data Plane identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity)) used by Dataproc cluster VM instances to access Google Cloud Platform services. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.", + }, + + "service_account_scopes": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The URIs of service account scopes to be included in Compute Engine instances. 
The following base set of scopes is always included: * https://www.googleapis.com/auth/cloud.useraccounts.readonly * https://www.googleapis.com/auth/devstorage.read_write * https://www.googleapis.com/auth/logging.write If no scopes are specified, the following defaults are also provided: * https://www.googleapis.com/auth/bigquery * https://www.googleapis.com/auth/bigtable.admin.table * https://www.googleapis.com/auth/bigtable.data * https://www.googleapis.com/auth/devstorage.full_control", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "shielded_instance_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Shielded Instance Config for clusters using Compute Engine Shielded VMs.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSchemaV0(), + }, + + "subnetwork": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine subnetwork to be used for machine communications. Cannot be specified with network_uri. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0` * `projects/[project_id]/regions/us-east1/subnetworks/sub0` * `sub0`", + }, + + "tags": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Description: "The Compute Engine tags to add to all instances (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).", + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + + "zone": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The zone where the Compute Engine cluster will be located. On a create request, it is required in the \"global\" region. 
If omitted in a non-global Dataproc region, the service will pick a zone in the corresponding Compute Engine region. On a get request, zone will always be present. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` * `projects/[project_id]/zones/[zone]` * `us-central1-f`", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigNodeGroupAffinitySchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "node_group": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. The URI of a sole-tenant [node group resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups) that the cluster will be created on. A full URL, partial URI, or node group name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `projects/[project_id]/zones/us-central1-a/nodeGroups/node-group-1` * `node-group-1`", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigReservationAffinitySchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "consume_reservation_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of reservation to consume Possible values: TYPE_UNSPECIFIED, NO_RESERVATION, ANY_RESERVATION, SPECIFIC_RESERVATION", + }, + + "key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Corresponds to the label key of reservation resource.", + }, + + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Corresponds to the label values of reservation resource.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGceClusterConfigShieldedInstanceConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enable_integrity_monitoring": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Defines whether instances have integrity monitoring enabled. Integrity monitoring compares the most recent boot measurements to the integrity policy baseline and returns a pair of pass/fail results depending on whether they match or not.", + }, + + "enable_secure_boot": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Defines whether the instances have Secure Boot enabled. Secure Boot helps ensure that the system only runs authentic software by verifying the digital signature of all boot components, and halting the boot process if signature verification fails.", + }, + + "enable_vtpm": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Defines whether the instance have the vTPM enabled. Virtual Trusted Platform Module protects objects like keys, certificates and enables Measured Boot by performing the measurements needed to create a known good boot baseline, called the integrity policy baseline.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "namespaced_gke_deployment_target": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
A target for the deployment.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigGkeClusterConfigNamespacedGkeDeploymentTargetSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_namespace": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. A namespace within the GKE cluster to deploy into.", + }, + + "target_gke_cluster": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The target GKE cluster to deploy to. Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigInitializationActionsSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "executable_file": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Required. Cloud Storage URI of executable file.", + }, + + "execution_timeout": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Amount of time executable has to complete. Default is 10 minutes (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)). Cluster creation fails with an explanatory error message (the name of the executable that caused the error and the exceeded timeout period) if the executable is not completed at end of the timeout period.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigLifecycleConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "auto_delete_time": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. 
The time when cluster will be auto-deleted (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "auto_delete_ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The lifetime duration of cluster. The cluster will be auto-deleted at the end of this period. Minimum value is 10 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "idle_delete_ttl": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The duration to keep the cluster alive while idling (when no jobs are running). Passing this threshold will cause the cluster to be deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON representation of [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + + "idle_start_time": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The time when cluster became idle (most recent job finished) and became eligible for deletion due to idleness (see JSON representation of [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSchemaV0(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. 
Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSchemaV0(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. 
See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. 
This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigAcceleratorsSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigDiskConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). 
Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMasterConfigManagedGroupConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigMetastoreConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "dataproc_metastore_service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Required. Resource name of an existing Dataproc Metastore service. 
Example: * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSchemaV0(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSchemaV0(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. 
Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. 
Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigAcceleratorsSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigDiskConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. 
Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecondaryWorkerConfigManagedGroupConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kerberos_config": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. 
Kerberos related configuration.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSecurityConfigKerberosConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cross_realm_trust_admin_server": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The admin server (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + }, + + "cross_realm_trust_kdc": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The KDC (IP or hostname) for the remote trusted realm in a cross realm trust relationship.", + }, + + "cross_realm_trust_realm": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The remote realm the Dataproc on-cluster KDC will trust, should the user enable cross realm trust.", + }, + + "cross_realm_trust_shared_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the shared password between the on-cluster Kerberos realm and the remote trusted realm, in a cross realm trust relationship.", + }, + + "enable_kerberos": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set this field to true to enable Kerberos on a cluster.", + }, + + "kdc_db_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the master key of the KDC database.", + }, + + "key_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. 
The Cloud Storage URI of a KMS encrypted file containing the password to the user provided key. For the self-signed certificate, this password is generated by Dataproc.", + }, + + "keystore": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of the keystore file used for SSL encryption. If not provided, Dataproc will provide a self-signed certificate.", + }, + + "keystore_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided keystore. For the self-signed certificate, this password is generated by Dataproc.", + }, + + "kms_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The uri of the KMS key used to encrypt various sensitive files.", + }, + + "realm": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The name of the on-cluster Kerberos realm. If not specified, the uppercased domain of hostnames will be the realm.", + }, + + "root_principal_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the root principal password.", + }, + + "tgt_lifetime_hours": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The lifetime of the ticket granting ticket, in hours. If not specified, or user specifies 0, then default value 10 will be used.", + }, + + "truststore": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of the truststore file used for SSL encryption. 
If not provided, Dataproc will provide a self-signed certificate.", + }, + + "truststore_password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Cloud Storage URI of a KMS encrypted file containing the password to the user provided truststore. For the self-signed certificate, this password is generated by Dataproc.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigSoftwareConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image_version": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The version of software inside the cluster. It must be one of the supported [Dataproc Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_dataproc_versions), such as \"1.2\" (including a subminor version, such as \"1.2.29\"), or the [\"preview\" version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions). If unspecified, it defaults to the latest Debian version.", + }, + + "optional_components": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. The set of components to activate on the cluster.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "properties": { + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + Description: "Optional. The properties to set on daemon config files. Property keys are specified in `prefix:property` format, for example `core:hadoop.tmp.dir`. 
The following are supported prefixes and their mappings: * capacity-scheduler: `capacity-scheduler.xml` * core: `core-site.xml` * distcp: `distcp-default.xml` * hdfs: `hdfs-site.xml` * hive: `hive-site.xml` * mapred: `mapred-site.xml` * pig: `pig.properties` * spark: `spark-defaults.conf` * yarn: `yarn-site.xml` For more information, see [Cluster properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerators": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine accelerator configuration for these instances.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSchemaV0(), + }, + + "disk_config": { + Type: schema.TypeList, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Disk option config settings.", + MaxItems: 1, + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSchemaV0(), + }, + + "image": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + Description: "Optional. The Compute Engine image resource used for cluster instances. The URI can represent an image or image family. Image examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]` * `projects/[project_id]/global/images/[image-id]` * `image-id` Image family examples. 
Dataproc will use the most recent image from the family: * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]` * `projects/[project_id]/global/images/family/[custom-image-family-name]` If the URI is unspecified, it will be inferred from `SoftwareConfig.image_version` or the system default.", + }, + + "machine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. The Compute Engine machine type used for cluster instances. A full URL, partial URI, or short name are valid. Examples: * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` * `n1-standard-2` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the machine type resource, for example, `n1-standard-2`.", + }, + + "min_cpu_platform": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the minimum cpu platform for the Instance Group. See [Dataproc -> Minimum CPU Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).", + }, + + "num_instances": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. The number of VM instances in the instance group. For [HA cluster](/dataproc/docs/concepts/configuring-clusters/high-availability) [master_config](#FIELDS.master_config) groups, **must be set to 3**. For standard cluster [master_config](#FIELDS.master_config) groups, **must be set to 1**.", + }, + + "preemptibility": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Specifies the preemptibility of the instance group. 
The default value for master and worker groups is `NON_PREEMPTIBLE`. This default cannot be changed. The default value for secondary instances is `PREEMPTIBLE`. Possible values: PREEMPTIBILITY_UNSPECIFIED, NON_PREEMPTIBLE, PREEMPTIBLE", + }, + + "instance_names": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The list of instance names. Dataproc derives the names from `cluster_name`, `num_instances`, and the instance group.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "is_preemptible": { + Type: schema.TypeBool, + Computed: true, + Description: "Output only. Specifies that this instance group contains preemptible instances.", + }, + + "managed_group_config": { + Type: schema.TypeList, + Computed: true, + Description: "Output only. The config for Compute Engine Instance Group Manager that manages this group. This is only used for preemptible instance groups.", + Elem: DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigAcceleratorsSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "The number of the accelerator cards of this type exposed to this instance.", + }, + + "accelerator_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Full URL, partial URI, or short name of the accelerator type resource to expose to this instance. See [Compute Engine AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes). 
Examples: * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` * `nvidia-tesla-k80` **Auto Zone Exception**: If you are using the Dataproc [Auto Zone Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement) feature, you must use the short name of the accelerator type resource, for example, `nvidia-tesla-k80`.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigDiskConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "boot_disk_size_gb": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Optional. Size in GB of the boot disk (default is 500GB).", + }, + + "boot_disk_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Type of the boot disk (default is \"pd-standard\"). Valid values: \"pd-balanced\" (Persistent Disk Balanced Solid State Drive), \"pd-ssd\" (Persistent Disk Solid State Drive), or \"pd-standard\" (Persistent Disk Hard Disk Drive). See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).", + }, + + "num_local_ssds": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + ForceNew: true, + Description: "Optional. Number of attached SSDs, from 0 to 4 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. 
If one or more SSDs are attached, this runtime bulk data is spread across them, and the boot disk contains only basic config and installed binaries.", + }, + }, + } +} + +func DataprocWorkflowTemplatePlacementManagedClusterConfigWorkerConfigManagedGroupConfigSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_group_manager_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Group Manager for this group.", + }, + + "instance_template_name": { + Type: schema.TypeString, + Computed: true, + Description: "Output only. The name of the Instance Template used for the Managed Instance Group.", + }, + }, + } +} + +func DataprocWorkflowTemplateParametersSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "fields": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. A field path is similar in syntax to a google.protobuf.FieldMask. For example, a field path that references the zone field of a workflow template's cluster selector would be specified as `placement.clusterSelector.zone`. 
Also, field paths can reference fields using the following syntax: * Values in maps can be referenced by key: * labels['key'] * placement.clusterSelector.clusterLabels['key'] * placement.managedCluster.labels['key'] * placement.clusterSelector.clusterLabels['key'] * jobs['step-id'].labels['key'] * Jobs in the jobs list can be referenced by step-id: * jobs['step-id'].hadoopJob.mainJarFileUri * jobs['step-id'].hiveJob.queryFileUri * jobs['step-id'].pySparkJob.mainPythonFileUri * jobs['step-id'].hadoopJob.jarFileUris[0] * jobs['step-id'].hadoopJob.archiveUris[0] * jobs['step-id'].hadoopJob.fileUris[0] * jobs['step-id'].pySparkJob.pythonFileUris[0] * Items in repeated fields can be referenced by a zero-based index: * jobs['step-id'].sparkJob.args[0] * Other examples: * jobs['step-id'].hadoopJob.properties['key'] * jobs['step-id'].hadoopJob.args[0] * jobs['step-id'].hiveJob.scriptVariables['key'] * jobs['step-id'].hadoopJob.mainJarFileUri * placement.clusterSelector.zone It may not be possible to parameterize maps and repeated fields in their entirety since only individual map values and individual items in repeated fields can be referenced. For example, the following field paths are invalid: - placement.clusterSelector.clusterLabels - jobs['step-id'].sparkJob.args", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Required. Parameter name. The parameter name is used as the key, and paired with the parameter value, which are passed to the template when the template is instantiated. The name must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters.", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Optional. Brief description of the parameter. 
Must not exceed 1024 characters.", + }, + + "validation": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Optional. Validation rules to be applied to this parameter's value.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regex": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Validation based on regular expressions.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationRegexSchemaV0(), + }, + + "values": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: "Validation based on a list of allowed values.", + MaxItems: 1, + Elem: DataprocWorkflowTemplateParametersValidationValuesSchemaV0(), + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationRegexSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "regexes": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient).", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func DataprocWorkflowTemplateParametersValidationValuesSchemaV0() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "values": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + Description: "Required. 
List of allowed values for the parameter.", + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func ResourceDataprocWorkflowTemplateUpgradeV0(_ context.Context, rawState map[string]interface{}, meta interface{}) (map[string]interface{}, error) { + return tpgresource.TerraformLabelsStateUpgrade(rawState) +} diff --git a/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_workflow_template_test.go.tmpl b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_workflow_template_test.go.tmpl new file mode 100644 index 000000000000..b27ed8ac03ed --- /dev/null +++ b/mmv1/third_party/terraform/services/dataproc/go/resource_dataproc_workflow_template_test.go.tmpl @@ -0,0 +1,237 @@ +package dataproc_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccDataprocWorkflowTemplate_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "project": envvar.GetTestProjectFromEnv(), + "version": "2.0.35-debian10", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: funcAccTestDataprocWorkflowTemplateCheckDestroy(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccDataprocWorkflowTemplate_basic(context), + }, + { + ImportState: true, + ImportStateVerify: true, + // The "labels" field in the state are decided by the configuration. 
+ // During importing, as the configuration is unavailable, the "labels" field in the state will be empty. + // So add the "labels" to the ImportStateVerifyIgnore list. + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + ResourceName: "google_dataproc_workflow_template.template", + }, + }, + }) +} + +func TestAccDataprocWorkflowTemplate_withShieldedVMs(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "project": envvar.GetTestProjectFromEnv(), + "version": "2.0.35-debian10", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: funcAccTestDataprocWorkflowTemplateCheckDestroy(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccDataprocWorkflowTemplate_withShieldedVMs(context), + }, + { + ImportState: true, + ImportStateVerify: true, + ResourceName: "google_dataproc_workflow_template.shielded_vms_template", + }, + }, + }) +} + +func testAccDataprocWorkflowTemplate_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataproc_workflow_template" "template" { + name = "template%{random_suffix}" + location = "us-central1" + placement { + managed_cluster { + cluster_name = "my-cluster" + config { + gce_cluster_config { + zone = "us-central1-a" + tags = ["foo", "bar"] + } + master_config { + num_instances = 1 + machine_type = "n1-standard-1" + disk_config { + boot_disk_type = "pd-ssd" + boot_disk_size_gb = 15 + } + } + worker_config { + num_instances = 3 + machine_type = "n1-standard-2" + disk_config { + boot_disk_size_gb = 10 + num_local_ssds = 2 + } + } + + secondary_worker_config { + num_instances = 2 + } + software_config { + image_version = "%{version}" + } + } + } + } + jobs { + step_id = "someJob" + spark_job { + main_class = "SomeClass" + } 
+ } + jobs { + step_id = "otherJob" + prerequisite_step_ids = ["someJob"] + presto_job { + query_file_uri = "someuri" + } + } + + labels = { + env = "foo" + somekey = "somevalue" + } +} +`, context) +} + +func testAccDataprocWorkflowTemplate_withShieldedVMs(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataproc_workflow_template" "shielded_vms_template" { + name = "template%{random_suffix}" + location = "us-central1" + placement { + managed_cluster { + cluster_name = "my-shielded-cluster" + config { + gce_cluster_config { + zone = "us-central1-a" + tags = ["foo", "bar"] + shielded_instance_config { + enable_secure_boot = true + enable_vtpm = true + enable_integrity_monitoring = true + } + } + master_config { + num_instances = 1 + machine_type = "n1-standard-1" + disk_config { + boot_disk_type = "pd-ssd" + boot_disk_size_gb = 15 + } + } + worker_config { + num_instances = 3 + machine_type = "n1-standard-2" + disk_config { + boot_disk_size_gb = 10 + num_local_ssds = 2 + } + } + + secondary_worker_config { + num_instances = 2 + } + software_config { + image_version = "%{version}" + } + } + } + } + jobs { + step_id = "someJob" + spark_job { + main_class = "SomeClass" + } + } + jobs { + step_id = "otherJob" + prerequisite_step_ids = ["someJob"] + presto_job { + query_file_uri = "someuri" + } + } +} +`, context) +} + +func funcAccTestDataprocWorkflowTemplateCheckDestroy(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_dataproc_workflow_template" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}DataprocBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/workflowTemplates/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + 
+ if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("DataprocWorkflowTemplate still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/dataprocmetastore/go/data_source_dataproc_metastore_service.go.tmpl b/mmv1/third_party/terraform/services/dataprocmetastore/go/data_source_dataproc_metastore_service.go.tmpl new file mode 100644 index 000000000000..bb273c54e6e3 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataprocmetastore/go/data_source_dataproc_metastore_service.go.tmpl @@ -0,0 +1,43 @@ +package dataprocmetastore + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceDataprocMetastoreService() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceDataprocMetastoreService().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "service_id") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceDataprocMetastoreServiceRead, + Schema: dsSchema, + } +} + +func dataSourceDataprocMetastoreServiceRead(d *schema.ResourceData, meta interface{}) error { + id, err := tpgresource.ReplaceVars(d, meta.(*transport_tpg.Config), "projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/services/{{"{{"}}service_id{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + err = resourceDataprocMetastoreServiceRead(d, meta) + if err != nil { + return err + } + + if err := 
tpgresource.SetDataSourceLabels(d); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} diff --git a/mmv1/third_party/terraform/services/dataprocmetastore/go/data_source_dataproc_metastore_service_test.go b/mmv1/third_party/terraform/services/dataprocmetastore/go/data_source_dataproc_metastore_service_test.go new file mode 100644 index 000000000000..997e5668961a --- /dev/null +++ b/mmv1/third_party/terraform/services/dataprocmetastore/go/data_source_dataproc_metastore_service_test.go @@ -0,0 +1,51 @@ +package dataprocmetastore_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataprocMetastoreServiceDatasource_basic(t *testing.T) { + t.Parallel() + + name := "tf-test-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreServiceDatasource_basic(name, "DEVELOPER"), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_dataproc_metastore_service.my_metastore", "google_dataproc_metastore_service.my_metastore"), + ), + }, + }, + }) +} + +func testAccDataprocMetastoreServiceDatasource_basic(name, tier string) string { + return fmt.Sprintf(` +resource "google_dataproc_metastore_service" "my_metastore" { + service_id = "%s" + location = "us-central1" + tier = "%s" + + hive_metastore_config { + version = "2.3.6" + } + + labels = { + env = "test" + } +} + +data "google_dataproc_metastore_service" "my_metastore" { + service_id = google_dataproc_metastore_service.my_metastore.service_id + location = google_dataproc_metastore_service.my_metastore.location +} +`, name, tier) +} diff --git 
a/mmv1/third_party/terraform/services/dataprocmetastore/go/dataproc_metastore_service_diff_supress.go b/mmv1/third_party/terraform/services/dataprocmetastore/go/dataproc_metastore_service_diff_supress.go new file mode 100644 index 000000000000..d6e66f639de0 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataprocmetastore/go/dataproc_metastore_service_diff_supress.go @@ -0,0 +1,24 @@ +package dataprocmetastore + +import ( + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +const dataprocMetastoreProvidedOverride = "hive.metastore.warehouse.dir" + +func dataprocMetastoreServiceOverrideSuppress(k, old, new string, d *schema.ResourceData) bool { + // Suppress diffs for the label provided by Google + if strings.Contains(k, dataprocMetastoreProvidedOverride) && new == "" { + return true + } + + // Let diff be determined by labels (above) + if strings.Contains(k, "hive_metastore_config.0.config_overrides.%") { + return true + } + + // For other keys, don't suppress diff. 
+ return false +} diff --git a/mmv1/third_party/terraform/services/dataprocmetastore/go/resource_dataproc_metastore_service_test.go b/mmv1/third_party/terraform/services/dataprocmetastore/go/resource_dataproc_metastore_service_test.go new file mode 100644 index 000000000000..2705fad537a9 --- /dev/null +++ b/mmv1/third_party/terraform/services/dataprocmetastore/go/resource_dataproc_metastore_service_test.go @@ -0,0 +1,169 @@ +package dataprocmetastore_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataprocMetastoreService_updateAndImport(t *testing.T) { + t.Parallel() + + name := "tf-test-metastore-" + acctest.RandString(t, 10) + tier := [2]string{"DEVELOPER", "ENTERPRISE"} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreService_updateAndImport(name, tier[0]), + }, + { + ResourceName: "google_dataproc_metastore_service.my_metastore", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDataprocMetastoreService_updateAndImport(name, tier[1]), + }, + { + ResourceName: "google_dataproc_metastore_service.my_metastore", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDataprocMetastoreService_updateAndImport(name, tier string) string { + return fmt.Sprintf(` +resource "google_dataproc_metastore_service" "my_metastore" { + service_id = "%s" + location = "us-central1" + tier = "%s" + + hive_metastore_config { + version = "2.3.6" + } +} +`, name, tier) +} + +func TestAccDataprocMetastoreService_dataprocMetastoreServiceScheduledBackupExampleUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocMetastoreServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreService_dataprocMetastoreServiceScheduledBackupExample(context), + }, + { + ResourceName: "google_dataproc_metastore_service.backup", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_id", "location", "labels", "terraform_labels"}, + }, + { + Config: testAccDataprocMetastoreService_dataprocMetastoreServiceScheduledBackupExampleUpdate(context), + }, + }, + }) +} + +func TestAccDataprocMetastoreService_PrivateServiceConnect(t *testing.T) { + t.Skip("Skipping due to https://github.com/hashicorp/terraform-provider-google/issues/13710") + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataprocMetastoreServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataprocMetastoreService_PrivateServiceConnect(context), + }, + { + ResourceName: "google_dataproc_metastore_service.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_id", "location"}, + }, + }, + }) +} + +func testAccDataprocMetastoreService_PrivateServiceConnect(context map[string]interface{}) string { + return acctest.Nprintf(` +// Use data source instead of creating a subnetwork due to a bug on API side. +// With the bug, the new created subnetwork cannot be deleted when deleting the dataproc metastore service. 
+data "google_compute_subnetwork" "subnet" { + name = "default" + region = "us-central1" +} + +resource "google_dataproc_metastore_service" "default" { + service_id = "tf-test-metastore-srv%{random_suffix}" + location = "us-central1" + + hive_metastore_config { + version = "3.1.2" + } + + network_config { + consumers { + subnetwork = data.google_compute_subnetwork.subnet.id + } + } +} +`, context) +} + +func testAccDataprocMetastoreService_dataprocMetastoreServiceScheduledBackupExampleUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_dataproc_metastore_service" "backup" { + service_id = "tf-test-backup%{random_suffix}" + location = "us-central1" + port = 9080 + tier = "DEVELOPER" + + maintenance_window { + hour_of_day = 2 + day_of_week = "SUNDAY" + } + + hive_metastore_config { + version = "2.3.6" + } + + scheduled_backup { + enabled = true + cron_schedule = "0 0 * * 0" + time_zone = "America/Los_Angeles" + backup_location = "gs://${google_storage_bucket.bucket.name}" + } + + labels = { + env = "test" + } +} + +resource "google_storage_bucket" "bucket" { + name = "tf-test-backup%{random_suffix}" + location = "us-central1" +} +`, context) +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/deploymentmanager/go/deployment_manager_operation.go.tmpl b/mmv1/third_party/terraform/services/deploymentmanager/go/deployment_manager_operation.go.tmpl new file mode 100644 index 000000000000..6df1efa40000 --- /dev/null +++ b/mmv1/third_party/terraform/services/deploymentmanager/go/deployment_manager_operation.go.tmpl @@ -0,0 +1,104 @@ +package deploymentmanager + +import ( + "bytes" + "fmt" + "time" + + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + +{{ if eq $.TargetVersionName `ga` }} + 
"google.golang.org/api/compute/v1" +{{- else }} + compute "google.golang.org/api/compute/v0.beta" +{{- end }} +) + +type DeploymentManagerOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + OperationUrl string + tpgcompute.ComputeOperationWaiter +} + +func (w *DeploymentManagerOperationWaiter) IsRetryable(error) bool { + return false +} + +func (w *DeploymentManagerOperationWaiter) QueryOp() (interface{}, error) { + if w == nil || w.Op == nil || w.Op.SelfLink == "" { + return nil, fmt.Errorf("cannot query unset/nil operation") + } + + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: w.Op.SelfLink, + UserAgent: w.UserAgent, + }) + if err != nil { + return nil, err + } + op := &compute.Operation{} + if err := tpgresource.Convert(resp, op); err != nil { + return nil, fmt.Errorf("could not convert response to operation: %v", err) + } + return op, nil +} + + +func DeploymentManagerOperationWaitTime(config *transport_tpg.Config, resp interface{}, project, activity, userAgent string, timeout time.Duration) error { + op := &compute.Operation{} + err := tpgresource.Convert(resp, op) + if err != nil { + return err + } + + w := &DeploymentManagerOperationWaiter{ + Config: config, + UserAgent: userAgent, + OperationUrl: op.SelfLink, + ComputeOperationWaiter: tpgcompute.ComputeOperationWaiter{ + Project: project, + }, + } + if err := w.SetOp(op); err != nil { + return err + } + + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} + +func (w *DeploymentManagerOperationWaiter) Error() error { + if w != nil && w.Op != nil && w.Op.Error != nil { + return DeploymentManagerOperationError{ + HTTPStatusCode: w.Op.HttpErrorStatusCode, + HTTPMessage: w.Op.HttpErrorMessage, + OperationError: *w.Op.Error, + } + } + return nil +} + +// DeploymentManagerOperationError wraps information from the compute.Operation +// in an 
implementation of Error. +type DeploymentManagerOperationError struct { + HTTPStatusCode int64 + HTTPMessage string + compute.OperationError +} + +func (e DeploymentManagerOperationError) Error() string { + var buf bytes.Buffer + buf.WriteString("Deployment Manager returned errors for this operation, likely due to invalid configuration.") + buf.WriteString(fmt.Sprintf("Operation failed with HTTP error %d: %s.", e.HTTPStatusCode, e.HTTPMessage)) + buf.WriteString("Errors returned: \n") + for _, err := range e.Errors { + buf.WriteString(err.Message + "\n") + } + return buf.String() +} diff --git a/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go new file mode 100644 index 000000000000..b8ca477176d9 --- /dev/null +++ b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_agent_test.go @@ -0,0 +1,132 @@ +package dialogflow_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDialogflowAgent_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDialogflowAgent_full1(context), + }, + { + ResourceName: "google_dialogflow_agent.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"avatar_uri", "tier"}, + }, + { + Config: testAccDialogflowAgent_full2(context), + }, + { + ResourceName: "google_dialogflow_agent.foobar", + ImportState: 
true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"avatar_uri", "tier"}, + }, + }, + }) +} + +func testAccDialogflowAgent_full1(context map[string]interface{}) string { + // NOTE: we're creating a new project because you're only allowed one Agent per project + // -> to test creating an Agent you need to create the project too + return acctest.Nprintf(` + resource "google_project" "agent_project" { + name = "tf-test-dialogflow-%{random_suffix}" + project_id = "tf-test-dialogflow-%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + } + + resource "google_project_service" "agent_project" { + project = google_project.agent_project.project_id + service = "dialogflow.googleapis.com" + disable_dependent_services = false + } + + resource "google_service_account" "dialogflow_service_account" { + account_id = "tf-test-dialogflow-%{random_suffix}" + } + + resource "google_project_iam_member" "agent_create" { + project = google_project_service.agent_project.project + role = "roles/dialogflow.admin" + member = "serviceAccount:${google_service_account.dialogflow_service_account.email}" + } + + resource "google_dialogflow_agent" "foobar" { + project = google_project.agent_project.project_id + display_name = "tf-test-%{random_suffix}" + default_language_code = "en" + supported_language_codes = ["fr","de","es"] + time_zone = "America/New_York" + description = "Description 1." 
+ avatar_uri = "https://storage.cloud.google.com/dialogflow-test-host-image/cloud-logo.png" + enable_logging = true + match_mode = "MATCH_MODE_ML_ONLY" + classification_threshold = 0.3 + api_version = "API_VERSION_V2_BETA_1" + tier = "TIER_STANDARD" + depends_on = [google_project_iam_member.agent_create] + } + `, context) +} + +func testAccDialogflowAgent_full2(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_project" "agent_project" { + name = "tf-test-dialogflow-%{random_suffix}" + project_id = "tf-test-dialogflow-%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + } + + resource "google_project_service" "agent_project" { + project = google_project.agent_project.project_id + service = "dialogflow.googleapis.com" + disable_dependent_services = false + } + + resource "google_service_account" "dialogflow_service_account" { + account_id = "tf-test-dialogflow-%{random_suffix}" + } + + resource "google_project_iam_member" "agent_create" { + project = google_project_service.agent_project.project + role = "roles/dialogflow.admin" + member = "serviceAccount:${google_service_account.dialogflow_service_account.email}" + } + + resource "google_dialogflow_agent" "foobar" { + project = google_project.agent_project.project_id + display_name = "tf-test-%{random_suffix}update" + default_language_code = "en" + supported_language_codes = ["no"] + time_zone = "Europe/London" + description = "Description 2!" 
+ avatar_uri = "https://storage.cloud.google.com/dialogflow-test-host-image/cloud-logo-2.png" + enable_logging = false + match_mode = "MATCH_MODE_HYBRID" + classification_threshold = 0.7 + api_version = "API_VERSION_V2" + tier = "TIER_ENTERPRISE" + depends_on = [google_project_iam_member.agent_create] + } + `, context) +} diff --git a/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_entity_type_test.go b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_entity_type_test.go new file mode 100644 index 000000000000..859bcde89a29 --- /dev/null +++ b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_entity_type_test.go @@ -0,0 +1,141 @@ +package dialogflow_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDialogflowEntityType_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDialogflowEntityType_full1(context), + }, + { + ResourceName: "google_dialogflow_entity_type.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDialogflowEntityType_full2(context), + }, + { + ResourceName: "google_dialogflow_entity_type.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDialogflowEntityType_full1(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_project" "agent_project" { + name = "tf-test-dialogflow-%{random_suffix}" + project_id = 
"tf-test-dialogflow-%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + } + + resource "google_project_service" "agent_project" { + project = google_project.agent_project.project_id + service = "dialogflow.googleapis.com" + disable_dependent_services = false + } + + resource "google_service_account" "dialogflow_service_account" { + account_id = "tf-test-dialogflow-%{random_suffix}" + } + + resource "google_project_iam_member" "agent_create" { + project = google_project_service.agent_project.project + role = "roles/dialogflow.admin" + member = "serviceAccount:${google_service_account.dialogflow_service_account.email}" + } + + resource "google_dialogflow_agent" "agent" { + project = google_project.agent_project.project_id + display_name = "tf-test-agent-%{random_suffix}" + default_language_code = "en" + time_zone = "America/New_York" + depends_on = [google_project_iam_member.agent_create] + } + + resource "google_dialogflow_entity_type" "foobar" { + depends_on = [google_dialogflow_agent.agent] + project = google_project.agent_project.project_id + display_name = "tf-test-entity-%{random_suffix}" + kind = "KIND_MAP" + enable_fuzzy_extraction = true + entities { + value = "value1" + synonyms = ["synonym1","synonym2"] + } + entities { + value = "value2" + synonyms = ["synonym3","synonym4"] + } + } + `, context) +} + +func testAccDialogflowEntityType_full2(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_project" "agent_project" { + name = "tf-test-dialogflow-%{random_suffix}" + project_id = "tf-test-dialogflow-%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + } + + resource "google_project_service" "agent_project" { + project = google_project.agent_project.project_id + service = "dialogflow.googleapis.com" + disable_dependent_services = false + } + + resource "google_service_account" "dialogflow_service_account" { + account_id = "tf-test-dialogflow-%{random_suffix}" + 
} + + resource "google_project_iam_member" "agent_create" { + project = google_project_service.agent_project.project + role = "roles/dialogflow.admin" + member = "serviceAccount:${google_service_account.dialogflow_service_account.email}" + } + + resource "google_dialogflow_agent" "agent" { + project = google_project.agent_project.project_id + display_name = "tf-test-agent-%{random_suffix}" + default_language_code = "en" + time_zone = "America/New_York" + depends_on = [google_project_iam_member.agent_create] + } + + resource "google_dialogflow_entity_type" "foobar" { + depends_on = [google_dialogflow_agent.agent] + project = google_project.agent_project.project_id + display_name = "tf-test-entity2-%{random_suffix}" + kind = "KIND_LIST" + enable_fuzzy_extraction = false + entities { + value = "value1" + synonyms = ["value1"] + } + } + `, context) +} diff --git a/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_fulfillment_test.go b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_fulfillment_test.go new file mode 100644 index 000000000000..63e3b009b224 --- /dev/null +++ b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_fulfillment_test.go @@ -0,0 +1,135 @@ +package dialogflow_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDialogflowFulfillment_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: 
testAccDialogflowFulfillment_basic(context), + }, + { + ResourceName: "google_dialogflow_fulfillment.agent_fulfillment", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDialogflowEntityType_full(context), + }, + { + ResourceName: "google_dialogflow_fulfillment.agent_fulfillment", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDialogflowFulfillment_basic(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_project" "agent_project" { + name = "tf-test-dialogflow-%{random_suffix}" + project_id = "tf-test-dialogflow-%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + } + + resource "google_project_service" "agent_project" { + project = google_project.agent_project.project_id + service = "dialogflow.googleapis.com" + disable_dependent_services = false + } + + resource "google_service_account" "dialogflow_service_account" { + account_id = "tf-test-dialogflow-%{random_suffix}" + } + + resource "google_project_iam_member" "agent_create" { + project = google_project_service.agent_project.project + role = "roles/dialogflow.admin" + member = "serviceAccount:${google_service_account.dialogflow_service_account.email}" + } + + resource "google_dialogflow_agent" "agent" { + project = google_project.agent_project.project_id + display_name = "tf-test-agent-%{random_suffix}" + default_language_code = "en" + time_zone = "America/New_York" + depends_on = [google_project_iam_member.agent_create] + } + + resource "google_dialogflow_fulfillment" "agent_fulfillment" { + depends_on = [google_dialogflow_agent.agent] + project = google_project.agent_project.project_id + display_name = "tf-test-fulfillment-%{random_suffix}" + enabled = true + } + `, context) +} + +func testAccDialogflowEntityType_full(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_project" "agent_project" { + name = "tf-test-dialogflow-%{random_suffix}" + 
project_id = "tf-test-dialogflow-%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + } + + resource "google_project_service" "agent_project" { + project = google_project.agent_project.project_id + service = "dialogflow.googleapis.com" + disable_dependent_services = false + } + + resource "google_service_account" "dialogflow_service_account" { + account_id = "tf-test-dialogflow-%{random_suffix}" + } + + resource "google_project_iam_member" "agent_create" { + project = google_project_service.agent_project.project + role = "roles/dialogflow.admin" + member = "serviceAccount:${google_service_account.dialogflow_service_account.email}" + } + + resource "google_dialogflow_agent" "agent" { + project = google_project.agent_project.project_id + display_name = "tf-test-agent-%{random_suffix}" + default_language_code = "en" + time_zone = "America/New_York" + depends_on = [google_project_iam_member.agent_create] + } + + resource "google_dialogflow_fulfillment" "agent_fulfillment" { + depends_on = [google_dialogflow_agent.agent] + project = google_project.agent_project.project_id + display_name = "tf-test-entity2-%{random_suffix}" + enabled = true + generic_web_service { + uri = "https://google.com" + username = "admin" + password = "password" + request_headers = { + "name" = "wrench" + } + } + } + `, context) +} diff --git a/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_intent_test.go b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_intent_test.go new file mode 100644 index 000000000000..50f4fa033d8b --- /dev/null +++ b/mmv1/third_party/terraform/services/dialogflow/go/resource_dialogflow_intent_test.go @@ -0,0 +1,209 @@ +package dialogflow_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func 
TestAccDialogflowIntent_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDialogflowIntent_basic(context), + }, + { + ResourceName: "google_dialogflow_intent.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDialogflowIntent_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDialogflowIntent_full1(context), + }, + { + ResourceName: "google_dialogflow_intent.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDialogflowIntent_full2(context), + }, + { + ResourceName: "google_dialogflow_intent.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDialogflowIntent_basic(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_project" "agent_project" { + name = "tf-test-dialogflow-%{random_suffix}" + project_id = "tf-test-dialogflow-%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + } + + resource "google_project_service" "agent_project" { + project = google_project.agent_project.project_id + service = "dialogflow.googleapis.com" + disable_dependent_services = false + } + + resource "google_service_account" "dialogflow_service_account" { + 
account_id = "tf-test-dialogflow-%{random_suffix}" + } + + resource "google_project_iam_member" "agent_create" { + project = google_project_service.agent_project.project + role = "roles/dialogflow.admin" + member = "serviceAccount:${google_service_account.dialogflow_service_account.email}" + } + + resource "google_dialogflow_agent" "agent" { + project = google_project.agent_project.project_id + display_name = "tf-test-agent-%{random_suffix}" + default_language_code = "en" + time_zone = "America/New_York" + depends_on = [google_project_iam_member.agent_create] + } + + resource "google_dialogflow_intent" "foobar" { + depends_on = [google_dialogflow_agent.agent] + project = google_project.agent_project.project_id + display_name = "tf-test-%{random_suffix}" + } + `, context) +} + +func testAccDialogflowIntent_full1(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_project" "agent_project" { + name = "tf-test-dialogflow-%{random_suffix}" + project_id = "tf-test-dialogflow-%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + } + + resource "google_project_service" "agent_project" { + project = google_project.agent_project.project_id + service = "dialogflow.googleapis.com" + disable_dependent_services = false + } + + resource "google_service_account" "dialogflow_service_account" { + account_id = "tf-test-dialogflow-%{random_suffix}" + } + + resource "google_project_iam_member" "agent_create" { + project = google_project_service.agent_project.project + role = "roles/dialogflow.admin" + member = "serviceAccount:${google_service_account.dialogflow_service_account.email}" + } + + resource "google_dialogflow_agent" "agent" { + project = google_project.agent_project.project_id + display_name = "tf-test-agent-%{random_suffix}" + default_language_code = "en" + time_zone = "America/New_York" + depends_on = [google_project_iam_member.agent_create] + } + + resource "google_dialogflow_intent" "foobar" { + depends_on 
= [google_dialogflow_agent.agent] + project = google_project.agent_project.project_id + display_name = "tf-test-intent-%{random_suffix}" + webhook_state = "WEBHOOK_STATE_ENABLED" + priority = 1 + is_fallback = false + ml_disabled = true + action = "some_action" + reset_contexts = true + input_context_names = ["projects/${google_project.agent_project.project_id}/agent/sessions/-/contexts/some_id"] + events = ["some_event"] + default_response_platforms = ["FACEBOOK","SLACK"] + } + `, context) +} + +func testAccDialogflowIntent_full2(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_project" "agent_project" { + name = "tf-test-dialogflow-%{random_suffix}" + project_id = "tf-test-dialogflow-%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + } + + resource "google_project_service" "agent_project" { + project = google_project.agent_project.project_id + service = "dialogflow.googleapis.com" + disable_dependent_services = false + } + + resource "google_service_account" "dialogflow_service_account" { + account_id = "tf-test-dialogflow-%{random_suffix}" + } + + resource "google_project_iam_member" "agent_create" { + project = google_project_service.agent_project.project + role = "roles/dialogflow.admin" + member = "serviceAccount:${google_service_account.dialogflow_service_account.email}" + } + + resource "google_dialogflow_agent" "agent" { + project = google_project.agent_project.project_id + display_name = "tf-test-agent-%{random_suffix}" + default_language_code = "en" + time_zone = "America/New_York" + depends_on = [google_project_iam_member.agent_create] + } + + resource "google_dialogflow_intent" "foobar" { + depends_on = [google_dialogflow_agent.agent] + project = google_project.agent_project.project_id + display_name = "tf-test-intent-%{random_suffix}2" + webhook_state = "WEBHOOK_STATE_ENABLED_FOR_SLOT_FILLING" + priority = 2 + is_fallback = false + ml_disabled = false + action = "some_other_action" 
+ reset_contexts = false + input_context_names = ["projects/${google_project.agent_project.project_id}/agent/sessions/-/contexts/some_other_id"] + events = ["some_other_event"] + default_response_platforms = ["SKYPE"] + } + `, context) +} diff --git a/mmv1/third_party/terraform/services/healthcare/resource_healthcare_fhir_store_test.go.erb b/mmv1/third_party/terraform/services/healthcare/resource_healthcare_fhir_store_test.go.erb index 1390f9e83cd2..0ac201a6c6be 100644 --- a/mmv1/third_party/terraform/services/healthcare/resource_healthcare_fhir_store_test.go.erb +++ b/mmv1/third_party/terraform/services/healthcare/resource_healthcare_fhir_store_test.go.erb @@ -133,7 +133,6 @@ resource "google_healthcare_fhir_store" "default" { disable_resource_versioning = false enable_history_import = false version = "R4" - <% unless version == "ga" -%> enable_history_modifications = false <% end -%> @@ -161,7 +160,6 @@ resource "google_healthcare_fhir_store" "default" { send_full_resource = true send_previous_resource_on_delete = true } - <% unless version == "ga" -%> enable_history_modifications = true <% end -%> diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/data_source_google_iam_policy.go b/mmv1/third_party/terraform/services/resourcemanager/go/data_source_google_iam_policy.go new file mode 100644 index 000000000000..ba67ce30dbf5 --- /dev/null +++ b/mmv1/third_party/terraform/services/resourcemanager/go/data_source_google_iam_policy.go @@ -0,0 +1,262 @@ +package resourcemanager + +import ( + "encoding/json" + "fmt" + "regexp" + "sort" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + "google.golang.org/api/cloudresourcemanager/v1" +) + +// DataSourceGoogleIamPolicy returns a *schema.Resource that allows a customer +// 
to express a Google Cloud IAM policy in a data resource. This is an example +// of how the schema would be used in a config: +// +// data "google_iam_policy" "admin" { +// binding { +// role = "roles/storage.objectViewer" +// members = [ +// "user:evanbrown@google.com", +// ] +// } +// } +func DataSourceGoogleIamPolicy() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleIamPolicyRead, + Schema: map[string]*schema.Schema{ + "binding": { + Type: schema.TypeSet, + // Binding is optional because a user may want to set an IAM policy with no bindings + // This allows users to ensure that no bindings were created outside of terraform + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "role": { + Type: schema.TypeString, + Required: true, + }, + "members": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringDoesNotMatch(regexp.MustCompile("^deleted:"), "Terraform does not support IAM policies for deleted principals"), + }, + Set: schema.HashString, + }, + "condition": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expression": { + Type: schema.TypeString, + Required: true, + }, + "title": { + Type: schema.TypeString, + Required: true, + }, + "description": { + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "policy_data": { + Type: schema.TypeString, + Computed: true, + }, + "audit_config": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + }, + "audit_log_configs": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_type": { + Type: schema.TypeString, + Required: true, + }, + "exempted_members": { + Type: schema.TypeSet, + Elem: &schema.Schema{Type: 
schema.TypeString}, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +// dataSourceGoogleIamPolicyRead reads a data source from config and writes it +// to state. +func dataSourceGoogleIamPolicyRead(d *schema.ResourceData, meta interface{}) error { + var policy cloudresourcemanager.Policy + var bindings []*cloudresourcemanager.Binding + + // The schema supports multiple binding{} blocks + bset := d.Get("binding").(*schema.Set) + aset := d.Get("audit_config").(*schema.Set) + + // Convert each config binding into a cloudresourcemanager.Binding + // and merge member lists of equivalent binding{} blocks from the config provided by the user + bindingMap := map[string]*cloudresourcemanager.Binding{} + for _, v := range bset.List() { + binding := v.(map[string]interface{}) + members := tpgresource.ConvertStringSet(binding["members"].(*schema.Set)) + condition := tpgiamresource.ExpandIamCondition(binding["condition"]) + + // Map keys are used to identify binding{} blocks that are identical except for the member lists + key := binding["role"].(string) + if condition != nil { + key += fmt.Sprintf("-[%s]-[%s]-[%s]-[%s]", condition.Expression, condition.Title, condition.Description, condition.Location) + } + + if val, ok := bindingMap[key]; ok { + // Add members to existing cloudresourcemanager.Binding in the map + m := append(val.Members, members...) 
+ binding := bindingMap[key] + binding.Members = m + bindingMap[key] = binding + } else { + // Add new cloudresourcemanager.Binding to the map + bindingMap[key] = &cloudresourcemanager.Binding{ + Role: binding["role"].(string), + Members: members, + Condition: condition, + } + } + } + + // All binding{} blocks, post conversion to cloudresourcemanager.Binding and combining of member lists, are stored in an array + bindings = []*cloudresourcemanager.Binding{} + for _, v := range bindingMap { + v := v + bindings = append(bindings, v) + } + policy.Bindings = bindings + + // Sort bindings within the list to get simpler diffs, as it's what the API does + // Sorting is based on the binding's role + condition fields + sort.Slice(policy.Bindings, iamPolicyBindingsLessFunction(policy)) + + // Sort members within each binding in the list to get simpler diffs, as it's what the API does + for i := 0; i < len(policy.Bindings); i++ { + sort.Strings(policy.Bindings[i].Members) + } + + // Convert each audit_config into a cloudresourcemanager.AuditConfig + policy.AuditConfigs = expandAuditConfig(aset) + + // Marshal cloudresourcemanager.Policy to JSON suitable for storing in state + pjson, err := json.Marshal(&policy) + if err != nil { + // should never happen if the above code is correct + return err + } + pstring := string(pjson) + + if err := d.Set("policy_data", pstring); err != nil { + return fmt.Errorf("Error setting policy_data: %s", err) + } + d.SetId(strconv.Itoa(tpgresource.Hashcode(pstring))) + + return nil +} + +func expandAuditConfig(set *schema.Set) []*cloudresourcemanager.AuditConfig { + auditConfigs := make([]*cloudresourcemanager.AuditConfig, 0, set.Len()) + for _, v := range set.List() { + config := v.(map[string]interface{}) + // build list of audit configs first + auditLogConfigSet := config["audit_log_configs"].(*schema.Set) + // the array we're going to add to the outgoing resource + auditLogConfigs := make([]*cloudresourcemanager.AuditLogConfig, 0, 
auditLogConfigSet.Len()) + for _, y := range auditLogConfigSet.List() { + logConfig := y.(map[string]interface{}) + auditLogConfigs = append(auditLogConfigs, &cloudresourcemanager.AuditLogConfig{ + LogType: logConfig["log_type"].(string), + ExemptedMembers: tpgresource.ConvertStringArr(logConfig["exempted_members"].(*schema.Set).List()), + }) + } + auditConfigs = append(auditConfigs, &cloudresourcemanager.AuditConfig{ + Service: config["service"].(string), + AuditLogConfigs: auditLogConfigs, + }) + } + return auditConfigs +} + +func iamPolicyBindingsLessFunction(policy cloudresourcemanager.Policy) func(i, j int) bool { + + return func(i, j int) bool { + // Sort bindings by role, if they're not the same + sameRole := policy.Bindings[i].Role == policy.Bindings[j].Role + if !sameRole { + return policy.Bindings[i].Role < policy.Bindings[j].Role + } + + iConditionOk := policy.Bindings[i].Condition != nil + jConditionOk := policy.Bindings[j].Condition != nil + + // If both bindings lack conditions we cannot sort them further + if !iConditionOk && !jConditionOk { + return false + } + + // Sort by presence of a condition on only one of the two bindings + if !iConditionOk && jConditionOk { + return true + } + if iConditionOk && !jConditionOk { + return false + } + + // At this point both bindings have conditions + + sameExpression := policy.Bindings[i].Condition.Expression == policy.Bindings[j].Condition.Expression + sameTitle := policy.Bindings[i].Condition.Title == policy.Bindings[j].Condition.Title + sameDescription := policy.Bindings[i].Condition.Description == policy.Bindings[j].Condition.Description + + // Don't sort if conditions are the same + if sameExpression && sameTitle && sameDescription { + return false + } + + // Sort by both bindings' conditions' expressions, if they're not equivalent + if !sameExpression { + return policy.Bindings[i].Condition.Expression < policy.Bindings[j].Condition.Expression + } + + // Sort by both bindings' conditions' titles, if 
they're not equivalent + if !sameTitle { + return policy.Bindings[i].Condition.Title < policy.Bindings[j].Condition.Title + } + + // Comparing conditions' descriptions is the last available way to sort + return policy.Bindings[i].Condition.Description < policy.Bindings[j].Condition.Description + } +} diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/iam_service_account.go.tmpl b/mmv1/third_party/terraform/services/resourcemanager/go/iam_service_account.go.tmpl new file mode 100644 index 000000000000..c8187fcc9420 --- /dev/null +++ b/mmv1/third_party/terraform/services/resourcemanager/go/iam_service_account.go.tmpl @@ -0,0 +1,117 @@ +package resourcemanager + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iam/v1" +) + +var IamServiceAccountSchema = map[string]*schema.Schema{ + "service_account_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp(verify.ServiceAccountLinkRegex), + }, +} + +type ServiceAccountIamUpdater struct { + serviceAccountId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewServiceAccountIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + return &ServiceAccountIamUpdater{ + serviceAccountId: d.Get("service_account_id").(string), + d: d, + Config: config, + }, nil +} + +func ServiceAccountIdParseFunc(d *schema.ResourceData, _ *transport_tpg.Config) error { + if err := d.Set("service_account_id", d.Id()); err != nil { + return 
fmt.Errorf("Error setting service_account_id: %s", err) + } + return nil +} + +func (u *ServiceAccountIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewIamClient(userAgent).Projects.ServiceAccounts.GetIamPolicy(u.serviceAccountId).OptionsRequestedPolicyVersion(tpgiamresource.IamPolicyVersion).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := iamToResourceManagerPolicy(p) + if err != nil { + return nil, err + } + + return cloudResourcePolicy, nil +} + +func (u *ServiceAccountIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + iamPolicy, err := resourceManagerToIamPolicy(policy) + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewIamClient(userAgent).Projects.ServiceAccounts.SetIamPolicy(u.GetResourceId(), &iam.SetIamPolicyRequest{ + Policy: iamPolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *ServiceAccountIamUpdater) GetResourceId() string { + return u.serviceAccountId +} + +func (u *ServiceAccountIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-service-account-%s", u.serviceAccountId) +} + +func (u *ServiceAccountIamUpdater) DescribeResource() string { + return fmt.Sprintf("service account '%s'", u.serviceAccountId) +} + +func resourceManagerToIamPolicy(p *cloudresourcemanager.Policy) (*iam.Policy, error) { + out := &iam.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a v1 policy to a iam policy: 
{{"{{"}}err{{"}}"}}", err) + } + return out, nil +} + +func iamToResourceManagerPolicy(p *iam.Policy) (*cloudresourcemanager.Policy, error) { + out := &cloudresourcemanager.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a iam policy to a v1 policy: {{"{{"}}err{{"}}"}}", err) + } + return out, nil +} diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go new file mode 100644 index 000000000000..add26f3ff681 --- /dev/null +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_binding_test.go @@ -0,0 +1,384 @@ +package resourcemanager_test + +import ( + "fmt" + "regexp" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func projectIamBindingImportStep(resourceName, pid, role string) resource.TestStep { + return resource.TestStep{ + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("%s %s", pid, role), + ImportState: true, + ImportStateVerify: true, + } +} + +// Test that an IAM binding can be applied to a project +func TestAccProjectIamBinding_basic(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + role := "roles/compute.instanceAdmin" + member := "user:admin@hashicorptest.com" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM binding + { + Config: 
testAccProjectAssociateBindingBasic(pid, org, role, member), + }, + projectIamBindingImportStep("google_project_iam_binding.acceptance", pid, role), + }, + }) +} + +// Test that multiple IAM bindings can be applied to a project, one at a time +func TestAccProjectIamBinding_multiple(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + role := "roles/compute.instanceAdmin" + role2 := "roles/viewer" + member := "user:admin@hashicorptest.com" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM binding + { + Config: testAccProjectAssociateBindingBasic(pid, org, role, member), + }, + // Apply another IAM binding + { + Config: testAccProjectAssociateBindingMultiple(pid, org, role, role2), + }, + projectIamBindingImportStep("google_project_iam_binding.acceptance", pid, role), + projectIamBindingImportStep("google_project_iam_binding.multiple", pid, role2), + }, + }) +} + +// Test that multiple IAM bindings can be applied to a project all at once +func TestAccProjectIamBinding_multipleAtOnce(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + role := "roles/compute.instanceAdmin" + role2 := "roles/viewer" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // 
Apply an IAM binding + { + Config: testAccProjectAssociateBindingMultiple(pid, org, role, role2), + }, + projectIamBindingImportStep("google_project_iam_binding.acceptance", pid, role), + projectIamBindingImportStep("google_project_iam_binding.multiple", pid, role2), + }, + }) +} + +// Test that an IAM binding can be updated once applied to a project +func TestAccProjectIamBinding_update(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + role := "roles/compute.instanceAdmin" + member := "user:admin@hashicorptest.com" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM binding + { + Config: testAccProjectAssociateBindingBasic(pid, org, role, member), + }, + projectIamBindingImportStep("google_project_iam_binding.acceptance", pid, role), + + // Apply an updated IAM binding + { + Config: testAccProjectAssociateBindingUpdated(pid, org, role), + }, + projectIamBindingImportStep("google_project_iam_binding.acceptance", pid, role), + + // Drop the original member + { + Config: testAccProjectAssociateBindingDropMemberFromBasic(pid, org, role), + }, + projectIamBindingImportStep("google_project_iam_binding.acceptance", pid, role), + }, + }) +} + +// Test that an IAM binding can be removed from a project +func TestAccProjectIamBinding_remove(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + role := "roles/compute.instanceAdmin" + role2 := "roles/viewer" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply multiple IAM bindings + { + Config: testAccProjectAssociateBindingMultiple(pid, org, role, role2), + }, + projectIamBindingImportStep("google_project_iam_binding.acceptance", pid, role), + projectIamBindingImportStep("google_project_iam_binding.multiple", pid, role2), + + // Remove the bindings + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + }, + }) +} + +// Test that an IAM binding with no members can be applied to a project +func TestAccProjectIamBinding_noMembers(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + role := "roles/compute.instanceAdmin" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM binding + { + Config: testAccProjectAssociateBindingNoMembers(pid, org, role), + }, + projectIamBindingImportStep("google_project_iam_binding.acceptance", pid, role), + }, + }) +} + +func TestAccProjectIamBinding_withCondition(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + role := "roles/compute.instanceAdmin" + conditionTitle := "expires_after_2019_12_31" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a 
new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM binding + { + Config: testAccProjectAssociateBinding_withCondition(pid, org, role, conditionTitle), + }, + { + ResourceName: "google_project_iam_binding.acceptance", + ImportStateId: fmt.Sprintf("%s %s %s", pid, role, conditionTitle), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// Test that an IAM binding with invalid members returns an error. +func TestAccProjectIamBinding_invalidMembers(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + role := "roles/compute.instanceAdmin" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccProjectAssociateBindingBasic(pid, org, role, "admin@hashicorptest.com"), + ExpectError: regexp.MustCompile("invalid value for members\\.0 \\(IAM members must have one of the values outlined here: https://cloud.google.com/billing/docs/reference/rest/v1/Policy#Binding\\)"), + }, + { + Config: testAccProjectAssociateBindingBasic(pid, org, role, "user:admin@hashicorptest.com"), + }, + }, + }) +} + +func testAccProjectAssociateBindingBasic(pid, org, role, member string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_binding" "acceptance" { + project = google_project.acceptance.project_id + members = ["%s"] + role = "%s" +} +`, pid, pid, org, member, role) +} + +func testAccProjectAssociateBindingMultiple(pid, org, role, role2 string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_binding" "acceptance" { + project 
= google_project.acceptance.project_id + members = ["user:admin@hashicorptest.com"] + role = "%s" +} + +resource "google_project_iam_binding" "multiple" { + project = google_project.acceptance.project_id + members = ["user:gterraformtest1@gmail.com"] + role = "%s" +} +`, pid, pid, org, role, role2) +} + +func testAccProjectAssociateBindingUpdated(pid, org, role string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_binding" "acceptance" { + project = google_project.acceptance.project_id + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] + role = "%s" +} +`, pid, pid, org, role) +} + +func testAccProjectAssociateBindingDropMemberFromBasic(pid, org, role string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_binding" "acceptance" { + project = google_project.acceptance.project_id + members = ["user:gterraformtest1@gmail.com"] + role = "%s" +} +`, pid, pid, org, role) +} + +func testAccProjectAssociateBindingNoMembers(pid, org, role string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_binding" "acceptance" { + project = google_project.acceptance.project_id + members = [] + role = "%s" +} +`, pid, pid, org, role) +} + +func testAccProjectAssociateBinding_withCondition(pid, org, role, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_binding" "acceptance" { + project = google_project.acceptance.project_id + members = ["user:admin@hashicorptest.com"] + role = "%s" + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < 
timestamp(\"2020-01-01T00:00:00Z\")" + } +} +`, pid, pid, org, role, conditionTitle) +} diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go new file mode 100644 index 000000000000..50c3d22d3d43 --- /dev/null +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_member_test.go @@ -0,0 +1,252 @@ +package resourcemanager_test + +import ( + "fmt" + "regexp" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func projectIamMemberImportStep(resourceName, pid, role, member string) resource.TestStep { + return resource.TestStep{ + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("%s %s %s", pid, role, member), + ImportState: true, + ImportStateVerify: true, + } +} + +// Test that an IAM binding can be applied to a project +func TestAccProjectIamMember_basic(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + resourceName := "google_project_iam_member.acceptance" + role := "roles/compute.instanceAdmin" + member := "user:admin@hashicorptest.com" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM binding + { + Config: testAccProjectAssociateMemberBasic(pid, org, role, member), + }, + projectIamMemberImportStep(resourceName, pid, role, member), + }, + }) +} + +// Test that multiple IAM bindings can be applied to a project +func 
TestAccProjectIamMember_multiple(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + envvar.SkipIfEnvNotSet(t, "GOOGLE_ORG") + + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + resourceName := "google_project_iam_member.acceptance" + resourceName2 := "google_project_iam_member.multiple" + role := "roles/compute.instanceAdmin" + member := "user:admin@hashicorptest.com" + member2 := "user:gterraformtest1@gmail.com" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM binding + { + Config: testAccProjectAssociateMemberBasic(pid, org, role, member), + }, + projectIamMemberImportStep(resourceName, pid, role, member), + + // Apply another IAM binding + { + Config: testAccProjectAssociateMemberMultiple(pid, org, role, member, role, member2), + }, + projectIamMemberImportStep(resourceName, pid, role, member), + projectIamMemberImportStep(resourceName2, pid, role, member2), + }, + }) +} + +// Test that an IAM binding can be removed from a project +func TestAccProjectIamMember_remove(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + envvar.SkipIfEnvNotSet(t, "GOOGLE_ORG") + + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + resourceName := "google_project_iam_member.acceptance" + role := "roles/compute.instanceAdmin" + member := "user:admin@hashicorptest.com" + member2 := "user:gterraformtest1@gmail.com" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // 
Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + + // Apply multiple IAM bindings + { + Config: testAccProjectAssociateMemberMultiple(pid, org, role, member, role, member2), + }, + projectIamMemberImportStep(resourceName, pid, role, member), + projectIamMemberImportStep(resourceName, pid, role, member2), + + // Remove the bindings + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + }, + }) +} + +func TestAccProjectIamMember_withCondition(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + resourceName := "google_project_iam_member.acceptance" + role := "roles/compute.instanceAdmin" + member := "user:admin@hashicorptest.com" + conditionTitle := "expires_after_2019_12_31" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM binding + { + Config: testAccProjectAssociateMember_withCondition(pid, org, role, member, conditionTitle), + }, + { + ResourceName: resourceName, + ImportStateId: fmt.Sprintf("%s %s %s %s", pid, role, member, conditionTitle), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccProjectIamMember_invalidMembers(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + role := "roles/compute.instanceAdmin" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccProjectAssociateMemberBasic(pid, org, role, "admin@hashicorptest.com"), + ExpectError: regexp.MustCompile("invalid value for member \\(IAM members must have one of the values outlined here: https://cloud.google.com/billing/docs/reference/rest/v1/Policy#Binding\\)"), + }, + { + Config: testAccProjectAssociateMemberBasic(pid, org, role, "user:admin@hashicorptest.com"), + }, + }, + }) +} + +func testAccProjectAssociateMemberBasic(pid, org, role, member string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_member" "acceptance" { + project = google_project.acceptance.project_id + role = "%s" + member = "%s" +} +`, pid, pid, org, role, member) +} + +func testAccProjectAssociateMemberMultiple(pid, org, role, member, role2, member2 string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_member" "acceptance" { + project = google_project.acceptance.project_id + role = "%s" + member = "%s" +} + +resource "google_project_iam_member" "multiple" { + project = google_project.acceptance.project_id + role = "%s" + member = "%s" +} +`, pid, pid, org, role, member, role2, member2) +} + +func testAccProjectAssociateMember_withCondition(pid, org, role, member, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_member" "acceptance" { + project = google_project.acceptance.project_id + role = "%s" + member = "%s" + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +`, pid, pid, org, role, member, conditionTitle) +} diff --git 
a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go new file mode 100644 index 000000000000..eec0c77bc01d --- /dev/null +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_project_iam_policy_test.go @@ -0,0 +1,499 @@ +package resourcemanager_test + +import ( + "encoding/json" + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/resourcemanager" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "google.golang.org/api/cloudresourcemanager/v1" +) + +// Test that an IAM policy can be applied to a project +func TestAccProjectIamPolicy_basic(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + member := "user:evanbrown@google.com" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM policy from a data source. The application + // merges policies, so we validate the expected state. 
+ { + Config: testAccProjectAssociatePolicyBasic(pid, org, member), + Check: resource.TestCheckResourceAttrSet("data.google_project_iam_policy.acceptance", "policy_data"), + }, + { + ResourceName: "google_project_iam_policy.acceptance", + ImportState: true, + }, + }, + }) +} + +// Test that an IAM policy with empty members does not cause a permadiff. +func TestAccProjectIamPolicy_emptyMembers(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccProjectIamPolicyEmptyMembers(pid, org), + }, + }, + }) +} + +// Test that a non-collapsed IAM policy doesn't perpetually diff +func TestAccProjectIamPolicy_expanded(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccProjectAssociatePolicyExpanded(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectIamPolicyExists("google_project_iam_policy.acceptance", "data.google_iam_policy.expanded", pid), + ), + }, + }, + }) +} + +// Test that an IAM policy with an audit config can be applied to a project +func TestAccProjectIamPolicy_basicAuditConfig(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: 
resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM policy from a data source. The application + // merges policies, so we validate the expected state. + { + Config: testAccProjectAssociatePolicyAuditConfigBasic(pid, org), + }, + { + ResourceName: "google_project_iam_policy.acceptance", + ImportState: true, + }, + }, + }) +} + +// Test that a non-collapsed IAM policy with AuditConfig doesn't perpetually diff +func TestAccProjectIamPolicy_expandedAuditConfig(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccProjectAssociatePolicyAuditConfigExpanded(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleProjectIamPolicyExists("google_project_iam_policy.acceptance", "data.google_iam_policy.expanded", pid), + ), + }, + }, + }) +} + +func TestAccProjectIamPolicy_withCondition(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + // Create a new project + { + Config: testAccProject_create(pid, org), + Check: resource.ComposeTestCheckFunc( + testAccProjectExistingPolicy(t, pid), + ), + }, + // Apply an IAM policy from a data source. The application + // merges policies, so we validate the expected state. + { + Config: testAccProjectAssociatePolicy_withCondition(pid, org), + }, + { + ResourceName: "google_project_iam_policy.acceptance", + ImportState: true, + }, + }, + }) +} + +// Test that an IAM policy with invalid members returns errors. 
+func TestAccProjectIamPolicy_invalidMembers(t *testing.T) { + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + pid := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccProjectAssociatePolicyBasic(pid, org, "admin@hashicorptest.com"), + ExpectError: regexp.MustCompile("invalid value for bindings\\.1\\.members\\.0 \\(IAM members must have one of the values outlined here: https://cloud.google.com/billing/docs/reference/rest/v1/Policy#Binding\\)"), + }, + { + Config: testAccProjectAssociatePolicyBasic(pid, org, "user:admin@hashicorptest.com"), + }, + }, + }) +} + +func getStatePrimaryResource(s *terraform.State, res, expectedID string) (*terraform.InstanceState, error) { + // Get the project resource + resource, ok := s.RootModule().Resources[res] + if !ok { + return nil, fmt.Errorf("Not found: %s", res) + } + if expectedID != "" && !resourcemanager.CompareProjectName("", resource.Primary.Attributes["id"], expectedID, nil) { + return nil, fmt.Errorf("Expected project %q to match ID %q in state", resource.Primary.ID, expectedID) + } + return resource.Primary, nil +} + +func getGoogleProjectIamPolicyFromResource(resource *terraform.InstanceState) (cloudresourcemanager.Policy, error) { + var p cloudresourcemanager.Policy + ps, ok := resource.Attributes["policy_data"] + if !ok { + return p, fmt.Errorf("Resource %q did not have a 'policy_data' attribute. 
Attributes were %#v", resource.ID, resource.Attributes) + } + if err := json.Unmarshal([]byte(ps), &p); err != nil { + return p, fmt.Errorf("Could not unmarshal %s:\n: %v", ps, err) + } + return p, nil +} + +func getGoogleProjectIamPolicyFromState(s *terraform.State, res, expectedID string) (cloudresourcemanager.Policy, error) { + project, err := getStatePrimaryResource(s, res, expectedID) + if err != nil { + return cloudresourcemanager.Policy{}, err + } + return getGoogleProjectIamPolicyFromResource(project) +} + +func testAccCheckGoogleProjectIamPolicyExists(projectRes, policyRes, pid string) resource.TestCheckFunc { + return func(s *terraform.State) error { + projectPolicy, err := getGoogleProjectIamPolicyFromState(s, projectRes, pid) + if err != nil { + return fmt.Errorf("Error retrieving IAM policy for project from state: %s", err) + } + policyPolicy, err := getGoogleProjectIamPolicyFromState(s, policyRes, "") + if err != nil { + return fmt.Errorf("Error retrieving IAM policy for data_policy from state: %s", err) + } + + // The bindings in both policies should be identical + if !tpgiamresource.CompareBindings(projectPolicy.Bindings, policyPolicy.Bindings) { + return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", tpgiamresource.DebugPrintBindings(projectPolicy.Bindings), tpgiamresource.DebugPrintBindings(policyPolicy.Bindings)) + } + + // The audit configs in both policies should be identical + if !tpgiamresource.CompareAuditConfigs(projectPolicy.AuditConfigs, policyPolicy.AuditConfigs) { + return fmt.Errorf("Project and data source policies do not match: project policy is %+v, data resource policy is %+v", tpgiamresource.DebugPrintAuditConfigs(projectPolicy.AuditConfigs), tpgiamresource.DebugPrintAuditConfigs(policyPolicy.AuditConfigs)) + } + return nil + } +} + +// Confirm that a project has an IAM policy with at least 1 binding +func testAccProjectExistingPolicy(t *testing.T, pid string) 
resource.TestCheckFunc { + return func(s *terraform.State) error { + c := acctest.GoogleProviderConfig(t) + var err error + OriginalPolicy, err := resourcemanager.GetProjectIamPolicy(pid, c) + if err != nil { + return fmt.Errorf("Failed to retrieve IAM Policy for project %q: %s", pid, err) + } + if len(OriginalPolicy.Bindings) == 0 { + return fmt.Errorf("Refuse to run test against project with zero IAM Bindings. This is likely an error in the test code that is not properly identifying the IAM policy of a project.") + } + return nil + } +} + +func testAccProjectAssociatePolicyBasic(pid, org, member string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_policy" "acceptance" { + project = google_project.acceptance.id + policy_data = data.google_iam_policy.admin.policy_data +} + +data "google_project_iam_policy" "acceptance" { + project = google_project.acceptance.id +} + +data "google_iam_policy" "admin" { + binding { + role = "roles/storage.objectViewer" + members = [ + "%s", + ] + } + binding { + role = "roles/compute.instanceAdmin" + members = [ + "user:evanbrown@google.com", + "user:evandbrown@gmail.com", + ] + } +} +`, pid, pid, org, member) +} + +func testAccProjectAssociatePolicyAuditConfigBasic(pid, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_policy" "acceptance" { + project = google_project.acceptance.id + policy_data = data.google_iam_policy.admin.policy_data +} + +data "google_iam_policy" "admin" { + binding { + role = "roles/storage.objectViewer" + members = [ + "user:evanbrown@google.com", + ] + } + binding { + role = "roles/compute.instanceAdmin" + members = [ + "user:evanbrown@google.com", + "user:evandbrown@gmail.com", + ] + } + audit_config { + service = "cloudkms.googleapis.com" + audit_log_configs { + log_type = 
"DATA_READ" + exempted_members = ["user:gterraformtest1@gmail.com"] + } + + audit_log_configs { + log_type = "DATA_WRITE" + } + } + audit_config { + service = "cloudsql.googleapis.com" + audit_log_configs { + log_type = "DATA_READ" + exempted_members = ["user:gterraformtest1@gmail.com"] + } + + audit_log_configs { + log_type = "DATA_WRITE" + } + } +} +`, pid, pid, org) +} + +func testAccProject_create(pid, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} +`, pid, pid, org) +} + +func testAccProjectIamPolicyEmptyMembers(pid, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_policy" "acceptance" { + project = google_project.acceptance.id + policy_data = data.google_iam_policy.expanded.policy_data +} + +data "google_iam_policy" "expanded" { + binding { + role = "roles/viewer" + members = [] + } +} +`, pid, pid, org) +} + +func testAccProjectAssociatePolicyExpanded(pid, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_policy" "acceptance" { + project = google_project.acceptance.id + policy_data = data.google_iam_policy.expanded.policy_data +} + +data "google_iam_policy" "expanded" { + binding { + role = "roles/viewer" + members = [ + "user:gterraformtest2@gmail.com", + ] + } + + binding { + role = "roles/viewer" + members = [ + "user:gterraformtest1@gmail.com", + ] + } +} +`, pid, pid, org) +} + +func testAccProjectAssociatePolicyAuditConfigExpanded(pid, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_policy" "acceptance" { + project = google_project.acceptance.id + policy_data = 
data.google_iam_policy.expanded.policy_data +} + +data "google_iam_policy" "expanded" { + binding { + role = "roles/storage.objectViewer" + members = [ + "user:evanbrown@google.com", + ] + } + binding { + role = "roles/compute.instanceAdmin" + members = [ + "user:evanbrown@google.com", + "user:evandbrown@gmail.com", + ] + } + audit_config { + service = "cloudkms.googleapis.com" + audit_log_configs { + log_type = "DATA_READ" + exempted_members = ["user:gterraformtest1@gmail.com"] + } + + audit_log_configs { + log_type = "DATA_WRITE" + } + } + audit_config { + service = "cloudkms.googleapis.com" + audit_log_configs { + log_type = "DATA_READ" + exempted_members = ["user:gterraformtest1@gmail.com"] + } + + audit_log_configs { + log_type = "DATA_WRITE" + } + } +} +`, pid, pid, org) +} + +func testAccProjectAssociatePolicy_withCondition(pid, org string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + project_id = "%s" + name = "%s" + org_id = "%s" +} + +resource "google_project_iam_policy" "acceptance" { + project = google_project.acceptance.id + policy_data = data.google_iam_policy.admin.policy_data +} + +data "google_iam_policy" "admin" { + binding { + role = "roles/storage.objectViewer" + members = [ + "user:evanbrown@google.com", + ] + } + binding { + role = "roles/compute.instanceAdmin" + members = [ + "user:evanbrown@google.com", + "user:evandbrown@gmail.com", + ] + condition { + title = "expires_after_2019_12_31" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } + } +} +`, pid, pid, org) +} diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_service_account_iam_test.go.tmpl b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_service_account_iam_test.go.tmpl new file mode 100644 index 000000000000..16fb4e212156 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/resourcemanager/go/resource_google_service_account_iam_test.go.tmpl @@ -0,0 +1,582 @@ +package resourcemanager_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccServiceAccountIamBinding(t *testing.T) { + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamBinding_basic(account), + Check: testAccCheckGoogleServiceAccountIam(t, account, 1), + }, + { + ResourceName: "google_service_account_iam_binding.foo", + ImportState: true, + ImportStateVerify: true, + ImportStateId: fmt.Sprintf("%s %s", serviceAccountCanonicalId(account), "roles/iam.serviceAccountUser"), + }, + }, + }) +} + +func TestAccServiceAccountIamBinding_withCondition(t *testing.T) { + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + conditionExpr := `request.time < timestamp(\"2020-01-01T00:00:00Z\")` + conditionTitle := "expires_after_2019_12_31" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamBinding_withCondition(account, "user:admin@hashicorptest.com", conditionTitle, conditionExpr), + Check: testAccCheckGoogleServiceAccountIam(t, account, 1), + }, + { + ResourceName: "google_service_account_iam_binding.foo", + ImportState: true, + ImportStateVerify: true, + ImportStateId: 
fmt.Sprintf("%s %s %s", serviceAccountCanonicalId(account), "roles/iam.serviceAccountUser", conditionTitle), + }, + }, + }) +} + +func TestAccServiceAccountIamBinding_withAndWithoutCondition(t *testing.T) { + // Resource creation race condition + acctest.SkipIfVcr(t) + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + conditionExpr := `request.time < timestamp(\"2020-01-01T00:00:00Z\")` + conditionTitle := "expires_after_2019_12_31" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamBinding_withAndWithoutCondition(account, "user:admin@hashicorptest.com", conditionTitle, conditionExpr), + Check: testAccCheckGoogleServiceAccountIam(t, account, 2), + }, + { + ResourceName: "google_service_account_iam_binding.foo", + ImportState: true, + ImportStateVerify: true, + ImportStateId: fmt.Sprintf("%s %s", serviceAccountCanonicalId(account), "roles/iam.serviceAccountUser"), + }, + { + ResourceName: "google_service_account_iam_binding.foo2", + ImportState: true, + ImportStateVerify: true, + ImportStateId: fmt.Sprintf("%s %s %s", serviceAccountCanonicalId(account), "roles/iam.serviceAccountUser", conditionTitle), + }, + }, + }) +} + +func TestAccServiceAccountIamMember(t *testing.T) { + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + email := envvar.ServiceAccountCanonicalEmail(account) + identity := fmt.Sprintf("serviceAccount:%s", email) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamMember_basic(account, email), + Check: testAccCheckGoogleServiceAccountIam(t, account, 1), + }, + { + ResourceName: "google_service_account_iam_member.foo", + ImportStateId: fmt.Sprintf("%s %s 
%s", serviceAccountCanonicalId(account), "roles/iam.serviceAccountUser", identity), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccServiceAccountIamMember_basic(account, strings.ToUpper(email)), + PlanOnly: true, + }, + { + Config: testAccServiceAccountIamMember_basic(account, strings.Title(email)), + PlanOnly: true, + }, + }, + }) +} + +func TestAccServiceAccountIamMember_withCondition(t *testing.T) { + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + identity := fmt.Sprintf("serviceAccount:%s", envvar.ServiceAccountCanonicalEmail(account)) + conditionTitle := "expires_after_2019_12_31" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamMember_withCondition(account, conditionTitle), + Check: testAccCheckGoogleServiceAccountIam(t, account, 1), + }, + { + ResourceName: "google_service_account_iam_member.foo", + ImportStateId: fmt.Sprintf("%s %s %s %s", serviceAccountCanonicalId(account), "roles/iam.serviceAccountUser", identity, conditionTitle), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccServiceAccountIamMember_withAndWithoutCondition(t *testing.T) { + // Resource creation race condition + acctest.SkipIfVcr(t) + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + identity := fmt.Sprintf("serviceAccount:%s", envvar.ServiceAccountCanonicalEmail(account)) + conditionTitle := "expires_after_2019_12_31" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamMember_withAndWithoutCondition(account, conditionTitle), + Check: testAccCheckGoogleServiceAccountIam(t, account, 2), + }, + { + ResourceName: 
"google_service_account_iam_member.foo", + ImportStateId: fmt.Sprintf("%s %s %s", serviceAccountCanonicalId(account), "roles/iam.serviceAccountUser", identity), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_service_account_iam_member.foo2", + ImportStateId: fmt.Sprintf("%s %s %s %s", serviceAccountCanonicalId(account), "roles/iam.serviceAccountUser", identity, conditionTitle), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccServiceAccountIamPolicy(t *testing.T) { + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamPolicy_basic(account), + Check: resource.TestCheckResourceAttrSet("data.google_service_account_iam_policy.foo", "policy_data"), + }, + { + ResourceName: "google_service_account_iam_policy.foo", + ImportStateId: serviceAccountCanonicalId(account), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccServiceAccountIamPolicy_withCondition(t *testing.T) { + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamPolicy_withCondition(account), + }, + { + ResourceName: "google_service_account_iam_policy.foo", + ImportStateId: serviceAccountCanonicalId(account), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +// google_iam_workload_identity_pool is beta only +func TestAccServiceAccountIamMember_federatedIdentity(t *testing.T) { + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + pool := 
fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamMember_federatedIdentity(account, pool), + }, + { + ResourceName: "google_service_account_iam_member.impersonate", + ImportStateIdFunc: testAccServiceAccountIamMember_generateFederatedIdentityStateId, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccServiceAccountIamBinding_federatedIdentity(t *testing.T) { + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + pool := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamBinding_federatedIdentity(account, pool), + Check: testAccCheckGoogleServiceAccountIam(t, account, 1), + }, + { + ResourceName: "google_service_account_iam_binding.foo", + ImportState: true, + ImportStateVerify: true, + ImportStateId: fmt.Sprintf("%s %s", serviceAccountCanonicalId(account), "roles/iam.serviceAccountUser"), + }, + }, + }) +} + +func TestAccServiceAccountIamPolicy_federatedIdentity(t *testing.T) { + t.Parallel() + + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + pool := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceAccountIamPolicy_federatedIdentity(account, pool), + }, + { + ResourceName: "google_service_account_iam_policy.foo", + ImportStateId: serviceAccountCanonicalId(account), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
testAccServiceAccountIamMember_generateFederatedIdentityStateId(state *terraform.State) (string, error) { + resourceName := "google_service_account_iam_member.impersonate" + var rawState map[string]string + for _, m := range state.Modules { + if len(m.Resources) > 0 { + if v, ok := m.Resources[resourceName]; ok { + rawState = v.Primary.Attributes + } + } + } + return fmt.Sprintf("%s %s %s", rawState["service_account_id"], rawState["role"], rawState["member"]), nil +} +{{- end }} + +// Ensure that our tests only create the expected number of bindings. +// The content of the binding is tested in the import tests. +func testAccCheckGoogleServiceAccountIam(t *testing.T, account string, numBindings int) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + p, err := config.NewIamClient(config.UserAgent).Projects.ServiceAccounts.GetIamPolicy(serviceAccountCanonicalId(account)).OptionsRequestedPolicyVersion(tpgiamresource.IamPolicyVersion).Do() + if err != nil { + return err + } + + if len(p.Bindings) != numBindings { + return fmt.Errorf("Expected exactly %d binding(s) for account %q, was %d", numBindings, account, len(p.Bindings)) + } + + return nil + } +} + +func serviceAccountCanonicalId(account string) string { + return fmt.Sprintf("projects/%s/serviceAccounts/%s@%s.iam.gserviceaccount.com", envvar.GetTestProjectFromEnv(), account, envvar.GetTestProjectFromEnv()) +} + +func testAccServiceAccountIamBinding_basic(account string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +resource "google_service_account_iam_binding" "foo" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.serviceAccountUser" + members = ["user:admin@hashicorptest.com"] +} +`, account) +} + +func testAccServiceAccountIamBinding_withCondition(account, member, conditionTitle, conditionExpr 
string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +resource "google_service_account_iam_binding" "foo" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.serviceAccountUser" + members = ["%s"] + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "%s" + } +} +`, account, member, conditionTitle, conditionExpr) +} + +func testAccServiceAccountIamBinding_withAndWithoutCondition(account, member, conditionTitle, conditionExpr string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +resource "google_service_account_iam_binding" "foo" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.serviceAccountUser" + members = ["%s"] +} + +resource "google_service_account_iam_binding" "foo2" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.serviceAccountUser" + members = ["%s"] + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "%s" + } +} +`, account, member, member, conditionTitle, conditionExpr) +} + +func testAccServiceAccountIamMember_basic(account, email string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +resource "google_service_account_iam_member" "foo" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.serviceAccountUser" + member = "serviceAccount:%s" + depends_on = [google_service_account.test_account] +} +`, account, email) +} + +func testAccServiceAccountIamMember_withCondition(account, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { 
+ account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +resource "google_service_account_iam_member" "foo" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.serviceAccountUser" + member = "serviceAccount:${google_service_account.test_account.email}" + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +`, account, conditionTitle) +} + +func testAccServiceAccountIamMember_withAndWithoutCondition(account, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +resource "google_service_account_iam_member" "foo" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.serviceAccountUser" + member = "serviceAccount:${google_service_account.test_account.email}" +} + +resource "google_service_account_iam_member" "foo2" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.serviceAccountUser" + member = "serviceAccount:${google_service_account.test_account.email}" + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +`, account, conditionTitle) +} + +func testAccServiceAccountIamPolicy_basic(account string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +data "google_iam_policy" "foo" { + binding { + role = "roles/iam.serviceAccountUser" + + members = ["serviceAccount:${google_service_account.test_account.email}"] + } +} + +resource "google_service_account_iam_policy" "foo" { + service_account_id = google_service_account.test_account.name + policy_data = data.google_iam_policy.foo.policy_data +} + +data 
"google_service_account_iam_policy" "foo" { + service_account_id = google_service_account.test_account.name +} +`, account) +} + +func testAccServiceAccountIamPolicy_withCondition(account string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +data "google_iam_policy" "foo" { + binding { + role = "roles/iam.serviceAccountUser" + + members = ["serviceAccount:${google_service_account.test_account.email}"] + condition { + title = "expires_after_2019_12_31" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } + } +} + +resource "google_service_account_iam_policy" "foo" { + service_account_id = google_service_account.test_account.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, account) +} + +{{ if ne $.TargetVersionName `ga` -}} +// google_iam_workload_identity_pool is beta only +func testAccServiceAccountIamMember_federatedIdentity(account, poolId string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +resource "google_service_account_iam_member" "impersonate" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.workloadIdentityUser" + member = "principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.my_pool.name}/attribute.aws_role/arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole" +} + +resource "google_iam_workload_identity_pool" "my_pool" { + workload_identity_pool_id = "%s" +} +`, account, poolId) +} + +func testAccServiceAccountIamBinding_federatedIdentity(account, poolId string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +resource 
"google_service_account_iam_binding" "foo" { + service_account_id = google_service_account.test_account.name + role = "roles/iam.serviceAccountUser" + members = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.my_pool.name}/attribute.aws_role/arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole"] +} + +resource "google_iam_workload_identity_pool" "my_pool" { + workload_identity_pool_id = "%s" +} +`, account, poolId) +} + +func testAccServiceAccountIamPolicy_federatedIdentity(account, poolId string) string { + return fmt.Sprintf(` +resource "google_service_account" "test_account" { + account_id = "%s" + display_name = "Service Account Iam Testing Account" +} + +data "google_iam_policy" "foo" { + binding { + role = "roles/iam.serviceAccountUser" + + members = ["principalSet://iam.googleapis.com/${google_iam_workload_identity_pool.my_pool.name}/attribute.aws_role/arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole"] + } +} + +resource "google_service_account_iam_policy" "foo" { + service_account_id = google_service_account.test_account.name + policy_data = data.google_iam_policy.foo.policy_data +} + +resource "google_iam_workload_identity_pool" "my_pool" { + workload_identity_pool_id = "%s" +} +`, account, poolId) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity.go.tmpl b/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity.go.tmpl new file mode 100644 index 000000000000..e2dfd8624d81 --- /dev/null +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity.go.tmpl @@ -0,0 +1,129 @@ +package resourcemanager + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform-provider-google/google/services/serviceusage" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg 
"github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceProjectServiceIdentity() *schema.Resource { + return &schema.Resource{ + Create: resourceProjectServiceIdentityCreate, + Read: resourceProjectServiceIdentityRead, + Delete: resourceProjectServiceIdentityDelete, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Read: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "service": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "email": { + Type: schema.TypeString, + Computed: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceProjectServiceIdentityCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}ServiceUsageBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/services/{{"{{"}}service{{"}}"}}:generateServiceIdentity") + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + billingProject := project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + 
if err != nil { + return fmt.Errorf("Error creating Service Identity: %s", err) + } + + var opRes map[string]interface{} + err = serviceusage.ServiceUsageOperationWaitTimeWithResponse( + config, res, &opRes, billingProject, "Creating Service Identity", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished creating Service Identity %q: %#v", d.Id(), res) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/services/{{"{{"}}service{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // This API may not return the service identity's details, even if the relevant + // Google API is configured for service identities. + if emailVal, ok := opRes["email"]; ok { + email, ok := emailVal.(string) + if !ok { + return fmt.Errorf("unexpected type for email: got %T, want string", email) + } + if err := d.Set("email", email); err != nil { + return fmt.Errorf("Error setting email: %s", err) + } + } + return nil +} + +// There is no read endpoint for this API. +func resourceProjectServiceIdentityRead(d *schema.ResourceData, meta interface{}) error { + return nil +} + +// There is no delete endpoint for this API. 
+func resourceProjectServiceIdentityDelete(d *schema.ResourceData, meta interface{}) error { + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity_test.go.tmpl b/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity_test.go.tmpl new file mode 100644 index 000000000000..c759bc47aee2 --- /dev/null +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity_test.go.tmpl @@ -0,0 +1,57 @@ +package resourcemanager_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "strings" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccProjectServiceIdentity_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleProjectServiceIdentity_basic(), + Check: resource.ComposeTestCheckFunc( + // Email field for healthcare service account should be non-empty and contain at least an "@". 
+ resource.TestCheckResourceAttrWith("google_project_service_identity.hc_sa", "email", func(value string) error { + if strings.Contains(value, "@") { + return nil + } + return fmt.Errorf("hc_sa service identity email value was %s, expected a valid email", value) + }), + // Email field for logging service identity will be empty for as long as + // `gcloud beta services identity create --service=logging.googleapis.com` doesn't return an email address + resource.TestCheckNoResourceAttr("google_project_service_identity.log_sa", "email"), + ), + }, + }, + }) +} + +func testGoogleProjectServiceIdentity_basic() string { + return ` +data "google_project" "project" {} + +# Service which has an email returned from service identity API +resource "google_project_service_identity" "hc_sa" { + project = data.google_project.project.project_id + service = "healthcare.googleapis.com" +} + +# Service which does NOT have email returned from service identity API +# Email attribute will be null - correct as of 2022-12-13 +resource "google_project_service_identity" "log_sa" { + project = data.google_project.project.project_id + service = "logging.googleapis.com" +} +` +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl b/mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl new file mode 100644 index 000000000000..1698330cc960 --- /dev/null +++ b/mmv1/third_party/terraform/services/sql/go/resource_sql_database_instance.go.tmpl @@ -0,0 +1,2549 @@ +package sql + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/go-cty/cty" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + 
"github.com/hashicorp/terraform-provider-google/google/services/compute" + "github.com/hashicorp/terraform-provider-google/google/services/servicenetworking" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "google.golang.org/api/googleapi" + sqladmin "google.golang.org/api/sqladmin/v1beta4" +) + +// Match fully-qualified or relative URLs +const privateNetworkLinkRegex = "^(?:http(?:s)?://.+/)?projects/(" + verify.ProjectRegex + ")/global/networks/((?:[a-z](?:[-a-z0-9]*[a-z0-9])?))$" + +var sqlDatabaseAuthorizedNetWorkSchemaElem *schema.Resource = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "expiration_time": { + Type: schema.TypeString, + Optional: true, + }, + "name": { + Type: schema.TypeString, + Optional: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, +} + +var sqlDatabaseFlagSchemaElem *schema.Resource = &schema.Resource{ + Schema: map[string]*schema.Schema{ + "value": { + Type: schema.TypeString, + Required: true, + Description: `Value of the flag.`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the flag.`, + }, + }, +} + +var ( + backupConfigurationKeys = []string{ + "settings.0.backup_configuration.0.binary_log_enabled", + "settings.0.backup_configuration.0.enabled", + "settings.0.backup_configuration.0.start_time", + "settings.0.backup_configuration.0.location", + "settings.0.backup_configuration.0.point_in_time_recovery_enabled", + "settings.0.backup_configuration.0.backup_retention_settings", + "settings.0.backup_configuration.0.transaction_log_retention_days", + } + + ipConfigurationKeys = []string{ + "settings.0.ip_configuration.0.authorized_networks", + "settings.0.ip_configuration.0.ipv4_enabled", + "settings.0.ip_configuration.0.require_ssl", + "settings.0.ip_configuration.0.private_network", + 
"settings.0.ip_configuration.0.allocated_ip_range", + "settings.0.ip_configuration.0.enable_private_path_for_google_cloud_services", + "settings.0.ip_configuration.0.psc_config", + "settings.0.ip_configuration.0.ssl_mode", + } + + maintenanceWindowKeys = []string{ + "settings.0.maintenance_window.0.day", + "settings.0.maintenance_window.0.hour", + "settings.0.maintenance_window.0.update_track", + } + + replicaConfigurationKeys = []string{ + "replica_configuration.0.ca_certificate", + "replica_configuration.0.client_certificate", + "replica_configuration.0.client_key", + "replica_configuration.0.connect_retry_interval", + "replica_configuration.0.dump_file_path", + "replica_configuration.0.failover_target", + "replica_configuration.0.master_heartbeat_period", + "replica_configuration.0.password", + "replica_configuration.0.ssl_cipher", + "replica_configuration.0.username", + "replica_configuration.0.verify_server_certificate", + } + + insightsConfigKeys = []string{ + "settings.0.insights_config.0.query_insights_enabled", + "settings.0.insights_config.0.query_string_length", + "settings.0.insights_config.0.record_application_tags", + "settings.0.insights_config.0.record_client_address", + "settings.0.insights_config.0.query_plans_per_minute", + } + + sqlServerAuditConfigurationKeys = []string{ + "settings.0.sql_server_audit_config.0.bucket", + "settings.0.sql_server_audit_config.0.retention_interval", + "settings.0.sql_server_audit_config.0.upload_interval", + } +) + +func ResourceSqlDatabaseInstance() *schema.Resource { + return &schema.Resource{ + Create: resourceSqlDatabaseInstanceCreate, + Read: resourceSqlDatabaseInstanceRead, + Update: resourceSqlDatabaseInstanceUpdate, + Delete: resourceSqlDatabaseInstanceDelete, + Importer: &schema.ResourceImporter{ + State: resourceSqlDatabaseInstanceImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(90 * time.Minute), + Update: schema.DefaultTimeout(90 * time.Minute), + Delete: 
schema.DefaultTimeout(90 * time.Minute), + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + customdiff.ForceNewIfChange("settings.0.disk_size", compute.IsDiskShrinkage), + customdiff.ForceNewIfChange("master_instance_name", isMasterInstanceNameSet), + customdiff.IfValueChange("instance_type", isReplicaPromoteRequested, checkPromoteConfigurationsAndUpdateDiff), + privateNetworkCustomizeDiff, + pitrSupportDbCustomizeDiff, + ), + + Schema: map[string]*schema.Schema{ + "region": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The region the instance will sit in. Note, Cloud SQL is not available in all regions. A valid region must be provided to use this resource. If a region is not provided in the resource definition, the provider region will be used instead, but this will be an apply-time error for instances if the provider region is not supported with Cloud SQL. If you choose not to provide the region argument for this resource, make sure you understand this.`, + }, + "deletion_protection": { + Type: schema.TypeBool, + Default: true, + Optional: true, + Description: `Used to block Terraform from deleting a SQL Instance. Defaults to true.`, + }, + "settings": { + Type: schema.TypeList, + Optional: true, + Computed: true, + AtLeastOneOf: []string{"settings", "clone"}, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "version": { + Type: schema.TypeInt, + Computed: true, + Description: `Used to make sure changes to the settings block are atomic.`, + }, + "tier": { + Type: schema.TypeString, + Required: true, + Description: `The machine type to use. See tiers for more details and supported versions. Postgres supports only shared-core machine types, and custom machine types such as db-custom-2-13312. 
See the Custom Machine Type Documentation to learn about specifying custom machine types.`, + }, + "edition": { + Type: schema.TypeString, + Optional: true, + Default: "ENTERPRISE", + ValidateFunc: validation.StringInSlice([]string{"ENTERPRISE", "ENTERPRISE_PLUS"}, false), + Description: `The edition of the instance, can be ENTERPRISE or ENTERPRISE_PLUS.`, + }, + "advanced_machine_features": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "threads_per_core": { + Type: schema.TypeInt, + Optional: true, + Description: `The number of threads per physical core. Can be 1 or 2.`, + }, + }, + }, + }, + "data_cache_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: `Data cache configurations.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_cache_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `Whether data cache is enabled for the instance.`, + }, + }, + }, + }, + "activation_policy": { + Type: schema.TypeString, + Optional: true, + Default: "ALWAYS", + Description: `This specifies when the instance should be active. Can be either ALWAYS, NEVER or ON_DEMAND.`, + }, + "active_directory_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "domain": { + Type: schema.TypeString, + Required: true, + Description: `Domain name of the Active Directory for SQL Server (e.g., mydomain.com).`, + }, + }, + }, + }, + "deny_maintenance_period": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "end_date": { + Type: schema.TypeString, + Required: true, + Description: `End date before which maintenance will not take place. 
The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01`, + }, + "start_date": { + Type: schema.TypeString, + Required: true, + Description: `Start date after which maintenance will not take place. The date is in format yyyy-mm-dd i.e., 2020-11-01, or mm-dd, i.e., 11-01`, + }, + "time": { + Type: schema.TypeString, + Required: true, + Description: `Time in UTC when the "deny maintenance period" starts on start_date and ends on end_date. The time is in format: HH:mm:SS, i.e., 00:00:00`, + }, + + }, + }, + }, + "sql_server_audit_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: sqlServerAuditConfigurationKeys, + Description: `The name of the destination bucket (e.g., gs://mybucket).`, + }, + "retention_interval": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: sqlServerAuditConfigurationKeys, + Description: `How long to keep generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s"..`, + }, + "upload_interval": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: sqlServerAuditConfigurationKeys, + Description: `How often to upload generated audit files. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + }, + }, + }, + "time_zone": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: `The time_zone to be used by the database engine (supported only for SQL Server), in SQL Server timezone format.`, + }, + "availability_type": { + Type: schema.TypeString, + Optional: true, + Default: "ZONAL", + ValidateFunc: validation.StringInSlice([]string{"REGIONAL", "ZONAL"}, false), + Description: `The availability type of the Cloud SQL instance, high availability +(REGIONAL) or single zone (ZONAL). 
For all instances, ensure that +settings.backup_configuration.enabled is set to true. +For MySQL instances, ensure that settings.backup_configuration.binary_log_enabled is set to true. +For Postgres instances, ensure that settings.backup_configuration.point_in_time_recovery_enabled +is set to true. Defaults to ZONAL.`, + }, + "backup_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "binary_log_enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `True if binary logging is enabled. If settings.backup_configuration.enabled is false, this must be as well. Can only be used with MySQL.`, + }, + "enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `True if backup configuration is enabled.`, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + // start_time is randomly assigned if not set + Computed: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `HH:MM format time indicating when backup configuration starts.`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `Location of the backup configuration.`, + }, + "point_in_time_recovery_enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `True if Point-in-time recovery is enabled.`, + }, + "transaction_log_retention_days": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Description: `The number of days of transaction logs we retain for point in time restore, from 1-7. 
(For PostgreSQL Enterprise Plus instances, from 1 to 35.)`, + }, + "backup_retention_settings": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: backupConfigurationKeys, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retained_backups": { + Type: schema.TypeInt, + Required: true, + Description: `Number of backups to retain.`, + }, + "retention_unit": { + Type: schema.TypeString, + Optional: true, + Default: "COUNT", + Description: `The unit that 'retainedBackups' represents. Defaults to COUNT`, + }, + }, + }, + }, + }, + }, + }, + "collation": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The name of server instance collation.`, + }, + "database_flags": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashResource(sqlDatabaseFlagSchemaElem), + Elem: sqlDatabaseFlagSchemaElem, + }, + "disk_autoresize": { + Type: schema.TypeBool, + Optional: true, + Default: true, + Description: `Enables auto-resizing of the storage size. Defaults to true.`, + }, + "disk_autoresize_limit": { + Type: schema.TypeInt, + Optional: true, + Default: 0, + Description: `The maximum size, in GB, to which storage capacity can be automatically increased. The default value is 0, which specifies that there is no limit.`, + }, + "enable_google_ml_integration": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Vertex AI Integration.`, + }, + "disk_size": { + Type: schema.TypeInt, + Optional: true, + // Default is likely 10gb, but it is undocumented and may change. + Computed: true, + Description: `The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased. The minimum value is 10GB.`, + }, + "disk_type": { + Type: schema.TypeString, + Optional: true, + Default: "PD_SSD", + ForceNew: true, + DiffSuppressFunc: caseDiffDashSuppress, + Description: `The type of data disk: PD_SSD or PD_HDD. 
Defaults to PD_SSD.`, + }, + "ip_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "authorized_networks": { + Type: schema.TypeSet, + Optional: true, + Set: schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), + Elem: sqlDatabaseAuthorizedNetWorkSchemaElem, + AtLeastOneOf: ipConfigurationKeys, + }, + "ipv4_enabled": { + Type: schema.TypeBool, + Optional: true, + Default: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `Whether this Cloud SQL instance should be assigned a public IPV4 address. At least ipv4_enabled must be enabled or a private_network must be configured.`, + }, + "require_ssl": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `Whether SSL connections over IP are enforced or not. To change this field, also set the corresponding value in ssl_mode if it has been set too.`, + Deprecated: "`require_ssl` will be fully deprecated in a future major release. For now, please use `ssl_mode` with a compatible `require_ssl` value instead.", + }, + "private_network": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: verify.OrEmpty(verify.ValidateRegexp(privateNetworkLinkRegex)), + DiffSuppressFunc: tpgresource.CompareSelfLinkRelativePaths, + AtLeastOneOf: ipConfigurationKeys, + Description: `The VPC network from which the Cloud SQL instance is accessible for private IP. For example, projects/myProject/global/networks/default. Specifying a network enables private IP. At least ipv4_enabled must be enabled or a private_network must be configured. This setting can be updated, but it cannot be removed after it is set.`, + }, + "allocated_ip_range": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". 
If set, the instance ip will be created in the allocated range. The range name must comply with RFC 1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?.`, + }, + "enable_private_path_for_google_cloud_services": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: ipConfigurationKeys, + Description: `Whether Google Cloud services such as BigQuery are allowed to access data in this Cloud SQL instance over a private IP connection. SQLSERVER database type is not supported.`, + }, + "psc_config": { + Type: schema.TypeSet, + Optional: true, + Description: `PSC settings for a Cloud SQL instance.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "psc_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether PSC connectivity is enabled for this instance.`, + }, + "allowed_consumer_projects": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Set: schema.HashString, + Description: `List of consumer projects that are allow-listed for PSC connections to this instance. This instance can be connected to with PSC from any network in these projects. Each consumer project in this list may be represented by a project number (numeric) or by a project id (alphanumeric).`, + }, + }, + }, + }, + "ssl_mode": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"ALLOW_UNENCRYPTED_AND_ENCRYPTED", "ENCRYPTED_ONLY", "TRUSTED_CLIENT_CERTIFICATE_REQUIRED"}, false), + Description: `Specify how SSL connection should be enforced in DB connections. This field provides more SSL enforcment options compared to require_ssl. 
To change this field, also set the correspoding value in require_ssl until next major release.`, + AtLeastOneOf: ipConfigurationKeys, + }, + }, + }, + }, + "location_preference": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "follow_gae_application": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, + Description: `A Google App Engine application whose zone to remain in. Must be in the same region as this instance.`, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"settings.0.location_preference.0.follow_gae_application", "settings.0.location_preference.0.zone"}, + Description: `The preferred compute engine zone.`, + }, + "secondary_zone": { + Type: schema.TypeString, + Optional: true, + Description: `The preferred Compute Engine zone for the secondary/failover`, + }, + }, + }, + }, + "maintenance_window": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "day": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(1, 7), + AtLeastOneOf: maintenanceWindowKeys, + Description: `Day of week (1-7), starting on Monday`, + }, + "hour": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 23), + AtLeastOneOf: maintenanceWindowKeys, + Description: `Hour of day (0-23), ignored if day not set`, + }, + "update_track": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: maintenanceWindowKeys, + Description: `Receive updates after one week (canary) or after two weeks (stable) or after five weeks (week5) of notification.`, + }, + }, + }, + Description: `Declares a one-hour maintenance window when an Instance can automatically restart to apply updates. 
The maintenance window is specified in UTC time.`, + }, + "pricing_plan": { + Type: schema.TypeString, + Optional: true, + Default: "PER_USE", + Description: `Pricing plan for this instance, can only be PER_USE.`, + }, + "user_labels": { + Type: schema.TypeMap, + Optional: true, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A set of key/value user label pairs to assign to the instance.`, + }, + "insights_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "query_insights_enabled": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: insightsConfigKeys, + Description: `True if Query Insights feature is enabled.`, + }, + "query_string_length": { + Type: schema.TypeInt, + Optional: true, + Default: 1024, + ValidateFunc: validation.IntBetween(256, 4500), + AtLeastOneOf: insightsConfigKeys, + Description: `Maximum query length stored in bytes. Between 256 and 4500. Default to 1024.`, + }, + "record_application_tags": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: insightsConfigKeys, + Description: `True if Query Insights will record application tags from query when enabled.`, + }, + "record_client_address": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: insightsConfigKeys, + Description: `True if Query Insights will record client address when enabled.`, + }, + "query_plans_per_minute": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(0, 20), + AtLeastOneOf: insightsConfigKeys, + Description: `Number of query execution plans captured by Insights per minute for all queries combined. Between 0 and 20. 
Default to 5.`, + }, + }, + }, + Description: `Configuration of Query Insights.`, + }, + "password_validation_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_length": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Minimum number of characters allowed.`, + }, + "complexity": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"COMPLEXITY_DEFAULT", "COMPLEXITY_UNSPECIFIED"}, false), + Description: `Password complexity.`, + }, + "reuse_interval": { + Type: schema.TypeInt, + Optional: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + Description: `Number of previous passwords that cannot be reused.`, + }, + "disallow_username_substring": { + Type: schema.TypeBool, + Optional: true, + Description: `Disallow username as a part of the password.`, + }, + "password_change_interval": { + Type: schema.TypeString, + Optional: true, + Description: `Minimum interval after which the password can be changed. This flag is only supported for PostgresSQL.`, + }, + "enable_password_policy": { + Type: schema.TypeBool, + Required: true, + Description: `Whether the password policy is enabled or not.`, + }, + }, + }, + }, + "connector_enforcement": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice([]string{"NOT_REQUIRED", "REQUIRED"}, false), + Description: `Specifies if connections must use Cloud SQL connectors.`, + }, + "deletion_protection_enabled": { + Type: schema.TypeBool, + Optional: true, + Description: `Configuration to protect against accidental instance deletion.`, + }, + }, + }, + Description: `The settings to use for the database. 
The configuration is detailed below.`, + }, + + "connection_name": { + Type: schema.TypeString, + Computed: true, + Description: `The connection name of the instance to be used in connection strings. For example, when connecting with Cloud SQL Proxy.`, + }, + "maintenance_version": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Maintenance version.`, + DiffSuppressFunc: maintenanceVersionDiffSuppress, + }, + "available_maintenance_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `Available Maintenance versions.`, + }, + "database_version": { + Type: schema.TypeString, + Required: true, + Description: `The MySQL, PostgreSQL or SQL Server (beta) version to use. Supported values include MYSQL_5_6, MYSQL_5_7, MYSQL_8_0, POSTGRES_9_6, POSTGRES_10, POSTGRES_11, POSTGRES_12, POSTGRES_13, POSTGRES_14, POSTGRES_15, SQLSERVER_2017_STANDARD, SQLSERVER_2017_ENTERPRISE, SQLSERVER_2017_EXPRESS, SQLSERVER_2017_WEB. Database Version Policies includes an up-to-date reference of supported versions.`, + }, + + "encryption_key_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "root_password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: `Initial root password. Required for MS SQL Server.`, + }, + "ip_address": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ip_address": { + Type: schema.TypeString, + Computed: true, + }, + "type": { + Type: schema.TypeString, + Computed: true, + }, + "time_to_retire": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + + "first_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `The first IPv4 address of any type assigned. 
This is to support accessing the first address in the list in a terraform output when the resource is configured with a count.`, + }, + + "public_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config.`, + }, + + "private_ip_address": { + Type: schema.TypeString, + Computed: true, + Description: `IPv4 address assigned. This is a workaround for an issue fixed in Terraform 0.12 but also provides a convenient way to access an IP of a specific type without performing filtering in a Terraform config.`, + }, + + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The name of the instance. If the name is left blank, Terraform will randomly generate one when the instance is first created. This is done because after a name is used, it cannot be reused for up to one week.`, + }, + + "master_instance_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The name of the instance that will act as the master in the replication setup. Note, this requires the master to have binary_log_enabled set, as well as existing backups.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "instance_type": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `The type of the instance. 
The valid values are:- 'SQL_INSTANCE_TYPE_UNSPECIFIED', 'CLOUD_SQL_INSTANCE', 'ON_PREMISES_INSTANCE' and 'READ_REPLICA_INSTANCE'.`, + }, + + "replica_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + // Returned from API on all replicas + Computed: true, + Sensitive: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "ca_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `PEM representation of the trusted CA's x509 certificate.`, + }, + "client_certificate": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `PEM representation of the replica's x509 certificate.`, + }, + "client_key": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `PEM representation of the replica's private key. The corresponding public key in encoded in the client_certificate.`, + }, + "connect_retry_interval": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `The number of seconds between connect retries. MySQL's default is 60 seconds.`, + }, + "dump_file_path": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Path to a SQL file in Google Cloud Storage from which replica instances are created. Format is gs://bucket/filename.`, + }, + "failover_target": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Specifies if the replica is the failover target. If the field is set to true the replica will be designated as a failover replica. If the master instance fails, the replica instance will be promoted as the new master instance. 
Not supported for Postgres`, + }, + "master_heartbeat_period": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Time in ms between replication heartbeats.`, + }, + "password": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Sensitive: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Password for the replication connection.`, + }, + "ssl_cipher": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Permissible ciphers for use in SSL encryption.`, + }, + "username": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `Username for replication connection.`, + }, + "verify_server_certificate": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + AtLeastOneOf: replicaConfigurationKeys, + Description: `True if the master's common name value is checked during the SSL handshake.`, + }, + }, + }, + Description: `The configuration for replication.`, + }, + "server_ca_cert": { + Type: schema.TypeList, + Computed: true, + Sensitive: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cert": { + Type: schema.TypeString, + Computed: true, + Description: `The CA Certificate used to connect to the SQL Instance via SSL.`, + }, + "common_name": { + Type: schema.TypeString, + Computed: true, + Description: `The CN valid for the CA Cert.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation time of the CA Cert.`, + }, + "expiration_time": { + Type: schema.TypeString, + Computed: true, + Description: `Expiration time of the CA Cert.`, + }, + "sha1_fingerprint": { + Type: schema.TypeString, + Computed: true, + Description: `SHA Fingerprint of the CA Cert.`, + }, + }, + }, + }, + "service_account_email_address": { + Type: schema.TypeString, + Computed: true, + Description: 
`The service account email address assigned to the instance.`, + }, + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + "psc_service_attachment_link": { + Type: schema.TypeString, + Computed: true, + Description: `The link to service attachment of PSC instance.`, + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + Description: `The dns name of the instance.`, + }, + "restore_backup_context": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "backup_run_id": { + Type: schema.TypeInt, + Required: true, + Description: `The ID of the backup run to restore from.`, + }, + "instance_id": { + Type: schema.TypeString, + Optional: true, + Description: `The ID of the instance that the backup was taken from.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Description: `The full project ID of the source instance.`, + }, + }, + }, + }, + "clone": { + Type: schema.TypeList, + Optional: true, + Computed: false, + AtLeastOneOf: []string{"settings", "clone"}, + Description: `Configuration for creating a new instance as a clone of another instance.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source_instance_name": { + Type: schema.TypeString, + Required: true, + Description: `The name of the instance from which the point in time should be restored.`, + }, + "point_in_time": { + Type: schema.TypeString, + Optional: true, + DiffSuppressFunc: tpgresource.TimestampDiffSuppress(time.RFC3339Nano), + Description: `The timestamp of the point in time that should be restored.`, + }, + "preferred_zone": { + Type: schema.TypeString, + Optional: true, + Description: `(Point-in-time recovery for PostgreSQL only) Clone to an instance in the specified zone. 
If no zone is specified, clone to the same zone as the source instance.`,
+						},
+						"database_names": {
+							Type:     schema.TypeList,
+							Optional: true,
+							Elem: &schema.Schema{
+								Type: schema.TypeString,
+							},
+							Description: `(SQL Server only, use with point_in_time) clone only the specified databases from the source instance. Clone all databases if empty.`,
+						},
+						"allocated_ip_range": {
+							Type:        schema.TypeString,
+							Optional:    true,
+							Description: `The name of the allocated ip range for the private ip CloudSQL instance. For example: "google-managed-services-default". If set, the cloned instance ip will be created in the allocated range. The range name must comply with [RFC 1035](https://tools.ietf.org/html/rfc1035). Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])?.`,
+						},
+					},
+				},
+			},
+		},
+		UseJSONNumber: true,
+	}
+}
+
+// Makes private_network ForceNew if it is changing from set to nil. The API returns an error
+// if this change is attempted in-place.
+func privateNetworkCustomizeDiff(_ context.Context, d *schema.ResourceDiff, meta interface{}) error {
+	old, new := d.GetChange("settings.0.ip_configuration.0.private_network")
+
+	// Only the set -> unset transition forces recreation; other transitions
+	// can be applied in place.
+	if old != "" && new == "" {
+		if err := d.ForceNew("settings.0.ip_configuration.0.private_network"); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// stringContainsSlice reports whether str contains any element of arr as a substring.
+// Note the direction: elements of arr are searched for inside str (e.g. database
+// version "POSTGRES_14" contains the element "POSTGRES").
+func stringContainsSlice(arr []string, str string) bool {
+	for _, i := range arr {
+		if strings.Contains(str, i) {
+			return true
+		}
+	}
+	return false
+}
+
+// Point in time recovery for MySQL database instances needs binary_log_enabled set to true and
+// not point_in_time_recovery_enabled, which is confusing to users. This checks for
+// point_in_time_recovery_enabled being set to a non-PostgreSQL and non-SQLServer database instances and suggests
+// binary_log_enabled.
+func pitrSupportDbCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v interface{}) error {
+	pitr := diff.Get("settings.0.backup_configuration.0.point_in_time_recovery_enabled").(bool)
+	dbVersion := diff.Get("database_version").(string)
+	dbVersionPitrValid := []string{"POSTGRES", "SQLSERVER"}
+	if pitr && !stringContainsSlice(dbVersionPitrValid, dbVersion) {
+		return fmt.Errorf("point_in_time_recovery_enabled is only available for the following %v. You may want to consider using binary_log_enabled instead and remove point_in_time_recovery_enabled (removing point_in_time_recovery_enabled and adding binary_log_enabled will enable pitr for MYSQL)", dbVersionPitrValid)
+	}
+	return nil
+}
+
+// resourceSqlDatabaseInstanceCreate creates (or clones, when a clone block is
+// configured) the Cloud SQL instance, removes the auto-created wildcard root
+// user on new master instances, patches settings that can only be sent after
+// creation (e.g. binary logging on replicas), and optionally restores from a
+// backup via restore_backup_context.
+func resourceSqlDatabaseInstanceCreate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	project, err := tpgresource.GetProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	region, err := tpgresource.GetRegion(d, config)
+	if err != nil {
+		return err
+	}
+
+	// Instance names cannot be reused for ~1 week, so a random name is
+	// generated when the user does not supply one.
+	var name string
+	if v, ok := d.GetOk("name"); ok {
+		name = v.(string)
+	} else {
+		name = id.UniqueId()
+	}
+
+	if err := d.Set("name", name); err != nil {
+		return fmt.Errorf("Error setting name: %s", err)
+	}
+
+	// SQL Instances that fail to create are expensive- see https://github.com/hashicorp/terraform-provider-google/issues/7154
+	// We can fail fast to stop instance names from getting reserved.
+	network := d.Get("settings.0.ip_configuration.0.private_network").(string)
+	if network != "" {
+		err = sqlDatabaseInstanceServiceNetworkPrecheck(d, config, userAgent, network)
+		if err != nil {
+			return err
+		}
+	}
+
+	databaseVersion := d.Get("database_version").(string)
+
+	instance := &sqladmin.DatabaseInstance{
+		Name:                 name,
+		Region:               region,
+		DatabaseVersion:      databaseVersion,
+		MasterInstanceName:   d.Get("master_instance_name").(string),
+		ReplicaConfiguration: expandReplicaConfiguration(d.Get("replica_configuration").([]interface{})),
+	}
+
+	cloneContext, cloneSource := expandCloneContext(d.Get("clone").([]interface{}))
+
+	s, ok := d.GetOk("settings")
+	desiredSettings := expandSqlDatabaseInstanceSettings(s.([]interface{}), databaseVersion)
+	if ok {
+		instance.Settings = desiredSettings
+	}
+
+	if _, ok := d.GetOk("maintenance_version"); ok {
+		instance.MaintenanceVersion = d.Get("maintenance_version").(string)
+	}
+
+	if _, ok := d.GetOk("instance_type"); ok {
+		instance.InstanceType = d.Get("instance_type").(string)
+	}
+
+	instance.RootPassword = d.Get("root_password").(string)
+
+	// Modifying a replica during Create can cause problems if the master is
+	// modified at the same time. Lock the master until we're done in order
+	// to prevent that.
+	if !sqlDatabaseIsMaster(d) {
+		transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance.MasterInstanceName))
+		defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance.MasterInstanceName))
+	}
+
+	if k, ok := d.GetOk("encryption_key_name"); ok {
+		instance.DiskEncryptionConfiguration = &sqladmin.DiskEncryptionConfiguration{
+			KmsKeyName: k.(string),
+		}
+	}
+
+	var patchData *sqladmin.DatabaseInstance
+
+	// BinaryLogging can be enabled on replica instances but only after creation.
+	if instance.MasterInstanceName != "" && instance.Settings != nil && instance.Settings.BackupConfiguration != nil && instance.Settings.BackupConfiguration.BinaryLogEnabled {
+		settingsCopy := expandSqlDatabaseInstanceSettings(s.([]interface{}), databaseVersion)
+		bc := settingsCopy.BackupConfiguration
+		patchData = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{BackupConfiguration: bc}}
+
+		instance.Settings.BackupConfiguration.BinaryLogEnabled = false
+	}
+
+	var op *sqladmin.Operation
+	err = transport_tpg.Retry(transport_tpg.RetryOptions{
+		RetryFunc: func() (operr error) {
+			if cloneContext != nil {
+				// A configured clone block means this instance is created by
+				// cloning cloneSource rather than with a plain Insert.
+				cloneContext.DestinationInstanceName = name
+				cloneReq := sqladmin.InstancesCloneRequest{CloneContext: cloneContext}
+				op, operr = config.NewSqlAdminClient(userAgent).Instances.Clone(project, cloneSource, &cloneReq).Do()
+			} else {
+				op, operr = config.NewSqlAdminClient(userAgent).Instances.Insert(project, instance).Do()
+			}
+			return operr
+		},
+		Timeout:              d.Timeout(schema.TimeoutCreate),
+		ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
+	})
+	if err != nil {
+		return fmt.Errorf("Error, failed to create instance %s: %s", instance.Name, err)
+	}
+
+	// Renamed from "id" to avoid shadowing the imported id package used above.
+	instanceId, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/instances/{{"{{"}}name{{"}}"}}")
+	if err != nil {
+		return fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(instanceId)
+
+	err = SqlAdminOperationWaitTime(config, op, project, "Create Instance", userAgent, d.Timeout(schema.TimeoutCreate))
+	if err != nil {
+		d.SetId("")
+		return err
+	}
+
+	// If a default root user was created with a wildcard ('%') hostname, delete it. Note it
+	// appears to only be created for certain types of databases, like MySQL.
+	// Users in a replica instance are inherited from the master instance and should be left alone.
+	// This deletion is done immediately after the instance is created, in order to minimize the
+	// risk of it being left on the instance, which would present a security concern.
+	if sqlDatabaseIsMaster(d) {
+		var users *sqladmin.UsersListResponse
+		err = transport_tpg.Retry(transport_tpg.RetryOptions{
+			RetryFunc: func() error {
+				users, err = config.NewSqlAdminClient(userAgent).Users.List(project, instance.Name).Do()
+				return err
+			},
+			Timeout:              d.Timeout(schema.TimeoutRead),
+			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
+		})
+		if err != nil {
+			return fmt.Errorf("Error, attempting to list users associated with instance %s: %s", instance.Name, err)
+		}
+		for _, u := range users.Items {
+			if u.Name == "root" && u.Host == "%" {
+				err = transport_tpg.Retry(transport_tpg.RetryOptions{
+					RetryFunc: func() error {
+						op, err = config.NewSqlAdminClient(userAgent).Users.Delete(project, instance.Name).Host(u.Host).Name(u.Name).Do()
+						if err == nil {
+							err = SqlAdminOperationWaitTime(config, op, project, "Delete default root User", userAgent, d.Timeout(schema.TimeoutCreate))
+						}
+						return err
+					},
+				})
+				if err != nil {
+					return fmt.Errorf("Error, failed to delete default 'root'@'%%' user, but the database was created successfully: %s", err)
+				}
+			}
+		}
+	}
+
+	// patch any fields that need to be sent postcreation
+	if patchData != nil {
+		err = transport_tpg.Retry(transport_tpg.RetryOptions{
+			RetryFunc: func() (rerr error) {
+				op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, instance.Name, patchData).Do()
+				return rerr
+			},
+			Timeout:              d.Timeout(schema.TimeoutUpdate),
+			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
+		})
+		if err != nil {
+			return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err)
+		}
+		err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
+		if err != nil {
+			return err
+		}
+	}
+
+	err = resourceSqlDatabaseInstanceRead(d, meta)
+	if err != nil {
+		return err
+	}
+
+	// Refresh settings from read as they may have defaulted from the API
+	s = d.Get("settings")
+	// If we've created an instance as a clone, we need to update it to set any user defined settings
+	if len(s.([]interface{})) != 0 && cloneContext != nil && desiredSettings != nil {
+		instanceUpdate := &sqladmin.DatabaseInstance{
+			Settings: desiredSettings,
+		}
+		_settings := s.([]interface{})[0].(map[string]interface{})
+		instanceUpdate.Settings.SettingsVersion = int64(_settings["version"].(int))
+		var op *sqladmin.Operation
+		err = transport_tpg.Retry(transport_tpg.RetryOptions{
+			RetryFunc: func() (rerr error) {
+				op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, name, instanceUpdate).Do()
+				return rerr
+			},
+			Timeout:              d.Timeout(schema.TimeoutUpdate),
+			ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
+		})
+		if err != nil {
+			return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err)
+		}
+
+		err = SqlAdminOperationWaitTime(config, op, project, "Update Instance", userAgent, d.Timeout(schema.TimeoutUpdate))
+		if err != nil {
+			return err
+		}
+
+		// Refresh the state of the instance after updating the settings
+		err = resourceSqlDatabaseInstanceRead(d, meta)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Perform a backup restore if the backup context exists
+	if r, ok := d.GetOk("restore_backup_context"); ok {
+		err = sqlDatabaseInstanceRestoreFromBackup(d, config, userAgent, project, name, r)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Available fields for settings vary between database versions.
+func expandSqlDatabaseInstanceSettings(configured []interface{}, databaseVersion string) *sqladmin.Settings {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	_settings := configured[0].(map[string]interface{})
+	settings := &sqladmin.Settings{
+		// Version is unset in Create but is set during update
+		SettingsVersion:           int64(_settings["version"].(int)),
+		DataCacheConfig:           expandDataCacheConfig(_settings["data_cache_config"].([]interface{})),
+		Tier:                      _settings["tier"].(string),
+		Edition:                   _settings["edition"].(string),
+		AdvancedMachineFeatures:   expandSqlServerAdvancedMachineFeatures(_settings["advanced_machine_features"].([]interface{})),
+		ForceSendFields:           []string{"StorageAutoResize", "EnableGoogleMlIntegration"},
+		ActivationPolicy:          _settings["activation_policy"].(string),
+		ActiveDirectoryConfig:     expandActiveDirectoryConfig(_settings["active_directory_config"].([]interface{})),
+		DenyMaintenancePeriods:    expandDenyMaintenancePeriod(_settings["deny_maintenance_period"].([]interface{})),
+		SqlServerAuditConfig:      expandSqlServerAuditConfig(_settings["sql_server_audit_config"].([]interface{})),
+		TimeZone:                  _settings["time_zone"].(string),
+		AvailabilityType:          _settings["availability_type"].(string),
+		ConnectorEnforcement:      _settings["connector_enforcement"].(string),
+		Collation:                 _settings["collation"].(string),
+		DataDiskSizeGb:            int64(_settings["disk_size"].(int)),
+		DataDiskType:              _settings["disk_type"].(string),
+		PricingPlan:               _settings["pricing_plan"].(string),
+		DeletionProtectionEnabled: _settings["deletion_protection_enabled"].(bool),
+		EnableGoogleMlIntegration: _settings["enable_google_ml_integration"].(bool),
+		UserLabels:                tpgresource.ConvertStringMap(_settings["user_labels"].(map[string]interface{})),
+		BackupConfiguration:       expandBackupConfiguration(_settings["backup_configuration"].([]interface{})),
+		DatabaseFlags:             expandDatabaseFlags(_settings["database_flags"].(*schema.Set).List()),
+		IpConfiguration:           expandIpConfiguration(_settings["ip_configuration"].([]interface{}), databaseVersion),
+		LocationPreference:        expandLocationPreference(_settings["location_preference"].([]interface{})),
+		MaintenanceWindow:         expandMaintenanceWindow(_settings["maintenance_window"].([]interface{})),
+		InsightsConfig:            expandInsightsConfig(_settings["insights_config"].([]interface{})),
+		PasswordValidationPolicy:  expandPasswordValidationPolicy(_settings["password_validation_policy"].([]interface{})),
+	}
+
+	// StorageAutoResize is a *bool (and listed in ForceSendFields) so that an
+	// explicit false survives JSON marshalling.
+	resize := _settings["disk_autoresize"].(bool)
+	settings.StorageAutoResize = &resize
+	settings.StorageAutoResizeLimit = int64(_settings["disk_autoresize_limit"].(int))
+
+	return settings
+}
+
+// expandReplicaConfiguration converts the replica_configuration schema list
+// into the API's ReplicaConfiguration; returns nil when the block is absent.
+func expandReplicaConfiguration(configured []interface{}) *sqladmin.ReplicaConfiguration {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	_replicaConfiguration := configured[0].(map[string]interface{})
+	return &sqladmin.ReplicaConfiguration{
+		FailoverTarget: _replicaConfiguration["failover_target"].(bool),
+
+		// MysqlReplicaConfiguration has been flattened in the TF schema, so
+		// we'll keep it flat here instead of another expand method.
+		MysqlReplicaConfiguration: &sqladmin.MySqlReplicaConfiguration{
+			CaCertificate:           _replicaConfiguration["ca_certificate"].(string),
+			ClientCertificate:       _replicaConfiguration["client_certificate"].(string),
+			ClientKey:               _replicaConfiguration["client_key"].(string),
+			ConnectRetryInterval:    int64(_replicaConfiguration["connect_retry_interval"].(int)),
+			DumpFilePath:            _replicaConfiguration["dump_file_path"].(string),
+			MasterHeartbeatPeriod:   int64(_replicaConfiguration["master_heartbeat_period"].(int)),
+			Password:                _replicaConfiguration["password"].(string),
+			SslCipher:               _replicaConfiguration["ssl_cipher"].(string),
+			Username:                _replicaConfiguration["username"].(string),
+			VerifyServerCertificate: _replicaConfiguration["verify_server_certificate"].(bool),
+		},
+	}
+}
+
+// expandCloneContext converts the clone schema list into a CloneContext and
+// also returns the source instance name; (nil, "") when the block is absent.
+func expandCloneContext(configured []interface{}) (*sqladmin.CloneContext, string) {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil, ""
+	}
+
+	_cloneConfiguration := configured[0].(map[string]interface{})
+
+	databaseNames := []string{}
+	rawDatabaseNames := _cloneConfiguration["database_names"].([]interface{})
+	for _, db := range rawDatabaseNames {
+		databaseNames = append(databaseNames, db.(string))
+	}
+
+	return &sqladmin.CloneContext{
+		PointInTime:      _cloneConfiguration["point_in_time"].(string),
+		PreferredZone:    _cloneConfiguration["preferred_zone"].(string),
+		DatabaseNames:    databaseNames,
+		AllocatedIpRange: _cloneConfiguration["allocated_ip_range"].(string),
+	}, _cloneConfiguration["source_instance_name"].(string)
+}
+
+// expandMaintenanceWindow converts the maintenance_window schema list; Hour is
+// force-sent so hour 0 (midnight) is not dropped by omitempty.
+func expandMaintenanceWindow(configured []interface{}) *sqladmin.MaintenanceWindow {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	window := configured[0].(map[string]interface{})
+	return &sqladmin.MaintenanceWindow{
+		Day:             int64(window["day"].(int)),
+		Hour:            int64(window["hour"].(int)),
+		UpdateTrack:     window["update_track"].(string),
+		ForceSendFields: []string{"Hour"},
+	}
+}
+
+// expandLocationPreference converts the location_preference schema list.
+func expandLocationPreference(configured []interface{}) *sqladmin.LocationPreference {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	_locationPreference := configured[0].(map[string]interface{})
+	return &sqladmin.LocationPreference{
+		FollowGaeApplication: _locationPreference["follow_gae_application"].(string),
+		Zone:                 _locationPreference["zone"].(string),
+		SecondaryZone:        _locationPreference["secondary_zone"].(string),
+	}
+}
+
+// expandIpConfiguration converts the ip_configuration schema list. The
+// databaseVersion is needed because EnablePrivatePathForGoogleCloudServices
+// must not be sent for SQL Server instances.
+func expandIpConfiguration(configured []interface{}, databaseVersion string) *sqladmin.IpConfiguration {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	_ipConfiguration := configured[0].(map[string]interface{})
+
+	forceSendFields := []string{"Ipv4Enabled", "RequireSsl"}
+
+	if !strings.HasPrefix(databaseVersion, "SQLSERVER") {
+		forceSendFields = append(forceSendFields, "EnablePrivatePathForGoogleCloudServices")
+	}
+
+	return &sqladmin.IpConfiguration{
+		Ipv4Enabled:                             _ipConfiguration["ipv4_enabled"].(bool),
+		RequireSsl:                              _ipConfiguration["require_ssl"].(bool),
+		PrivateNetwork:                          _ipConfiguration["private_network"].(string),
+		AllocatedIpRange:                        _ipConfiguration["allocated_ip_range"].(string),
+		AuthorizedNetworks:                      expandAuthorizedNetworks(_ipConfiguration["authorized_networks"].(*schema.Set).List()),
+		EnablePrivatePathForGoogleCloudServices: _ipConfiguration["enable_private_path_for_google_cloud_services"].(bool),
+		ForceSendFields:                         forceSendFields,
+		PscConfig:                               expandPscConfig(_ipConfiguration["psc_config"].(*schema.Set).List()),
+		SslMode:                                 _ipConfiguration["ssl_mode"].(string),
+	}
+}
+
+// expandPscConfig converts the psc_config set; only the first entry is used
+// (the schema allows at most one), nil when the set is empty.
+func expandPscConfig(configured []interface{}) *sqladmin.PscConfig {
+	for _, _pscConfig := range configured {
+		_entry := _pscConfig.(map[string]interface{})
+		return &sqladmin.PscConfig{
+			PscEnabled:              _entry["psc_enabled"].(bool),
+			AllowedConsumerProjects: tpgresource.ConvertStringArr(_entry["allowed_consumer_projects"].(*schema.Set).List()),
+		}
+	}
+
+	return nil
+}
+
+// expandAuthorizedNetworks converts the authorized_networks set into AclEntry values.
+func expandAuthorizedNetworks(configured []interface{}) []*sqladmin.AclEntry {
+	an := make([]*sqladmin.AclEntry, 0, len(configured))
+	for _, _acl := range configured {
+		_entry := _acl.(map[string]interface{})
+		an = append(an, &sqladmin.AclEntry{
+			ExpirationTime: _entry["expiration_time"].(string),
+			Name:           _entry["name"].(string),
+			Value:          _entry["value"].(string),
+		})
+	}
+
+	return an
+}
+
+// expandDatabaseFlags converts the database_flags set, skipping nil entries.
+func expandDatabaseFlags(configured []interface{}) []*sqladmin.DatabaseFlags {
+	databaseFlags := make([]*sqladmin.DatabaseFlags, 0, len(configured))
+	for _, _flag := range configured {
+		if _flag == nil {
+			continue
+		}
+		_entry := _flag.(map[string]interface{})
+
+		databaseFlags = append(databaseFlags, &sqladmin.DatabaseFlags{
+			Name:  _entry["name"].(string),
+			Value: _entry["value"].(string),
+		})
+	}
+	return databaseFlags
+}
+
+// expandDataCacheConfig converts the data_cache_config schema list.
+func expandDataCacheConfig(configured interface{}) *sqladmin.DataCacheConfig {
+	l := configured.([]interface{})
+	if len(l) == 0 {
+		return nil
+	}
+	config := l[0].(map[string]interface{})
+	return &sqladmin.DataCacheConfig{
+		DataCacheEnabled: config["data_cache_enabled"].(bool),
+	}
+}
+
+// expandBackupConfiguration converts the backup_configuration schema list;
+// the boolean fields are force-sent so explicit false values reach the API.
+func expandBackupConfiguration(configured []interface{}) *sqladmin.BackupConfiguration {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	_backupConfiguration := configured[0].(map[string]interface{})
+	return &sqladmin.BackupConfiguration{
+		BinaryLogEnabled:            _backupConfiguration["binary_log_enabled"].(bool),
+		BackupRetentionSettings:     expandBackupRetentionSettings(_backupConfiguration["backup_retention_settings"]),
+		Enabled:                     _backupConfiguration["enabled"].(bool),
+		StartTime:                   _backupConfiguration["start_time"].(string),
+		Location:                    _backupConfiguration["location"].(string),
+		TransactionLogRetentionDays: int64(_backupConfiguration["transaction_log_retention_days"].(int)),
+		PointInTimeRecoveryEnabled:  _backupConfiguration["point_in_time_recovery_enabled"].(bool),
+		ForceSendFields:             []string{"BinaryLogEnabled", "Enabled", "PointInTimeRecoveryEnabled"},
+	}
+}
+
+// expandBackupRetentionSettings converts the backup_retention_settings schema list.
+func expandBackupRetentionSettings(configured interface{}) *sqladmin.BackupRetentionSettings {
+	l := configured.([]interface{})
+	if len(l) == 0 {
+		return nil
+	}
+	config := l[0].(map[string]interface{})
+	return &sqladmin.BackupRetentionSettings{
+		RetainedBackups: int64(config["retained_backups"].(int)),
+		RetentionUnit:   config["retention_unit"].(string),
+	}
+}
+
+// expandActiveDirectoryConfig converts the active_directory_config schema list.
+func expandActiveDirectoryConfig(configured interface{}) *sqladmin.SqlActiveDirectoryConfig {
+	l := configured.([]interface{})
+	if len(l) == 0 {
+		return nil
+	}
+
+	config := l[0].(map[string]interface{})
+	return &sqladmin.SqlActiveDirectoryConfig{
+		Domain: config["domain"].(string),
+	}
+}
+
+// expandDenyMaintenancePeriod converts the deny_maintenance_period list,
+// skipping nil entries.
+func expandDenyMaintenancePeriod(configured []interface{}) []*sqladmin.DenyMaintenancePeriod {
+	denyMaintenancePeriod := make([]*sqladmin.DenyMaintenancePeriod, 0, len(configured))
+
+	for _, _flag := range configured {
+		if _flag == nil {
+			continue
+		}
+		_entry := _flag.(map[string]interface{})
+
+		denyMaintenancePeriod = append(denyMaintenancePeriod, &sqladmin.DenyMaintenancePeriod{
+			EndDate:   _entry["end_date"].(string),
+			StartDate: _entry["start_date"].(string),
+			Time:      _entry["time"].(string),
+		})
+	}
+	return denyMaintenancePeriod
+
+}
+
+
+// expandSqlServerAdvancedMachineFeatures converts the advanced_machine_features schema list.
+func expandSqlServerAdvancedMachineFeatures(configured interface{}) *sqladmin.AdvancedMachineFeatures {
+	l := configured.([]interface{})
+	if len(l) == 0 {
+		return nil
+	}
+
+	config := l[0].(map[string]interface{})
+	return &sqladmin.AdvancedMachineFeatures{
+		ThreadsPerCore: int64(config["threads_per_core"].(int)),
+	}
+}
+
+// expandSqlServerAuditConfig converts the sql_server_audit_config schema list.
+func expandSqlServerAuditConfig(configured interface{}) *sqladmin.SqlServerAuditConfig {
+	l := configured.([]interface{})
+	if len(l) == 0 {
+		return nil
+	}
+
+	config := l[0].(map[string]interface{})
+	return &sqladmin.SqlServerAuditConfig{
+		Bucket:            config["bucket"].(string),
+		RetentionInterval: config["retention_interval"].(string),
+		UploadInterval:    config["upload_interval"].(string),
+	}
+}
+
+// expandInsightsConfig converts the insights_config schema list.
+func expandInsightsConfig(configured []interface{}) *sqladmin.InsightsConfig {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	_insightsConfig := configured[0].(map[string]interface{})
+	return &sqladmin.InsightsConfig{
+		QueryInsightsEnabled:  _insightsConfig["query_insights_enabled"].(bool),
+		QueryStringLength:     int64(_insightsConfig["query_string_length"].(int)),
+		RecordApplicationTags: _insightsConfig["record_application_tags"].(bool),
+		RecordClientAddress:   _insightsConfig["record_client_address"].(bool),
+		QueryPlansPerMinute:   int64(_insightsConfig["query_plans_per_minute"].(int)),
+	}
+}
+
+// expandPasswordValidationPolicy converts the password_validation_policy schema list.
+func expandPasswordValidationPolicy(configured []interface{}) *sqladmin.PasswordValidationPolicy {
+	if len(configured) == 0 || configured[0] == nil {
+		return nil
+	}
+
+	_passwordValidationPolicy := configured[0].(map[string]interface{})
+	return &sqladmin.PasswordValidationPolicy{
+		MinLength:                 int64(_passwordValidationPolicy["min_length"].(int)),
+		Complexity:                _passwordValidationPolicy["complexity"].(string),
+		ReuseInterval:             int64(_passwordValidationPolicy["reuse_interval"].(int)),
+		DisallowUsernameSubstring: _passwordValidationPolicy["disallow_username_substring"].(bool),
+		PasswordChangeInterval:    _passwordValidationPolicy["password_change_interval"].(string),
+		EnablePasswordPolicy:      _passwordValidationPolicy["enable_password_policy"].(bool),
+	}
+}
+
+// resourceSqlDatabaseInstanceRead fetches the instance from the SQL Admin API
+// and writes every exported attribute into state. Settings/replica/CA-cert
+// flattening failures are logged as warnings rather than returned, so a
+// partially readable instance does not fail the whole refresh.
+func resourceSqlDatabaseInstanceRead(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	project, err := tpgresource.GetProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	var instance *sqladmin.DatabaseInstance
+	err = transport_tpg.Retry(transport_tpg.RetryOptions{
+		RetryFunc: func() (rerr error) {
+			instance, rerr = config.NewSqlAdminClient(userAgent).Instances.Get(project, d.Get("name").(string)).Do()
+			return rerr
+		},
+		Timeout:              d.Timeout(schema.TimeoutRead),
+		ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError},
+	})
+	if err != nil {
+		return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("SQL Database Instance %q", d.Get("name").(string)))
+	}
+
+	if err := d.Set("name", instance.Name); err != nil {
+		return fmt.Errorf("Error setting name: %s", err)
+	}
+	if err := d.Set("region", instance.Region); err != nil {
+		return fmt.Errorf("Error setting region: %s", err)
+	}
+	if err := d.Set("database_version", instance.DatabaseVersion); err != nil {
+		return fmt.Errorf("Error setting database_version: %s", err)
+	}
+	if err := d.Set("connection_name", instance.ConnectionName); err != nil {
+		return fmt.Errorf("Error setting connection_name: %s", err)
+	}
+	if err := d.Set("maintenance_version", instance.MaintenanceVersion); err != nil {
+		return fmt.Errorf("Error setting maintenance_version: %s", err)
+	}
+	if err := d.Set("available_maintenance_versions", instance.AvailableMaintenanceVersions); err != nil {
+		return fmt.Errorf("Error setting available_maintenance_version: %s", err)
+	}
+	if err := d.Set("service_account_email_address", instance.ServiceAccountEmailAddress); err != nil {
+		return fmt.Errorf("Error setting service_account_email_address: %s", err)
+	}
+	if err := d.Set("instance_type", instance.InstanceType); err != nil {
+		return fmt.Errorf("Error setting instance_type: %s", err)
+	}
+	if err := d.Set("settings", flattenSettings(instance.Settings, d)); err != nil {
+		log.Printf("[WARN] Failed to set SQL Database Instance Settings")
+	}
+
+	if instance.DiskEncryptionConfiguration != nil {
+		if err := d.Set("encryption_key_name", instance.DiskEncryptionConfiguration.KmsKeyName); err != nil {
+			return fmt.Errorf("Error setting encryption_key_name: %s", err)
+		}
+	}
+
+	if err := d.Set("replica_configuration", flattenReplicaConfiguration(instance.ReplicaConfiguration, d)); err != nil {
+		log.Printf("[WARN] Failed to set SQL Database Instance Replica Configuration")
+	}
+	ipAddresses := flattenIpAddresses(instance.IpAddresses)
+	if err := d.Set("ip_address", ipAddresses); err != nil {
+		log.Printf("[WARN] Failed to set SQL Database Instance IP Addresses")
+	}
+
+	if len(ipAddresses) > 0 {
+		if err := d.Set("first_ip_address", ipAddresses[0]["ip_address"]); err != nil {
+			return fmt.Errorf("Error setting first_ip_address: %s", err)
+		}
+	}
+
+	// Surface the first PRIMARY and first PRIVATE address as convenience
+	// attributes so configs don't have to filter the ip_address list.
+	publicIpAddress := ""
+	privateIpAddress := ""
+	for _, ip := range instance.IpAddresses {
+		if publicIpAddress == "" && ip.Type == "PRIMARY" {
+			publicIpAddress = ip.IpAddress
+		}
+
+		if privateIpAddress == "" && ip.Type == "PRIVATE" {
+			privateIpAddress = ip.IpAddress
+		}
+	}
+
+	if err := d.Set("public_ip_address", publicIpAddress); err != nil {
+		return fmt.Errorf("Error setting public_ip_address: %s", err)
+	}
+	if err := d.Set("private_ip_address", privateIpAddress); err != nil {
+		return fmt.Errorf("Error setting private_ip_address: %s", err)
+	}
+
+	if err := d.Set("server_ca_cert", flattenServerCaCerts([]*sqladmin.SslCert{instance.ServerCaCert})); err != nil {
+		log.Printf("[WARN] Failed to set SQL Database CA Certificate")
+	}
+
+	if err := d.Set("master_instance_name", strings.TrimPrefix(instance.MasterInstanceName, project+":")); err != nil {
+		return fmt.Errorf("Error setting master_instance_name: %s", err)
+	}
+	if err := d.Set("project", project); err != nil {
+		return fmt.Errorf("Error setting project: %s", err)
+	}
+	if err := d.Set("self_link", instance.SelfLink); err != nil {
+		return fmt.Errorf("Error setting self_link: %s", err)
+	}
+	if err := d.Set("psc_service_attachment_link", instance.PscServiceAttachmentLink); err != nil {
+		return fmt.Errorf("Error setting psc_service_attachment_link: %s", err)
+	}
+	if err := d.Set("dns_name", instance.DnsName); err != nil {
+		return fmt.Errorf("Error setting dns_name: %s", err)
+	}
+	d.SetId(instance.Name)
+
+	return nil
+}
+
+func resourceSqlDatabaseInstanceUpdate(d *schema.ResourceData, meta interface{}) 
error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + var maintenance_version string + if v, ok := d.GetOk("maintenance_version"); ok { + maintenance_version = v.(string) + } + + promoteReadReplicaRequired := false + if d.HasChange("instance_type") { + oldInstanceType, newInstanceType := d.GetChange("instance_type") + + if isReplicaPromoteRequested(nil, oldInstanceType, newInstanceType, nil) { + err = checkPromoteConfigurations(d) + if err != nil { + return err + } + + promoteReadReplicaRequired = true + } + } + + desiredSetting := d.Get("settings") + var op *sqladmin.Operation + var instance *sqladmin.DatabaseInstance + + databaseVersion := d.Get("database_version").(string) + + // Check if the activation policy is being updated. If it is being changed to ALWAYS this should be done first. + if d.HasChange("settings.0.activation_policy") && d.Get("settings.0.activation_policy").(string) == "ALWAYS" { + instance = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{ActivationPolicy: "ALWAYS"}} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + // Check if the database version is being 
updated, because patching database version is an atomic operation and can not be + // performed with other fields, we first patch database version before updating the rest of the fields. + if d.HasChange("database_version") { + instance = &sqladmin.DatabaseInstance{DatabaseVersion: databaseVersion} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + // Check if the root_password is being updated, because updating root_password is an atomic operation and can not be + // performed with other fields, we first update root password before updating the rest of the fields. 
+ if d.HasChange("root_password") { + oldPwd, newPwd := d.GetChange("root_password") + password := newPwd.(string) + dv := d.Get("database_version").(string) + name := "" + host := "" + if strings.Contains(dv, "MYSQL") { + name = "root" + host = "%" + } else if strings.Contains(dv, "POSTGRES") { + name = "postgres" + } else if strings.Contains(dv, "SQLSERVER") { + name = "sqlserver" + if len(password) == 0 { + if err := d.Set("root_password", oldPwd.(string)); err != nil { + return fmt.Errorf("Error re-setting root_password: %s", err) + } + return fmt.Errorf("Error, root password cannot be empty for SQL Server instance.") + } + }else { + if err := d.Set("root_password", oldPwd.(string)); err != nil { + return fmt.Errorf("Error re-setting root_password: %s", err) + } + return fmt.Errorf("Error, invalid database version") + } + instance := d.Get("name").(string) + + user := &sqladmin.User{ + Name: name, + Instance: instance, + Password: password, + } + + transport_tpg.MutexStore.Lock(instanceMutexKey(project, instance)) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, instance)) + var op *sqladmin.Operation + updateFunc := func() error { + op, err = config.NewSqlAdminClient(userAgent).Users.Update(project, instance, user).Host(host).Name(name).Do() + return err + } + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: updateFunc, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + if err := d.Set("root_password", oldPwd.(string)); err != nil { + return fmt.Errorf("Error re-setting root_password: %s", err) + } + return fmt.Errorf("Error, failed to update root_password : %s", err) + } + + err = SqlAdminOperationWaitTime(config, op, project, "Insert User", userAgent, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + if err := d.Set("root_password", oldPwd.(string)); err != nil { + return fmt.Errorf("Error re-setting root_password: %s", err) + } + return fmt.Errorf("Error, failed to update root_password : %s", err) 
+ } + } + + // Check if the maintenance version is being updated, because patching maintenance version is an atomic operation and can not be + // performed with other fields, we first patch maintenance version before updating the rest of the fields. + if d.HasChange("maintenance_version") { + instance = &sqladmin.DatabaseInstance{MaintenanceVersion: maintenance_version} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + if promoteReadReplicaRequired { + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.PromoteReplica(project, d.Get("name").(string)).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to promote read replica instance as primary stand-alone %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Promote Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + // Check if the edition is being updated, because patching edition is an atomic 
operation and can not be + // performed with other fields, we first patch edition, tier and data cache config before updating the rest of the fields. + if d.HasChange("settings.0.edition") { + edition := d.Get("settings.0.edition").(string) + tier := d.Get("settings.0.tier").(string) + dataCacheConfig := expandDataCacheConfig(d.Get("settings.0.data_cache_config").([]interface{})) + instance = &sqladmin.DatabaseInstance{Settings: &sqladmin.Settings{Edition: edition, Tier: tier, DataCacheConfig: dataCacheConfig}} + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Patch(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to patch instance settings for %s: %s", instance.Name, err) + } + err = SqlAdminOperationWaitTime(config, op, project, "Patch Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + err = resourceSqlDatabaseInstanceRead(d, meta) + if err != nil { + return err + } + } + + s := d.Get("settings") + instance = &sqladmin.DatabaseInstance{ + Settings: expandSqlDatabaseInstanceSettings(desiredSetting.([]interface{}), databaseVersion), + } + _settings := s.([]interface{})[0].(map[string]interface{}) + // Instance.Patch operation on completion updates the settings proto version by +8. As terraform does not know this it tries + // to make an update call with the proto version before patch and fails. To resolve this issue we update the setting version + // before making the update call. 
+ instance.Settings.SettingsVersion = int64(_settings["version"].(int)) + // Collation cannot be included in the update request + instance.Settings.Collation = "" + + // Lock on the master_instance_name just in case updating any replica + // settings causes operations on the master. + if v, ok := d.GetOk("master_instance_name"); ok { + transport_tpg.MutexStore.Lock(instanceMutexKey(project, v.(string))) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, v.(string))) + } + + if _, ok := d.GetOk("instance_type"); ok { + instance.InstanceType = d.Get("instance_type").(string) + } + + // Database Version is required for all calls with Google ML integration enabled or it will be rejected by the API. + if d.Get("settings.0.enable_google_ml_integration").(bool) { + instance.DatabaseVersion = databaseVersion + } + + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Update(project, d.Get("name").(string), instance).Do() + return rerr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to update instance settings for %s: %s", instance.Name, err) + } + + err = SqlAdminOperationWaitTime(config, op, project, "Update Instance", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + // Perform a backup restore if the backup context exists and has changed + if r, ok := d.GetOk("restore_backup_context"); ok { + if d.HasChange("restore_backup_context") { + err = sqlDatabaseInstanceRestoreFromBackup(d, config, userAgent, project, d.Get("name").(string), r) + if err != nil { + return err + } + } + } + + return resourceSqlDatabaseInstanceRead(d, meta) +} + +func maintenanceVersionDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + // Ignore the database version part and 
only compare the last part of the maintenance version which represents the release date of the version. + if len(old) > 14 && len(new) > 14 && old[len(old)-14:] >= new[len(new)-14:] { + log.Printf("[DEBUG] Maintenance version in configuration [%s] is older than current maintenance version [%s] on instance. Suppressing diff", new, old) + return true + } else { + return false + } +} + +func resourceSqlDatabaseInstanceDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + // Check if deletion protection is enabled. + + if d.Get("deletion_protection").(bool) { + return fmt.Errorf("Error, failed to delete instance because deletion_protection is set to true. Set it to false to proceed with instance deletion") + } + + // Lock on the master_instance_name just in case deleting a replica causes + // operations on the master. 
+ if v, ok := d.GetOk("master_instance_name"); ok { + transport_tpg.MutexStore.Lock(instanceMutexKey(project, v.(string))) + defer transport_tpg.MutexStore.Unlock(instanceMutexKey(project, v.(string))) + } + + var op *sqladmin.Operation + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (rerr error) { + op, rerr = config.NewSqlAdminClient(userAgent).Instances.Delete(project, d.Get("name").(string)).Do() + if rerr != nil { + return rerr + } + err = SqlAdminOperationWaitTime(config, op, project, "Delete Instance", userAgent, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return err + } + return nil + }, + Timeout: d.Timeout(schema.TimeoutDelete), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError, IsSqlInternalError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to delete instance %s: %s", d.Get("name").(string), err) + } + return nil +} + +func resourceSqlDatabaseInstanceImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{ + "projects/(?P[^/]+)/instances/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + if err := d.Set("deletion_protection", true); err != nil { + return nil, fmt.Errorf("Error setting deletion_protection: %s", err) + } + + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/instances/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenSettings(settings *sqladmin.Settings, d *schema.ResourceData) []map[string]interface{} { + data := map[string]interface{}{ + "version": settings.SettingsVersion, + "tier": settings.Tier, + "edition": flattenEdition(settings.Edition), + "activation_policy": 
settings.ActivationPolicy, + "availability_type": settings.AvailabilityType, + "collation": settings.Collation, + "connector_enforcement": settings.ConnectorEnforcement, + "disk_type": settings.DataDiskType, + "disk_size": settings.DataDiskSizeGb, + "pricing_plan": settings.PricingPlan, + "user_labels": settings.UserLabels, + "password_validation_policy": settings.PasswordValidationPolicy, + "time_zone": settings.TimeZone, + "deletion_protection_enabled": settings.DeletionProtectionEnabled, + } + + if settings.ActiveDirectoryConfig != nil { + data["active_directory_config"] = flattenActiveDirectoryConfig(settings.ActiveDirectoryConfig) + } + + if settings.DenyMaintenancePeriods != nil { + data["deny_maintenance_period"] = flattenDenyMaintenancePeriod(settings.DenyMaintenancePeriods) + } + + if settings.SqlServerAuditConfig != nil { + data["sql_server_audit_config"] = flattenSqlServerAuditConfig(settings.SqlServerAuditConfig) + } + + if settings.BackupConfiguration != nil { + data["backup_configuration"] = flattenBackupConfiguration(settings.BackupConfiguration) + } + + if settings.DatabaseFlags != nil { + data["database_flags"] = flattenDatabaseFlags(settings.DatabaseFlags) + } + + if settings.IpConfiguration != nil { + data["ip_configuration"] = flattenIpConfiguration(settings.IpConfiguration, d) + } + + if settings.LocationPreference != nil { + data["location_preference"] = flattenLocationPreference(settings.LocationPreference) + } + + if settings.MaintenanceWindow != nil { + data["maintenance_window"] = flattenMaintenanceWindow(settings.MaintenanceWindow) + } + + if settings.InsightsConfig != nil { + data["insights_config"] = flattenInsightsConfig(settings.InsightsConfig) + } + + data["disk_autoresize"] = settings.StorageAutoResize + data["disk_autoresize_limit"] = settings.StorageAutoResizeLimit + + data["enable_google_ml_integration"] = settings.EnableGoogleMlIntegration + + if settings.UserLabels != nil { + data["user_labels"] = settings.UserLabels + } + + if 
settings.PasswordValidationPolicy != nil { + data["password_validation_policy"] = flattenPasswordValidationPolicy(settings.PasswordValidationPolicy) + } + + if settings.DataCacheConfig != nil { + data["data_cache_config"] = flattenDataCacheConfig(settings.DataCacheConfig) + } + + if settings.AdvancedMachineFeatures != nil { + data["advanced_machine_features"] = flattenSqlServerAdvancedMachineFeatures(settings.AdvancedMachineFeatures) + } + + return []map[string]interface{}{data} +} + +func flattenDataCacheConfig(d *sqladmin.DataCacheConfig) []map[string]interface{} { + if d == nil { + return nil + } + return []map[string]interface{}{ + { + "data_cache_enabled": d.DataCacheEnabled, + }, + } +} + +func flattenBackupConfiguration(backupConfiguration *sqladmin.BackupConfiguration) []map[string]interface{} { + data := map[string]interface{}{ + "binary_log_enabled": backupConfiguration.BinaryLogEnabled, + "enabled": backupConfiguration.Enabled, + "start_time": backupConfiguration.StartTime, + "location": backupConfiguration.Location, + "point_in_time_recovery_enabled": backupConfiguration.PointInTimeRecoveryEnabled, + "backup_retention_settings": flattenBackupRetentionSettings(backupConfiguration.BackupRetentionSettings), + "transaction_log_retention_days": backupConfiguration.TransactionLogRetentionDays, + } + + return []map[string]interface{}{data} +} + +func flattenBackupRetentionSettings(b *sqladmin.BackupRetentionSettings) []map[string]interface{} { + if b == nil { + return nil + } + return []map[string]interface{}{ + { + "retained_backups": b.RetainedBackups, + "retention_unit": b.RetentionUnit, + }, + } +} + +func flattenActiveDirectoryConfig(sqlActiveDirectoryConfig *sqladmin.SqlActiveDirectoryConfig) []map[string]interface{} { + if sqlActiveDirectoryConfig == nil { + return nil + } + return []map[string]interface{}{ + { + "domain": sqlActiveDirectoryConfig.Domain, + }, + } +} + +func flattenDenyMaintenancePeriod(denyMaintenancePeriod 
[]*sqladmin.DenyMaintenancePeriod) []map[string]interface{} { + flags := make([]map[string]interface{}, 0, len(denyMaintenancePeriod)) + + for _, flag := range denyMaintenancePeriod { + data := map[string]interface{}{ + "end_date": flag.EndDate, + "start_date": flag.StartDate, + "time": flag.Time, + + } + + flags = append(flags, data) + } + + return flags +} + +func flattenSqlServerAdvancedMachineFeatures(advancedMachineFeatures *sqladmin.AdvancedMachineFeatures) []map[string]interface{} { + if advancedMachineFeatures == nil { + return nil + } + return []map[string]interface{}{ + { + "threads_per_core": advancedMachineFeatures.ThreadsPerCore, + }, + } +} + +func flattenSqlServerAuditConfig(sqlServerAuditConfig *sqladmin.SqlServerAuditConfig) []map[string]interface{} { + if sqlServerAuditConfig == nil { + return nil + } + return []map[string]interface{}{ + { + "bucket": sqlServerAuditConfig.Bucket, + "retention_interval": sqlServerAuditConfig.RetentionInterval, + "upload_interval": sqlServerAuditConfig.UploadInterval, + }, + } +} + +func flattenDatabaseFlags(databaseFlags []*sqladmin.DatabaseFlags) []map[string]interface{} { + flags := make([]map[string]interface{}, 0, len(databaseFlags)) + + for _, flag := range databaseFlags { + data := map[string]interface{}{ + "name": flag.Name, + "value": flag.Value, + } + + flags = append(flags, data) + } + + return flags +} + +func flattenIpConfiguration(ipConfiguration *sqladmin.IpConfiguration, d *schema.ResourceData) interface{} { + data := map[string]interface{}{ + "ipv4_enabled": ipConfiguration.Ipv4Enabled, + "private_network": ipConfiguration.PrivateNetwork, + "allocated_ip_range": ipConfiguration.AllocatedIpRange, + "require_ssl": ipConfiguration.RequireSsl, + "enable_private_path_for_google_cloud_services": ipConfiguration.EnablePrivatePathForGoogleCloudServices, + } + + if ipConfiguration.AuthorizedNetworks != nil { + data["authorized_networks"] = flattenAuthorizedNetworks(ipConfiguration.AuthorizedNetworks) + } + + 
if ipConfiguration.PscConfig != nil { + data["psc_config"] = flattenPscConfigs(ipConfiguration.PscConfig) + } + + // We store the ssl_mode value only if the customer already uses `ssl_mode`. + if _, ok := d.GetOk("settings.0.ip_configuration.0.ssl_mode"); ok { + data["ssl_mode"] = ipConfiguration.SslMode + } + + return []map[string]interface{}{data} +} + +func flattenPscConfigs(pscConfig *sqladmin.PscConfig) interface{} { + data := map[string]interface{}{ + "psc_enabled": pscConfig.PscEnabled, + "allowed_consumer_projects": schema.NewSet(schema.HashString, tpgresource.ConvertStringArrToInterface(pscConfig.AllowedConsumerProjects)), + } + + return []map[string]interface{}{data} +} + +func flattenAuthorizedNetworks(entries []*sqladmin.AclEntry) interface{} { + networks := schema.NewSet(schema.HashResource(sqlDatabaseAuthorizedNetWorkSchemaElem), []interface{}{}) + + for _, entry := range entries { + data := map[string]interface{}{ + "expiration_time": entry.ExpirationTime, + "name": entry.Name, + "value": entry.Value, + } + + networks.Add(data) + } + + return networks +} + +func flattenLocationPreference(locationPreference *sqladmin.LocationPreference) interface{} { + data := map[string]interface{}{ + "follow_gae_application": locationPreference.FollowGaeApplication, + "zone": locationPreference.Zone, + "secondary_zone": locationPreference.SecondaryZone, + } + + return []map[string]interface{}{data} +} + +func flattenMaintenanceWindow(maintenanceWindow *sqladmin.MaintenanceWindow) interface{} { + data := map[string]interface{}{ + "day": maintenanceWindow.Day, + "hour": maintenanceWindow.Hour, + "update_track": maintenanceWindow.UpdateTrack, + } + + return []map[string]interface{}{data} +} + +func flattenReplicaConfiguration(replicaConfiguration *sqladmin.ReplicaConfiguration, d *schema.ResourceData) []map[string]interface{} { + rc := []map[string]interface{}{} + + if replicaConfiguration != nil { + data := map[string]interface{}{ + "failover_target": 
replicaConfiguration.FailoverTarget, + + // Don't attempt to assign anything from replicaConfiguration.MysqlReplicaConfiguration, + // since those fields are set on create and then not stored. See description at + // https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances. + // Instead, set them to the values they previously had so we don't set them all to zero. + "ca_certificate": d.Get("replica_configuration.0.ca_certificate"), + "client_certificate": d.Get("replica_configuration.0.client_certificate"), + "client_key": d.Get("replica_configuration.0.client_key"), + "connect_retry_interval": d.Get("replica_configuration.0.connect_retry_interval"), + "dump_file_path": d.Get("replica_configuration.0.dump_file_path"), + "master_heartbeat_period": d.Get("replica_configuration.0.master_heartbeat_period"), + "password": d.Get("replica_configuration.0.password"), + "ssl_cipher": d.Get("replica_configuration.0.ssl_cipher"), + "username": d.Get("replica_configuration.0.username"), + "verify_server_certificate": d.Get("replica_configuration.0.verify_server_certificate"), + } + rc = append(rc, data) + } + + return rc +} + +func flattenIpAddresses(ipAddresses []*sqladmin.IpMapping) []map[string]interface{} { + var ips []map[string]interface{} + + for _, ip := range ipAddresses { + data := map[string]interface{}{ + "ip_address": ip.IpAddress, + "type": ip.Type, + "time_to_retire": ip.TimeToRetire, + } + + ips = append(ips, data) + } + + return ips +} + +func flattenServerCaCerts(caCerts []*sqladmin.SslCert) []map[string]interface{} { + var certs []map[string]interface{} + + for _, caCert := range caCerts { + if caCert != nil { + data := map[string]interface{}{ + "cert": caCert.Cert, + "common_name": caCert.CommonName, + "create_time": caCert.CreateTime, + "expiration_time": caCert.ExpirationTime, + "sha1_fingerprint": caCert.Sha1Fingerprint, + } + + certs = append(certs, data) + } + } + + return certs +} + +func flattenInsightsConfig(insightsConfig 
*sqladmin.InsightsConfig) interface{} { + data := map[string]interface{}{ + "query_insights_enabled": insightsConfig.QueryInsightsEnabled, + "query_string_length": insightsConfig.QueryStringLength, + "record_application_tags": insightsConfig.RecordApplicationTags, + "record_client_address": insightsConfig.RecordClientAddress, + "query_plans_per_minute": insightsConfig.QueryPlansPerMinute, + } + + return []map[string]interface{}{data} +} + +func flattenPasswordValidationPolicy(passwordValidationPolicy *sqladmin.PasswordValidationPolicy) interface{} { + data := map[string]interface{}{ + "min_length": passwordValidationPolicy.MinLength, + "complexity": passwordValidationPolicy.Complexity, + "reuse_interval": passwordValidationPolicy.ReuseInterval, + "disallow_username_substring": passwordValidationPolicy.DisallowUsernameSubstring, + "password_change_interval": passwordValidationPolicy.PasswordChangeInterval, + "enable_password_policy": passwordValidationPolicy.EnablePasswordPolicy, + } + return []map[string]interface{}{data} +} + +func flattenEdition(v interface{}) string { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "ENTERPRISE" + } + + return v.(string) +} + +func instanceMutexKey(project, instance_name string) string { + return fmt.Sprintf("google-sql-database-instance-%s-%s", project, instance_name) +} + +// sqlDatabaseIsMaster returns true if the provided schema.ResourceData represents a +// master SQL Instance, and false if it is a replica. +func sqlDatabaseIsMaster(d *schema.ResourceData) bool { + _, ok := d.GetOk("master_instance_name") + return !ok +} + +func sqlDatabaseInstanceServiceNetworkPrecheck(d *schema.ResourceData, config *transport_tpg.Config, userAgent, network string) error { + log.Printf("[DEBUG] checking network %q for at least one service networking connection", network) + // This call requires projects.get permissions, which may not have been granted to the Terraform actor, + // particularly in shared VPC setups. 
Most will! But it's not strictly required. + serviceNetworkingNetworkName, err := servicenetworking.RetrieveServiceNetworkingNetworkName(d, config, network, userAgent) + if err != nil { + var gerr *googleapi.Error + if errors.As(err, &gerr) { + log.Printf("[DEBUG] retrieved googleapi error while creating sn name for %q. precheck skipped. code %v and message: %s", network, gerr.Code, gerr.Body) + return nil + } + + return err + } + + response, err := config.NewServiceNetworkingClient(userAgent).Services.Connections.List("services/servicenetworking.googleapis.com").Network(serviceNetworkingNetworkName).Do() + if err != nil { + // It is possible that the actor creating the SQL Instance might not have permissions to call servicenetworking.services.connections.list + log.Printf("[WARNING] Failed to list Service Networking of the project. Skipped Service Networking precheck.") + return nil + } + + if len(response.Connections) < 1 { + return fmt.Errorf("Error, failed to create instance because the network doesn't have at least 1 private services connection. 
Please see https://cloud.google.com/sql/docs/mysql/private-ip#network_requirements for how to create this connection.") + } + + return nil +} + +func expandRestoreBackupContext(configured []interface{}) *sqladmin.RestoreBackupContext { + if len(configured) == 0 || configured[0] == nil { + return nil + } + + _rc := configured[0].(map[string]interface{}) + return &sqladmin.RestoreBackupContext{ + BackupRunId: int64(_rc["backup_run_id"].(int)), + InstanceId: _rc["instance_id"].(string), + Project: _rc["project"].(string), + } +} + +func sqlDatabaseInstanceRestoreFromBackup(d *schema.ResourceData, config *transport_tpg.Config, userAgent, project, instanceId string, r interface{}) error { + log.Printf("[DEBUG] Initiating SQL database instance backup restore") + restoreContext := r.([]interface{}) + + backupRequest := &sqladmin.InstancesRestoreBackupRequest{ + RestoreBackupContext: expandRestoreBackupContext(restoreContext), + } + + var op *sqladmin.Operation + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + op, operr = config.NewSqlAdminClient(userAgent).Instances.RestoreBackup(project, instanceId, backupRequest).Do() + return operr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsSqlOperationInProgressError}, + }) + if err != nil { + return fmt.Errorf("Error, failed to restore instance from backup %s: %s", instanceId, err) + } + + err = SqlAdminOperationWaitTime(config, op, project, "Restore Backup", userAgent, d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + return nil +} + +func caseDiffDashSuppress(_, old, new string, _ *schema.ResourceData) bool { + postReplaceNew := strings.Replace(new, "-", "_", -1) + return strings.ToUpper(postReplaceNew) == strings.ToUpper(old) +} + +func isMasterInstanceNameSet(_ context.Context, oldMasterInstanceName interface{}, newMasterInstanceName interface{}, _ interface{}) bool { + new := 
newMasterInstanceName.(string) + if new == "" { + return false + } + + return true +} + +func isReplicaPromoteRequested(_ context.Context, oldInstanceType interface{}, newInstanceType interface{}, _ interface{}) bool { + oldInstanceType = oldInstanceType.(string) + newInstanceType = newInstanceType.(string) + + if newInstanceType == "CLOUD_SQL_INSTANCE" && oldInstanceType == "READ_REPLICA_INSTANCE" { + return true + } + + return false +} + +func checkPromoteConfigurations(d *schema.ResourceData) error { + masterInstanceName := d.GetRawConfig().GetAttr("master_instance_name") + replicaConfiguration := d.GetRawConfig().GetAttr("replica_configuration").AsValueSlice() + + return validatePromoteConfigurations(masterInstanceName, replicaConfiguration) +} + +func checkPromoteConfigurationsAndUpdateDiff(_ context.Context, diff *schema.ResourceDiff, _ interface{}) error { + masterInstanceName := diff.GetRawConfig().GetAttr("master_instance_name") + replicaConfiguration := diff.GetRawConfig().GetAttr("replica_configuration").AsValueSlice() + + err := validatePromoteConfigurations(masterInstanceName, replicaConfiguration) + if (err != nil) { + return err + } + + err = diff.SetNew("master_instance_name", nil) + if err != nil { + return err + } + + err = diff.SetNew("replica_configuration", nil) + if err != nil { + return err + } + return nil +} + +func validatePromoteConfigurations(masterInstanceName cty.Value, replicaConfigurations []cty.Value) error { + if !masterInstanceName.IsNull() { + return fmt.Errorf("Replica promote configuration check failed. Please remove master_instance_name and try again.") + } + + if len(replicaConfigurations) != 0 { + return fmt.Errorf("Replica promote configuration check failed. 
Please remove replica_configuration and try again.") + } + return nil +} From ade0a1ec36b97ef2853044110fea0cdd4bec6383 Mon Sep 17 00:00:00 2001 From: bcreddy-gcp <123543489+bcreddy-gcp@users.noreply.github.com> Date: Tue, 18 Jun 2024 12:50:47 -0700 Subject: [PATCH 166/356] Resize disk in `google_workbench_instance` resource (#10972) --- mmv1/products/workbench/Instance.yaml | 4 - .../terraform/constants/workbench_instance.go | 45 +++++ .../pre_update/workbench_instance.go.erb | 9 +- .../resource_workbench_instance_test.go.erb | 177 ++++++++++++++++++ 4 files changed, 230 insertions(+), 5 deletions(-) diff --git a/mmv1/products/workbench/Instance.yaml b/mmv1/products/workbench/Instance.yaml index 2f431e1ce050..b139523ff190 100644 --- a/mmv1/products/workbench/Instance.yaml +++ b/mmv1/products/workbench/Instance.yaml @@ -271,12 +271,10 @@ properties: name: bootDisk default_from_api: true description: The definition of a boot disk. - immutable: true properties: - !ruby/object:Api::Type::String name: diskSizeGb default_from_api: true - immutable: true description: | Optional. The size of the boot disk in GB attached to this instance, up to a maximum of 64000 GB (64 TB). If not specified, this defaults to the @@ -315,13 +313,11 @@ properties: description: Data disks attached to the VM instance. Currently supports only one data disk. max_size: 1 default_from_api: true - immutable: true item_type: !ruby/object:Api::Type::NestedObject properties: - !ruby/object:Api::Type::String name: diskSizeGb default_from_api: true - immutable: true description: | Optional. The size of the disk in GB attached to this VM instance, up to a maximum of 64000 GB (64 TB). 
If not specified, this defaults to diff --git a/mmv1/templates/terraform/constants/workbench_instance.go b/mmv1/templates/terraform/constants/workbench_instance.go index 9616479a6365..f7e40a4d924d 100644 --- a/mmv1/templates/terraform/constants/workbench_instance.go +++ b/mmv1/templates/terraform/constants/workbench_instance.go @@ -194,4 +194,49 @@ func waitForWorkbenchOperation(config *transport_tpg.Config, d *schema.ResourceD } return nil } + +func resizeWorkbenchInstanceDisk(config *transport_tpg.Config, d *schema.ResourceData, project string, userAgent string, isBoot bool) (error) { + diskObj := make(map[string]interface{}) + var sizeString string + var diskKey string + if isBoot{ + sizeString = "gce_setup.0.boot_disk.0.disk_size_gb" + diskKey = "bootDisk" + } else{ + sizeString = "gce_setup.0.data_disks.0.disk_size_gb" + diskKey = "dataDisk" + } + disk := make(map[string]interface{}) + disk["diskSizeGb"] = d.Get(sizeString) + diskObj[diskKey] = disk + + + resizeUrl, err := tpgresource.ReplaceVars(d, config, "{{WorkbenchBasePath}}projects/{{project}}/locations/{{location}}/instances/{{name}}:resizeDisk") + if err != nil { + return err + } + + dRes, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + RawURL: resizeUrl, + UserAgent: userAgent, + Body: diskObj, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + + if err != nil { + return fmt.Errorf("Error resizing disk: %s", err) + } + + var opRes map[string]interface{} + err = WorkbenchOperationWaitTimeWithResponse( + config, dRes, &opRes, project, "Resizing disk", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return fmt.Errorf("Error resizing disk: %s", err) + } + + return nil +} <% end -%> diff --git a/mmv1/templates/terraform/pre_update/workbench_instance.go.erb b/mmv1/templates/terraform/pre_update/workbench_instance.go.erb index 0b873071374d..b1fb82cd28c9 100644 --- a/mmv1/templates/terraform/pre_update/workbench_instance.go.erb +++ 
b/mmv1/templates/terraform/pre_update/workbench_instance.go.erb @@ -27,7 +27,7 @@ if d.HasChange("gce_setup.0.metadata") { if d.HasChange("effective_labels") { newUpdateMask = append(newUpdateMask, "labels") } - +updateMask = newUpdateMask // Overwrite the previously set mask. url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) if err != nil { @@ -55,3 +55,10 @@ if stopInstance{ } else { log.Printf("[DEBUG] Workbench Instance %q need not be stopped for the update.", name) } + +if d.HasChange("gce_setup.0.boot_disk.0.disk_size_gb") { + resizeWorkbenchInstanceDisk(config, d, project, userAgent, true) +} +if d.HasChange("gce_setup.0.data_disks.0.disk_size_gb") { + resizeWorkbenchInstanceDisk(config, d, project, userAgent, false) +} diff --git a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb index 374b188fde54..c7717e77fc71 100644 --- a/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/workbench/resource_workbench_instance_test.go.erb @@ -79,6 +79,14 @@ resource "google_workbench_instance" "instance" { enable_integrity_monitoring = false } + boot_disk { + disk_size_gb = 310 + } + + data_disks { + disk_size_gb = 330 + } + metadata = { terraform = "true" } @@ -448,3 +456,172 @@ resource "google_workbench_instance" "instance" { } `, context) } + +func TestAccWorkbenchInstance_updateBootDisk(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + 
resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_updateBootDisk(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + }, + }) +} + +func TestAccWorkbenchInstance_updateDataDisk(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_updateDataDisk(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + }, + }) +} + +func TestAccWorkbenchInstance_updateBothDisks(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_updateBothDisks(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_updateBootDisk(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + gce_setup { + boot_disk { + disk_size_gb = 310 + } + } +} +`, context) +} + +func testAccWorkbenchInstance_updateDataDisk(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = 
"tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + gce_setup { + data_disks { + disk_size_gb = 330 + } + } +} +`, context) +} + +func testAccWorkbenchInstance_updateBothDisks(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + gce_setup { + boot_disk { + disk_size_gb = 310 + } + + data_disks { + disk_size_gb = 330 + } + } +} +`, context) +} From 9a15e62df673e203bded4c0cbd7d3009828337ba Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Tue, 18 Jun 2024 12:52:39 -0700 Subject: [PATCH 167/356] Skip condition field of iam resources (#10962) --- tools/diff-processor/detector/detector.go | 8 +++++++- tools/diff-processor/detector/detector_test.go | 7 +++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/tools/diff-processor/detector/detector.go b/tools/diff-processor/detector/detector.go index 92ff4f384420..8305aaac1407 100644 --- a/tools/diff-processor/detector/detector.go +++ b/tools/diff-processor/detector/detector.go @@ -51,6 +51,10 @@ func getChangedFieldsFromSchemaDiff(schemaDiff diff.SchemaDiff) map[string]Resou // Skip the project field. continue } + if strings.Contains(resource, "iam") && field == "condition" { + // Skip the condition field of iam resources because some iam resources do not support it. + continue + } if fieldDiff.New == nil { // Skip deleted fields. 
continue @@ -69,7 +73,9 @@ func getChangedFieldsFromSchemaDiff(schemaDiff diff.SchemaDiff) map[string]Resou resourceChanges[field] = &Field{Changed: true} } } - changedFields[resource] = resourceChanges + if len(resourceChanges) > 0 { + changedFields[resource] = resourceChanges + } } return changedFields } diff --git a/tools/diff-processor/detector/detector_test.go b/tools/diff-processor/detector/detector_test.go index 8c339d2694d3..60ad7739bc7f 100644 --- a/tools/diff-processor/detector/detector_test.go +++ b/tools/diff-processor/detector/detector_test.go @@ -43,6 +43,13 @@ func TestGetChangedFieldsFromSchemaDiff(t *testing.T) { }, }, }, + "iam_resource": diff.ResourceDiff{ + Fields: map[string]diff.FieldDiff{ + "condition": { + New: &schema.Schema{}, + }, + }, + }, }, changedFields: map[string]ResourceChanges{ "covered_resource": { From 89adfa77d478f937fce6c3121fd7e1414f6e0856 Mon Sep 17 00:00:00 2001 From: sahsagar-google <126025352+sahsagar-google@users.noreply.github.com> Date: Wed, 19 Jun 2024 02:57:04 -0700 Subject: [PATCH 168/356] Enabling empty value send for fleet default member config in google_gke_hub_feature (#10963) Co-authored-by: Shuya Ma <87669292+shuyama1@users.noreply.github.com> --- mmv1/products/gkehub2/Feature.yaml | 1 + .../resource_gke_hub_feature_test.go.erb | 43 +++++++++++++++++++ .../tests/data/example_gke_hub_feature.json | 3 +- 3 files changed, 46 insertions(+), 1 deletion(-) diff --git a/mmv1/products/gkehub2/Feature.yaml b/mmv1/products/gkehub2/Feature.yaml index 33d622d2ee0f..0dd00735c910 100644 --- a/mmv1/products/gkehub2/Feature.yaml +++ b/mmv1/products/gkehub2/Feature.yaml @@ -258,6 +258,7 @@ properties: - !ruby/object:Api::Type::NestedObject name: fleetDefaultMemberConfig description: Optional. Fleet Default Membership Configuration. 
+ send_empty_value: true properties: - !ruby/object:Api::Type::NestedObject name: mesh diff --git a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb index faed586ab947..990e5b3f93af 100644 --- a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb +++ b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb @@ -380,6 +380,22 @@ func TestAccGKEHubFeature_FleetDefaultMemberConfigServiceMesh(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshRemovalUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshReAddUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -416,6 +432,33 @@ resource "google_gke_hub_feature" "feature" { `, context) } +func testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshRemovalUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "servicemesh" + location = "global" + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.mesh] + project = google_project.project.project_id +} +`, context) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshReAddUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "servicemesh" + location = "global" + fleet_default_member_config { + mesh { + management = "MANAGEMENT_MANUAL" + } + } + depends_on = [google_project_service.anthos, 
google_project_service.gkehub, google_project_service.mesh] + project = google_project.project.project_id +} +`, context) +} + func TestAccGKEHubFeature_FleetDefaultMemberConfigConfigManagement(t *testing.T) { // VCR fails to handle batched project services acctest.SkipIfVcr(t) diff --git a/mmv1/third_party/tgc/tests/data/example_gke_hub_feature.json b/mmv1/third_party/tgc/tests/data/example_gke_hub_feature.json index 39a22f1c5b26..fc13dafd911b 100644 --- a/mmv1/third_party/tgc/tests/data/example_gke_hub_feature.json +++ b/mmv1/third_party/tgc/tests/data/example_gke_hub_feature.json @@ -9,6 +9,7 @@ "discovery_name": "Feature", "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", "data": { + "fleetDefaultMemberConfig": null, "labels": { "foo": "bar" } @@ -16,4 +17,4 @@ }, "ancestors": ["organizations/{{.OrgID}}"] } -] \ No newline at end of file +] From f63d714a92c325503ae097869418640487d6499a Mon Sep 17 00:00:00 2001 From: dfdossantos Date: Wed, 19 Jun 2024 10:19:30 +0000 Subject: [PATCH 169/356] Add support for ```logging.googleapis.com/LogBucket``` to TGC (#10980) --- mmv1/provider/terraform_tgc.rb | 6 +- mmv1/templates/tgc/resource_converters.go.erb | 8 +- .../tgc/logging_folder_bucket_config.go | 179 ++++++++++++++++++ .../tgc/logging_organization_bucket_config.go | 175 +++++++++++++++++ ...e_google_logging_folder_bucket_config.json | 41 ++++ ...ple_google_logging_folder_bucket_config.tf | 29 +++ ...le_logging_organization_bucket_config.json | 26 +++ ...ogle_logging_organization_bucket_config.tf | 24 +++ 8 files changed, 484 insertions(+), 4 deletions(-) create mode 100644 mmv1/third_party/tgc/logging_folder_bucket_config.go create mode 100644 mmv1/third_party/tgc/logging_organization_bucket_config.go create mode 100644 mmv1/third_party/tgc/tests/data/example_google_logging_folder_bucket_config.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_logging_folder_bucket_config.tf create mode 100644 
mmv1/third_party/tgc/tests/data/example_google_logging_organization_bucket_config.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_logging_organization_bucket_config.tf diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index 02f693eaf220..150b1c80a1ce 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -328,7 +328,11 @@ def copy_common_files(output_folder, generate_code, _generate_docs) ['converters/google/resources/appengine_application.go', 'third_party/tgc/appengine_application.go'], ['converters/google/resources/apikeys_key.go', - 'third_party/tgc/apikeys_key.go'] + 'third_party/tgc/apikeys_key.go'], + ['converters/google/resources/logging_folder_bucket_config.go', + 'third_party/tgc/logging_folder_bucket_config.go'], + ['converters/google/resources/logging_organization_bucket_config.go', + 'third_party/tgc/logging_organization_bucket_config.go'] ]) end diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index bebbed41d9e5..fd999cc3a5ad 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -101,9 +101,9 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_spanner_instance_iam_binding": {spanner.ResourceConverterSpannerInstanceIamBinding()}, "google_spanner_instance_iam_member": {spanner.ResourceConverterSpannerInstanceIamMember()}, "google_project_service": {resourceConverterServiceUsage()}, - "google_secret_manager_secret_version": {secretmanager.ResourceConverterSecretManagerSecretVersion()}, + "google_secret_manager_secret_version": {secretmanager.ResourceConverterSecretManagerSecretVersion()}, "google_pubsub_lite_reservation": {pubsublite.ResourceConverterPubsubLiteReservation()}, - "google_pubsub_lite_subscription": {pubsublite.ResourceConverterPubsubLiteSubscription()}, + "google_pubsub_lite_subscription": 
{pubsublite.ResourceConverterPubsubLiteSubscription()}, "google_pubsub_lite_topic": {pubsublite.ResourceConverterPubsubLiteTopic()}, "google_pubsub_schema": {pubsub.ResourceConverterPubsubSchema()}, "google_pubsub_subscription": {pubsub.ResourceConverterPubsubSubscription()}, @@ -114,7 +114,9 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_storage_bucket_iam_binding": {resourceConverterStorageBucketIamBinding()}, "google_storage_bucket_iam_member": {resourceConverterStorageBucketIamMember()}, "google_compute_node_group": {compute.ResourceConverterComputeNodeGroup()}, - "google_cloud_tasks_queue": {cloudtasks.ResourceConverterCloudTasksQueue()}, + "google_logging_folder_bucket_config": {resourceConverterLogFolderBucket()}, + "google_logging_organization_bucket_config": {resourceConverterLogOrganizationBucket()}, + "google_cloud_tasks_queue": {cloudtasks.ResourceConverterCloudTasksQueue()}, "google_pubsub_topic": {pubsub.ResourceConverterPubsubTopic()}, "google_kms_crypto_key": {kms.ResourceConverterKMSCryptoKey()}, "google_kms_key_ring": {kms.ResourceConverterKMSKeyRing()}, diff --git a/mmv1/third_party/tgc/logging_folder_bucket_config.go b/mmv1/third_party/tgc/logging_folder_bucket_config.go new file mode 100644 index 000000000000..564fc2555c9c --- /dev/null +++ b/mmv1/third_party/tgc/logging_folder_bucket_config.go @@ -0,0 +1,179 @@ +package google + +import ( + "reflect" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const logFolderBucketAssetType string = "logging.googleapis.com/LogBucket" + +func resourceConverterLogFolderBucket() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: logFolderBucketAssetType, + Convert: 
GetLogFolderBucketCaiObject, + } +} + +func GetLogFolderBucketCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//logging.googleapis.com/projects/{{project}}/locations/{{location}}/buckets/{{bucket_id}}") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetLogFolderBucketApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: logFolderBucketAssetType, + Resource: &cai.AssetResource{ + Version: "v2", + DiscoveryDocumentURI: "https://logging.googleapis.com/$discovery/rest?version=v2", + DiscoveryName: "LogBucket", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetLogFolderBucketApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := make(map[string]interface{}) + + folderProp, err := expandLogFolderBucketFolderId(d.Get("folder"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("folder"); !tpgresource.IsEmptyValue(reflect.ValueOf(folderProp)) && (ok || !reflect.DeepEqual(v, folderProp)) { + obj["id"] = folderProp + } + + nameProp, err := expandLogFolderBucketName(d.Get("name"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + bucketIdProp, err := expandLogFolderBucketBucketId(d.Get("bucket_id"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("bucket_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(bucketIdProp)) && (ok || !reflect.DeepEqual(v, bucketIdProp)) { + obj["bucketId"] = bucketIdProp + } + + locationProp, err := expandLogFolderBucketLocation(d.Get("location"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("location"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { + obj["location"] = locationProp + } + + descriptionProp, err := expandLogFolderBucketDescription(d.Get("description"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + retentionDaysProp, err := expandLogFolderBucketRetentionDays(d.Get("retention_days"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("retention_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(retentionDaysProp)) && (ok || !reflect.DeepEqual(v, retentionDaysProp)) { + obj["retentionDays"] = retentionDaysProp + } + + indexConfigsProp, err := expandLogFolderBucketIndexConfigs(d.Get("index_configs"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("index_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(indexConfigsProp)) && (ok || !reflect.DeepEqual(v, indexConfigsProp)) { + obj["indexConfigs"] = indexConfigsProp + } + + lifecycleStateProp, err := expandLogFolderBucketLifecycleState(d.Get("lifecycle_state"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("lifecycle_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(lifecycleStateProp)) && (ok || !reflect.DeepEqual(v, lifecycleStateProp)) { + obj["lifecycleState"] = lifecycleStateProp + } + + return obj, nil +} + +func expandLogFolderBucketFolderId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v, err := tpgresource.ReplaceVars(d, config, "folders/{{folder}}/locations/{{location}}/buckets/{{bucket_id}}") + if err != nil { + return nil, err + } + + return v, nil +} + +func expandLogFolderBucketName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandLogFolderBucketLifecycleState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogFolderBucketIndexConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFieldPath, err := expandLogFolderBucketFieldPath(original["field_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFieldPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fieldPath"] = transformedFieldPath + } + + transformedType, err := expandLogFolderBucketType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandLogFolderBucketType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogFolderBucketFieldPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogFolderBucketRetentionDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogFolderBucketDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogFolderBucketLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandLogFolderBucketBucketId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogFolderBucketFolder(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/tgc/logging_organization_bucket_config.go b/mmv1/third_party/tgc/logging_organization_bucket_config.go new file mode 100644 index 000000000000..c617d961665d --- /dev/null +++ b/mmv1/third_party/tgc/logging_organization_bucket_config.go @@ -0,0 +1,175 @@ +package google + +import ( + "reflect" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const logOrganizationBucketAssetType string = "logging.googleapis.com/LogBucket" + +func resourceConverterLogOrganizationBucket() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: logOrganizationBucketAssetType, + Convert: GetLogOrganizationBucketCaiObject, + } +} + +func GetLogOrganizationBucketCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//logging.googleapis.com/projects/{{project}}/locations/{{location}}/buckets/{{bucket_id}}") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetLogOrganizationBucketApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: logOrganizationBucketAssetType, + Resource: &cai.AssetResource{ + Version: "v2", + DiscoveryDocumentURI: "https://logging.googleapis.com/$discovery/rest?version=v2", + DiscoveryName: "LogBucket", + Data: obj, + }, + }}, nil + } else { + 
return []cai.Asset{}, err + } +} + +func GetLogOrganizationBucketApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := make(map[string]interface{}) + + organizationProp, err := expandLogOrganizationBucketOrganizationId(d.Get("organization"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("organization"); !tpgresource.IsEmptyValue(reflect.ValueOf(organizationProp)) && (ok || !reflect.DeepEqual(v, organizationProp)) { + obj["id"] = organizationProp + } + + nameProp, err := expandLogOrganizationBucketName(d.Get("name"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + bucketIdProp, err := expandLogOrganizationBucketBucketId(d.Get("bucket_id"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("bucket_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(bucketIdProp)) && (ok || !reflect.DeepEqual(v, bucketIdProp)) { + obj["bucketId"] = bucketIdProp + } + + locationProp, err := expandLogOrganizationBucketLocation(d.Get("location"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { + obj["location"] = locationProp + } + + descriptionProp, err := expandLogOrganizationBucketDescription(d.Get("description"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + retentionDaysProp, err := expandLogOrganizationBucketRetentionDays(d.Get("retention_days"), d, config) + if err != nil { + return nil, err + } else if v, ok := 
d.GetOkExists("retention_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(retentionDaysProp)) && (ok || !reflect.DeepEqual(v, retentionDaysProp)) { + obj["retentionDays"] = retentionDaysProp + } + + indexConfigsProp, err := expandLogOrganizationBucketIndexConfigs(d.Get("index_configs"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("index_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(indexConfigsProp)) && (ok || !reflect.DeepEqual(v, indexConfigsProp)) { + obj["indexConfigs"] = indexConfigsProp + } + + lifecycleStateProp, err := expandLogOrganizationBucketLifecycleState(d.Get("lifecycle_state"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("lifecycle_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(lifecycleStateProp)) && (ok || !reflect.DeepEqual(v, lifecycleStateProp)) { + obj["lifecycleState"] = lifecycleStateProp + } + + return obj, nil +} + +func expandLogOrganizationBucketOrganizationId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v, err := tpgresource.ReplaceVars(d, config, "organizations/{{organization}}/locations/{{location}}/buckets/{{bucket_id}}") + if err != nil { + return nil, err + } + + return v, nil +} + +func expandLogOrganizationBucketName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogOrganizationBucketLifecycleState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogOrganizationBucketIndexConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := 
make(map[string]interface{}) + + transformedFieldPath, err := expandLogOrganizationBucketFieldPath(original["field_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFieldPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fieldPath"] = transformedFieldPath + } + + transformedType, err := expandLogOrganizationBucketType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandLogOrganizationBucketType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogOrganizationBucketFieldPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogOrganizationBucketRetentionDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogOrganizationBucketDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogOrganizationBucketLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogOrganizationBucketBucketId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/tgc/tests/data/example_google_logging_folder_bucket_config.json b/mmv1/third_party/tgc/tests/data/example_google_logging_folder_bucket_config.json new file mode 100644 index 000000000000..a099bb75a681 --- /dev/null +++ 
b/mmv1/third_party/tgc/tests/data/example_google_logging_folder_bucket_config.json @@ -0,0 +1,41 @@ +[ + { + "name": "//cloudresourcemanager.googleapis.com/folders/placeholder-1mV6asRD", + "asset_type": "cloudresourcemanager.googleapis.com/Folder", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/compute/v1/rest", + "discovery_name": "Folder", + "parent": "//cloudresourcemanager.googleapis.com/organizations/{{.OrgID}}", + "data": { + "display_name": "some-folder-name", + "parent": "organizations/{{.OrgID}}" + } + }, + "ancestry_path": "organization/{{.OrgID}}" + }, + { + "name": "//logging.googleapis.com/projects/{{.Provider.project}}/locations/global/buckets/_Default", + "asset_type": "logging.googleapis.com/LogBucket", + "resource": { + "version": "v2", + "discovery_document_uri": "https://logging.googleapis.com/$discovery/rest?version=v2", + "discovery_name": "LogBucket", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "bucketId": "_Default", + "id": "folders//locations/global/buckets/_Default", + "indexConfigs": [ + { + "fieldPath": "jsonPayload.request.status", + "type": "INDEX_TYPE_STRING" + } + ], + "location": "global", + "retentionDays": 30 + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_logging_folder_bucket_config.tf b/mmv1/third_party/tgc/tests/data/example_google_logging_folder_bucket_config.tf new file mode 100644 index 000000000000..ef2e64b836b1 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_logging_folder_bucket_config.tf @@ -0,0 +1,29 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = 
"{{.Provider.credentials}}"{{end}} +} + +resource "google_folder" "default" { + display_name = "some-folder-name" + parent = "organizations/{{.OrgID}}" +} + +resource "google_logging_folder_bucket_config" "basic" { + folder = google_folder.default.name + location = "global" + retention_days = 30 + bucket_id = "_Default" + + index_configs { + field_path = "jsonPayload.request.status" + type = "INDEX_TYPE_STRING" + } +} \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_logging_organization_bucket_config.json b/mmv1/third_party/tgc/tests/data/example_google_logging_organization_bucket_config.json new file mode 100644 index 000000000000..481d8b955d87 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_logging_organization_bucket_config.json @@ -0,0 +1,26 @@ +[ + { + "name": "//logging.googleapis.com/projects/{{.Provider.project}}/locations/global/buckets/_Default", + "asset_type": "logging.googleapis.com/LogBucket", + "resource": { + "version": "v2", + "discovery_document_uri": "https://logging.googleapis.com/$discovery/rest?version=v2", + "discovery_name": "LogBucket", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "bucketId": "_Default", + "id": "organizations/{{.OrgID}}/locations/global/buckets/_Default", + "indexConfigs": [ + { + "fieldPath": "jsonPayload.request.status", + "type": "INDEX_TYPE_STRING" + } + ], + "location": "global", + "retentionDays": 30 + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_logging_organization_bucket_config.tf b/mmv1/third_party/tgc/tests/data/example_google_logging_organization_bucket_config.tf new file mode 100644 index 000000000000..1703b72d9451 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_logging_organization_bucket_config.tf @@ -0,0 +1,24 @@ 
+terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_logging_organization_bucket_config" "basic" { + organization = "12345" + location = "global" + retention_days = 30 + bucket_id = "_Default" + + index_configs { + field_path = "jsonPayload.request.status" + type = "INDEX_TYPE_STRING" + } +} \ No newline at end of file From 0b5921ecf8f3f313e470cfd0f60a1bff74973909 Mon Sep 17 00:00:00 2001 From: Salome Papiashvili Date: Thu, 20 Jun 2024 17:55:16 +0200 Subject: [PATCH 170/356] Updates in the documentation of google_composer_environment (#11006) --- .../docs/r/composer_environment.html.markdown | 33 ++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown b/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown index 330f9e76b800..31735da2cccb 100644 --- a/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/composer_environment.html.markdown @@ -47,6 +47,11 @@ will not be able to find or manage many of these underlying resources automatica resource "google_composer_environment" "test" { name = "example-composer-env" region = "us-central1" + config { + software_config { + image_version = "composer-1-airflow-2" + } + } } ``` @@ -415,14 +420,40 @@ The following arguments are supported: They cannot specify Apache Airflow software configuration overrides (they cannot match the regular expression `AIRFLOW__[A-Z0-9_]+__[A-Z0-9_]+`), and they cannot match any of the following reserved names: ``` + AIRFLOW_DATABASE_VERSION AIRFLOW_HOME - C_FORCE_ROOT + AIRFLOW_SRC_DIR + AIRFLOW_WEBSERVER + AUTO_GKE + CLOUDSDK_METRICS_ENVIRONMENT + CLOUD_LOGGING_ONLY + COMPOSER_ENVIRONMENT + 
COMPOSER_GKE_LOCATION + COMPOSER_GKE_NAME + COMPOSER_GKE_ZONE + COMPOSER_LOCATION + COMPOSER_OPERATION_UUID + COMPOSER_PYTHON_VERSION + COMPOSER_VERSION CONTAINER_NAME + C_FORCE_ROOT DAGS_FOLDER GCP_PROJECT + GCP_TENANT_PROJECT + GCSFUSE_EXTRACTED GCS_BUCKET GKE_CLUSTER_NAME + GKE_IN_TENANT + GOOGLE_APPLICATION_CREDENTIALS + MAJOR_VERSION + MINOR_VERSION + PATH + PIP_DISABLE_PIP_VERSION_CHECK + PORT + PROJECT_ID + PYTHONPYCACHEPREFIX SQL_DATABASE + SQL_HOST SQL_INSTANCE SQL_PASSWORD SQL_PROJECT From 0580617eab696140a6af28763ac2575db33dca9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Barbosa=20Sampaio?= Date: Thu, 20 Jun 2024 11:56:26 -0400 Subject: [PATCH 171/356] Bigtable: Adds ignore_warning to gc policy resource (#10982) --- .../bigtable/resource_bigtable_gc_policy.go | 17 +++- .../resource_bigtable_gc_policy_test.go | 80 +++++++++++++++++++ .../docs/r/bigtable_gc_policy.html.markdown | 3 + 3 files changed, 99 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy.go index 28a54ca82abf..c9d710371664 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy.go @@ -216,6 +216,16 @@ func ResourceBigtableGCPolicy() *schema.Resource { in a replicated instance. Possible values are: "ABANDON".`, ValidateFunc: validation.StringInSlice([]string{"ABANDON", ""}, false), }, + + "ignore_warnings": { + Type: schema.TypeBool, + Optional: true, + Description: `Allows ignoring warnings when updating the GC policy. This can be used + to increase the gc policy on replicated clusters. 
Doing this may make clusters be + inconsistent for a longer period of time, before using this make sure you understand + the risks listed at https://cloud.google.com/bigtable/docs/garbage-collection#increasing`, + Default: false, + }, }, UseJSONNumber: true, } @@ -253,9 +263,14 @@ func resourceBigtableGCPolicyUpsert(d *schema.ResourceData, meta interface{}) er tableName := d.Get("table").(string) columnFamily := d.Get("column_family").(string) + ignoreWarnings := d.Get("ignore_warnings").(bool) + updateOpts := []bigtable.GCPolicyOption{} + if ignoreWarnings { + updateOpts = append(updateOpts, bigtable.IgnoreWarnings()) + } retryFunc := func() error { - reqErr := c.SetGCPolicy(ctx, tableName, columnFamily, gcPolicy) + reqErr := c.SetGCPolicyWithOptions(ctx, tableName, columnFamily, gcPolicy, updateOpts...) return reqErr } // The default create timeout is 20 minutes. diff --git a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy_test.go b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy_test.go index 28a834acd86a..6176e4bbd64f 100644 --- a/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy_test.go +++ b/mmv1/third_party/terraform/services/bigtable/resource_bigtable_gc_policy_test.go @@ -39,6 +39,43 @@ func TestAccBigtableGCPolicy_basic(t *testing.T) { }) } +func TestAccBigtableGCPolicy_ignoreWarnings(t *testing.T) { + // bigtable instance does not use the shared HTTP client, this test creates an instance + acctest.SkipIfVcr(t) + t.Parallel() + + instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + tableName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + familyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cluster1Name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cluster2Name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + gcRulesOriginal := `{"rules":[{"max_age":"10h"}]}` + gcRulesNew := `{"rules":[{"max_age":"12h"}]}` + + 
acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckBigtableGCPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccBigtableGCPolicyIgnoreWarning(instanceName, tableName, familyName, cluster1Name, cluster2Name, gcRulesOriginal, false), + Check: resource.ComposeTestCheckFunc( + testAccBigtableGCPolicyExists(t, "google_bigtable_gc_policy.policy", true), + resource.TestCheckResourceAttr("google_bigtable_gc_policy.policy", "gc_rules", gcRulesOriginal), + ), + }, + { + Config: testAccBigtableGCPolicyIgnoreWarning(instanceName, tableName, familyName, cluster1Name, cluster2Name, gcRulesNew, true), + Check: resource.ComposeTestCheckFunc( + testAccBigtableGCPolicyExists(t, "google_bigtable_gc_policy.policy", true), + resource.TestCheckResourceAttr("google_bigtable_gc_policy.policy", "gc_rules", gcRulesNew), + ), + }, + }, + }) +} + func TestAccBigtableGCPolicy_abandoned(t *testing.T) { // bigtable instance does not use the shared HTTP client, this test creates an instance acctest.SkipIfVcr(t) @@ -563,6 +600,49 @@ resource "google_bigtable_gc_policy" "policy" { `, instanceName, instanceName, tableName, family, family) } +func testAccBigtableGCPolicyIgnoreWarning(instanceName, tableName, family string, cluster1 string, cluster2 string, gcRule string, ignoreWarnings bool) string { + return fmt.Sprintf(` +resource "google_bigtable_instance" "instance" { + name = "%s" + + cluster { + cluster_id = "%s" + num_nodes = 1 + zone = "us-central1-b" + } + + cluster { + cluster_id = "%s" + num_nodes = 1 + zone = "us-central1-c" + } + + deletion_protection = false +} + +resource "google_bigtable_table" "table" { + name = "%s" + instance_name = google_bigtable_instance.instance.id + + column_family { + family = "%s" + } +} + +resource "google_bigtable_gc_policy" "policy" { + instance_name = google_bigtable_instance.instance.id + table = 
google_bigtable_table.table.name + column_family = "%s" + gc_rules = < Date: Thu, 20 Jun 2024 21:45:24 +0530 Subject: [PATCH 172/356] add update support to big_query field on google_vertex_ai_feature_group resource (#10974) --- mmv1/products/vertexai/FeatureGroup.yaml | 4 +++- .../resource_vertex_ai_feature_group_test.go | 14 +++++++++++++- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/mmv1/products/vertexai/FeatureGroup.yaml b/mmv1/products/vertexai/FeatureGroup.yaml index 22ca5e4df9d9..635a97f554db 100644 --- a/mmv1/products/vertexai/FeatureGroup.yaml +++ b/mmv1/products/vertexai/FeatureGroup.yaml @@ -90,6 +90,8 @@ properties: - !ruby/object:Api::Type::NestedObject name: bigQuery description: Indicates that features for this group come from BigQuery Table/View. By default treats the source as a sparse time series source, which is required to have an entityId and a feature_timestamp column in the source. + update_mask_fields: + - 'bigQuery.entityIdColumns' properties: - !ruby/object:Api::Type::NestedObject name: bigQuerySource @@ -103,5 +105,5 @@ properties: description: 'BigQuery URI to a table, up to 2000 characters long. For example: `bq://projectId.bqDatasetId.bqTableId.`' - !ruby/object:Api::Type::Array name: entityIdColumns - description: Columns to construct entityId / row keys. Currently only supports 1 entity_id_column. If not provided defaults to entityId. + description: Columns to construct entityId / row keys. If not provided defaults to entityId. 
item_type: Api::Type::String diff --git a/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_group_test.go b/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_group_test.go index a31bb11f5bee..22db16aef282 100644 --- a/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_group_test.go +++ b/mmv1/third_party/terraform/services/vertexai/resource_vertex_ai_feature_group_test.go @@ -78,6 +78,12 @@ resource "google_bigquery_table" "sample_table" { "type": "STRING", "mode": "NULLABLE" }, + { + "name": "test_entity_column", + "type": "STRING", + "mode": "NULLABLE", + "description": "test secondary entity column" + }, { "name": "feature_timestamp", "type": "TIMESTAMP", @@ -103,7 +109,7 @@ func testAccVertexAIFeatureGroup_updated(context map[string]interface{}) string big_query_source { input_uri = "bq://${google_bigquery_table.sample_table.project}.${google_bigquery_table.sample_table.dataset_id}.${google_bigquery_table.sample_table.table_id}" } - entity_id_columns = ["feature_id"] + entity_id_columns = ["feature_id","test_entity_column"] } } @@ -126,6 +132,12 @@ resource "google_bigquery_table" "sample_table" { "type": "STRING", "mode": "NULLABLE" }, + { + "name": "test_entity_column", + "type": "STRING", + "mode": "NULLABLE", + "description": "test secondary entity column" + }, { "name": "feature_timestamp", "type": "TIMESTAMP", From 4d6a60e9882b2a50770ac510e2be0080f75c04e8 Mon Sep 17 00:00:00 2001 From: Shingo Furuyama Date: Fri, 21 Jun 2024 01:32:59 +0900 Subject: [PATCH 173/356] compute: promote `google_compute_network_attachment` GA (#10966) --- mmv1/products/compute/Instance.yaml | 1 - mmv1/products/compute/NetworkAttachment.yaml | 1 - .../terraform/examples/network_attachment_basic.tf.erb | 5 ----- .../examples/network_attachment_instance_usage.tf.erb | 4 ---- 4 files changed, 11 deletions(-) diff --git a/mmv1/products/compute/Instance.yaml b/mmv1/products/compute/Instance.yaml index 
e3912dac2c84..d316af142a77 100644 --- a/mmv1/products/compute/Instance.yaml +++ b/mmv1/products/compute/Instance.yaml @@ -526,7 +526,6 @@ properties: - !ruby/object:Api::Type::ResourceRef name: 'networkAttachment' resource: 'networkAttachment' - min_version: beta imports: 'selfLink' description: | The URL of the network attachment that this interface should connect to in the following format: diff --git a/mmv1/products/compute/NetworkAttachment.yaml b/mmv1/products/compute/NetworkAttachment.yaml index 95eb9d46418a..dcfe283e0ca6 100644 --- a/mmv1/products/compute/NetworkAttachment.yaml +++ b/mmv1/products/compute/NetworkAttachment.yaml @@ -13,7 +13,6 @@ --- !ruby/object:Api::Resource name: 'NetworkAttachment' -min_version: beta kind: 'compute#networkAttachment' description: | A network attachment is a resource that lets a producer Virtual Private Cloud (VPC) network initiate connections to a consumer VPC network through a Private Service Connect interface. diff --git a/mmv1/templates/terraform/examples/network_attachment_basic.tf.erb b/mmv1/templates/terraform/examples/network_attachment_basic.tf.erb index a0196c7a4b3f..3fe14b29adfe 100644 --- a/mmv1/templates/terraform/examples/network_attachment_basic.tf.erb +++ b/mmv1/templates/terraform/examples/network_attachment_basic.tf.erb @@ -1,5 +1,4 @@ resource "google_compute_network_attachment" "default" { - provider = google-beta name = "<%= ctx[:vars]['resource_name'] %>" region = "us-central1" description = "basic network attachment description" @@ -19,13 +18,11 @@ resource "google_compute_network_attachment" "default" { } resource "google_compute_network" "default" { - provider = google-beta name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false } resource "google_compute_subnetwork" "default" { - provider = google-beta name = "<%= ctx[:vars]['subnetwork_name'] %>" region = "us-central1" @@ -34,7 +31,6 @@ resource "google_compute_subnetwork" "default" { } resource "google_project" 
"rejected_producer_project" { - provider = google-beta project_id = "<%= ctx[:vars]['rejected_producer_project_name'] %>" name = "<%= ctx[:vars]['rejected_producer_project_name'] %>" org_id = "<%= ctx[:test_env_vars]['org_id'] %>" @@ -42,7 +38,6 @@ resource "google_project" "rejected_producer_project" { } resource "google_project" "accepted_producer_project" { - provider = google-beta project_id = "<%= ctx[:vars]['accepted_producer_project_name'] %>" name = "<%= ctx[:vars]['accepted_producer_project_name'] %>" org_id = "<%= ctx[:test_env_vars]['org_id'] %>" diff --git a/mmv1/templates/terraform/examples/network_attachment_instance_usage.tf.erb b/mmv1/templates/terraform/examples/network_attachment_instance_usage.tf.erb index 6e5963d781c8..b1696a1201fe 100644 --- a/mmv1/templates/terraform/examples/network_attachment_instance_usage.tf.erb +++ b/mmv1/templates/terraform/examples/network_attachment_instance_usage.tf.erb @@ -1,11 +1,9 @@ resource "google_compute_network" "default" { - provider = google-beta name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false } resource "google_compute_subnetwork" "default" { - provider = google-beta name = "<%= ctx[:vars]['subnetwork_name'] %>" region = "us-central1" @@ -14,7 +12,6 @@ resource "google_compute_subnetwork" "default" { } resource "google_compute_network_attachment" "<%= ctx[:primary_resource_id] %>" { - provider = google-beta name = "<%= ctx[:vars]['resource_name'] %>" region = "us-central1" description = "my basic network attachment" @@ -24,7 +21,6 @@ resource "google_compute_network_attachment" "<%= ctx[:primary_resource_id] %>" } resource "google_compute_instance" "default" { - provider = google-beta name = "<%= ctx[:vars]['instance_name'] %>" zone = "us-central1-a" machine_type = "e2-micro" From 90c9bfac6055d14ef2d33578147d5cb47fa96286 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Thu, 20 Jun 2024 09:35:23 -0700 Subject: [PATCH 174/356] Remove labels custom_diff functions when converting yaml 
files (#10998) --- mmv1/products/compute/go_Disk.yaml | 1 - mmv1/products/compute/go_Firewall.yaml | 1 + mmv1/products/compute/go_ForwardingRule.yaml | 1 - mmv1/products/compute/go_RegionDisk.yaml | 1 - mmv1/products/datafusion/go_Instance.yaml | 2 +- mmv1/templates/terraform/yaml_conversion.erb | 9 +++++++-- 6 files changed, 9 insertions(+), 6 deletions(-) diff --git a/mmv1/products/compute/go_Disk.yaml b/mmv1/products/compute/go_Disk.yaml index 1a1b1d207055..9b0f045ee0fe 100644 --- a/mmv1/products/compute/go_Disk.yaml +++ b/mmv1/products/compute/go_Disk.yaml @@ -72,7 +72,6 @@ custom_code: custom_diff: - 'customdiff.ForceNewIfChange("size", IsDiskShrinkage)' - 'hyperDiskIopsUpdateDiffSupress' - - 'tpgresource.SetLabelsDiff' examples: - name: 'disk_basic' primary_resource_id: 'default' diff --git a/mmv1/products/compute/go_Firewall.yaml b/mmv1/products/compute/go_Firewall.yaml index 8d820debf8d2..d67f8e1166ab 100644 --- a/mmv1/products/compute/go_Firewall.yaml +++ b/mmv1/products/compute/go_Firewall.yaml @@ -161,6 +161,7 @@ properties: description: | An optional description of this resource. Provide this property when you create the resource. 
+ send_empty_value: true - name: 'destinationRanges' type: Array description: | diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml index ef174df9b6f4..93399233a2fe 100644 --- a/mmv1/products/compute/go_ForwardingRule.yaml +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -50,7 +50,6 @@ custom_code: post_create: 'templates/terraform/post_create/go/labels.tmpl' custom_diff: - 'forwardingRuleCustomizeDiff' - - 'tpgresource.SetLabelsDiff' legacy_long_form_project: true examples: - name: 'internal_http_lb_with_mig_backend' diff --git a/mmv1/products/compute/go_RegionDisk.yaml b/mmv1/products/compute/go_RegionDisk.yaml index eb9eb2ec6181..3a4d7e04b8af 100644 --- a/mmv1/products/compute/go_RegionDisk.yaml +++ b/mmv1/products/compute/go_RegionDisk.yaml @@ -70,7 +70,6 @@ custom_code: custom_diff: - 'customdiff.ForceNewIfChange("size", IsDiskShrinkage)' - 'hyperDiskIopsUpdateDiffSupress' - - 'tpgresource.SetLabelsDiff' examples: - name: 'region_disk_basic' primary_resource_id: 'regiondisk' diff --git a/mmv1/products/datafusion/go_Instance.yaml b/mmv1/products/datafusion/go_Instance.yaml index fd14261d37be..f76d7bc4950f 100644 --- a/mmv1/products/datafusion/go_Instance.yaml +++ b/mmv1/products/datafusion/go_Instance.yaml @@ -305,6 +305,7 @@ properties: If accelerators are enabled it is possible a permadiff will be created with the Options field. Users will need to either manually update their state file to include these diffed options, or include the field in a [lifecycle ignore changes block](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#ignore_changes). 
item_type: + type: NestedObject properties: - name: 'acceleratorType' type: Enum @@ -323,4 +324,3 @@ properties: enum_values: - 'ENABLED' - 'DISABLED' - type: NestedObject diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 6cf23b2f7a84..590a2de8cc22 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -398,9 +398,14 @@ custom_code: test_check_destroy: '<%= object.convert_go_file( object.custom_code.test_check_destroy )%>' <% end -%> <% end -%> -<% unless object.custom_diff.empty? || (object.custom_diff.size == 1 && object.custom_diff.include?("tpgresource.SetLabelsDiff")) -%> +<% +custom_diff = object.custom_diff.reject { + |cdiff| cdiff == "tpgresource.SetLabelsDiff" || cdiff == "tpgresource.SetMetadataLabelsDiff" || cdiff == "tpgresource.SetAnnotationsDiff" || cdiff == "tpgresource.SetMetadataAnnotationsDiff" +} +-%> +<% unless custom_diff.empty? -%> custom_diff: -<% object.custom_diff.each do |cdiff| -%> +<% custom_diff.each do |cdiff| -%> - '<%= cdiff %>' <% end -%> <% end -%> From a31fba6699f082304231e48cc26db4ef621ebebd Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 20 Jun 2024 17:52:38 +0100 Subject: [PATCH 175/356] Make sweeper for google_vmwareengine_network handwritten, fix how it accesses data from the list operation, add looping through locations (#10994) --- mmv1/products/vmwareengine/Network.yaml | 4 + .../resource_vmwareengine_network_sweeper.go | 132 ++++++++++++++++++ 2 files changed, 136 insertions(+) create mode 100644 mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_network_sweeper.go diff --git a/mmv1/products/vmwareengine/Network.yaml b/mmv1/products/vmwareengine/Network.yaml index bc74de06cb59..196ef5e6cd2b 100644 --- a/mmv1/products/vmwareengine/Network.yaml +++ b/mmv1/products/vmwareengine/Network.yaml @@ -43,6 +43,10 @@ async: 
!ruby/object:Api::OpAsync import_format: ["projects/{{project}}/locations/{{location}}/vmwareEngineNetworks/{{name}}"] autogen_async: true + +# There is a handwritten sweeper that provides a list of locations to sweep +skip_sweeper: true + examples: - !ruby/object:Provider::Terraform::Examples name: "vmware_engine_network_standard" diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_network_sweeper.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_network_sweeper.go new file mode 100644 index 000000000000..8c5140376384 --- /dev/null +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_network_sweeper.go @@ -0,0 +1,132 @@ +package vmwareengine + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("VmwareengineNetwork", testSweepVmwareengineNetwork) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepVmwareengineNetwork(region string) error { + resourceName := "VmwareengineNetwork" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // List of location values includes: + // * global location + // * regions used for this resource type's acc tests in the past + // * the 'region' passed to the sweeper + 
locations := []string{region, "global", "southamerica-west1", "me-west1"} + log.Printf("[INFO][SWEEPER_LOG] Sweeping will include these locations: %v.", locations) + for _, location := range locations { + log.Printf("[INFO][SWEEPER_LOG] Beginning the process of sweeping location '%s'.", location) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": location, + "location": location, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/vmwareEngineNetworks", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue + } + + resourceList, ok := res["vmwareEngineNetworks"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/vmwareEngineNetworks/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + } + + return nil +} From d5b8fc2dd6490cc27bc6d5a1cf8584cffa3997e5 Mon Sep 17 00:00:00 2001 From: Riley Karson Date: Thu, 20 Jun 2024 09:57:15 -0700 Subject: [PATCH 176/356] Use `id` in getting started guide (#10987) --- .../docs/guides/getting_started.html.markdown | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mmv1/third_party/terraform/website/docs/guides/getting_started.html.markdown b/mmv1/third_party/terraform/website/docs/guides/getting_started.html.markdown index 8ecb74a85217..346d72b6c7d8 100644 --- a/mmv1/third_party/terraform/website/docs/guides/getting_started.html.markdown +++ 
b/mmv1/third_party/terraform/website/docs/guides/getting_started.html.markdown @@ -100,23 +100,23 @@ are used as a short way to identify resources, and a resource's display name in the Cloud Console will be the one defined in the `name` field. When linking resources in a Terraform config though, you'll primarily want to -use a different field, the `self_link` of a resource. Like `name`, nearly every -resource has a `self_link`. They look like: +use a different field, the `id` of a resource. Every Terraform resource has an +`id`. In the Google provider they generally look like: ``` -{{API base url}}/projects/{{your project}}/{{location type}}/{{location}}/{{resource type}}/{{name}} +projects/{{your project}}/{{location type}}/{{location}}/{{resource type}}/{{name}} ``` For example, the instance defined earlier in a project named `foo` will have -the `self_link`: +the `id`: ``` -https://www.googleapis.com/compute/v1/projects/foo/zones/us-central1-c/instances/terraform-instance +projects/foo/zones/us-central1-c/instances/terraform-instance ``` -A resource's `self_link` is a unique reference to that resource. When +A resource's `id` is a unique reference to that resource. When linking two resources in Terraform, you can use Terraform interpolation to -avoid typing out the self link! Let's use a `google_compute_network` to +avoid typing out the id! Let's use a `google_compute_network` to demonstrate. Add this block to your config: @@ -136,7 +136,7 @@ with a subnetwork in each region. 
Next, change the network of the network_interface { - # A default network is created for all GCP projects - network = "default" -+ network = google_compute_network.vpc_network.self_link ++ network = google_compute_network.vpc_network.id access_config { ``` @@ -211,7 +211,7 @@ resource "google_compute_instance" "vm_instance" { network_interface { # A default network is created for all GCP projects - network = google_compute_network.vpc_network.self_link + network = google_compute_network.vpc_network.id access_config { } } @@ -237,7 +237,7 @@ a virtual machine on Google Cloud Platform. The key concepts unique to GCP are: * and how to use a default `project` in your provider * What a resource being global, regional, or zonal means on GCP * and how to specify a default `region` and `zone` -* How GCP uses `name` and `self_link` to identify resources +* How GCP uses `name` and `id` to identify resources * How to add GCP service account credentials to Terraform Run `terraform destroy` to tear down your resources. 
From 12c48c9980c5001d2cae723de54485a324bd990c Mon Sep 17 00:00:00 2001 From: Aleksandr Averbukh Date: Thu, 20 Jun 2024 19:32:07 +0200 Subject: [PATCH 177/356] Fix resource_compute_shared_reservation_update encoder (#11007) --- mmv1/products/compute/Reservation.yaml | 1 + .../update_encoder/go/reservation.go.tmpl | 33 ++++++++++++++----- .../update_encoder/reservation.go.erb | 33 ++++++++++++++----- ..._compute_shared_reservation_update_test.go | 1 - 4 files changed, 51 insertions(+), 17 deletions(-) diff --git a/mmv1/products/compute/Reservation.yaml b/mmv1/products/compute/Reservation.yaml index 55285cfcbe91..eee584be87b5 100644 --- a/mmv1/products/compute/Reservation.yaml +++ b/mmv1/products/compute/Reservation.yaml @@ -141,6 +141,7 @@ properties: description: | Type of sharing for this shared-reservation default_from_api: true + immutable: true - !ruby/object:Api::Type::Map name: 'projectMap' description: | diff --git a/mmv1/templates/terraform/update_encoder/go/reservation.go.tmpl b/mmv1/templates/terraform/update_encoder/go/reservation.go.tmpl index 1d26fc9900fe..0aa16a8869a7 100644 --- a/mmv1/templates/terraform/update_encoder/go/reservation.go.tmpl +++ b/mmv1/templates/terraform/update_encoder/go/reservation.go.tmpl @@ -37,10 +37,27 @@ // Set project_map. 
projectMap := make(map[string]interface{}) old, new := d.GetChange("share_settings") - oldMap := old.([]interface{})[0].(map[string]interface{})["project_map"] - newMap := new.([]interface{})[0].(map[string]interface{})["project_map"] - before := oldMap.(*schema.Set) - after := newMap.(*schema.Set) + + var before *schema.Set + if oldSlice, ok := old.([]interface{}); ok && len(oldSlice) > 0 { + if oldMap, ok := oldSlice[0].(map[string]interface{})["project_map"]; ok { + before = oldMap.(*schema.Set) + } else { + before = schema.NewSet(schema.HashString, []interface{}{}) + } + } else { + before = schema.NewSet(schema.HashString, []interface{}{}) + } + var after *schema.Set + if newSlice, ok := new.([]interface{}); ok && len(newSlice) > 0 { + if newMap, ok := newSlice[0].(map[string]interface{})["project_map"]; ok { + after = newMap.(*schema.Set) + } else { + after = schema.NewSet(schema.HashString, []interface{}{}) + } + } else { + after = schema.NewSet(schema.HashString, []interface{}{}) + } for _, raw := range after.Difference(before).List() { original := raw.(map[string]interface{}) @@ -56,10 +73,10 @@ } projectMap[transformedId] = singleProject // add added projects to updateMask - if firstProject != true { - maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", original["project_id"]) + if !firstProject { + maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", original["id"]) } else { - maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", original["project_id"]) + maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", original["id"]) firstProject = false } decodedPath, _ := url.QueryUnescape(maskId) @@ -86,7 +103,7 @@ projectNum := project.ProjectNumber projectIdOrNum = fmt.Sprintf("%d", projectNum) } - if firstProject != true { + if !firstProject { maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", projectIdOrNum) } else { maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", projectIdOrNum) 
diff --git a/mmv1/templates/terraform/update_encoder/reservation.go.erb b/mmv1/templates/terraform/update_encoder/reservation.go.erb index 00d6e64107fa..cebe23bc9435 100644 --- a/mmv1/templates/terraform/update_encoder/reservation.go.erb +++ b/mmv1/templates/terraform/update_encoder/reservation.go.erb @@ -39,10 +39,27 @@ // Set project_map. projectMap := make(map[string]interface{}) old, new := d.GetChange("share_settings") - oldMap := old.([]interface{})[0].(map[string]interface{})["project_map"] - newMap := new.([]interface{})[0].(map[string]interface{})["project_map"] - before := oldMap.(*schema.Set) - after := newMap.(*schema.Set) + + var before *schema.Set + if oldSlice, ok := old.([]interface{}); ok && len(oldSlice) > 0 { + if oldMap, ok := oldSlice[0].(map[string]interface{})["project_map"]; ok { + before = oldMap.(*schema.Set) + } else { + before = schema.NewSet(schema.HashString, []interface{}{}) + } + } else { + before = schema.NewSet(schema.HashString, []interface{}{}) + } + var after *schema.Set + if newSlice, ok := new.([]interface{}); ok && len(newSlice) > 0 { + if newMap, ok := newSlice[0].(map[string]interface{})["project_map"]; ok { + after = newMap.(*schema.Set) + } else { + after = schema.NewSet(schema.HashString, []interface{}{}) + } + } else { + after = schema.NewSet(schema.HashString, []interface{}{}) + } for _, raw := range after.Difference(before).List() { original := raw.(map[string]interface{}) @@ -58,10 +75,10 @@ } projectMap[transformedId] = singleProject // add added projects to updateMask - if firstProject != true { - maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", original["project_id"]) + if !firstProject { + maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", original["id"]) } else { - maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", original["project_id"]) + maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", original["id"]) firstProject = false } decodedPath, _ := 
url.QueryUnescape(maskId) @@ -88,7 +105,7 @@ projectNum := project.ProjectNumber projectIdOrNum = fmt.Sprintf("%d", projectNum) } - if firstProject != true { + if !firstProject { maskId = fmt.Sprintf("%s%s", "&paths=shareSettings.projectMap.", projectIdOrNum) } else { maskId = fmt.Sprintf("%s%s", "?paths=shareSettings.projectMap.", projectIdOrNum) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_shared_reservation_update_test.go b/mmv1/third_party/terraform/services/compute/resource_compute_shared_reservation_update_test.go index 21685e85f191..e28f8d3def33 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_shared_reservation_update_test.go +++ b/mmv1/third_party/terraform/services/compute/resource_compute_shared_reservation_update_test.go @@ -9,7 +9,6 @@ import ( ) func TestAccComputeSharedReservation_update(t *testing.T) { - acctest.SkipIfVcr(t) // large number of parallel resources. t.Parallel() context := map[string]interface{}{ From c503a56dfae490edb5db6f8782066e0f8885590f Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Thu, 20 Jun 2024 10:36:13 -0700 Subject: [PATCH 178/356] Convert handwritten erb files (part 2) (#10997) --- mmv1/provider/terraform.go | 4 +- mmv1/template-converter.go | 2 +- .../constants/go/datastream_stream.go.tmpl | 2 +- .../terraform/constants/go/router_nat.go.tmpl | 4 +- .../go/active_directory_domain_trust.go.tmpl | 2 +- .../go/monitoring_uptime_check_config.go.tmpl | 4 +- .../privateca_certificate_509_config.go.tmpl | 2 +- .../go/reference_to_backend.tmpl | 4 +- ..._forward_ssh_connectivity_password.go.tmpl | 2 +- ...rward_ssh_connectivity_private_key.go.tmpl | 2 +- ...ion_profile_mysql_profile_password.go.tmpl | 2 +- ..._profile_ssl_config_ca_certificate.go.tmpl | 2 +- ...file_ssl_config_client_certificate.go.tmpl | 2 +- ...ysql_profile_ssl_config_client_key.go.tmpl | 2 +- ...on_profile_oracle_profile_password.go.tmpl | 2 +- ...rofile_postgresql_profile_password.go.tmpl | 2 +- 
...rofile_sql_server_profile_password.go.tmpl | 2 +- .../custom_flatten/go/guard_self_link.go.tmpl | 2 +- .../go/guard_self_link_array.go.tmpl | 2 +- .../privateca_certificate_509_config.go.tmpl | 2 +- .../go/region_backend_service.go.tmpl | 2 +- .../encoders/go/bigtable_app_profile.go.tmpl | 4 +- .../templates/terraform/encoders/go/disk.tmpl | 2 +- .../encoders/go/health_check_type.tmpl | 1 - .../go/region_backend_service.go.tmpl | 2 +- .../encoders/go/spanner_database.go.tmpl | 2 +- .../terraform/encoders/health_check_type.erb | 1 - .../go/bigquery_dataset_resource_tags.tf.tmpl | 41 + .../examples/go/tpu_node_basic.tf.tmpl | 4 +- .../examples/go/tpu_node_full.tf.tmpl | 8 +- .../examples/go/tpu_node_full_test.tf.tmpl | 12 +- .../examples/go/vpc_access_connector.tf.tmpl | 2 +- .../vpc_access_connector_shared_vpc.tf.tmpl | 7 +- .../post_update/go/workbench_instance.go.tmpl | 9 +- .../compute_region_network_endpoint.go.tmpl | 2 +- .../pre_update/go/workbench_instance.go.tmpl | 63 +- .../go/resource_compute_firewall_test.go | 34 + .../go/resource_dialogflowcx_agent_test.go | 138 + .../dns/go/data_source_dns_key_test.go | 178 ++ .../data_source_dns_managed_zone_test.go.tmpl | 59 + .../dns/go/data_source_dns_record_set_test.go | 54 + .../go/resource_dns_managed_zone_test.go.tmpl | 663 +++++ .../dns/go/resource_dns_policy_test.go | 72 + .../go/resource_dns_record_set_test.go.tmpl | 1186 ++++++++ ...urce_dns_response_policy_rule_test.go.tmpl | 197 ++ .../resource_dns_response_policy_test.go.tmpl | 210 ++ .../go/resource_eventarc_channel_test.go.tmpl | 207 ++ ...ventarc_google_channel_config_test.go.tmpl | 206 ++ .../go/resource_eventarc_trigger_test.go.tmpl | 239 ++ ...source_google_firebase_android_app.go.tmpl | 49 + ...google_firebase_android_app_config.go.tmpl | 153 + ...e_firebase_android_app_config_test.go.tmpl | 70 + ...e_google_firebase_android_app_test.go.tmpl | 62 + ...a_source_google_firebase_apple_app.go.tmpl | 49 + ...e_google_firebase_apple_app_config.go.tmpl 
| 153 + ...gle_firebase_apple_app_config_test.go.tmpl | 85 + ...rce_google_firebase_apple_app_test.go.tmpl | 64 + ...ata_source_google_firebase_web_app.go.tmpl | 49 + ...rce_google_firebase_web_app_config.go.tmpl | 205 ++ ...ource_google_firebase_web_app_test.go.tmpl | 58 + ...e_firebase_android_app_update_test.go.tmpl | 85 + ...rce_firebase_apple_app_update_test.go.tmpl | 81 + .../go/resource_firebase_project_test.go.tmpl | 58 + .../go/resource_firebase_web_app_test.go.tmpl | 186 ++ ...e_app_check_app_attest_config_test.go.tmpl | 61 + ...irebase_app_check_debug_token_test.go.tmpl | 87 + ...app_check_device_check_config_test.go.tmpl | 62 + ...p_check_play_integrity_config_test.go.tmpl | 60 + ...k_recaptcha_enterprise_config_test.go.tmpl | 59 + ...app_check_recaptcha_v3_config_test.go.tmpl | 59 + ...base_app_check_service_config_test.go.tmpl | 137 + ...ce_firebase_database_instance_test.go.tmpl | 74 + ..._firebase_extensions_instance_test.go.tmpl | 149 + ...ce_google_firebase_hosting_channel.go.tmpl | 47 + ...ogle_firebase_hosting_channel_test.go.tmpl | 60 + ...urce_firebase_hosting_channel_test.go.tmpl | 196 ++ ...irebase_hosting_custom_domain_test.go.tmpl | 81 + ...esource_firebase_hosting_site_test.go.tmpl | 82 + ...resource_firestore_database_update_test.go | 151 + .../resource_gke_backup_backup_plan_test.go | 366 +++ .../resource_gke_backup_restore_plan_test.go | 207 ++ ...ce_gke_hub_feature_membership_test.go.tmpl | 1438 +++++++++ .../gkehub2/go/iam_gke_hub_feature_test.go | 316 ++ .../go/resource_gke_hub_feature_test.go.tmpl | 974 ++++++ .../go/resource_gke_hub_fleet_test.go.tmpl | 180 ++ .../gkeonprem/go/gkeonprem_operation.go | 145 + ...ource_gkeonprem_bare_metal_cluster_test.go | 575 ++++ ...rce_gkeonprem_bare_metal_node_pool_test.go | 226 ++ .../resource_gkeonprem_vmware_cluster_test.go | 484 +++ ...esource_gkeonprem_vmware_node_pool_test.go | 212 ++ ...esource_healthcare_fhir_store_test.go.tmpl | 232 ++ ...ource_healthcare_hl7_v2_store_test.go.tmpl | 311 
++ .../iam2/go/resource_iam_deny_policy_test.go | 220 ++ ..._source_iam_workload_identity_pool.go.tmpl | 44 + ...am_workload_identity_pool_provider.go.tmpl | 45 + ...rkload_identity_pool_provider_test.go.tmpl | 61 + ...ce_iam_workload_identity_pool_test.go.tmpl | 47 + ...iam_workload_identity_pool_id_test.go.tmpl | 36 + ...oad_identity_pool_provider_id_test.go.tmpl | 36 + ...rkload_identity_pool_provider_test.go.tmpl | 242 ++ ...ce_iam_workload_identity_pool_test.go.tmpl | 101 + ...e_iam_workforce_pool_provider_test.go.tmpl | 621 ++++ .../go/resource_iam_workforce_pool_test.go | 138 + ...m_workforce_pool_workforce_pool_id_test.go | 35 + ...ce_pool_workforce_pool_provider_id_test.go | 33 + ...ource_google_kms_secret_asymmetric.go.tmpl | 156 + ..._google_kms_secret_asymmetric_test.go.tmpl | 159 + .../kms/go/iam_kms_crypto_key.go.tmpl | 110 + .../kms/go/iam_kms_crypto_key_test.go.tmpl | 717 +++++ .../services/kms/go/iam_kms_key_ring.go.tmpl | 129 + .../kms/go/iam_kms_key_ring_test.go.tmpl | 593 ++++ ...esource_managed_kafka_cluster_test.go.tmpl | 110 + .../resource_managed_kafka_topic_test.go.tmpl | 124 + ...e_network_connectivity_hub_sweeper.go.tmpl | 126 + ...network_connectivity_spoke_sweeper.go.tmpl | 126 + ...security_authorization_policy_test.go.tmpl | 83 + ...rk_security_client_tls_policy_test.go.tmpl | 96 + ...firewall_endpoint_association_test.go.tmpl | 214 ++ ...rk_security_firewall_endpoint_test.go.tmpl | 119 + ...rk_security_security_profile_group_test.go | 96 + ..._network_security_security_profile_test.go | 87 + ...rk_security_server_tls_policy_test.go.tmpl | 81 + ...ecurity_tls_inspection_policy_test.go.tmpl | 380 +++ ...work_services_endpoint_policy_test.go.tmpl | 92 + ...e_network_services_grpc_route_test.go.tmpl | 133 + ...e_network_services_http_route_test.go.tmpl | 85 + ...esource_network_services_mesh_test.go.tmpl | 65 + ...work_services_service_binding_test.go.tmpl | 60 + ..._services_service_lb_policies_test.go.tmpl | 78 + 
...ce_network_services_tcp_route_test.go.tmpl | 120 + ...ce_network_services_tls_route_test.go.tmpl | 110 + ...esource_notebooks_environment_test.go.tmpl | 46 + ..._notebooks_instance_container_test.go.tmpl | 53 + ...source_notebooks_instance_gpu_test.go.tmpl | 58 + ...urce_notebooks_instance_state_test.go.tmpl | 84 + .../go/resource_notebooks_instance_test.go | 152 + .../go/resource_notebooks_runtime_test.go | 111 + ...ource_org_policy_custom_constraint_test.go | 77 + ...s_config_os_policy_assignment_test.go.tmpl | 256 ++ ...source_parallelstore_instance_test.go.tmpl | 137 + ...ed_access_manager_entitlement_test.go.tmpl | 136 + .../go/resource_redis_cluster_test.go.tmpl | 299 ++ .../data_source_runtimeconfig_config.go.tmpl | 43 + ...a_source_runtimeconfig_config_test.go.tmpl | 43 + ...data_source_runtimeconfig_variable.go.tmpl | 46 + ...source_runtimeconfig_variable_test.go.tmpl | 50 + .../go/resource_runtimeconfig_config.go.tmpl | 207 ++ ...resource_runtimeconfig_config_test.go.tmpl | 179 ++ .../resource_runtimeconfig_variable.go.tmpl | 253 ++ ...source_runtimeconfig_variable_test.go.tmpl | 243 ++ .../go/iam_secret_manager_secret_test.go | 118 + .../go/resource_secret_manager_secret_test.go | 1222 ++++++++ ...urce_secret_manager_secret_version_test.go | 97 + ..._security_scanner_scan_config_test.go.tmpl | 76 + ...ce_service_directory_endpoint_test.go.tmpl | 105 + ...e_service_directory_namespace_test.go.tmpl | 83 + ...rce_service_directory_service_test.go.tmpl | 92 + ...usage_consumer_quota_override_test.go.tmpl | 55 + .../go/resource_spanner_database_test.go.tmpl | 608 ++++ .../storage/go/resource_storage_bucket.go | 1903 ++++++++++++ .../go/resource_storage_bucket_test.go | 2636 +++++++++++++++++ .../go/resource_storage_transfer_job.go.tmpl | 1313 ++++++++ ...esource_tags_location_tag_bindings.go.tmpl | 388 +++ ...ta_source_tpu_v2_accelerator_types.go.tmpl | 95 + ...urce_tpu_v2_accelerator_types_test.go.tmpl | 72 + ...ata_source_tpu_v2_runtime_versions.go.tmpl 
| 95 + ...ource_tpu_v2_runtime_versions_test.go.tmpl | 72 + .../tpuv2/go/resource_tpu_v2_vm_test.go.tmpl | 168 ++ .../go/iam_vertex_endpoint_test.go.tmpl | 363 +++ ...urce_vertex_ai_metadata_store_test.go.tmpl | 93 + .../vertexai/go/vertex_ai_operation.go.tmpl | 77 + ...workbench_instance_shielded_config_test.go | 228 ++ .../go/resource_workbench_instance_test.go | 626 ++++ .../go/resource_workflows_workflow_test.go | 222 ++ ...kstations_workstation_cluster_test.go.tmpl | 151 + ...rkstations_workstation_config_test.go.tmpl | 1295 ++++++++ ...urce_workstations_workstation_test.go.tmpl | 150 + 177 files changed, 32800 insertions(+), 75 deletions(-) create mode 100644 mmv1/templates/terraform/examples/go/bigquery_dataset_resource_tags.tf.tmpl create mode 100644 mmv1/third_party/terraform/services/dialogflowcx/go/resource_dialogflowcx_agent_test.go create mode 100644 mmv1/third_party/terraform/services/dns/go/data_source_dns_key_test.go create mode 100644 mmv1/third_party/terraform/services/dns/go/data_source_dns_managed_zone_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dns/go/data_source_dns_record_set_test.go create mode 100644 mmv1/third_party/terraform/services/dns/go/resource_dns_managed_zone_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dns/go/resource_dns_policy_test.go create mode 100644 mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dns/go/resource_dns_response_policy_rule_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/dns/go/resource_dns_response_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_channel_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_google_channel_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_trigger_test.go.tmpl create mode 
100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_config.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app_config.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/resource_firebase_android_app_update_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/resource_firebase_apple_app_update_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/resource_firebase_project_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebase/go/resource_firebase_web_app_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_app_attest_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_debug_token_test.go.tmpl 
create mode 100644 mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_device_check_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_play_integrity_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_recaptcha_enterprise_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_recaptcha_v3_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_service_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebasedatabase/go/resource_firebase_database_instance_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebaseextensions/go/resource_firebase_extensions_instance_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebasehosting/go/data_source_google_firebase_hosting_channel.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebasehosting/go/data_source_google_firebase_hosting_channel_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_channel_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_custom_domain_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_site_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/firestore/go/resource_firestore_database_update_test.go create mode 100644 mmv1/third_party/terraform/services/gkebackup/go/resource_gke_backup_backup_plan_test.go create mode 100644 mmv1/third_party/terraform/services/gkebackup/go/resource_gke_backup_restore_plan_test.go create mode 100644 
mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub2/go/iam_gke_hub_feature_test.go create mode 100644 mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_fleet_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/gkeonprem/go/gkeonprem_operation.go create mode 100644 mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_bare_metal_cluster_test.go create mode 100644 mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_bare_metal_node_pool_test.go create mode 100644 mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_vmware_cluster_test.go create mode 100644 mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_vmware_node_pool_test.go create mode 100644 mmv1/third_party/terraform/services/healthcare/go/resource_healthcare_fhir_store_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/healthcare/go/resource_healthcare_hl7_v2_store_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go create mode 100644 mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool.go.tmpl create mode 100644 mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_provider.go.tmpl create mode 100644 mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_provider_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_id_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_id_test.go.tmpl create mode 
100644 mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_provider_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_test.go create mode 100644 mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_workforce_pool_id_test.go create mode 100644 mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_workforce_pool_provider_id_test.go create mode 100644 mmv1/third_party/terraform/services/kms/go/data_source_google_kms_secret_asymmetric.go.tmpl create mode 100644 mmv1/third_party/terraform/services/kms/go/data_source_google_kms_secret_asymmetric_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/kms/go/iam_kms_crypto_key.go.tmpl create mode 100644 mmv1/third_party/terraform/services/kms/go/iam_kms_crypto_key_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/kms/go/iam_kms_key_ring.go.tmpl create mode 100644 mmv1/third_party/terraform/services/kms/go/iam_kms_key_ring_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/managedkafka/go/resource_managed_kafka_cluster_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/managedkafka/go/resource_managed_kafka_topic_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkconnectivity/go/resource_network_connectivity_hub_sweeper.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkconnectivity/go/resource_network_connectivity_spoke_sweeper.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_authorization_policy_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_firewall_endpoint_association_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_firewall_endpoint_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_security_profile_group_test.go create mode 100644 mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_security_profile_test.go create mode 100644 mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_server_tls_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_tls_inspection_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkservices/go/resource_network_services_endpoint_policy_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkservices/go/resource_network_services_grpc_route_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkservices/go/resource_network_services_http_route_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkservices/go/resource_network_services_mesh_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkservices/go/resource_network_services_service_binding_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkservices/go/resource_network_services_service_lb_policies_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tls_route_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_environment_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_container_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_gpu_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_state_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_test.go create mode 100644 mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_runtime_test.go create mode 100644 mmv1/third_party/terraform/services/orgpolicy/go/resource_org_policy_custom_constraint_test.go create mode 100644 mmv1/third_party/terraform/services/osconfig/go/resource_os_config_os_policy_assignment_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/privilegedaccessmanager/go/resource_privileged_access_manager_entitlement_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_config.go.tmpl create mode 100644 mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_variable.go.tmpl create mode 100644 mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_variable_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_config.go.tmpl create mode 100644 mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_config_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_variable.go.tmpl create mode 100644 mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_variable_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/secretmanager/go/iam_secret_manager_secret_test.go create mode 100644 mmv1/third_party/terraform/services/secretmanager/go/resource_secret_manager_secret_test.go create mode 100644 mmv1/third_party/terraform/services/secretmanager/go/resource_secret_manager_secret_version_test.go create mode 100644 mmv1/third_party/terraform/services/securityscanner/go/resource_security_scanner_scan_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_endpoint_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_namespace_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_service_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/serviceusage/go/resource_service_usage_consumer_quota_override_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/spanner/go/resource_spanner_database_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go create mode 100644 mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go create mode 100644 mmv1/third_party/terraform/services/storagetransfer/go/resource_storage_transfer_job.go.tmpl create mode 100644 mmv1/third_party/terraform/services/tags/go/resource_tags_location_tag_bindings.go.tmpl create mode 100644 mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_accelerator_types.go.tmpl create mode 100644 mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_accelerator_types_test.go.tmpl create mode 100644 
mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_runtime_versions.go.tmpl create mode 100644 mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_runtime_versions_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/tpuv2/go/resource_tpu_v2_vm_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/vertexai/go/iam_vertex_endpoint_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/vertexai/go/resource_vertex_ai_metadata_store_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/vertexai/go/vertex_ai_operation.go.tmpl create mode 100644 mmv1/third_party/terraform/services/workbench/go/resource_workbench_instance_shielded_config_test.go create mode 100644 mmv1/third_party/terraform/services/workbench/go/resource_workbench_instance_test.go create mode 100644 mmv1/third_party/terraform/services/workflows/go/resource_workflows_workflow_test.go create mode 100644 mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_cluster_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl create mode 100644 mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_test.go.tmpl diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index 893c98c36587..febaa113a050 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -332,7 +332,7 @@ func (t Terraform) getCopyFilesInFolder(folderPath, targetDir string) map[string m := make(map[string]string, 0) filepath.WalkDir(folderPath, func(path string, di fs.DirEntry, err error) error { if !di.IsDir() && !strings.HasSuffix(di.Name(), ".tmpl") && !strings.HasSuffix(di.Name(), ".erb") { - fname := strings.TrimPrefix(path, "third_party/terraform/") + fname := strings.TrimPrefix(strings.Replace(path, "/go/", "/", 1), "third_party/terraform/") target := fname if targetDir != "." 
{ target = fmt.Sprintf("%s/%s", targetDir, fname) @@ -438,7 +438,7 @@ func (t Terraform) getCompileFilesInFolder(folderPath, targetDir string) map[str m := make(map[string]string, 0) filepath.WalkDir(folderPath, func(path string, di fs.DirEntry, err error) error { if !di.IsDir() && strings.HasSuffix(di.Name(), ".tmpl") { - fname := strings.TrimPrefix(path, "third_party/terraform/") + fname := strings.TrimPrefix(strings.Replace(path, "/go/", "/", 1), "third_party/terraform/") fname = strings.TrimSuffix(fname, ".tmpl") target := fname if targetDir != "" { diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index cab87cd86b63..fd09ad261b0a 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -282,7 +282,7 @@ func replace(data []byte) []byte { if err != nil { log.Fatalf("Cannot compile the regular expression: %v", err) } - data = r.ReplaceAll(data, []byte(`{{- if eq $.Name "Disk" }}`)) + data = r.ReplaceAll(data, []byte(`{{ if eq $.Name "Disk" -}}`)) // Replace <% elsif object.name == 'RegionDisk' -%> r, err = regexp.Compile(`<% elsif object.name == 'RegionDisk' -%>`) diff --git a/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl b/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl index 0bc5c0e76850..a5f35834357a 100644 --- a/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl +++ b/mmv1/templates/terraform/constants/go/datastream_stream.go.tmpl @@ -76,4 +76,4 @@ func resourceDatastreamStreamDatabaseIdDiffSuppress(_, old, new string, _ *schem return old == new } -{{- end }} +{{ end }} diff --git a/mmv1/templates/terraform/constants/go/router_nat.go.tmpl b/mmv1/templates/terraform/constants/go/router_nat.go.tmpl index c40c3073acb8..f7550929c0ac 100644 --- a/mmv1/templates/terraform/constants/go/router_nat.go.tmpl +++ b/mmv1/templates/terraform/constants/go/router_nat.go.tmpl @@ -96,7 +96,7 @@ func computeRouterNatIPsHash(v interface{}) int { return 
schema.HashString(tpgresource.GetResourceNameFromSelfLink(val)) } -{{- if ne $.TargetVersionName "ga" }} +{{ if ne $.TargetVersionName `ga` -}} func computeRouterNatRulesSubnetHash(v interface{}) int { return computeRouterNatIPsHash(v) } @@ -145,7 +145,7 @@ func computeRouterNatRulesHash(v interface{}) int { } } - {{- if ne $.TargetVersionName "ga" }} + {{ if ne $.TargetVersionName `ga` -}} sourceNatActiveRanges := action["source_nat_active_ranges"] if sourceNatActiveRanges != nil { sourceNatActiveRangesSet := sourceNatActiveRanges.(*schema.Set) diff --git a/mmv1/templates/terraform/custom_delete/go/active_directory_domain_trust.go.tmpl b/mmv1/templates/terraform/custom_delete/go/active_directory_domain_trust.go.tmpl index dc2c6f62d125..2b9dfd7bbce6 100644 --- a/mmv1/templates/terraform/custom_delete/go/active_directory_domain_trust.go.tmpl +++ b/mmv1/templates/terraform/custom_delete/go/active_directory_domain_trust.go.tmpl @@ -8,7 +8,7 @@ return err } - {{- /* The generate DELETE method isn't including the {trust: } object in the response body thus custom_delete is needed */}} + {{/* The generate DELETE method isn't including the {trust: } object in the response body thus custom_delete is needed */ -}} obj := make(map[string]interface{}) targetDomainNameProp, err := expandNestedActiveDirectoryDomainTrustTargetDomainName(d.Get("target_domain_name"), d, config) diff --git a/mmv1/templates/terraform/custom_delete/go/monitoring_uptime_check_config.go.tmpl b/mmv1/templates/terraform/custom_delete/go/monitoring_uptime_check_config.go.tmpl index 190d76b338dd..30dc2bf22c6a 100644 --- a/mmv1/templates/terraform/custom_delete/go/monitoring_uptime_check_config.go.tmpl +++ b/mmv1/templates/terraform/custom_delete/go/monitoring_uptime_check_config.go.tmpl @@ -38,8 +38,8 @@ res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ }) -{{- /* The generated DELETE method returns a generic error for 400. 
- Need to include a help message about deleting associated Alert Policies. */}} +{{/* The generated DELETE method returns a generic error for 400. + Need to include a help message about deleting associated Alert Policies. */ -}} if err != nil { if transport_tpg.IsGoogleApiErrorWithCode(err, 400) { diff --git a/mmv1/templates/terraform/custom_expand/go/privateca_certificate_509_config.go.tmpl b/mmv1/templates/terraform/custom_expand/go/privateca_certificate_509_config.go.tmpl index c5b7c5b4a30e..deb1179b7cee 100644 --- a/mmv1/templates/terraform/custom_expand/go/privateca_certificate_509_config.go.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/privateca_certificate_509_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* See mmv1/third_party/terraform/utils/privateca_utils.go for the sub-expanders and explanation */}} +{{/* See mmv1/third_party/terraform/utils/privateca_utils.go for the sub-expanders and explanation */}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { if v == nil { return v, nil diff --git a/mmv1/templates/terraform/custom_expand/go/reference_to_backend.tmpl b/mmv1/templates/terraform/custom_expand/go/reference_to_backend.tmpl index bcdd41d6f9df..dac33816188e 100644 --- a/mmv1/templates/terraform/custom_expand/go/reference_to_backend.tmpl +++ b/mmv1/templates/terraform/custom_expand/go/reference_to_backend.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -{{- /* This provides the best long-form self link possible given the input. +{{/* This provides the best long-form self link possible given the input. If the input is a full URL including scheme, we return it unmodified https://compute.googleapis.com/v1/projects/foo/regions/bar/backendBuckets/baz -> (the same) If the input is a partial self-link, we return it with the compute base path in front. 
@@ -21,7 +21,7 @@ If the input is just project/region/name, region/name, or just name, we treat it like a backendService. baz -> https://compute.googleapis.com/v1/projects/provider-project/regions/provider-region/backendServices/baz bar/baz -> https://compute.googleapis.com/v1/projects/provider-project/regions/bar/backendServices/baz - foo/bar/baz -> https://compute.googleapis.com/v1/projects/foo/regions/bar/backendServices/baz */}} + foo/bar/baz -> https://compute.googleapis.com/v1/projects/foo/regions/bar/backendServices/baz */ -}} func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { // This method returns a full self link from whatever the input is. if v == nil || v.(string) == "" { diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_password.go.tmpl index 6686da377457..3d39974fbfe2 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_password.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -}} -{{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} +{{/* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("forward_ssh_connectivity.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_private_key.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_private_key.go.tmpl index b9ba7b80e025..e78406acd183 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_private_key.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_forward_ssh_connectivity_private_key.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -}} -{{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} +{{/* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("forward_ssh_connectivity.0.private_key") } diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_password.go.tmpl index a13a80c652ad..6b9fa6fdb940 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_password.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -{{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} +{{/* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_ca_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_ca_certificate.go.tmpl index 085c8a4017dc..16045abdbd41 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_ca_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_ca_certificate.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and 
limitations under the License. */ -}} -{{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} +{{/* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.ssl_config.0.ca_certificate") } diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_certificate.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_certificate.go.tmpl index 748ee3af5070..27439273695b 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_certificate.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_certificate.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -}} -{{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} +{{/* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.ssl_config.0.client_certificate") } diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_key.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_key.go.tmpl index c672cf97634d..d20130e621a5 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_key.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_mysql_profile_ssl_config_client_key.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -}} -{{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} +{{/* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("mysql_profile.0.ssl_config.0.client_key") } diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_oracle_profile_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_oracle_profile_password.go.tmpl index 6fc344c88f3b..dfd854d7a631 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_oracle_profile_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_oracle_profile_password.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -{{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} +{{/* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("oracle_profile.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_postgresql_profile_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_postgresql_profile_password.go.tmpl index c68160c71e7a..b4c7ac7f58da 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_postgresql_profile_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_postgresql_profile_password.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -}} -{{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} +{{/* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("postgresql_profile.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_sql_server_profile_password.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_sql_server_profile_password.go.tmpl index 2c39ddfa4edd..dfb1adb3079a 100644 --- a/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_sql_server_profile_password.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/datastream_connection_profile_sql_server_profile_password.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -{{- /* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} +{{/* Workaround for https://github.com/hashicorp/terraform-provider-google/issues/12410 */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { return d.Get("sql_server_profile.0.password") } diff --git a/mmv1/templates/terraform/custom_flatten/go/guard_self_link.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/guard_self_link.go.tmpl index b409691f8a1a..ebdfcd113dd2 100644 --- a/mmv1/templates/terraform/custom_flatten/go/guard_self_link.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/guard_self_link.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -{{- /* Not all self links behave like ResourceRef expects, eg they may expect a fully qualified url. 
In those +{{/* Not all self links behave like ResourceRef expects, eg they may expect a fully qualified url. In those cases, we need to manually define this flattener. */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { diff --git a/mmv1/templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl index e6772ff622f4..991d78e9eb36 100644 --- a/mmv1/templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/guard_self_link_array.go.tmpl @@ -10,7 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -{{- /* This should be used for multi-resource ref fields that can't be made to real resource refs yet */}} +{{/* This should be used for multi-resource ref fields that can't be made to real resource refs yet */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { return v diff --git a/mmv1/templates/terraform/custom_flatten/go/privateca_certificate_509_config.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/privateca_certificate_509_config.go.tmpl index d5b8bfa2a772..c08addd0f5e3 100644 --- a/mmv1/templates/terraform/custom_flatten/go/privateca_certificate_509_config.go.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/privateca_certificate_509_config.go.tmpl @@ -1,4 +1,4 @@ -{{- /* See mmv1/third_party/terraform/utils/privateca_utils.go for the sub-expanders and explanation */}} +{{/* See mmv1/third_party/terraform/utils/privateca_utils.go for the sub-expanders and explanation */}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil { v = make(map[string]interface{}) diff --git 
a/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl b/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl index 024fd0117bbe..560cd1243da7 100644 --- a/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl +++ b/mmv1/templates/terraform/decoders/go/region_backend_service.go.tmpl @@ -22,7 +22,7 @@ if ok && m["enabled"] == false { delete(res, "iap") } -{{- if ne $.TargetVersionName "ga" }} +{{ if ne $.TargetVersionName `ga` -}} // Since we add in a NONE subsetting policy, we need to remove it in some // cases for backwards compatibility with the config v, ok = res["subsetting"] diff --git a/mmv1/templates/terraform/encoders/go/bigtable_app_profile.go.tmpl b/mmv1/templates/terraform/encoders/go/bigtable_app_profile.go.tmpl index c08c105443be..848352f99c92 100644 --- a/mmv1/templates/terraform/encoders/go/bigtable_app_profile.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/bigtable_app_profile.go.tmpl @@ -10,9 +10,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} -{{- /* Because instance is a URL param only, it does not get expanded and +{{/* Because instance is a URL param only, it does not get expanded and the URL is constructed from ResourceData. Set it in - state and use a encoder instead of a field expander */}} + state and use a encoder instead of a field expander */ -}} // Instance is a URL parameter only, so replace self-link/path with resource name only. 
if err := d.Set("instance", tpgresource.GetResourceNameFromSelfLink(d.Get("instance").(string))); err != nil { return nil, fmt.Errorf("Error setting instance: %s", err) diff --git a/mmv1/templates/terraform/encoders/go/disk.tmpl b/mmv1/templates/terraform/encoders/go/disk.tmpl index d32ecae6a410..51901dcbf237 100644 --- a/mmv1/templates/terraform/encoders/go/disk.tmpl +++ b/mmv1/templates/terraform/encoders/go/disk.tmpl @@ -10,7 +10,7 @@ if err != nil { return nil, err } -{{- if eq $.Name "Disk" }} +{{ if eq $.Name "Disk" -}} if v, ok := d.GetOk("type"); ok { log.Printf("[DEBUG] Loading disk type: %s", v.(string)) diskType, err := readDiskType(config, d, v.(string)) diff --git a/mmv1/templates/terraform/encoders/go/health_check_type.tmpl b/mmv1/templates/terraform/encoders/go/health_check_type.tmpl index e58d074f46a3..e5d7cdae36b6 100644 --- a/mmv1/templates/terraform/encoders/go/health_check_type.tmpl +++ b/mmv1/templates/terraform/encoders/go/health_check_type.tmpl @@ -10,7 +10,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} - if _, ok := d.GetOk("http_health_check"); ok { hc := d.Get("http_health_check").([]interface{})[0] ps := hc.(map[string]interface{})["port_specification"] diff --git a/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl b/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl index dd54de781463..40010b8ac301 100644 --- a/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/region_backend_service.go.tmpl @@ -32,7 +32,7 @@ if d.Get("load_balancing_scheme").(string) == "EXTERNAL_MANAGED" || d.Get("load_ return obj, nil } -{{- if ne $.TargetVersionName "ga" }} +{{ if ne $.TargetVersionName `ga` -}} // To remove subsetting on an ILB, "NONE" must be specified. If subsetting // isn't specified, we set the value to NONE to make this use case work. 
_, ok := obj["subsetting"] diff --git a/mmv1/templates/terraform/encoders/go/spanner_database.go.tmpl b/mmv1/templates/terraform/encoders/go/spanner_database.go.tmpl index 0e8ae39b76cf..78cd8aba964d 100644 --- a/mmv1/templates/terraform/encoders/go/spanner_database.go.tmpl +++ b/mmv1/templates/terraform/encoders/go/spanner_database.go.tmpl @@ -10,7 +10,7 @@ log.Printf("[DEBUG] Preparing to create new Database. Any extra DDL statements w delete(obj, "name") delete(obj, "instance") -{{- /* These are added back in post-create, but do not remove for Validator. */}} +{{/* These are added back in post-create, but do not remove for Validator. */}} {{- if ne $.Compiler "terraformgoogleconversion-codegen" }} delete(obj, "versionRetentionPeriod") delete(obj, "extraStatements") diff --git a/mmv1/templates/terraform/encoders/health_check_type.erb b/mmv1/templates/terraform/encoders/health_check_type.erb index efedcc412be0..a3efcd50d134 100644 --- a/mmv1/templates/terraform/encoders/health_check_type.erb +++ b/mmv1/templates/terraform/encoders/health_check_type.erb @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-%> - if _, ok := d.GetOk("http_health_check"); ok { hc := d.Get("http_health_check").([]interface{})[0] ps := hc.(map[string]interface{})["port_specification"] diff --git a/mmv1/templates/terraform/examples/go/bigquery_dataset_resource_tags.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_dataset_resource_tags.tf.tmpl new file mode 100644 index 000000000000..b7366659c55e --- /dev/null +++ b/mmv1/templates/terraform/examples/go/bigquery_dataset_resource_tags.tf.tmpl @@ -0,0 +1,41 @@ +data "google_project" "project" { + provider = "google-beta" +} + +resource "google_tags_tag_key" "tag_key1" { + provider = "google-beta" + parent = "projects/${data.google_project.project.number}" + short_name = "{{index $.Vars "tag_key1"}}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = "google-beta" + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "{{index $.Vars "tag_value1"}}" +} + +resource "google_tags_tag_key" "tag_key2" { + provider = "google-beta" + parent = "projects/${data.google_project.project.number}" + short_name = "{{index $.Vars "tag_key2"}}" +} + +resource "google_tags_tag_value" "tag_value2" { + provider = "google-beta" + parent = "tagKeys/${google_tags_tag_key.tag_key2.name}" + short_name = "{{index $.Vars "tag_value2"}}" +} + +resource "google_bigquery_dataset" "{{$.PrimaryResourceId}}" { + provider = google-beta + + dataset_id = "{{index $.Vars "dataset_id"}}" + friendly_name = "test" + description = "This is a test description" + location = "EU" + + resource_tags = { + "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key1.short_name}" = "${google_tags_tag_value.tag_value1.short_name}" + "${data.google_project.project.project_id}/${google_tags_tag_key.tag_key2.short_name}" = "${google_tags_tag_value.tag_value2.short_name}" + } +} diff --git a/mmv1/templates/terraform/examples/go/tpu_node_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/tpu_node_basic.tf.tmpl index 
be56977b3a37..9f516fd9ad65 100644 --- a/mmv1/templates/terraform/examples/go/tpu_node_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/tpu_node_basic.tf.tmpl @@ -1,6 +1,6 @@ -{{- /* WARNING: cidr_block must not overlap with other existing TPU blocks +{{/* WARNING: cidr_block must not overlap with other existing TPU blocks Make sure if you change this value that it does not overlap with the - autogenerated examples. */}} + autogenerated examples. */ -}} data "google_tpu_tensorflow_versions" "available" { } diff --git a/mmv1/templates/terraform/examples/go/tpu_node_full.tf.tmpl b/mmv1/templates/terraform/examples/go/tpu_node_full.tf.tmpl index 26517fd56b85..4432f204ae80 100644 --- a/mmv1/templates/terraform/examples/go/tpu_node_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/tpu_node_full.tf.tmpl @@ -1,9 +1,9 @@ data "google_tpu_tensorflow_versions" "available" { } -{{- /* WARNING: cidr_block must not overlap with other existing TPU blocks +{{/* WARNING: cidr_block must not overlap with other existing TPU blocks Make sure if you change this value that it does not overlap with the - autogenerated examples. */}} + autogenerated examples. */ -}} resource "google_tpu_node" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "node_name"}}" @@ -15,10 +15,10 @@ resource "google_tpu_node" "{{$.PrimaryResourceId}}" { description = "Terraform Google Provider test TPU" use_service_networking = true -{{- /* We previously used a separate network resource here, but TPUs only allow using 50 +{{/* We previously used a separate network resource here, but TPUs only allow using 50 different network names, ever. This caused our tests to start failing, so just use the default network in order to still demonstrate using as many fields as - possible on the resource. */}} + possible on the resource. 
*/ -}} network = google_service_networking_connection.private_service_connection.network labels = { diff --git a/mmv1/templates/terraform/examples/go/tpu_node_full_test.tf.tmpl b/mmv1/templates/terraform/examples/go/tpu_node_full_test.tf.tmpl index 073a05285273..7f7090d678bd 100644 --- a/mmv1/templates/terraform/examples/go/tpu_node_full_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/tpu_node_full_test.tf.tmpl @@ -1,6 +1,6 @@ -{{- /* WARNING: cidr_block must not overlap with other existing TPU blocks +{{/* WARNING: cidr_block must not overlap with other existing TPU blocks Make sure if you change this value that it does not overlap with the - autogenerated examples. */}} + autogenerated examples. */ -}} resource "google_tpu_node" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "node_name"}}" @@ -8,18 +8,18 @@ resource "google_tpu_node" "{{$.PrimaryResourceId}}" { accelerator_type = "v3-8" -{{- /* We previously used the first available version from the +{{/* We previously used the first available version from the google_tpu_tensorflow_versions data source. However, this started to return a random set of versions which caused our tests to occasionally fail, so we pin - tensorflow_version to a specific version so that our tests pass reliably. */}} + tensorflow_version to a specific version so that our tests pass reliably. */ -}} tensorflow_version = "2.10.0" description = "Terraform Google Provider test TPU" use_service_networking = true -{{- /* We previously used a separate network resource here, but TPUs only allow using 50 +{{/* We previously used a separate network resource here, but TPUs only allow using 50 different network names, ever. This caused our tests to start failing, so just use the default network in order to still demonstrate using as many fields as - possible on the resource. */}} + possible on the resource. 
*/ -}} network = data.google_compute_network.network.id labels = { diff --git a/mmv1/templates/terraform/examples/go/vpc_access_connector.tf.tmpl b/mmv1/templates/terraform/examples/go/vpc_access_connector.tf.tmpl index dc3e00536625..bd34a18b1ca0 100644 --- a/mmv1/templates/terraform/examples/go/vpc_access_connector.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/vpc_access_connector.tf.tmpl @@ -1,5 +1,5 @@ resource "google_vpc_access_connector" "connector" { name = "{{index $.Vars "name"}}" ip_cidr_range = "10.8.0.0/28" - network = "default" + network = "{{index $.Vars "network_name"}}" } diff --git a/mmv1/templates/terraform/examples/go/vpc_access_connector_shared_vpc.tf.tmpl b/mmv1/templates/terraform/examples/go/vpc_access_connector_shared_vpc.tf.tmpl index 142bfca18406..b66e96ba8ab1 100644 --- a/mmv1/templates/terraform/examples/go/vpc_access_connector_shared_vpc.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/vpc_access_connector_shared_vpc.tf.tmpl @@ -10,10 +10,5 @@ resource "google_compute_subnetwork" "custom_test" { name = "{{index $.Vars "name"}}" ip_cidr_range = "10.2.0.0/28" region = "us-central1" - network = google_compute_network.custom_test.id -} - -resource "google_compute_network" "custom_test" { - name = "{{index $.Vars "name"}}" - auto_create_subnetworks = false + network = "{{index $.Vars "network_name"}}" } \ No newline at end of file diff --git a/mmv1/templates/terraform/post_update/go/workbench_instance.go.tmpl b/mmv1/templates/terraform/post_update/go/workbench_instance.go.tmpl index cd216f018387..604ee537a488 100644 --- a/mmv1/templates/terraform/post_update/go/workbench_instance.go.tmpl +++ b/mmv1/templates/terraform/post_update/go/workbench_instance.go.tmpl @@ -1,7 +1,7 @@ state := d.Get("state").(string) desired_state := d.Get("desired_state").(string) -if state != desired_state { +if state != desired_state || stopInstance{ verb := "start" if desired_state == "STOPPED" { verb = "stop" @@ -15,6 +15,13 @@ if state != desired_state { 
return fmt.Errorf("Error waiting to modify Workbench Instance state: %s", err) } + if verb == "start"{ + if err := waitForWorkbenchInstanceActive(d, config, d.Timeout(schema.TimeoutUpdate) - time.Minute); err != nil { + return fmt.Errorf("Workbench instance %q did not reach ACTIVE state: %q", d.Get("name").(string), err) + } + + } + } else { log.Printf("[DEBUG] Workbench Instance %q has state %q.", name, state) } diff --git a/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl b/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl index 3d0169cd742b..e17a021da2d6 100644 --- a/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl +++ b/mmv1/templates/terraform/pre_delete/go/compute_region_network_endpoint.go.tmpl @@ -27,7 +27,7 @@ if fqdnProp != "" { toDelete["fqdn"] = fqdnProp } -{{- if ne $.TargetVersionName "ga" }} +{{ if ne $.TargetVersionName `ga` -}} // Instance instanceProp, err := expandNestedComputeRegionNetworkEndpointInstance(d.Get("instance"), d, config) if err != nil { diff --git a/mmv1/templates/terraform/pre_update/go/workbench_instance.go.tmpl b/mmv1/templates/terraform/pre_update/go/workbench_instance.go.tmpl index 847a0bcd1311..b1fb82cd28c9 100644 --- a/mmv1/templates/terraform/pre_update/go/workbench_instance.go.tmpl +++ b/mmv1/templates/terraform/pre_update/go/workbench_instance.go.tmpl @@ -1,5 +1,41 @@ +// Build custom mask since the notebooks API does not support gce_setup as a valid mask +stopInstance := false +newUpdateMask := []string{} +if d.HasChange("gce_setup.0.machine_type") { + newUpdateMask = append(newUpdateMask, "gce_setup.machine_type") + stopInstance = true +} +if d.HasChange("gce_setup.0.accelerator_configs") { + newUpdateMask = append(newUpdateMask, "gce_setup.accelerator_configs") + stopInstance = true +} +if d.HasChange("gce_setup.0.shielded_instance_config.0.enable_secure_boot") { + newUpdateMask = append(newUpdateMask, 
"gce_setup.shielded_instance_config.enable_secure_boot") + stopInstance = true +} +if d.HasChange("gce_setup.0.shielded_instance_config.0.enable_vtpm") { + newUpdateMask = append(newUpdateMask, "gce_setup.shielded_instance_config.enable_vtpm") + stopInstance = true +} +if d.HasChange("gce_setup.0.shielded_instance_config.0.enable_integrity_monitoring") { + newUpdateMask = append(newUpdateMask, "gce_setup.shielded_instance_config.enable_integrity_monitoring") + stopInstance = true +} +if d.HasChange("gce_setup.0.metadata") { + newUpdateMask = append(newUpdateMask, "gceSetup.metadata") +} +if d.HasChange("effective_labels") { + newUpdateMask = append(newUpdateMask, "labels") +} +updateMask = newUpdateMask +// Overwrite the previously set mask. +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) +if err != nil { + return err +} + name := d.Get("name").(string) -if d.HasChange("gce_setup.0.machine_type") || d.HasChange("gce_setup.0.accelerator_configs") || d.HasChange("gce_setup.0.shielded_instance_config"){ +if stopInstance{ state := d.Get("state").(string) if state != "STOPPED" { @@ -20,26 +56,9 @@ if d.HasChange("gce_setup.0.machine_type") || d.HasChange("gce_setup.0.accelerat log.Printf("[DEBUG] Workbench Instance %q need not be stopped for the update.", name) } -// Build custom mask since the notebooks API does not support gce_setup as a valid mask -newUpdateMask := []string{} -if d.HasChange("gce_setup.0.machine_type") { - newUpdateMask = append(newUpdateMask, "gce_setup.machine_type") +if d.HasChange("gce_setup.0.boot_disk.0.disk_size_gb") { + resizeWorkbenchInstanceDisk(config, d, project, userAgent, true) } -if d.HasChange("gce_setup.0.accelerator_configs") { - newUpdateMask = append(newUpdateMask, "gce_setup.accelerator_configs") -} -if d.HasChange("gce_setup.0.shielded_instance_config") { - newUpdateMask = append(newUpdateMask, "gce_setup.shielded_instance_config") -} -if 
d.HasChange("gce_setup.0.metadata") { - newUpdateMask = append(newUpdateMask, "gceSetup.metadata") -} -if d.HasChange("effective_labels") { - newUpdateMask = append(newUpdateMask, "labels") -} - -// Overwrite the previously set mask. -url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(newUpdateMask, ",")}) -if err != nil { - return err +if d.HasChange("gce_setup.0.data_disks.0.disk_size_gb") { + resizeWorkbenchInstanceDisk(config, d, project, userAgent, false) } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_test.go b/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_test.go index bb0644a6f901..f729388d4361 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_test.go +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_firewall_test.go @@ -36,6 +36,17 @@ func TestAccComputeFirewall_update(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccComputeFirewall_nullDescription(networkName, firewallName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("google_compute_firewall.foobar", "description", ""), + ), + }, + { + ResourceName: "google_compute_firewall.foobar", + ImportState: true, + ImportStateVerify: true, + }, { Config: testAccComputeFirewall_basic(networkName, firewallName), }, @@ -391,6 +402,29 @@ resource "google_compute_firewall" "foobar" { `, network, firewall) } +func testAccComputeFirewall_nullDescription(network, firewall string) string { + return fmt.Sprintf(` +resource "google_compute_network" "foobar" { + name = "%s" + auto_create_subnetworks = false +} + +resource "google_compute_firewall" "foobar" { + name = "%s" + description = null + network = google_compute_network.foobar.self_link + source_tags = ["foo"] + target_tags = ["bar"] + + allow { + protocol = "tcp" + ports = ["80-255"] + } +} +`, network, firewall) +} + + func 
testAccComputeFirewall_priority(network, firewall string, priority int) string { return fmt.Sprintf(` resource "google_compute_network" "foobar" { diff --git a/mmv1/third_party/terraform/services/dialogflowcx/go/resource_dialogflowcx_agent_test.go b/mmv1/third_party/terraform/services/dialogflowcx/go/resource_dialogflowcx_agent_test.go new file mode 100644 index 000000000000..d3b757abfd6f --- /dev/null +++ b/mmv1/third_party/terraform/services/dialogflowcx/go/resource_dialogflowcx_agent_test.go @@ -0,0 +1,138 @@ +package dialogflowcx_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDialogflowCXAgent_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDialogflowCXAgent_basic(context), + }, + { + ResourceName: "google_dialogflow_cx_agent.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"git_integration_settings.0.github_settings.0.access_token"}, + }, + { + Config: testAccDialogflowCXAgent_full(context), + }, + { + ResourceName: "google_dialogflow_cx_agent.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"git_integration_settings.0.github_settings.0.access_token"}, + }, + { + Config: testAccDialogflowCXAgent_removeSettings(context), + }, + { + ResourceName: "google_dialogflow_cx_agent.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"git_integration_settings.0.github_settings.0.access_token"}, + }, + }, + }) +} + +func testAccDialogflowCXAgent_basic(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_dialogflow_cx_agent" "foobar" { + display_name = "tf-test-%{random_suffix}" + location = "global" + default_language_code = "en" + supported_language_codes = ["fr","de","es"] + time_zone = "America/New_York" + description = "Description 1." + avatar_uri = "https://storage.cloud.google.com/dialogflow-test-host-image/cloud-logo.png" + } + `, context) +} + +func testAccDialogflowCXAgent_full(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_storage_bucket" "bucket" { + name = "tf-test-dialogflowcx-bucket%{random_suffix}" + location = "US" + uniform_bucket_level_access = true + } + + resource "google_dialogflow_cx_agent" "foobar" { + display_name = "tf-test-%{random_suffix}update" + location = "global" + default_language_code = "en" + supported_language_codes = ["no"] + time_zone = "Europe/London" + description = "Description 2!" 
+ avatar_uri = "https://storage.cloud.google.com/dialogflow-test-host-image/cloud-logo-2.png" + enable_stackdriver_logging = true + enable_spell_correction = true + speech_to_text_settings { + enable_speech_adaptation = true + } + advanced_settings { + audio_export_gcs_destination { + uri = "${google_storage_bucket.bucket.url}/prefix-" + } + dtmf_settings { + enabled = true + max_digits = 1 + finish_digit = "#" + } + } + git_integration_settings { + github_settings { + display_name = "Github Repo" + repository_uri = "https://api.github.com/repos/githubtraining/hellogitworld" + tracking_branch = "main" + access_token = "secret-token" + branches = ["main"] + } + } + text_to_speech_settings { + synthesize_speech_configs = jsonencode({ + en = { + voice = { + name = "en-US-Neural2-A" + } + } + }) + } + } + `, context) +} + +func testAccDialogflowCXAgent_removeSettings(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_dialogflow_cx_agent" "foobar" { + display_name = "tf-test-%{random_suffix}" + location = "global" + default_language_code = "en" + supported_language_codes = ["fr","de","es"] + time_zone = "America/New_York" + description = "Description 1." 
+ avatar_uri = "https://storage.cloud.google.com/dialogflow-test-host-image/cloud-logo.png" + advanced_settings {} + git_integration_settings {} + text_to_speech_settings {} + } + `, context) +} diff --git a/mmv1/third_party/terraform/services/dns/go/data_source_dns_key_test.go b/mmv1/third_party/terraform/services/dns/go/data_source_dns_key_test.go new file mode 100644 index 000000000000..d05066a2f7bb --- /dev/null +++ b/mmv1/third_party/terraform/services/dns/go/data_source_dns_key_test.go @@ -0,0 +1,178 @@ +package dns_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceDNSKeys_basic(t *testing.T) { + t.Parallel() + + dnsZoneName := fmt.Sprintf("tf-test-dnskey-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceDNSKeysConfig(dnsZoneName, "on"), + Check: resource.ComposeTestCheckFunc( + testAccDataSourceDNSKeysDSRecordCheck("data.google_dns_keys.foo_dns_key"), + resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key", "key_signing_keys.#", "1"), + resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key", "zone_signing_keys.#", "1"), + resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key_id", "key_signing_keys.#", "1"), + resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key_id", "zone_signing_keys.#", "1"), + ), + }, + }, + }) +} + + +func TestAccDataSourceDNSKeys_noDnsSec(t *testing.T) { + t.Parallel() + + dnsZoneName := fmt.Sprintf("tf-test-dnskey-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceDNSKeysConfig(dnsZoneName, "off"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key", "key_signing_keys.#", "0"), + resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key", "zone_signing_keys.#", "0"), + ), + }, + }, + }) +} + +func testAccDataSourceDNSKeysDSRecordCheck(datasourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[datasourceName] + if !ok { + return fmt.Errorf("root module has no resource called %s", datasourceName) + } + + if ds.Primary.Attributes["key_signing_keys.0.ds_record"] == "" { + return fmt.Errorf("DS record not found in data source") + } + + return nil + } +} + +func testAccDataSourceDNSKeysConfig(dnsZoneName, dnssecStatus string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foo" { + name = "%s" + dns_name = "dnssec.gcp.tfacc.hashicorptest.com." + + dnssec_config { + state = "%s" + non_existence = "nsec3" + } +} + +data "google_dns_keys" "foo_dns_key" { + managed_zone = google_dns_managed_zone.foo.name +} + +data "google_dns_keys" "foo_dns_key_id" { + managed_zone = google_dns_managed_zone.foo.id +} +`, dnsZoneName, dnssecStatus) +} + +// TestAccDataSourceDNSKeys_basic_AdcAuth is the same as TestAccDataSourceDNSKeys_basic but the test enforces that a developer runs this using +// ADCs, supplied via GOOGLE_APPLICATION_CREDENTIALS. If any other credentials ENVs are set the PreCheck will fail. +// Commented out until this test can run in TeamCity/CI. 
+// func TestAccDataSourceDNSKeys_basic_AdcAuth(t *testing.T) { +// acctest.SkipIfVcr(t) // Uses external providers +// t.Parallel() + +// creds := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") // PreCheck assertion handles checking this is set + +// dnsZoneName := fmt.Sprintf("tf-test-dnskey-test-%s", acctest.RandString(t, 10)) + +// context := map[string]interface{}{ +// "credentials_path": creds, +// "dns_zone_name": dnsZoneName, +// "dnssec_status": "on", +// } + +// acctest.VcrTest(t, resource.TestCase{ +// PreCheck: func() { acctest.AccTestPreCheck_AdcCredentialsOnly(t) }, // Note different than default +// CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), +// Steps: []resource.TestStep{ +// // Check test fails with version of provider where data source is implemented with PF +// { +// ExternalProviders: map[string]resource.ExternalProvider{ +// "google": { +// VersionConstraint: "4.60.0", // Muxed provider with dns data sources migrated to PF +// Source: "hashicorp/google", +// }, +// }, +// ExpectError: regexp.MustCompile("Post \"https://oauth2.googleapis.com/token\": context canceled"), +// Config: testAccDataSourceDNSKeysConfig_AdcCredentials(context), +// Check: resource.ComposeTestCheckFunc( +// testAccDataSourceDNSKeysDSRecordCheck("data.google_dns_keys.foo_dns_key"), +// resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key", "key_signing_keys.#", "1"), +// resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key", "zone_signing_keys.#", "1"), +// resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key_id", "key_signing_keys.#", "1"), +// resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key_id", "zone_signing_keys.#", "1"), +// ), +// }, +// // Test should pass with more recent code +// { +// ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), +// Config: testAccDataSourceDNSKeysConfig_AdcCredentials(context), +// Check: resource.ComposeTestCheckFunc( +// 
testAccDataSourceDNSKeysDSRecordCheck("data.google_dns_keys.foo_dns_key"), +// resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key", "key_signing_keys.#", "1"), +// resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key", "zone_signing_keys.#", "1"), +// resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key_id", "key_signing_keys.#", "1"), +// resource.TestCheckResourceAttr("data.google_dns_keys.foo_dns_key_id", "zone_signing_keys.#", "1"), +// ), +// }, +// }, +// }) +// } + +func testAccDataSourceDNSKeysConfig_AdcCredentials(context map[string]interface{}) string { + return acctest.Nprintf(` + +// The auth problem isn't triggered unless provider block is +// present in the test config. + +provider "google" { + credentials = "%{credentials_path}" +} + +resource "google_dns_managed_zone" "foo" { + name = "%{dns_zone_name}" + dns_name = "dnssec.gcp.tfacc.hashicorptest.com." + + dnssec_config { + state = "%{dnssec_status}" + non_existence = "nsec3" + } +} + +data "google_dns_keys" "foo_dns_key" { + managed_zone = google_dns_managed_zone.foo.name +} + +data "google_dns_keys" "foo_dns_key_id" { + managed_zone = google_dns_managed_zone.foo.id +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/dns/go/data_source_dns_managed_zone_test.go.tmpl b/mmv1/third_party/terraform/services/dns/go/data_source_dns_managed_zone_test.go.tmpl new file mode 100644 index 000000000000..f3b7ed151874 --- /dev/null +++ b/mmv1/third_party/terraform/services/dns/go/data_source_dns_managed_zone_test.go.tmpl @@ -0,0 +1,59 @@ +package dns_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceDnsManagedZone_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + 
CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceDnsManagedZone_basic(acctest.RandString(t, 10)), + Check: acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( + "data.google_dns_managed_zone.qa", + "google_dns_managed_zone.foo", + map[string]struct{}{ + "dnssec_config.#": {}, + "private_visibility_config.#": {}, + "peering_config.#": {}, + "forwarding_config.#": {}, + "force_destroy": {}, + "labels.#": {}, + "terraform_labels.%": {}, + "effective_labels.%": {}, + "creation_time": {}, + "cloud_logging_config.#": {}, + "cloud_logging_config.0.%": {}, + "cloud_logging_config.0.enable_logging": {}, +{{- if ne $.TargetVersionName "ga" }} + "reverse_lookup": {}, +{{- end }} + }, + ), + }, + }, + }) +} + +func testAccDataSourceDnsManagedZone_basic(managedZoneName string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foo" { + name = "tf-test-qa-zone-%s" + dns_name = "qa.gcp.tfacc.hashicorptest.com." 
+ description = "QA DNS zone" +} + +data "google_dns_managed_zone" "qa" { + name = google_dns_managed_zone.foo.name +} +`, managedZoneName) +} diff --git a/mmv1/third_party/terraform/services/dns/go/data_source_dns_record_set_test.go b/mmv1/third_party/terraform/services/dns/go/data_source_dns_record_set_test.go new file mode 100644 index 000000000000..1169033b5cd1 --- /dev/null +++ b/mmv1/third_party/terraform/services/dns/go/data_source_dns_record_set_test.go @@ -0,0 +1,54 @@ +package dns_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAcccDataSourceDnsRecordSet_basic(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceDnsRecordSet_basic(name), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_dns_record_set.rs", "google_dns_record_set.rs"), + ), + }, + }, + }) +} + +func testAccDataSourceDnsRecordSet_basic(randString string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "zone" { + name = "tf-test-zone-%s" + dns_name = "%s.hashicorptest.com." 
+} + +resource "google_dns_record_set" "rs" { + managed_zone = google_dns_managed_zone.zone.name + name = "%s.${google_dns_managed_zone.zone.dns_name}" + type = "A" + ttl = 300 + rrdatas = [ + "192.168.1.0", + ] +} + +data "google_dns_record_set" "rs" { + managed_zone = google_dns_record_set.rs.managed_zone + name = google_dns_record_set.rs.name + type = google_dns_record_set.rs.type +} +`, randString, randString, randString) +} diff --git a/mmv1/third_party/terraform/services/dns/go/resource_dns_managed_zone_test.go.tmpl b/mmv1/third_party/terraform/services/dns/go/resource_dns_managed_zone_test.go.tmpl new file mode 100644 index 000000000000..d95ab34faaad --- /dev/null +++ b/mmv1/third_party/terraform/services/dns/go/resource_dns_managed_zone_test.go.tmpl @@ -0,0 +1,663 @@ +package dns_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + tpgdns "github.com/hashicorp/terraform-provider-google/google/services/dns" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "google.golang.org/api/dns/v1" +) + +func TestAccDNSManagedZone_update(t *testing.T) { + t.Parallel() + + zoneSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsManagedZone_basic(zoneSuffix, "description1", map[string]string{"foo": "bar", "ping": "pong"}), + }, + { + ResourceName: "google_dns_managed_zone.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", 
"terraform_labels"}, + }, + { + Config: testAccDnsManagedZone_basic(zoneSuffix, "description2", map[string]string{"foo": "bar"}), + }, + { + ResourceName: "google_dns_managed_zone.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccDNSManagedZone_privateUpdate(t *testing.T) { + t.Parallel() + + zoneSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsManagedZone_privateUpdate(zoneSuffix, "network-1", "network-2"), + }, + { + ResourceName: "google_dns_managed_zone.private", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsManagedZone_privateUpdate(zoneSuffix, "network-2", "network-3"), + }, + { + ResourceName: "google_dns_managed_zone.private", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSManagedZone_dnssec_update(t *testing.T) { + t.Parallel() + + zoneSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsManagedZone_dnssec_on(zoneSuffix), + }, + { + ResourceName: "google_dns_managed_zone.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsManagedZone_dnssec_off(zoneSuffix), + }, + { + ResourceName: "google_dns_managed_zone.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSManagedZone_dnssec_empty(t *testing.T) { + t.Parallel() + + zoneSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsManagedZone_dnssec_empty(zoneSuffix), + }, + { + ResourceName: "google_dns_managed_zone.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSManagedZone_privateForwardingUpdate(t *testing.T) { + t.Parallel() + + zoneSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsManagedZone_privateForwardingUpdate(zoneSuffix, "172.16.1.10", "172.16.1.20", "default", "private"), + }, + { + ResourceName: "google_dns_managed_zone.private", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsManagedZone_privateForwardingUpdate(zoneSuffix, "172.16.1.10", "192.168.1.1", "private", "default"), + }, + { + ResourceName: "google_dns_managed_zone.private", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSManagedZone_cloudLoggingConfigUpdate(t *testing.T) { + t.Parallel() + + zoneSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsManagedZone_cloudLoggingConfig_basic(zoneSuffix), + }, + { + ResourceName: "google_dns_managed_zone.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccDnsManagedZone_cloudLoggingConfig_update(zoneSuffix, true), + }, + { + ResourceName: 
"google_dns_managed_zone.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccDnsManagedZone_cloudLoggingConfig_update(zoneSuffix, false), + }, + { + ResourceName: "google_dns_managed_zone.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func TestAccDNSManagedZone_reverseLookup(t *testing.T) { + t.Parallel() + + zoneSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsManagedZone_reverseLookup(zoneSuffix), + }, + { + ResourceName: "google_dns_managed_zone.reverse", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccDNSManagedZone_forceDestroy(t *testing.T) { + //t.Parallel() + + zoneSuffix := acctest.RandString(t, 10) + project := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDNSManagedZone_forceDestroy(zoneSuffix), + Check: resource.ComposeTestCheckFunc( + testAccCheckManagedZoneCreateRRs(t, zoneSuffix, project), + ), + }, + }, + }) +} + +func testAccCheckManagedZoneCreateRRs(t *testing.T, zoneSuffix string, project string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + zone := fmt.Sprintf("mzone-test-%s", zoneSuffix) + // Build the change + chg := &dns.Change{ + Additions: 
[]*dns.ResourceRecordSet{ + { + Name: fmt.Sprintf("cname.%s.hashicorptest.com.", zoneSuffix), + Type: "CNAME", + Ttl: 300, + Rrdatas: []string{"foo.example.com."}, + }, + { + Name: fmt.Sprintf("a.%s.hashicorptest.com.", zoneSuffix), + Type: "A", + Ttl: 300, + Rrdatas: []string{"1.1.1.1"}, + }, + { + Name: fmt.Sprintf("nested.%s.hashicorptest.com.", zoneSuffix), + Type: "NS", + Ttl: 300, + Rrdatas: []string{"ns.hashicorp.services.", "ns2.hashicorp.services."}, + }, + }, + } + + chg, err := config.NewDnsClient(config.UserAgent).Changes.Create(project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error creating DNS RecordSet: %s", err) + } + + w := &tpgdns.DnsChangeWaiter{ + Service: config.NewDnsClient(config.UserAgent), + Change: chg, + Project: project, + ManagedZone: zone, + } + _, err = w.Conf().WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + return nil + } +} + +func testAccDNSManagedZone_forceDestroy(suffix string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foobar" { + name = "mzone-test-%s" + dns_name = "%s.hashicorptest.com." + labels = { + foo = "bar" + } + force_destroy = true + visibility = "public" +} +`, suffix, suffix) +} + +func testAccDnsManagedZone_basic(suffix, description string, labels map[string]string) string { + labelsRep := "" + for k, v := range labels { + labelsRep += fmt.Sprintf("%s = %q, ", k, v) + } + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foobar" { + name = "mzone-test-%s" + dns_name = "tf-acctest-%s.hashicorptest.com." + description = "%s" + labels = {%s} + visibility = "public" +} +`, suffix, suffix, description, labelsRep) +} + +func testAccDnsManagedZone_dnssec_on(suffix string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foobar" { + name = "mzone-test-%s" + dns_name = "tf-acctest-%s.hashicorptest.com." 
+ + dnssec_config { + state = "on" + default_key_specs { + algorithm = "rsasha256" + key_length = "2048" + key_type = "zoneSigning" + } + default_key_specs { + algorithm = "rsasha256" + key_length = "2048" + key_type = "keySigning" + } + + non_existence = "nsec" + } +} +`, suffix, suffix) +} + +func testAccDnsManagedZone_dnssec_off(suffix string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foobar" { + name = "mzone-test-%s" + dns_name = "tf-acctest-%s.hashicorptest.com." + + dnssec_config { + state = "off" + default_key_specs { + algorithm = "rsasha256" + key_length = "2048" + key_type = "zoneSigning" + } + default_key_specs { + algorithm = "rsasha256" + key_length = "2048" + key_type = "keySigning" + } + + non_existence = "nsec3" + } +} +`, suffix, suffix) +} + +func testAccDnsManagedZone_dnssec_empty(suffix string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foobar" { + name = "mzone-test-%s" + dns_name = "tf-acctest-%s.hashicorptest.com." + + dnssec_config { + state = "off" + } +} +`, suffix, suffix) +} + +func testAccDnsManagedZone_privateUpdate(suffix, first_network, second_network string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "private" { + name = "private-zone-%s" + dns_name = "private.example.com." 
+ description = "Example private DNS zone" + visibility = "private" + private_visibility_config { + networks { + network_url = google_compute_network.%s.self_link + } + networks { + network_url = google_compute_network.%s.self_link + } + gke_clusters { + gke_cluster_name = google_container_cluster.cluster-1.id + } + } +} + +resource "google_compute_network" "network-1" { + name = "tf-test-net-1-%s" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network-2" { + name = "tf-test-net-2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network-3" { + name = "tf-test-network-3-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork-1" { + name = google_compute_network.network-1.name + network = google_compute_network.network-1.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "cluster-1" { + name = "tf-test-cluster-1-%s" + location = "us-central1-c" + initial_node_count = 1 + deletion_protection = false + + networking_mode = "VPC_NATIVE" + default_snat_status { + disabled = true + } + network = google_compute_network.network-1.name + subnetwork = google_compute_subnetwork.subnetwork-1.name + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + master_global_access_config { + enabled = true + } + } + master_authorized_networks_config { + } + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.subnetwork-1.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.subnetwork-1.secondary_ip_range[1].range_name + } +} +`, suffix, first_network, second_network, suffix, suffix, suffix, suffix) 
+} + +func testAccDnsManagedZone_privateForwardingUpdate(suffix, first_nameserver, second_nameserver, first_forwarding_path, second_forwarding_path string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "private" { + name = "private-zone-%s" + dns_name = "private.example.com." + description = "Example private DNS zone" + visibility = "private" + private_visibility_config { + networks { + network_url = google_compute_network.network-1.self_link + } + } + + forwarding_config { + target_name_servers { + ipv4_address = "%s" + forwarding_path = "%s" + } + target_name_servers { + ipv4_address = "%s" + forwarding_path = "%s" + } + } +} + +resource "google_compute_network" "network-1" { + name = "tf-test-net-1-%s" + auto_create_subnetworks = false +} +`, suffix, first_nameserver, first_forwarding_path, second_nameserver, second_forwarding_path, suffix) +} + +func testAccDnsManagedZone_cloudLoggingConfig_basic(suffix string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foobar" { + name = "mzone-test-%s" + dns_name = "tf-acctest-%s.hashicorptest.com." + description = "Example DNS zone" + labels = { + foo = "bar" + } +} +`, suffix, suffix) +} + +func testAccDnsManagedZone_cloudLoggingConfig_update(suffix string, enableCloudLogging bool) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foobar" { + name = "mzone-test-%s" + dns_name = "tf-acctest-%s.hashicorptest.com." + description = "Example DNS zone" + labels = { + foo = "bar" + } + + cloud_logging_config { + enable_logging = %t + } +} +`, suffix, suffix, enableCloudLogging) +} + +{{ if or (ne $.TargetVersionName ``) (eq $.TargetVersionName `ga`) }} +func testAccDnsManagedZone_reverseLookup(suffix string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "reverse" { + name = "reverse-zone-%s" + dns_name = "1.0.168.192.in-addr.arpa." 
+ visibility = "private" + description = "Example private DNS zone" + + reverse_lookup = true +} + +resource "google_compute_network" "network-1" { + name = "tf-test-net-1-%s" + auto_create_subnetworks = false +} +`, suffix, suffix) +} +{{- end }} + +func TestDnsManagedZoneImport_parseImportId(t *testing.T) { + zoneRegexes := []string{ + "projects/(?P[^/]+)/managedZones/(?P[^/]+)", + "(?P[^/]+)/managedZones/(?P[^/]+)", + "(?P[^/]+)", + } + + cases := map[string]struct { + ImportId string + IdRegexes []string + Config *transport_tpg.Config + ExpectedSchemaValues map[string]interface{} + ExpectError bool + }{ + "full self_link": { + IdRegexes: zoneRegexes, + ImportId: "https://dns.googleapis.com/dns/v1/projects/my-project/managedZones/my-zone", + ExpectedSchemaValues: map[string]interface{}{ + "project": "my-project", + "name": "my-zone", + }, + }, + "relative self_link": { + IdRegexes: zoneRegexes, + ImportId: "projects/my-project/managedZones/my-zone", + ExpectedSchemaValues: map[string]interface{}{ + "project": "my-project", + "name": "my-zone", + }, + }, + "short id": { + IdRegexes: zoneRegexes, + ImportId: "my-project/managedZones/my-zone", + ExpectedSchemaValues: map[string]interface{}{ + "project": "my-project", + "name": "my-zone", + }, + }, + "short id with default project and region": { + IdRegexes: zoneRegexes, + ImportId: "my-zone", + Config: &transport_tpg.Config{ + Project: "default-project", + }, + ExpectedSchemaValues: map[string]interface{}{ + "project": "default-project", + "name": "my-zone", + }, + }, + } + + for tn, tc := range cases { + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: make(map[string]interface{}), + } + d.SetId(tc.ImportId) + config := tc.Config + if config == nil { + config = &transport_tpg.Config{} + } + // + if err := tpgresource.ParseImportId(tc.IdRegexes, d, config); err == nil { + for k, expectedValue := range tc.ExpectedSchemaValues { + if v, ok := d.GetOk(k); ok { + if v != expectedValue { + t.Errorf("%s failed; 
Expected value %q for field %q, got %q", tn, expectedValue, k, v) + } + } else { + t.Errorf("%s failed; Expected a value for field %q", tn, k) + } + } + } else if !tc.ExpectError { + t.Errorf("%s failed; unexpected error: %s", tn, err) + } + } +} + +func TestAccDNSManagedZone_importWithProject(t *testing.T) { + t.Parallel() + + zoneSuffix := acctest.RandString(t, 10) + project := envvar.GetTestProjectFromEnv() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsManagedZone_basicWithProject(zoneSuffix, "description1", project), + }, + { + ResourceName: "google_dns_managed_zone.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDnsManagedZone_basicWithProject(suffix, description, project string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "foobar" { + name = "mzone-test-%s" + dns_name = "tf-acctest-%s.hashicorptest.com." 
+ description = "%s" + project = "%s" +} +`, suffix, suffix, description, project) +} diff --git a/mmv1/third_party/terraform/services/dns/go/resource_dns_policy_test.go b/mmv1/third_party/terraform/services/dns/go/resource_dns_policy_test.go new file mode 100644 index 000000000000..69bde4cb51d9 --- /dev/null +++ b/mmv1/third_party/terraform/services/dns/go/resource_dns_policy_test.go @@ -0,0 +1,72 @@ +package dns_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDNSPolicy_update(t *testing.T) { + t.Parallel() + + policySuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsPolicy_privateUpdate(policySuffix, "true", "172.16.1.10", "172.16.1.30", "network-1"), + }, + { + ResourceName: "google_dns_policy.example-policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsPolicy_privateUpdate(policySuffix, "false", "172.16.1.20", "172.16.1.40", "network-2"), + }, + { + ResourceName: "google_dns_policy.example-policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDnsPolicy_privateUpdate(suffix, forwarding, first_nameserver, second_nameserver, network string) string { + return fmt.Sprintf(` +resource "google_dns_policy" "example-policy" { + name = "example-policy-%s" + enable_inbound_forwarding = %s + + alternative_name_server_config { + target_name_servers { + ipv4_address = "%s" + } + target_name_servers { + ipv4_address = "%s" + forwarding_path = "private" + } + } + + networks { + network_url = google_compute_network.%s.self_link + } +} + +resource "google_compute_network" "network-1" { + name = "tf-test-network-1-%s" + 
auto_create_subnetworks = false +} + +resource "google_compute_network" "network-2" { + name = "tf-test-network-2-%s" + auto_create_subnetworks = false +} +`, suffix, forwarding, first_nameserver, second_nameserver, network, suffix, suffix) +} diff --git a/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl b/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl new file mode 100644 index 000000000000..d3621e9b8bb3 --- /dev/null +++ b/mmv1/third_party/terraform/services/dns/go/resource_dns_record_set_test.go.tmpl @@ -0,0 +1,1186 @@ +package dns_test + +import ( + "fmt" + "net" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + tpgdns "github.com/hashicorp/terraform-provider-google/google/services/dns" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "google.golang.org/api/dns/v1" +) + +func TestIpv6AddressDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New []string + ShouldSuppress bool + }{ + "compact form should suppress diff": { + Old: []string{"2a03:b0c0:1:e0::29b:8001"}, + New: []string{"2a03:b0c0:0001:00e0:0000:0000:029b:8001"}, + ShouldSuppress: true, + }, + "different address should not suppress diff": { + Old: []string{"2a03:b0c0:1:e00::29b:8001"}, + New: []string{"2a03:b0c0:0001:00e0:0000:0000:029b:8001"}, + ShouldSuppress: false, + }, + "increase address should not suppress diff": { + Old: []string{""}, + New: []string{"2a03:b0c0:0001:00e0:0000:0000:029b:8001"}, + ShouldSuppress: false, + }, + "decrease address should not suppress diff": { + Old: []string{"2a03:b0c0:1:e00::29b:8001"}, + New: []string{""}, + ShouldSuppress: false, + }, + "switch 
address positions should suppress diff": { + Old: []string{"2a03:b0c0:1:e00::28b:8001", "2a03:b0c0:1:e0::29b:8001"}, + New: []string{"2a03:b0c0:1:e0::29b:8001", "2a03:b0c0:1:e00::28b:8001"}, + ShouldSuppress: true, + }, + } + + parseFunc := func(x string) string { + return net.ParseIP(x).String() + } + + for tn, tc := range cases { + shouldSuppress := tpgdns.RrdatasListDiffSuppress(tc.Old, tc.New, parseFunc, nil) + if shouldSuppress != tc.ShouldSuppress { + t.Errorf("%s: expected %t", tn, tc.ShouldSuppress) + } + } +} + +func TestAccDNSRecordSet_basic(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/test-record.%s.hashicorptest.com./A", zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + // Check both import formats + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_Update(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: 
fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 600), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_changeType(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsRecordSet_bigChange(zoneName, 600), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./CNAME", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_nestedNS(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-ns-%s", acctest.RandString(t, 10)) + recordSetName := 
fmt.Sprintf("\"nested.%s.hashicorptest.com.\"", zoneName) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_NS(zoneName, recordSetName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/nested.%s.hashicorptest.com./NS", zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_secondaryNS(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-ns-%s", acctest.RandString(t, 10)) + recordSetName := "google_dns_managed_zone.parent-zone.dns_name" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_NS(zoneName, recordSetName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("projects/%s/managedZones/%s/rrsets/%s.hashicorptest.com./NS", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +// tracks fix for https://github.com/hashicorp/terraform-provider-google/issues/12827 +func TestAccDNSRecordSet_deletionSOA(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-soa-%s", acctest.RandString(t, 10)) + recordSetName := "google_dns_managed_zone.parent-zone.dns_name" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_SOA(zoneName, recordSetName, 300), + 
}, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("projects/%s/managedZones/%s/rrsets/%s.hashicorptest.com./SOA", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_quotedTXT(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-txt-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_quotedTXT(zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/test-record.%s.hashicorptest.com./TXT", zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_uppercaseMX(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-txt-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_uppercaseMX(zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./MX", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_routingPolicy(t *testing.T) { + t.Parallel() + + networkName := fmt.Sprintf("tf-test-network-%s", acctest.RandString(t, 10)) + backendSubnetName := fmt.Sprintf("tf-test-backend-subnet-%s", acctest.RandString(t, 10)) + proxySubnetName := fmt.Sprintf("tf-test-proxy-subnet-%s", acctest.RandString(t, 10)) + httpHealthCheckName 
:= fmt.Sprintf("tf-test-http-health-check-%s", acctest.RandString(t, 10)) + backendName := fmt.Sprintf("tf-test-backend-%s", acctest.RandString(t, 10)) + urlMapName := fmt.Sprintf("tf-test-url-map-%s", acctest.RandString(t, 10)) + httpProxyName := fmt.Sprintf("tf-test-http-proxy-%s", acctest.RandString(t, 10)) + forwardingRuleName := fmt.Sprintf("tf-test-forwarding-rule-%s", acctest.RandString(t, 10)) + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_routingPolicyWRR(networkName, backendName, forwardingRuleName, zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsRecordSet_routingPolicyGEO(networkName, backendName, forwardingRuleName, zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsRecordSet_routingPolicyPrimaryBackup(networkName, backendName, forwardingRuleName, zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsRecordSet_routingPolicyRegionalL7PrimaryBackup(networkName, proxySubnetName, httpHealthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, 300), + }, + { + 
ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsRecordSet_routingPolicyCrossRegionL7PrimaryBackup(networkName, backendSubnetName, proxySubnetName, httpHealthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_changeRouting(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsRecordSet_routingPolicy(zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, 
+ }, + }, + }) +} + +// Tracks fix for https://github.com/hashicorp/terraform-provider-google/issues/12043 +func TestAccDNSRecordSet_interpolated(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_interpolated(zoneName), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/test-record.%s.hashicorptest.com./TXT", zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_readOutOfBandRoutingPolicyChange(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(t, 10)) + rrsetName := fmt.Sprintf("test-record.%s.hashicorptest.com.", zoneName) + ttl := 300 + rrdata := []string{"127.0.0.1", "127.0.0.10"} + routingPolicy := &dns.RRSetRoutingPolicy{ + Wrr: &dns.RRSetRoutingPolicyWrrPolicy{ + Items: []*dns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + { + Weight: 0, + Rrdatas: []string{"1.2.3.4", "4.3.2.1"}, + }, + { + Weight: 0, + Rrdatas: []string{"2.3.4.5", "5.4.3.2"}, + }, + }, + }, + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + PreConfig: testAccCheckDnsRecordSetSetRoutingPolicyOutOfBand(t, zoneName, 
rrsetName, ttl, rrdata, routingPolicy), + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSRecordSet_readOutOfBandRrDataChange(t *testing.T) { + t.Parallel() + + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(t, 10)) + rrsetName := fmt.Sprintf("test-record.%s.hashicorptest.com.", zoneName) + ttl := 300 + rrdata := []string{"127.0.0.1", "127.0.0.10"} + routingPolicy := &dns.RRSetRoutingPolicy{ + Wrr: &dns.RRSetRoutingPolicyWrrPolicy{ + Items: []*dns.RRSetRoutingPolicyWrrPolicyWrrPolicyItem{ + { + Weight: 0, + Rrdatas: []string{"1.2.3.4", "4.3.2.1"}, + }, + { + Weight: 0, + Rrdatas: []string{"2.3.4.5", "5.4.3.2"}, + }, + }, + }, + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDnsRecordSetDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsRecordSet_routingPolicy(zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + { + PreConfig: testAccCheckDnsRecordSetSetRrdataOutOfBand(t, zoneName, rrsetName, ttl, rrdata, routingPolicy), + Config: testAccDnsRecordSet_routingPolicy(zoneName, 300), + }, + { + ResourceName: "google_dns_record_set.foobar", + ImportStateId: fmt.Sprintf("%s/%s/test-record.%s.hashicorptest.com./A", envvar.GetTestProjectFromEnv(), zoneName, zoneName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckDnsRecordSetSetRoutingPolicyOutOfBand(t *testing.T, zoneName, rrsetName 
string, ttl int, rrdata []string, routingPolicy *dns.RRSetRoutingPolicy) func() { + return func() { + config := acctest.GoogleProviderConfig(t) + service := config.NewDnsClient(config.UserAgent).Changes + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{ + { + Name: rrsetName, + Type: "A", + Ttl: int64(ttl), + Rrdatas: rrdata, + }, + }, + Additions: []*dns.ResourceRecordSet{ + { + Name: rrsetName, + Type: "A", + Ttl: int64(ttl), + RoutingPolicy: routingPolicy, + }, + }, + } + chg, err := service.Create(config.Project, zoneName, chg).Do() + if err != nil { + t.Errorf("Error while changing rrset %s/%s/%s out of band: %s", config.Project, zoneName, rrsetName, err) + return + } + w := &tpgdns.DnsChangeWaiter{ + Service: config.NewDnsClient(config.UserAgent), + Change: chg, + Project: config.Project, + ManagedZone: zoneName, + } + if _, err = w.Conf().WaitForState(); err != nil { + t.Errorf("Error waiting for out of band Google DNS change: %s", err) + } + } +} + +func testAccCheckDnsRecordSetSetRrdataOutOfBand(t *testing.T, zoneName, rrsetName string, ttl int, rrdata []string, routingPolicy *dns.RRSetRoutingPolicy) func() { + return func() { + config := acctest.GoogleProviderConfig(t) + service := config.NewDnsClient(config.UserAgent).Changes + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{ + { + Name: rrsetName, + Type: "A", + Ttl: int64(ttl), + RoutingPolicy: routingPolicy, + }, + }, + Additions: []*dns.ResourceRecordSet{ + { + Name: rrsetName, + Type: "A", + Ttl: int64(ttl), + Rrdatas: rrdata, + }, + }, + } + chg, err := service.Create(config.Project, zoneName, chg).Do() + if err != nil { + t.Errorf("Error while changing rrset %s/%s/%s out of band: %s", config.Project, zoneName, rrsetName, err) + return + } + w := &tpgdns.DnsChangeWaiter{ + Service: config.NewDnsClient(config.UserAgent), + Change: chg, + Project: config.Project, + ManagedZone: zoneName, + } + if _, err = w.Conf().WaitForState(); err != nil { + t.Errorf("Error waiting for out of 
band Google DNS change: %s", err) + } + } +} + +func testAccCheckDnsRecordSetDestroyProducer(t *testing.T) func(s *terraform.State) error { + + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_dns_record_set" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}DNSBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/managedZones/{{"{{"}}managed_zone{{"}}"}}/rrsets/{{"{{"}}name{{"}}"}}/{{"{{"}}type{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("DNSResourceDnsRecordSet still exists at %s", url) + } + } + + return nil + } +} + +func testAccDnsRecordSet_basic(zoneName string, addr2 string, ttl int) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." + type = "A" + rrdatas = ["127.0.0.1", "%s"] + ttl = %d +} +`, zoneName, zoneName, zoneName, addr2, ttl) +} + +func testAccDnsRecordSet_routingPolicy(zoneName string, ttl int) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." 
+ type = "A" + ttl = %d + routing_policy { + wrr { + weight = 0 + rrdatas = ["1.2.3.4", "4.3.2.1"] + } + + wrr { + weight = 0 + rrdatas = ["2.3.4.5", "5.4.3.2"] + } + } +} +`, zoneName, zoneName, zoneName, ttl) +} + +func testAccDnsRecordSet_NS(name string, recordSetName string, ttl int) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = %s + type = "NS" + rrdatas = ["ns.hashicorp.services.", "ns2.hashicorp.services."] + ttl = %d +} +`, name, name, recordSetName, ttl) +} + +func testAccDnsRecordSet_bigChange(zoneName string, ttl int) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." + type = "CNAME" + rrdatas = ["www.terraform.io."] + ttl = %d +} +`, zoneName, zoneName, zoneName, ttl) +} + + +func testAccDnsRecordSet_SOA(name string, recordSetName string, ttl int) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = %s + type = "SOA" + rrdatas = ["ns-cloud-a1.googledomains.com. cloud-dns-hostmaster.google.com. 629010464 900 900 1800 60"] + ttl = %d +} +`, name, name, recordSetName, ttl) +} + +func testAccDnsRecordSet_quotedTXT(name string, ttl int) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." 
+ description = "Test Description" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." + type = "TXT" + rrdatas = ["test", "\"quoted test\""] + ttl = %d +} +`, name, name, name, ttl) +} + +func testAccDnsRecordSet_uppercaseMX(name string, ttl int) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." + type = "MX" + rrdatas = [ + "1 ASPMX.L.GOOGLE.COM.", + "5 ALT1.ASPMX.L.GOOGLE.COM.", + "5 ALT2.ASPMX.L.GOOGLE.COM.", + "10 ASPMX2.GOOGLEMAIL.COM.", + "10 ASPMX3.GOOGLEMAIL.COM.", + ] + ttl = %d +} +`, name, name, name, ttl) +} + +func testAccDnsRecordSet_routingPolicyWRR(networkName, backendName, forwardingRuleName, zoneName string, ttl int) string { + return fmt.Sprintf(` +resource "google_compute_network" "default" { + name = "%s" +} + +resource "google_compute_region_backend_service" "backend" { + name = "%s" + region = "us-central1" +} + +resource "google_compute_forwarding_rule" "default" { + name = "%s" + region = "us-central1" + + load_balancing_scheme = "INTERNAL" + backend_service = google_compute_region_backend_service.backend.id + all_ports = true + allow_global_access = true + network = google_compute_network.default.name +} + +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" + visibility = "private" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." 
+ type = "A" + ttl = %d + + routing_policy { + wrr { + weight = 0 + rrdatas = ["1.2.3.4", "4.3.2.1"] + } + + wrr { + weight = 0 + rrdatas = ["2.3.4.5", "5.4.3.2"] + } + + wrr { + weight = 1.0 + + health_checked_targets { + internal_load_balancers { + load_balancer_type = "regionalL4ilb" + ip_address = google_compute_forwarding_rule.default.ip_address + port = "80" + ip_protocol = "tcp" + network_url = google_compute_network.default.id + project = google_compute_forwarding_rule.default.project + region = google_compute_forwarding_rule.default.region + } + } + } + } +} +`, networkName, backendName, forwardingRuleName, zoneName, zoneName, zoneName, ttl) +} + +func testAccDnsRecordSet_routingPolicyGEO(networkName, backendName, forwardingRuleName, zoneName string, ttl int) string { + return fmt.Sprintf(` +resource "google_compute_network" "default" { + name = "%s" +} + +resource "google_compute_region_backend_service" "backend" { + name = "%s" + region = "us-central1" +} + +resource "google_compute_forwarding_rule" "default" { + name = "%s" + region = "us-central1" + + load_balancing_scheme = "INTERNAL" + backend_service = google_compute_region_backend_service.backend.id + all_ports = true + allow_global_access = true + network = google_compute_network.default.name +} + +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" + visibility = "private" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." 
+ type = "A" + ttl = %d + + routing_policy { + enable_geo_fencing = true + + geo { + location = "us-east4" + rrdatas = ["1.2.3.4", "4.3.2.1"] + } + + geo { + location = "asia-east1" + rrdatas = ["2.3.4.5", "5.4.3.2"] + } + + geo { + location = "us-central1" + + health_checked_targets { + internal_load_balancers { + load_balancer_type = "regionalL4ilb" + ip_address = google_compute_forwarding_rule.default.ip_address + port = "80" + ip_protocol = "tcp" + network_url = google_compute_network.default.id + project = google_compute_forwarding_rule.default.project + region = google_compute_forwarding_rule.default.region + } + } + } + } +} +`, networkName, backendName, forwardingRuleName, zoneName, zoneName, zoneName, ttl) +} + +func testAccDnsRecordSet_routingPolicyPrimaryBackup(networkName, backendName, forwardingRuleName, zoneName string, ttl int) string { + return fmt.Sprintf(` +resource "google_compute_network" "default" { + name = "%s" +} + +resource "google_compute_region_backend_service" "backend" { + name = "%s" + region = "us-central1" +} + +resource "google_compute_forwarding_rule" "default" { + name = "%s" + region = "us-central1" + + load_balancing_scheme = "INTERNAL" + backend_service = google_compute_region_backend_service.backend.id + all_ports = true + allow_global_access = true + network = google_compute_network.default.name +} + +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" + visibility = "private" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." 
+ type = "A" + ttl = %d + + routing_policy { + primary_backup { + trickle_ratio = 0.1 + enable_geo_fencing_for_backups = true + + primary { + internal_load_balancers { + load_balancer_type = "regionalL4ilb" + ip_address = google_compute_forwarding_rule.default.ip_address + port = "80" + ip_protocol = "tcp" + network_url = google_compute_network.default.id + project = google_compute_forwarding_rule.default.project + region = google_compute_forwarding_rule.default.region + } + } + + backup_geo { + location = "us-west1" + rrdatas = ["1.2.3.4"] + } + + backup_geo { + location = "asia-east1" + rrdatas = ["5.6.7.8"] + } + } + } +} +`, networkName, backendName, forwardingRuleName, zoneName, zoneName, zoneName, ttl) +} + +func testAccDnsRecordSet_routingPolicyRegionalL7PrimaryBackup(networkName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName string, ttl int) string { + return fmt.Sprintf(` +resource "google_compute_network" "default" { + name = "%s" +} + +resource "google_compute_subnetwork" "proxy_subnet" { + name = "%s" + ip_cidr_range = "10.100.0.0/24" + region = "us-central1" + purpose = "INTERNAL_HTTPS_LOAD_BALANCER" + role = "ACTIVE" + network = google_compute_network.default.id +} + +resource "google_compute_region_health_check" "health_check" { + name = "%s" + region = "us-central1" + + http_health_check { + port = 80 + } +} + +resource "google_compute_region_backend_service" "backend" { + name = "%s" + region = "us-central1" + load_balancing_scheme = "INTERNAL_MANAGED" + protocol = "HTTP" + health_checks = [google_compute_region_health_check.health_check.id] +} + +resource "google_compute_region_url_map" "url_map" { + name = "%s" + region = "us-central1" + default_service = google_compute_region_backend_service.backend.id +} + +resource "google_compute_region_target_http_proxy" "http_proxy" { + name = "%s" + region = "us-central1" + url_map = google_compute_region_url_map.url_map.id +} + +resource 
"google_compute_forwarding_rule" "default" { + name = "%s" + region = "us-central1" + depends_on = [google_compute_subnetwork.proxy_subnet] + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_region_target_http_proxy.http_proxy.id + port_range = "80" + allow_global_access = true + network = google_compute_network.default.name + ip_protocol = "TCP" +} + +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" + visibility = "private" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." + type = "A" + ttl = %d + + routing_policy { + primary_backup { + trickle_ratio = 0.1 + enable_geo_fencing_for_backups = true + + primary { + internal_load_balancers { + load_balancer_type = "regionalL7ilb" + ip_address = google_compute_forwarding_rule.default.ip_address + port = "80" + ip_protocol = "tcp" + network_url = google_compute_network.default.id + project = google_compute_forwarding_rule.default.project + region = google_compute_forwarding_rule.default.region + } + } + + backup_geo { + location = "us-west1" + rrdatas = ["1.2.3.4"] + } + + backup_geo { + location = "asia-east1" + rrdatas = ["5.6.7.8"] + } + } + } +} +`, networkName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, zoneName, zoneName, ttl) +} + +func testAccDnsRecordSet_routingPolicyCrossRegionL7PrimaryBackup(networkName, backendSubnetName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName string, ttl int) string { + return fmt.Sprintf(` +resource "google_compute_network" "default" { + name = "%s" +} + +resource "google_compute_subnetwork" "backend_subnet" { + name = "%s" + ip_cidr_range = "10.0.1.0/24" + region = "us-central1" + network = google_compute_network.default.id +} + +resource 
"google_compute_subnetwork" "proxy_subnet" { + name = "%s" + ip_cidr_range = "10.100.0.0/24" + region = "us-central1" + purpose = "GLOBAL_MANAGED_PROXY" + role = "ACTIVE" + network = google_compute_network.default.id +} + +resource "google_compute_health_check" "health_check" { + name = "%s" + + http_health_check { + port = 80 + } +} + +resource "google_compute_backend_service" "backend" { + name = "%s" + load_balancing_scheme = "INTERNAL_MANAGED" + protocol = "HTTP" + health_checks = [google_compute_health_check.health_check.id] +} + +resource "google_compute_url_map" "url_map" { + name = "%s" + default_service = google_compute_backend_service.backend.id +} + +resource "google_compute_target_http_proxy" "http_proxy" { + name = "%s" + url_map = google_compute_url_map.url_map.id +} + +resource "google_compute_global_forwarding_rule" "default" { + name = "%s" + depends_on = [google_compute_subnetwork.proxy_subnet] + load_balancing_scheme = "INTERNAL_MANAGED" + target = google_compute_target_http_proxy.http_proxy.id + port_range = "80" + network = google_compute_network.default.name + subnetwork = google_compute_subnetwork.backend_subnet.name + ip_protocol = "TCP" +} + +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" + visibility = "private" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." 
+ type = "A" + ttl = %d + + routing_policy { + primary_backup { + trickle_ratio = 0.1 + enable_geo_fencing_for_backups = true + + primary { + internal_load_balancers { + load_balancer_type = "globalL7ilb" + ip_address = google_compute_global_forwarding_rule.default.ip_address + port = "80" + ip_protocol = "tcp" + network_url = google_compute_network.default.id + project = google_compute_global_forwarding_rule.default.project + } + } + + backup_geo { + location = "us-west1" + rrdatas = ["1.2.3.4"] + } + + backup_geo { + location = "asia-east1" + rrdatas = ["5.6.7.8"] + } + } + } +} +`, networkName, backendSubnetName, proxySubnetName, healthCheckName, backendName, urlMapName, httpProxyName, forwardingRuleName, zoneName, zoneName, zoneName, ttl) +} + +func testAccDnsRecordSet_interpolated(zoneName string) string { + return fmt.Sprintf(` +resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "%s.hashicorptest.com." + description = "Test Description" +} + +resource "google_dns_record_set" "foobar" { + managed_zone = google_dns_managed_zone.parent-zone.name + name = "test-record.%s.hashicorptest.com." 
+ type = "TXT" + rrdatas = ["127.0.0.1", "firebase=${google_dns_managed_zone.parent-zone.id}"] + ttl = 10 +} +`, zoneName, zoneName, zoneName) +} diff --git a/mmv1/third_party/terraform/services/dns/go/resource_dns_response_policy_rule_test.go.tmpl b/mmv1/third_party/terraform/services/dns/go/resource_dns_response_policy_rule_test.go.tmpl new file mode 100644 index 000000000000..b7d29eaebc6c --- /dev/null +++ b/mmv1/third_party/terraform/services/dns/go/resource_dns_response_policy_rule_test.go.tmpl @@ -0,0 +1,197 @@ +package dns_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDNSResponsePolicyRule_update(t *testing.T) { + t.Parallel() + + responsePolicyRuleSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckDNSResponsePolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsResponsePolicyRule_privateUpdate(responsePolicyRuleSuffix, "network-1"), + }, + { + ResourceName: "google_dns_response_policy_rule.example-response-policy-rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsResponsePolicyRule_privateUpdate(responsePolicyRuleSuffix, "network-2"), + }, + { + ResourceName: "google_dns_response_policy_rule.example-response-policy-rule", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccDNSResponsePolicyRuleBehavior_update(t *testing.T) { + t.Parallel() + + responsePolicyRuleSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: 
testAccCheckDNSResponsePolicyRuleDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsResponsePolicyRuleBehavior_unspecified(responsePolicyRuleSuffix, "network-1"), + }, + { + ResourceName: "google_dns_response_policy_rule.example-response-policy-rule-behavior", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsResponsePolicyRuleBehavior_byPass(responsePolicyRuleSuffix, "network-1"), + }, + { + ResourceName: "google_dns_response_policy_rule.example-response-policy-rule-behavior", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsResponsePolicyRuleBehavior_unspecified(responsePolicyRuleSuffix, "network-1"), + }, + { + ResourceName: "google_dns_response_policy_rule.example-response-policy-rule-behavior", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDnsResponsePolicyRuleBehavior_unspecified(suffix, network string) string { + return fmt.Sprintf(` + +resource "google_compute_network" "network-1" { + provider = google-beta + + name = "tf-test-network-1-%s" + auto_create_subnetworks = false +} + +resource "google_dns_response_policy" "response-policy" { + provider = google-beta + + response_policy_name = "tf-test-response-policy-%s" + + networks { + network_url = google_compute_network.%s.self_link + } +} + +resource "google_dns_response_policy_rule" "example-response-policy-rule-behavior" { + provider = google-beta + + response_policy = google_dns_response_policy.response-policy.response_policy_name + rule_name = "tf-test-response-policy-rule-%s" + dns_name = "dns.example.com." + + local_data { + local_datas { + name = "dns.example.com." 
+ type = "A" + ttl = 300 + rrdatas = ["192.0.2.91"] + } + } +} + +`, suffix, suffix, network, suffix) +} + +func testAccDnsResponsePolicyRuleBehavior_byPass(suffix, network string) string { + return fmt.Sprintf(` + +resource "google_compute_network" "network-1" { + provider = google-beta + + name = "tf-test-network-1-%s" + auto_create_subnetworks = false +} + +resource "google_dns_response_policy" "response-policy" { + provider = google-beta + + response_policy_name = "tf-test-response-policy-%s" + + networks { + network_url = google_compute_network.%s.self_link + } +} + +resource "google_dns_response_policy_rule" "example-response-policy-rule-behavior" { + provider = google-beta + + behavior = "bypassResponsePolicy" + dns_name = "dns.example.com." + rule_name = "tf-test-response-policy-rule-%s" + response_policy = google_dns_response_policy.response-policy.response_policy_name + +} +`, suffix, suffix, network, suffix) +} + + +func testAccDnsResponsePolicyRule_privateUpdate(suffix, network string) string { + return fmt.Sprintf(` +resource "google_compute_network" "network-1" { + provider = google-beta + + name = "tf-test-network-1-%s" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network-2" { + provider = google-beta + + name = "tf-test-network-2-%s" + auto_create_subnetworks = false +} + +resource "google_dns_response_policy" "response-policy" { + provider = google-beta + + response_policy_name = "tf-test-response-policy-%s" + + networks { + network_url = google_compute_network.%s.self_link + } +} + +resource "google_dns_response_policy_rule" "example-response-policy-rule" { + provider = google-beta + + response_policy = google_dns_response_policy.response-policy.response_policy_name + rule_name = "tf-test-response-policy-rule-%s" + dns_name = "dns.example.com." + + local_data { + local_datas { + name = "dns.example.com." 
+ type = "A" + ttl = 300 + rrdatas = ["192.0.2.91"] + } + } +} +`, suffix, suffix, suffix, network, suffix) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/dns/go/resource_dns_response_policy_test.go.tmpl b/mmv1/third_party/terraform/services/dns/go/resource_dns_response_policy_test.go.tmpl new file mode 100644 index 000000000000..26959c441a7f --- /dev/null +++ b/mmv1/third_party/terraform/services/dns/go/resource_dns_response_policy_test.go.tmpl @@ -0,0 +1,210 @@ +package dns_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDNSResponsePolicy_update(t *testing.T) { + t.Parallel() + + responsePolicySuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckDNSResponsePolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDnsResponsePolicy_privateUpdate(responsePolicySuffix, "network-1"), + }, + { + ResourceName: "google_dns_response_policy.example-response-policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsResponsePolicy_privateUpdate(responsePolicySuffix, "network-2"), + }, + { + ResourceName: "google_dns_response_policy.example-response-policy", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccDnsResponsePolicy_removeNetworks(responsePolicySuffix), + }, + { + ResourceName: "google_dns_response_policy.example-response-policy", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccDnsResponsePolicy_privateUpdate(suffix, network string) string { + return fmt.Sprintf(` +resource "google_dns_response_policy" "example-response-policy" { + provider = google-beta + + response_policy_name = 
"tf-test-response-policy-%s" + + networks { + network_url = google_compute_network.%s.self_link + } + gke_clusters { + gke_cluster_name = google_container_cluster.cluster-1.id + } +} + +resource "google_compute_network" "network-1" { + provider = google-beta + + name = "tf-test-network-1-%s" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network-2" { + provider = google-beta + + name = "tf-test-network-2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork-1" { + provider = google-beta + + name = google_compute_network.network-1.name + network = google_compute_network.network-1.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "cluster-1" { + provider = google-beta + + name = "tf-test-cluster-1-%s" + location = "us-central1-c" + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + default_snat_status { + disabled = true + } + network = google_compute_network.network-1.name + subnetwork = google_compute_subnetwork.subnetwork-1.name + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + master_global_access_config { + enabled = true + } + } + master_authorized_networks_config { + } + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.subnetwork-1.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.subnetwork-1.secondary_ip_range[1].range_name + } + deletion_protection = false +} +`, suffix, network, suffix, suffix, suffix) +} + +func testAccDnsResponsePolicy_removeNetworks(suffix string) string { + return fmt.Sprintf(` +resource "google_dns_response_policy" "example-response-policy" { + 
provider = google-beta + + response_policy_name = "tf-test-response-policy-%s" + + gke_clusters { + gke_cluster_name = google_container_cluster.cluster-1.id + } +} + +resource "google_compute_network" "network-1" { + provider = google-beta + + name = "tf-test-network-1-%s" + auto_create_subnetworks = false +} + +resource "google_compute_network" "network-2" { + provider = google-beta + + name = "tf-test-network-2-%s" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "subnetwork-1" { + provider = google-beta + + name = google_compute_network.network-1.name + network = google_compute_network.network-1.name + ip_cidr_range = "10.0.36.0/24" + region = "us-central1" + private_ip_google_access = true + + secondary_ip_range { + range_name = "pod" + ip_cidr_range = "10.0.0.0/19" + } + + secondary_ip_range { + range_name = "svc" + ip_cidr_range = "10.0.32.0/22" + } +} + +resource "google_container_cluster" "cluster-1" { + provider = google-beta + + name = "tf-test-cluster-1-%s" + location = "us-central1-c" + initial_node_count = 1 + + networking_mode = "VPC_NATIVE" + default_snat_status { + disabled = true + } + network = google_compute_network.network-1.name + subnetwork = google_compute_subnetwork.subnetwork-1.name + + private_cluster_config { + enable_private_endpoint = true + enable_private_nodes = true + master_ipv4_cidr_block = "10.42.0.0/28" + master_global_access_config { + enabled = true + } + } + master_authorized_networks_config { + } + ip_allocation_policy { + cluster_secondary_range_name = google_compute_subnetwork.subnetwork-1.secondary_ip_range[0].range_name + services_secondary_range_name = google_compute_subnetwork.subnetwork-1.secondary_ip_range[1].range_name + } + deletion_protection = false +} +`, suffix, suffix, suffix, suffix) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_channel_test.go.tmpl 
b/mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_channel_test.go.tmpl new file mode 100644 index 000000000000..4c518290aebc --- /dev/null +++ b/mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_channel_test.go.tmpl @@ -0,0 +1,207 @@ +package eventarc_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc{{ $.DCLVersion }}" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccEventarcChannel_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "region": envvar.GetTestRegionFromEnv(), + "project_name": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckEventarcChannelDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccEventarcChannel_basic(context), + }, + { + ResourceName: "google_eventarc_channel.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccEventarcChannel_cryptoKeyUpdate(t *testing.T) { + t.Parallel() + + region := envvar.GetTestRegionFromEnv() + key1 := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", region, "tf-bootstrap-eventarc-channel-key1") + key2 := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", region, 
"tf-bootstrap-eventarc-channel-key2") + + context := map[string]interface{}{ + "region": region, + "project_name": envvar.GetTestProjectFromEnv(), + "key_ring": tpgresource.GetResourceNameFromSelfLink(key1.KeyRing.Name), + "key1": tpgresource.GetResourceNameFromSelfLink(key1.CryptoKey.Name), + "key2": tpgresource.GetResourceNameFromSelfLink(key2.CryptoKey.Name), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckEventarcChannelDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccEventarcChannel_setCryptoKey(context), + }, + { + ResourceName: "google_eventarc_channel.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccEventarcChannel_cryptoKeyUpdate(context), + }, + { + ResourceName: "google_eventarc_channel.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccEventarcChannel_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "test_project" { + project_id = "%{project_name}" +} + +resource "google_eventarc_channel" "primary" { + location = "%{region}" + name = "tf-test-name%{random_suffix}" + third_party_provider = "projects/${data.google_project.test_project.project_id}/locations/%{region}/providers/datadog" +} +`, context) +} + +func testAccEventarcChannel_setCryptoKey(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "test_project" { + project_id = "%{project_name}" +} + +data "google_kms_key_ring" "test_key_ring" { + name = "%{key_ring}" + location = "us-central1" +} + +data "google_kms_crypto_key" "key1" { + name = "%{key1}" + key_ring = data.google_kms_key_ring.test_key_ring.id +} + + +resource "google_kms_crypto_key_iam_member" "key1_member" { + crypto_key_id = data.google_kms_crypto_key.key1.id + role = 
"roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:service-${data.google_project.test_project.number}@gcp-sa-eventarc.iam.gserviceaccount.com" +} + +resource "google_eventarc_channel" "primary" { + location = "%{region}" + name = "tf-test-name%{random_suffix}" + crypto_key_name = data.google_kms_crypto_key.key1.id + third_party_provider = "projects/${data.google_project.test_project.project_id}/locations/%{region}/providers/datadog" + depends_on = [google_kms_crypto_key_iam_member.key1_member] +} +`, context) +} + +func testAccEventarcChannel_cryptoKeyUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "test_project" { + project_id = "%{project_name}" +} + +data "google_kms_key_ring" "test_key_ring" { + name = "%{key_ring}" + location = "us-central1" +} + +data "google_kms_crypto_key" "key2" { + name = "%{key2}" + key_ring = data.google_kms_key_ring.test_key_ring.id +} + +resource "google_kms_crypto_key_iam_member" "key2_member" { + crypto_key_id = data.google_kms_crypto_key.key2.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:service-${data.google_project.test_project.number}@gcp-sa-eventarc.iam.gserviceaccount.com" +} + +resource "google_eventarc_channel" "primary" { + location = "%{region}" + name = "tf-test-name%{random_suffix}" + crypto_key_name= data.google_kms_crypto_key.key2.id + third_party_provider = "projects/${data.google_project.test_project.project_id}/locations/%{region}/providers/datadog" + depends_on = [google_kms_crypto_key_iam_member.key2_member] +} +`, context) +} + +func testAccCheckEventarcChannelDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_eventarc_channel" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if 
config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &eventarc.Channel{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + CryptoKeyName: dcl.String(rs.Primary.Attributes["crypto_key_name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + ThirdPartyProvider: dcl.String(rs.Primary.Attributes["third_party_provider"]), + ActivationToken: dcl.StringOrNil(rs.Primary.Attributes["activation_token"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + PubsubTopic: dcl.StringOrNil(rs.Primary.Attributes["pubsub_topic"]), + State: eventarc.ChannelStateEnumRef(rs.Primary.Attributes["state"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := transport_tpg.NewDCLEventarcClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetChannel(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_eventarc_channel still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_google_channel_config_test.go.tmpl b/mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_google_channel_config_test.go.tmpl new file mode 100644 index 000000000000..dbe556619332 --- /dev/null +++ b/mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_google_channel_config_test.go.tmpl @@ -0,0 +1,206 @@ +package eventarc_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc{{ 
$.DCLVersion }}" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccEventarcGoogleChannelConfig_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckEventarcGoogleChannelConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccEventarcGoogleChannelConfig_basic(context), + }, + { + ResourceName: "google_eventarc_google_channel_config.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccEventarcGoogleChannelConfig_cryptoKeyUpdate(t *testing.T) { + t.Parallel() + + region := envvar.GetTestRegionFromEnv() + key1 := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", region, "tf-bootstrap-eventarc-google-channel-config-key1") + key2 := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", region, "tf-bootstrap-eventarc-google-channel-config-key2") + + context := map[string]interface{}{ + "project_name": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "key_ring": tpgresource.GetResourceNameFromSelfLink(key1.KeyRing.Name), + "key1": tpgresource.GetResourceNameFromSelfLink(key1.CryptoKey.Name), + "key2": tpgresource.GetResourceNameFromSelfLink(key2.CryptoKey.Name), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckEventarcGoogleChannelConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccEventarcGoogleChannelConfig_setCryptoKey(context), + }, + { + ResourceName: "google_eventarc_google_channel_config.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccEventarcGoogleChannelConfig_cryptoKeyUpdate(context), + }, + { + ResourceName: "google_eventarc_google_channel_config.primary", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccEventarcGoogleChannelConfig_deleteCryptoKey(context), + }, + }, + }) +} + +func testAccEventarcGoogleChannelConfig_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_eventarc_google_channel_config" "primary" { + location = "%{region}" + name = "projects/%{project_name}/locations/%{region}/googleChannelConfig" +} + `, context) +} + +func testAccEventarcGoogleChannelConfig_setCryptoKey(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "test_project" { + project_id = "%{project_name}" +} + +data "google_kms_key_ring" "test_key_ring" { + name = "%{key_ring}" + location = "us-central1" +} + +data "google_kms_crypto_key" "key1" { + name = "%{key1}" + key_ring = data.google_kms_key_ring.test_key_ring.id +} + +resource "google_kms_crypto_key_iam_member" "key1_member" { + crypto_key_id = data.google_kms_crypto_key.key1.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:service-${data.google_project.test_project.number}@gcp-sa-eventarc.iam.gserviceaccount.com" +} + +resource "google_eventarc_google_channel_config" "primary" { + location = "%{region}" + name = "projects/%{project_name}/locations/%{region}/googleChannelConfig" + crypto_key_name = data.google_kms_crypto_key.key1.id + depends_on =[google_kms_crypto_key_iam_member.key1_member] +} + `, context) +} + +func testAccEventarcGoogleChannelConfig_cryptoKeyUpdate(context map[string]interface{}) string { + 
return acctest.Nprintf(` +data "google_project" "test_project" { + project_id = "%{project_name}" +} + +data "google_kms_key_ring" "test_key_ring" { + name = "%{key_ring}" + location = "us-central1" +} + +data "google_kms_crypto_key" "key2" { + name = "%{key2}" + key_ring = data.google_kms_key_ring.test_key_ring.id +} + +resource "google_kms_crypto_key_iam_member" "key2_member" { + crypto_key_id = data.google_kms_crypto_key.key2.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:service-${data.google_project.test_project.number}@gcp-sa-eventarc.iam.gserviceaccount.com" +} + +resource "google_eventarc_google_channel_config" "primary" { + location = "%{region}" + name = "projects/%{project_name}/locations/%{region}/googleChannelConfig" + crypto_key_name = data.google_kms_crypto_key.key2.id + depends_on =[google_kms_crypto_key_iam_member.key2_member] +} + `, context) +} + +func testAccEventarcGoogleChannelConfig_deleteCryptoKey(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_eventarc_google_channel_config" "primary" { + location = "%{region}" + name = "projects/%{project_name}/locations/%{region}/googleChannelConfig" + crypto_key_name = "" +} + `, context) +} + +func testAccCheckEventarcGoogleChannelConfigDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_eventarc_google_channel_config" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &eventarc.GoogleChannelConfig{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + CryptoKeyName: dcl.String(rs.Primary.Attributes["crypto_key_name"]), + Project: 
dcl.StringOrNil(rs.Primary.Attributes["project"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + } + + client := transport_tpg.NewDCLEventarcClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetGoogleChannelConfig(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_eventarc_google_channel_config still exists %v", obj) + } + } + return nil + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_trigger_test.go.tmpl b/mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_trigger_test.go.tmpl new file mode 100644 index 000000000000..7d358bd5739f --- /dev/null +++ b/mmv1/third_party/terraform/services/eventarc/go/resource_eventarc_trigger_test.go.tmpl @@ -0,0 +1,239 @@ +package eventarc_test + +import ( + "context" + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + eventarc "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/eventarc{{ $.DCLVersion }}" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + + +func TestAccEventarcTrigger_channel(t *testing.T) { + t.Parallel() + + region := envvar.GetTestRegionFromEnv() + key1 := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", region, "tf-bootstrap-eventarc-trigger-key1") + key2 := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", region, "tf-bootstrap-eventarc-trigger-key2") + + context := map[string]interface{}{ + "region": region, + "project_name": envvar.GetTestProjectFromEnv(), + 
"service_account": envvar.GetTestServiceAccountFromEnv(t), + "key_ring": tpgresource.GetResourceNameFromSelfLink(key1.KeyRing.Name), + "key1": tpgresource.GetResourceNameFromSelfLink(key1.CryptoKey.Name), + "key2": tpgresource.GetResourceNameFromSelfLink(key2.CryptoKey.Name), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckEventarcChannelTriggerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccEventarcTrigger_createTriggerWithChannelName(context), + }, + { + ResourceName: "google_eventarc_trigger.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccEventarcTrigger_HttpDest(t *testing.T) { + t.Parallel() + + region := envvar.GetTestRegionFromEnv() + + testNetworkName := acctest.BootstrapSharedTestNetwork(t, "attachment-network") + subnetName := acctest.BootstrapSubnet(t, "tf-test-subnet", testNetworkName) + networkAttachmentName := acctest.BootstrapNetworkAttachment(t, "tf-test-attachment", subnetName) + + // Need to have the full network attachment name in the format project/{project_id}/regions/{region_id}/networkAttachments/{networkAttachmentName} + fullFormNetworkAttachmentName := fmt.Sprintf("projects/%s/regions/%s/networkAttachments/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), networkAttachmentName) + + context := map[string]interface{}{ + "region": region, + "project_name": envvar.GetTestProjectFromEnv(), + "service_account": envvar.GetTestServiceAccountFromEnv(t), + "network_attachment": fullFormNetworkAttachmentName, + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckEventarcChannelTriggerDestroyProducer(t), + 
Steps: []resource.TestStep{ + { + Config: testAccEventarcTrigger_createTriggerWithHttpDest(context), + }, + { + ResourceName: "google_eventarc_trigger.primary", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccEventarcTrigger_createTriggerWithChannelName(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "test_project" { + project_id = "%{project_name}" +} + +data "google_kms_key_ring" "test_key_ring" { + name = "%{key_ring}" + location = "us-central1" +} + +data "google_kms_crypto_key" "key1" { + name = "%{key1}" + key_ring = data.google_kms_key_ring.test_key_ring.id +} + + +resource "google_kms_crypto_key_iam_member" "key1_member" { + crypto_key_id = data.google_kms_crypto_key.key1.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:service-${data.google_project.test_project.number}@gcp-sa-eventarc.iam.gserviceaccount.com" +} + +resource "google_eventarc_channel" "test_channel" { + location = "%{region}" + name = "tf-test-channel%{random_suffix}" + crypto_key_name = data.google_kms_crypto_key.key1.id + third_party_provider = "projects/${data.google_project.test_project.project_id}/locations/%{region}/providers/datadog" + depends_on = [google_kms_crypto_key_iam_member.key1_member] +} + +resource "google_cloud_run_service" "default" { + name = "tf-test-eventarc-service%{random_suffix}" + location = "%{region}" + + metadata { + namespace = "%{project_name}" + } + + template { + spec { + containers { + image = "gcr.io/cloudrun/hello" + ports { + container_port = 8080 + } + } + container_concurrency = 50 + timeout_seconds = 100 + } + } + + traffic { + percent = 100 + latest_revision = true + } +} + +resource "google_eventarc_trigger" "primary" { + name = "tf-test-trigger%{random_suffix}" + location = "%{region}" + matching_criteria { + attribute = "type" + value = "datadog.v1.alert" + } + destination { + cloud_run_service { + service = 
google_cloud_run_service.default.name + region = "%{region}" + } + } + service_account = "%{service_account}" + + channel = "projects/${data.google_project.test_project.project_id}/locations/%{region}/channels/${google_eventarc_channel.test_channel.name}" + + depends_on = [google_cloud_run_service.default,google_eventarc_channel.test_channel] +} +`, context) +} + +func testAccEventarcTrigger_createTriggerWithHttpDest(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "test_project" { + project_id = "%{project_name}" +} + +resource "google_eventarc_trigger" "primary" { + name = "tf-test-trigger%{random_suffix}" + location = "%{region}" + matching_criteria { + attribute = "type" + value = "google.cloud.pubsub.topic.v1.messagePublished" + } + destination { + http_endpoint { + uri = "http://10.10.10.8:80/route" + } + network_config { + network_attachment = "%{network_attachment}" + } + + } + service_account = "%{service_account}" + +} +`, context) +} + +func testAccCheckEventarcChannelTriggerDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "rs.google_eventarc_trigger" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + billingProject := "" + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + obj := &eventarc.Trigger{ + Location: dcl.String(rs.Primary.Attributes["location"]), + Name: dcl.String(rs.Primary.Attributes["name"]), + Project: dcl.StringOrNil(rs.Primary.Attributes["project"]), + ServiceAccount: dcl.String(rs.Primary.Attributes["service_account"]), + CreateTime: dcl.StringOrNil(rs.Primary.Attributes["create_time"]), + Etag: dcl.StringOrNil(rs.Primary.Attributes["etag"]), + Uid: dcl.StringOrNil(rs.Primary.Attributes["uid"]), + UpdateTime: dcl.StringOrNil(rs.Primary.Attributes["update_time"]), + Channel: 
dcl.StringOrNil(rs.Primary.Attributes["channel"]), + EventDataContentType: dcl.StringOrNil(rs.Primary.Attributes["event_data_content_type"]), + } + + client := transport_tpg.NewDCLEventarcClient(config, config.UserAgent, billingProject, 0) + _, err := client.GetTrigger(context.Background(), obj) + if err == nil { + return fmt.Errorf("google_eventarc_trigger still exists %v", obj) + } + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app.go.tmpl new file mode 100644 index 000000000000..81b39b144fb3 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app.go.tmpl @@ -0,0 +1,49 @@ +package firebase +{{- if ne $.TargetVersionName "ga" }} +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleFirebaseAndroidApp() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceFirebaseAndroidApp().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "app_id") + + // Allow specifying a project + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleFirebaseAndroidAppRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleFirebaseAndroidAppRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + appId := d.Get("app_id") + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + name := fmt.Sprintf("projects/%s/androidApps/%s", project, appId.(string)) + d.SetId(name) + if err := d.Set("name", name); err != nil { + 
return fmt.Errorf("Error setting name: %s", err) + } + err = resourceFirebaseAndroidAppRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", name) + } + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config.go.tmpl new file mode 100644 index 000000000000..261a700a2a10 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config.go.tmpl @@ -0,0 +1,153 @@ +package firebase +{{- if ne $.TargetVersionName "ga" }} + +import ( + "context" + "fmt" + + "google.golang.org/api/firebase/v1beta1" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" + "github.com/hashicorp/terraform-provider-google/google/fwresource" +) + +// Ensure the implementation satisfies the expected interfaces +var ( + _ datasource.DataSource = &GoogleFirebaseAndroidAppConfigDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleFirebaseAndroidAppConfigDataSource{} +) + +func NewGoogleFirebaseAndroidAppConfigDataSource() datasource.DataSource { + return &GoogleFirebaseAndroidAppConfigDataSource{} +} + +// GoogleFirebaseAndroidAppConfigDataSource defines the data source implementation +type GoogleFirebaseAndroidAppConfigDataSource struct { + client *firebase.Service + project types.String +} + +type GoogleFirebaseAndroidAppConfigModel struct { + Id types.String `tfsdk:"id"` + AppId types.String `tfsdk:"app_id"` + ConfigFilename types.String `tfsdk:"config_filename"` + ConfigFileContents 
types.String `tfsdk:"config_file_contents"` + Project types.String `tfsdk:"project"` +} + +func (d *GoogleFirebaseAndroidAppConfigDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_firebase_android_app_config" +} + +func (d *GoogleFirebaseAndroidAppConfigDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. + MarkdownDescription: "A Google Cloud Firebase Android application configuration", + + Attributes: map[string]schema.Attribute{ + "app_id": schema.StringAttribute{ + Description: "The id of the Firebase Android App.", + MarkdownDescription: "The id of the Firebase Android App.", + Required: true, + }, + + "project": schema.StringAttribute{ + Description: "The project id of the Firebase Android App.", + MarkdownDescription: "The project id of the Firebase Android App.", + Optional: true, + }, + + "config_filename": schema.StringAttribute{ + Description: "The filename that the configuration artifact for the AndroidApp is typically saved as.", + MarkdownDescription: "The filename that the configuration artifact for the AndroidApp is typically saved as.", + Computed: true, + }, + + "config_file_contents": schema.StringAttribute{ + Description: "The content of the XML configuration file as a base64-encoded string.", + MarkdownDescription: "The content of the XML configuration file as a base64-encoded string.", + Computed: true, + }, + + "id": schema.StringAttribute{ + Description: "Firebase Android App Config identifier", + MarkdownDescription: "Firebase Android App Config identifier", + Computed: true, + }, + }, + } +} + +func (d *GoogleFirebaseAndroidAppConfigDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the 
provider has not been configured. + if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + d.client = p.NewFirebaseClient(p.UserAgent, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + d.project = p.Project +} + +func (d *GoogleFirebaseAndroidAppConfigDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleFirebaseAndroidAppConfigModel + var metaData *fwmodels.ProviderMetaModel + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) + if resp.Diagnostics.HasError() { + return + } + + d.client.UserAgent = fwtransport.GenerateFrameworkUserAgentString(metaData, d.client.UserAgent) + + client := firebase.NewProjectsAndroidAppsService(d.client) + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) 
+ if resp.Diagnostics.HasError() { + return + } + + data.Project = fwresource.GetProjectFramework(data.Project, d.project, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + appName := fmt.Sprintf("projects/%s/androidApps/%s/config", data.Project.ValueString(), data.AppId.ValueString()) + data.Id = types.StringValue(appName) + + clientResp, err := client.GetConfig(appName).Do() + if err != nil { + fwtransport.HandleDatasourceNotFoundError(ctx, err, &resp.State, fmt.Sprintf("dataSourceFirebaseAndroidAppConfig %q", data.AppId.ValueString()), &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + } + + tflog.Trace(ctx, "read firebase android app config data source") + + data.ConfigFilename = types.StringValue(clientResp.ConfigFilename) + data.ConfigFileContents = types.StringValue(clientResp.ConfigFileContents) + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl new file mode 100644 index 000000000000..722c12cf6514 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_config_test.go.tmpl @@ -0,0 +1,70 @@ +package firebase_test +{{- if ne $.TargetVersionName "ga" }} +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccDataSourceGoogleFirebaseAndroidAppConfig(t *testing.T) { + t.Parallel() + // Framework-based resources and datasources don't work with VCR yet + acctest.SkipIfVcr(t) + + context := map[string]interface{}{ + "project_id": 
envvar.GetTestProjectFromEnv(), + "package_name": "android.app." + acctest.RandString(t, 5), + "display_name": "tf-test Display Name AndroidAppConfig DataSource", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleFirebaseAndroidAppConfig(context), + Check: resource.ComposeTestCheckFunc( + testAccDataSourceFirebaseAndroidAppConfigCheck("data.google_firebase_android_app_config.my_app_config"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleFirebaseAndroidAppConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_android_app" "my_app_config" { + project = "%{project_id}" + package_name = "%{package_name}" + display_name = "%{display_name}" +} + +data "google_firebase_android_app_config" "my_app_config" { + project = "%{project_id}" + app_id = google_firebase_android_app.my_app_config.app_id +} +`, context) +} + +func testAccDataSourceFirebaseAndroidAppConfigCheck(datasourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[datasourceName] + if !ok { + return fmt.Errorf("root module has no resource called %s", datasourceName) + } + + if ds.Primary.Attributes["config_filename"] == "" { + return fmt.Errorf("config filename not found in data source") + } + + if ds.Primary.Attributes["config_file_contents"] == "" { + return fmt.Errorf("config file contents not found in data source") + } + + return nil + } +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_test.go.tmpl new file mode 100644 index 000000000000..d39e384354d1 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_android_app_test.go.tmpl @@ -0,0 +1,62 @@ +package firebase_test +{{- if ne $.TargetVersionName "ga" }} +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceGoogleFirebaseAndroidApp(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "package_name": "android.package.app" + acctest.RandString(t, 4), + "display_name": "tf-test Display Name AndroidApp DataSource", + } + + resourceName := "data.google_firebase_android_app.my_app" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleFirebaseAndroidApp(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( + resourceName, + "google_firebase_android_app.my_app", + map[string]struct{}{ + "deletion_policy": {}, + }, + ), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleFirebaseAndroidApp(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_android_app" "my_app" { + project = "%{project_id}" + package_name = "%{package_name}" + display_name = "%{display_name}" + sha1_hashes = ["2145bdf698b8715039bd0e83f2069bed435ac21c"] + sha256_hashes = ["2145bdf698b8715039bd0e83f2069bed435ac21ca1b2c3d4e5f6123456789abc"] +} + +data "google_firebase_android_app" "my_app" { + app_id = google_firebase_android_app.my_app.app_id +} + +data "google_firebase_android_app" "my_app_project" { + project = "%{project_id}" + app_id = google_firebase_android_app.my_app.app_id +} +`, context) +} +{{- end }} diff --git 
a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app.go.tmpl new file mode 100644 index 000000000000..54722f76da17 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app.go.tmpl @@ -0,0 +1,49 @@ +package firebase +{{- if ne $.TargetVersionName "ga" }} +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleFirebaseAppleApp() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceFirebaseAppleApp().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "app_id") + + // Allow specifying a project + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleFirebaseAppleAppRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleFirebaseAppleAppRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + appId := d.Get("app_id") + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + name := fmt.Sprintf("projects/%s/iosApps/%s", project, appId.(string)) + d.SetId(name) + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + err = resourceFirebaseAppleAppRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", name) + } + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_config.go.tmpl 
b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_config.go.tmpl new file mode 100644 index 000000000000..f7f5b137fafb --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_config.go.tmpl @@ -0,0 +1,153 @@ +package firebase +{{- if ne $.TargetVersionName "ga" }} + +import ( + "context" + "fmt" + + "google.golang.org/api/firebase/v1beta1" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwresource" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" +) + +// Ensure the implementation satisfies the expected interfaces +var ( + _ datasource.DataSource = &GoogleFirebaseAppleAppConfigDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleFirebaseAppleAppConfigDataSource{} +) + +func NewGoogleFirebaseAppleAppConfigDataSource() datasource.DataSource { + return &GoogleFirebaseAppleAppConfigDataSource{} +} + +// GoogleFirebaseAppleAppConfigDataSource defines the data source implementation +type GoogleFirebaseAppleAppConfigDataSource struct { + client *firebase.Service + project types.String +} + +type GoogleFirebaseAppleAppConfigModel struct { + Id types.String `tfsdk:"id"` + AppId types.String `tfsdk:"app_id"` + ConfigFilename types.String `tfsdk:"config_filename"` + ConfigFileContents types.String `tfsdk:"config_file_contents"` + Project types.String `tfsdk:"project"` +} + +func (d *GoogleFirebaseAppleAppConfigDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_firebase_apple_app_config" +} + +func (d *GoogleFirebaseAppleAppConfigDataSource) 
Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. + MarkdownDescription: "A Google Cloud Firebase Apple application configuration", + + Attributes: map[string]schema.Attribute{ + "app_id": schema.StringAttribute{ + Description: "The id of the Firebase iOS App.", + MarkdownDescription: "The id of the Firebase iOS App.", + Required: true, + }, + + "project": schema.StringAttribute{ + Description: "The project id of the Firebase iOS App.", + MarkdownDescription: "The project id of the Firebase iOS App.", + Optional: true, + }, + + "config_filename": schema.StringAttribute{ + Description: "The filename that the configuration artifact for the IosApp is typically saved as.", + MarkdownDescription: "The filename that the configuration artifact for the IosApp is typically saved as.", + Computed: true, + }, + + "config_file_contents": schema.StringAttribute{ + Description: "The content of the XML configuration file as a base64-encoded string.", + MarkdownDescription: "The content of the XML configuration file as a base64-encoded string.", + Computed: true, + }, + + "id": schema.StringAttribute{ + Description: "Firebase Apple App Config identifier", + MarkdownDescription: "Firebase Apple App Config identifier", + Computed: true, + }, + }, + } +} + +func (d *GoogleFirebaseAppleAppConfigDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + d.client = p.NewFirebaseClient(p.UserAgent, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + d.project = p.Project +} + +func (d *GoogleFirebaseAppleAppConfigDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleFirebaseAppleAppConfigModel + var metaData *fwmodels.ProviderMetaModel + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) + if resp.Diagnostics.HasError() { + return + } + + d.client.UserAgent = fwtransport.GenerateFrameworkUserAgentString(metaData, d.client.UserAgent) + + client := firebase.NewProjectsIosAppsService(d.client) + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + data.Project = fwresource.GetProjectFramework(data.Project, d.project, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + appName := fmt.Sprintf("projects/%s/iosApps/%s/config", data.Project.ValueString(), data.AppId.ValueString()) + data.Id = types.StringValue(appName) + + clientResp, err := client.GetConfig(appName).Do() + if err != nil { + fwtransport.HandleDatasourceNotFoundError(ctx, err, &resp.State, fmt.Sprintf("dataSourceFirebaseAppleAppConfig %q", data.AppId.ValueString()), &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + } + + tflog.Trace(ctx, "read firebase apple app config data source") + + data.ConfigFilename = types.StringValue(clientResp.ConfigFilename) + data.ConfigFileContents = types.StringValue(clientResp.ConfigFileContents) + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_config_test.go.tmpl new file mode 100644 index 000000000000..69b52667f13b --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_config_test.go.tmpl @@ -0,0 +1,85 @@ +package firebase_test +{{- if ne $.TargetVersionName "ga" }} +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccDataSourceGoogleFirebaseAppleAppConfig(t *testing.T) { + // TODO: https://github.com/hashicorp/terraform-provider-google/issues/14158 + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "bundle_id": "apple.app." 
+ acctest.RandString(t, 5), + "display_name": "tf-test Display Name AppleAppConfig DataSource", + "app_store_id": 12345, + "team_id": 1234567890, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.58.0", + Source: "hashicorp/google{{- if ne $.TargetVersionName "ga" -}}-{{$.TargetVersionName}}{{- end }}", + }, + }, + Config: testAccDataSourceGoogleFirebaseAppleAppConfig(context), + Check: resource.ComposeTestCheckFunc( + testAccDataSourceFirebaseAppleAppConfigCheck("data.google_firebase_apple_app_config.my_app_config"), + ), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Config: testAccDataSourceGoogleFirebaseAppleAppConfig(context), + Check: resource.ComposeTestCheckFunc( + testAccDataSourceFirebaseAppleAppConfigCheck("data.google_firebase_apple_app_config.my_app_config"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleFirebaseAppleAppConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_apple_app" "my_app_config" { + project = "%{project_id}" + bundle_id = "%{bundle_id}" + display_name = "%{display_name}" + app_store_id = "%{app_store_id}" + team_id = "%{team_id}" +} + +data "google_firebase_apple_app_config" "my_app_config" { + app_id = google_firebase_apple_app.my_app_config.app_id +} +`, context) +} + +func testAccDataSourceFirebaseAppleAppConfigCheck(datasourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[datasourceName] + if !ok { + return fmt.Errorf("root module has no resource called %s", datasourceName) + } + + if ds.Primary.Attributes["config_filename"] == "" { + return fmt.Errorf("config filename not found in data source") + } + + if ds.Primary.Attributes["config_file_contents"] == "" { + return fmt.Errorf("config file contents 
not found in data source") + } + + return nil + } +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_test.go.tmpl new file mode 100644 index 000000000000..c7eef1095932 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_apple_app_test.go.tmpl @@ -0,0 +1,64 @@ +package firebase_test +{{- if ne $.TargetVersionName "ga" }} +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceGoogleFirebaseAppleApp(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "bundle_id": "apple.app." + acctest.RandString(t, 5), + "display_name": "tf-test Display Name AppleApp DataSource", + "app_store_id": 12345, + "team_id": 1234567890, + } + + resourceName := "data.google_firebase_apple_app.my_app" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleFirebaseAppleApp(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( + resourceName, + "google_firebase_apple_app.my_app", + map[string]struct{}{ + "deletion_policy": {}, + }, + ), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleFirebaseAppleApp(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_apple_app" "my_app" { + project = "%{project_id}" + bundle_id = "%{bundle_id}" + display_name = "%{display_name}" + app_store_id = "%{app_store_id}" + team_id = "%{team_id}" +} + +data "google_firebase_apple_app" 
"my_app" { + app_id = google_firebase_apple_app.my_app.app_id +} + +data "google_firebase_apple_app" "my_app_project" { + project = "%{project_id}" + app_id = google_firebase_apple_app.my_app.app_id +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app.go.tmpl new file mode 100644 index 000000000000..2b6154e14e57 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app.go.tmpl @@ -0,0 +1,49 @@ +package firebase +{{- if ne $.TargetVersionName "ga" }} +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleFirebaseWebApp() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceFirebaseWebApp().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "app_id") + + // Allow specifying a project + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleFirebaseWebAppRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleFirebaseWebAppRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + appId := d.Get("app_id") + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + name := fmt.Sprintf("projects/%s/webApps/%s", project, appId.(string)) + d.SetId(name) + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + err = resourceFirebaseWebAppRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", name) + } + return nil 
+} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app_config.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app_config.go.tmpl new file mode 100644 index 000000000000..7479139af4f4 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app_config.go.tmpl @@ -0,0 +1,205 @@ +package firebase +{{- if ne $.TargetVersionName "ga" }} + +import ( + "context" + "fmt" + + "google.golang.org/api/firebase/v1beta1" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwresource" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" +) + +// Ensure the implementation satisfies the expected interfaces +var ( + _ datasource.DataSource = &GoogleFirebaseWebAppConfigDataSource{} + _ datasource.DataSourceWithConfigure = &GoogleFirebaseWebAppConfigDataSource{} +) + +func NewGoogleFirebaseWebAppConfigDataSource() datasource.DataSource { + return &GoogleFirebaseWebAppConfigDataSource{} +} + +// GoogleFirebaseWebAppConfigDataSource defines the data source implementation +type GoogleFirebaseWebAppConfigDataSource struct { + client *firebase.Service + project types.String +} + +type GoogleFirebaseWebAppConfigModel struct { + Id types.String `tfsdk:"id"` + WebAppId types.String `tfsdk:"web_app_id"` + ApiKey types.String `tfsdk:"api_key"` + AuthDomain types.String `tfsdk:"auth_domain"` + DatabaseUrl types.String `tfsdk:"database_url"` + LocationId types.String `tfsdk:"location_id"` + MeasurementId types.String `tfsdk:"measurement_id"` + MessagingSenderId types.String `tfsdk:"messaging_sender_id"` + StorageBucket 
types.String `tfsdk:"storage_bucket"` + Project types.String `tfsdk:"project"` +} + +func (d *GoogleFirebaseWebAppConfigDataSource) Metadata(ctx context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_firebase_web_app_config" +} + +func (d *GoogleFirebaseWebAppConfigDataSource) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + // This description is used by the documentation generator and the language server. + MarkdownDescription: "A Google Cloud Firebase web application configuration", + + Attributes: map[string]schema.Attribute{ + "web_app_id": schema.StringAttribute{ + Description: "The id of the Firebase web App.", + MarkdownDescription: "The id of the Firebase web App.", + Required: true, + }, + + "project": schema.StringAttribute{ + Description: "The project id of the Firebase web App.", + MarkdownDescription: "The project id of the Firebase web App.", + Optional: true, + }, + + "api_key": schema.StringAttribute{ + Description: "The API key associated with the web App.", + MarkdownDescription: "The API key associated with the web App.", + Computed: true, + }, + + "auth_domain": schema.StringAttribute{ + Description: "The domain Firebase Auth configures for OAuth redirects, in the format `projectId.firebaseapp.com`", + MarkdownDescription: "The domain Firebase Auth configures for OAuth redirects, in the format `projectId.firebaseapp.com`", + Computed: true, + }, + + "database_url": schema.StringAttribute{ + Description: "The default Firebase Realtime Database URL.", + MarkdownDescription: "The default Firebase Realtime Database URL.", + Computed: true, + }, + + "location_id": schema.StringAttribute{ + Description: "The ID of the project's default GCP resource location. The location is one of the available GCP resource locations. 
" + + "This field is omitted if the default GCP resource location has not been finalized yet. To set your project's " + + "default GCP resource location, call defaultLocation.finalize after you add Firebase services to your project.", + MarkdownDescription: "The ID of the project's default GCP resource location. The location is one of the available GCP resource locations. " + + "This field is omitted if the default GCP resource location has not been finalized yet. To set your project's " + + "default GCP resource location, call defaultLocation.finalize after you add Firebase services to your project.", + Computed: true, + }, + + "measurement_id": schema.StringAttribute{ + Description: "The unique Google-assigned identifier of the Google Analytics web stream associated with the Firebase Web App. " + + "Firebase SDKs use this ID to interact with Google Analytics APIs. " + + "This field is only present if the App is linked to a web stream in a Google Analytics App + Web property. " + + "Learn more about this ID and Google Analytics web streams in the Analytics documentation. " + + "To generate a measurementId and link the Web App with a Google Analytics web stream, call projects.addGoogleAnalytics.", + MarkdownDescription: "The unique Google-assigned identifier of the Google Analytics web stream associated with the Firebase Web App. " + + "Firebase SDKs use this ID to interact with Google Analytics APIs. " + + "This field is only present if the App is linked to a web stream in a Google Analytics App + Web property. " + + "Learn more about this ID and Google Analytics web streams in the Analytics documentation. 
" + + "To generate a measurementId and link the Web App with a Google Analytics web stream, call projects.addGoogleAnalytics.", + Computed: true, + }, + + "messaging_sender_id": schema.StringAttribute{ + Description: "The sender ID for use with Firebase Cloud Messaging.", + MarkdownDescription: "The sender ID for use with Firebase Cloud Messaging.", + Computed: true, + }, + + "storage_bucket": schema.StringAttribute{ + Description: "The default Cloud Storage for Firebase storage bucket name.", + MarkdownDescription: "The default Cloud Storage for Firebase storage bucket name.", + Computed: true, + }, + + "id": schema.StringAttribute{ + Description: "Firebase Web App Config identifier", + MarkdownDescription: "Firebase Web App Config identifier", + Computed: true, + }, + }, + } +} + +func (d *GoogleFirebaseWebAppConfigDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + // Prevent panic if the provider has not been configured. + if req.ProviderData == nil { + return + } + + p, ok := req.ProviderData.(*fwtransport.FrameworkProviderConfig) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *fwtransport.FrameworkProviderConfig, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return + } + + d.client = p.NewFirebaseClient(p.UserAgent, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + d.project = p.Project +} + +func (d *GoogleFirebaseWebAppConfigDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + var data GoogleFirebaseWebAppConfigModel + var metaData *fwmodels.ProviderMetaModel + + // Read Provider meta into the meta model + resp.Diagnostics.Append(req.ProviderMeta.Get(ctx, &metaData)...) 
+ if resp.Diagnostics.HasError() { + return + } + + d.client.UserAgent = fwtransport.GenerateFrameworkUserAgentString(metaData, d.client.UserAgent) + + client := firebase.NewProjectsWebAppsService(d.client) + + // Read Terraform configuration data into the model + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + + data.Project = fwresource.GetProjectFramework(data.Project, d.project, &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + + appName := fmt.Sprintf("projects/%s/webApps/%s/config", data.Project.ValueString(), data.WebAppId.ValueString()) + data.Id = data.WebAppId + + clientResp, err := client.GetConfig(appName).Do() + if err != nil { + fwtransport.HandleDatasourceNotFoundError(ctx, err, &resp.State, fmt.Sprintf("dataSourceFirebaseWebAppConfig %q", data.WebAppId.ValueString()), &resp.Diagnostics) + if resp.Diagnostics.HasError() { + return + } + } + + tflog.Trace(ctx, "read firebase web app config data source") + + data.ApiKey = types.StringValue(clientResp.ApiKey) + data.AuthDomain = types.StringValue(clientResp.AuthDomain) + data.DatabaseUrl = types.StringValue(clientResp.DatabaseURL) + data.LocationId = types.StringValue(clientResp.LocationId) + data.MeasurementId = types.StringValue(clientResp.MeasurementId) + data.MessagingSenderId = types.StringValue(clientResp.MessagingSenderId) + data.StorageBucket = types.StringValue(clientResp.StorageBucket) + + // Save data into Terraform state + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app_test.go.tmpl new file mode 100644 index 000000000000..19be22fac1df --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/data_source_google_firebase_web_app_test.go.tmpl @@ -0,0 +1,58 @@ +package firebase_test +{{- if ne $.TargetVersionName "ga" }} +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceGoogleFirebaseWebApp(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "display_name": "tf_test Display Name WebApp DataSource", + } + + resourceName := "data.google_firebase_web_app.my_app" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleFirebaseWebApp(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( + resourceName, + "google_firebase_web_app.my_app", + map[string]struct{}{ + "deletion_policy": {}, + }, + ), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleFirebaseWebApp(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_web_app" "my_app" { + project = "%{project_id}" + display_name = "%{display_name}" +} + +data "google_firebase_web_app" "my_app" { + app_id = google_firebase_web_app.my_app.app_id +} + +data "google_firebase_web_app" "my_app_project" { + project = "%{project_id}" + app_id = google_firebase_web_app.my_app.app_id +} +`, context) +} +{{- end }} diff --git 
a/mmv1/third_party/terraform/services/firebase/go/resource_firebase_android_app_update_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/resource_firebase_android_app_update_test.go.tmpl new file mode 100644 index 000000000000..170445fc6f4f --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/resource_firebase_android_app_update_test.go.tmpl @@ -0,0 +1,85 @@ +package firebase_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccFirebaseAndroidApp_update(t *testing.T) { + t.Parallel() + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "package_name": "android.package.app" + acctest.RandString(t, 4), + "random_suffix": acctest.RandString(t, 10), + "display_name": "tf-test Display Name N", + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAndroidApp(context, "", "key1"), + }, + { + Config: testAccFirebaseAndroidApp(context, "2", "key2"), + }, + }, + }) +} + +func testAccFirebaseAndroidApp(context map[string]interface{}, update string, apiKeyLabel string) string { + context["display_name"] = context["display_name"].(string) + update + context["api_key_label"] = apiKeyLabel + return acctest.Nprintf(` +resource "google_firebase_android_app" "update" { + provider = google-beta + project = "%{project_id}" + + package_name = "%{package_name}" + display_name = "%{display_name} %{random_suffix}" + sha1_hashes = ["2145bdf698b8715039bd0e83f2069bed435ac21c"] + sha256_hashes = ["2145bdf698b8715039bd0e83f2069bed435ac21ca1b2c3d4e5f6123456789abc"] + api_key_id = google_apikeys_key.%{api_key_label}.uid +} + +resource 
"google_apikeys_key" "key1" { + provider = google-beta + project = "%{project_id}" + + name = "tf-test-api-key1%{random_suffix}" + display_name = "Test api key 1" + + restrictions { + android_key_restrictions { + allowed_applications { + package_name = "%{package_name}" + sha1_fingerprint = "2145bdf698b8715039bd0e83f2069bed435ac21c" + } + } + } +} + +resource "google_apikeys_key" "key2" { + provider = google-beta + project = "%{project_id}" + + name = "tf-test-api-key2%{random_suffix}" + display_name = "Test api key 2" + + restrictions { + android_key_restrictions { + allowed_applications { + package_name = "%{package_name}" + sha1_fingerprint = "2145bdf698b8715039bd0e83f2069bed435ac21c" + } + } + } +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/resource_firebase_apple_app_update_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/resource_firebase_apple_app_update_test.go.tmpl new file mode 100644 index 000000000000..0dc1f4d4afdc --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/resource_firebase_apple_app_update_test.go.tmpl @@ -0,0 +1,81 @@ +package firebase_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccFirebaseAppleApp_update(t *testing.T) { + t.Parallel() + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "bundle_id": "apple.app.12345", + "random_suffix": acctest.RandString(t, 10), + "display_name": "tf-test Display Name N", + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppleApp(context, 12345, "1", "key1"), + }, + { + Config: 
testAccFirebaseAppleApp(context, 67890, "2", "key2"), + }, + }, + }) +} + +func testAccFirebaseAppleApp(context map[string]interface{}, appStoreId int, delta string, apiKeyLabel string) string { + context["display_name"] = context["display_name"].(string) + delta + context["app_store_id"] = appStoreId + context["team_id"] = "123456789" + delta + context["api_key_label"] = apiKeyLabel + return acctest.Nprintf(` +resource "google_firebase_apple_app" "update" { + provider = google-beta + project = "%{project_id}" + + bundle_id = "%{bundle_id}" + display_name = "%{display_name} %{random_suffix}" + app_store_id = "%{app_store_id}" + team_id = "%{team_id}" + api_key_id = google_apikeys_key.%{api_key_label}.uid +} + +resource "google_apikeys_key" "key1" { + provider = google-beta + project = "%{project_id}" + + name = "tf-test-api-key1%{random_suffix}" + display_name = "Test api key 1" + + restrictions { + ios_key_restrictions { + allowed_bundle_ids = ["%{bundle_id}"] + } + } +} + +resource "google_apikeys_key" "key2" { + provider = google-beta + project = "%{project_id}" + + name = "tf-test-api-key2%{random_suffix}" + display_name = "Test api key 2" + + restrictions { + ios_key_restrictions { + allowed_bundle_ids = ["%{bundle_id}"] + } + } +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/resource_firebase_project_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/resource_firebase_project_test.go.tmpl new file mode 100644 index 000000000000..e73d9ff08d5b --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/resource_firebase_project_test.go.tmpl @@ -0,0 +1,58 @@ +package firebase_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccFirebaseProject_destroyAndReapply(t 
*testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseProject_firebaseProjectBasicExample(context), + }, + { + Config: testAccFirebaseProject_firebaseProjectBasicExampleDestroyed(context), + }, + { + Config: testAccFirebaseProject_firebaseProjectBasicExample(context), + }, + { + ResourceName: "google_firebase_project.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccFirebaseProject_firebaseProjectBasicExampleDestroyed(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "default" { + provider = google-beta + + project_id = "tf-test-my-project%{random_suffix}" + name = "tf-test-my-project%{random_suffix}" + org_id = "%{org_id}" + + labels = { + "firebase" = "enabled" + } +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/firebase/go/resource_firebase_web_app_test.go.tmpl b/mmv1/third_party/terraform/services/firebase/go/resource_firebase_web_app_test.go.tmpl new file mode 100644 index 000000000000..ad9c9fd4bdc9 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebase/go/resource_firebase_web_app_test.go.tmpl @@ -0,0 +1,186 @@ +package firebase_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg 
"github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccFirebaseWebApp_firebaseWebAppFull(t *testing.T) { + // TODO: https://github.com/hashicorp/terraform-provider-google/issues/14158 + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "display_name": "tf-test Display Name N", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseWebApp_firebaseWebAppFull(context, "", "key1"), + }, + { + Config: testAccFirebaseWebApp_firebaseWebAppFull(context, "2", "key2"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.google_firebase_web_app_config.default", "api_key"), + resource.TestCheckResourceAttrSet("data.google_firebase_web_app_config.default", "auth_domain"), + resource.TestCheckResourceAttrSet("data.google_firebase_web_app_config.default", "storage_bucket"), + ), + }, + { + Config: testAccFirebaseWebApp_firebaseWebAppFull(context, "", "key1"), + }, + { + Config: testAccFirebaseWebApp_firebaseWebAppFull(context, "2", "key2"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.google_firebase_web_app_config.default", "api_key"), + resource.TestCheckResourceAttrSet("data.google_firebase_web_app_config.default", "auth_domain"), + resource.TestCheckResourceAttrSet("data.google_firebase_web_app_config.default", "storage_bucket"), + ), + }, + }, + }) +} + +func testAccFirebaseWebApp_firebaseWebAppFull(context map[string]interface{}, update string, apiKeyLabel string) string { + context["display_name"] = context["display_name"].(string) + update + context["api_key_label"] = apiKeyLabel + return acctest.Nprintf(` +resource "google_apikeys_key" "key1" { + provider = google-beta + name = 
"tf-test-api-key1%{random_suffix}" + display_name = "Test api key 1" + project = "%{project_id}" +} + +resource "google_apikeys_key" "key2" { + provider = google-beta + name = "tf-test-api-key2%{random_suffix}" + display_name = "Test api key 2" + project = "%{project_id}" +} + +resource "google_firebase_web_app" "default" { + provider = google-beta + project = "%{project_id}" + display_name = "%{display_name} %{random_suffix}" + api_key_id = google_apikeys_key.%{api_key_label}.uid + deletion_policy = "DELETE" +} + +data "google_firebase_web_app_config" "default" { + provider = google-beta + web_app_id = google_firebase_web_app.default.app_id +} +`, context) +} + +func TestAccFirebaseWebApp_firebaseWebAppSkipDelete(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + "display_name": "tf-test Display Name N", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaseWebAppNotDestroyedProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseWebApp_firebaseWebAppSkipDelete(context, ""), + }, + { + ResourceName: "google_firebase_web_app.skip_delete", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_policy", "project"}, + }, + }, + }) +} + +func testAccFirebaseWebApp_firebaseWebAppSkipDelete(context map[string]interface{}, update string) string { + // Create a new project so we can clean up the project entirely + return acctest.Nprintf(` +resource "google_project" "default" { + provider = google-beta + + project_id = "tf-test%{random_suffix}" + name = "tf-test%{random_suffix}" + org_id = "%{org_id}" + labels = { + "firebase" = "enabled" + } +} + +resource "google_firebase_project" "default" { + provider = google-beta + project = google_project.default.project_id +} + 
+resource "google_firebase_web_app" "skip_delete" { + provider = google-beta + project = google_firebase_project.default.project + display_name = "%{display_name} %{random_suffix}" + deletion_policy = "ABANDON" +} +`, context) +} + +func testAccCheckFirebaseWebAppNotDestroyedProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_firebase_web_app" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}FirebaseBasePath{{"}}"}}{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err != nil { + return fmt.Errorf("FirebaseWebApp doesn't exists at %s", url) + } + } + + return nil + } +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_app_attest_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_app_attest_config_test.go.tmpl new file mode 100644 index 000000000000..75df876e7293 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_app_attest_config_test.go.tmpl @@ -0,0 +1,61 @@ +package firebaseappcheck_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseAppCheckAppAttestConfig_firebaseAppCheckAppAttestConfigUpdate(t *testing.T) 
{ + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "team_id": "9987654321", + "random_suffix": acctest.RandString(t, 10), + "token_ttl": "7200s", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppCheckAppAttestConfig_firebaseAppCheckAppAttestConfigMinimalExample(context), + }, + { + ResourceName: "google_firebase_app_check_app_attest_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + { + Config: testAccFirebaseAppCheckAppAttestConfig_firebaseAppCheckAppAttestConfigFullExample(context), + }, + { + ResourceName: "google_firebase_app_check_app_attest_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + { + Config: testAccFirebaseAppCheckAppAttestConfig_firebaseAppCheckAppAttestConfigMinimalExample(context), + }, + { + ResourceName: "google_firebase_app_check_app_attest_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + }, + }) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_debug_token_test.go.tmpl b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_debug_token_test.go.tmpl new file mode 100644 index 000000000000..bd8a2fd50cf2 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_debug_token_test.go.tmpl @@ -0,0 +1,87 @@ +package firebaseappcheck_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseAppCheckDebugToken_firebaseAppCheckDebugTokenUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "display_name": "Debug Token 1", + "token": "5E728315-E121-467F-BCA1-1FE71130BB98", + } + + contextUpdated := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "display_name": "Debug Token 2", + "token": "5E728315-E121-467F-BCA1-1FE71130BB98", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + CheckDestroy: testAccCheckFirebaseAppCheckDebugTokenDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppCheckDebugToken_firebaseAppCheckDebugTokenTemplate(context), + }, + { + ResourceName: "google_firebase_app_check_debug_token.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"token", "app_id"}, + }, + { + Config: testAccFirebaseAppCheckDebugToken_firebaseAppCheckDebugTokenTemplate(contextUpdated), + }, + { + ResourceName: "google_firebase_app_check_debug_token.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"token", "app_id"}, + }, + }, + }) +} + +func testAccFirebaseAppCheckDebugToken_firebaseAppCheckDebugTokenTemplate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_web_app" "default" { + provider = google-beta + + project = "%{project_id}" + display_name = "Web App for debug token" +} + +# It takes a while for App Check to recognize the new app +# If your app already exists, you don't have to wait 30 seconds. 
+resource "time_sleep" "wait_30s" { + depends_on = [google_firebase_web_app.default] + create_duration = "30s" +} + +resource "google_firebase_app_check_debug_token" "default" { + provider = google-beta + + project = "%{project_id}" + app_id = google_firebase_web_app.default.app_id + display_name = "%{display_name}" + token = "%{token}" + + depends_on = [time_sleep.wait_30s] +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_device_check_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_device_check_config_test.go.tmpl new file mode 100644 index 000000000000..1282e6dc5623 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_device_check_config_test.go.tmpl @@ -0,0 +1,62 @@ +package firebaseappcheck_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseAppCheckDeviceCheckConfig_firebaseAppCheckDeviceCheckConfigUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "team_id": "9987654321", + "private_key_path": "test-fixtures/private-key.p8", + "token_ttl": "3900s", + "random_suffix": acctest.RandString(t, 10), + } + + contextUpdated := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "team_id": "9987654321", + "private_key_path": "test-fixtures/private-key-2.p8", + "token_ttl": "7200s", + // Bundle ID needs to be the same between updates but different between tests + "random_suffix": context["random_suffix"], + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderBetaFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppCheckDeviceCheckConfig_firebaseAppCheckDeviceCheckConfigFullExample(context), + }, + { + ResourceName: "google_firebase_app_check_device_check_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"private_key", "app_id"}, + }, + { + Config: testAccFirebaseAppCheckDeviceCheckConfig_firebaseAppCheckDeviceCheckConfigFullExample(contextUpdated), + }, + { + ResourceName: "google_firebase_app_check_device_check_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"private_key", "app_id"}, + }, + }, + }) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_play_integrity_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_play_integrity_config_test.go.tmpl new file mode 100644 index 000000000000..698c79f946a7 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_play_integrity_config_test.go.tmpl @@ -0,0 +1,60 @@ +package firebaseappcheck_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseAppCheckPlayIntegrityConfig_firebaseAppCheckPlayIntegrityConfigUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "token_ttl": "7200s", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderBetaFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppCheckPlayIntegrityConfig_firebaseAppCheckPlayIntegrityConfigMinimalExample(context), + }, + { + ResourceName: "google_firebase_app_check_play_integrity_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + { + Config: testAccFirebaseAppCheckPlayIntegrityConfig_firebaseAppCheckPlayIntegrityConfigFullExample(context), + }, + { + ResourceName: "google_firebase_app_check_play_integrity_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + { + Config: testAccFirebaseAppCheckPlayIntegrityConfig_firebaseAppCheckPlayIntegrityConfigMinimalExample(context), + }, + { + ResourceName: "google_firebase_app_check_play_integrity_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + }, + }) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_recaptcha_enterprise_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_recaptcha_enterprise_config_test.go.tmpl new file mode 100644 index 000000000000..4b663c534a2a --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_recaptcha_enterprise_config_test.go.tmpl @@ -0,0 +1,59 @@ +package firebaseappcheck_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseAppCheckRecaptchaEnterpriseConfig_firebaseAppCheckRecaptchaEnterpriseConfigUpdate(t *testing.T) { + 
t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "token_ttl": "7200s", + "site_key": "6LdpMXIpAAAAANkwWQPgEdjEhal7ugkH9RK9ytuw", + "random_suffix": acctest.RandString(t, 10), + } + + contextUpdated := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "token_ttl": "3800s", + "site_key": "7LdpMXIpAAAAANkwWQPgEdjEhal7ugkH9RK9ytuw", + "random_suffix": context["random_suffix"], + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppCheckRecaptchaEnterpriseConfig_firebaseAppCheckRecaptchaEnterpriseConfigBasicExample(context), + }, + { + ResourceName: "google_firebase_app_check_recaptcha_enterprise_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + { + Config: testAccFirebaseAppCheckRecaptchaEnterpriseConfig_firebaseAppCheckRecaptchaEnterpriseConfigBasicExample(contextUpdated), + }, + { + ResourceName: "google_firebase_app_check_recaptcha_enterprise_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"app_id"}, + }, + }, + }) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_recaptcha_v3_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_recaptcha_v3_config_test.go.tmpl new file mode 100644 index 000000000000..045d890e5104 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_recaptcha_v3_config_test.go.tmpl @@ -0,0 +1,59 @@ +package firebaseappcheck_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseAppCheckRecaptchaV3Config_firebaseAppCheckRecaptchaV3ConfigUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "token_ttl": "7200s", + "site_secret": "6Lf9YnQpAAAAAC3-MHmdAllTbPwTZxpUw5d34YzX", + "random_suffix": acctest.RandString(t, 10), + } + + contextUpdated := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "token_ttl": "3800s", + "site_secret": "7Lf9YnQpAAAAAC3-MHmdAllTbPwTZxpUw5d34YzX", + "random_suffix": context["random_suffix"], + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "random": {}, + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppCheckRecaptchaV3Config_firebaseAppCheckRecaptchaV3ConfigBasicExample(context), + }, + { + ResourceName: "google_firebase_app_check_recaptcha_v3_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"site_secret", "app_id"}, + }, + { + Config: testAccFirebaseAppCheckRecaptchaV3Config_firebaseAppCheckRecaptchaV3ConfigBasicExample(contextUpdated), + }, + { + ResourceName: "google_firebase_app_check_recaptcha_v3_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"site_secret", "app_id"}, + }, + }, + }) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_service_config_test.go.tmpl b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_service_config_test.go.tmpl new file mode 100644 index 000000000000..abb7f2a65109 --- 
/dev/null +++ b/mmv1/third_party/terraform/services/firebaseappcheck/go/resource_firebase_app_check_service_config_test.go.tmpl @@ -0,0 +1,137 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 +package firebaseappcheck_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseAppCheckServiceConfig_firebaseAppCheckServiceConfigUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaseAppCheckServiceConfigDestroyProducer(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccFirebaseAppCheckServiceConfig_firebaseAppCheckServiceConfigUpdate(context, "UNENFORCED"), + }, + { + ResourceName: "google_firebase_app_check_service_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_id"}, + }, + { + Config: testAccFirebaseAppCheckServiceConfig_firebaseAppCheckServiceConfigUpdate(context, "ENFORCED"), + }, + { + ResourceName: "google_firebase_app_check_service_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_id"}, + }, + { + Config: testAccFirebaseAppCheckServiceConfig_firebaseAppCheckServiceConfigUpdate(context, ""), + }, + { + ResourceName: "google_firebase_app_check_service_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_id"}, + }, + }, + }) +} + +func 
testAccFirebaseAppCheckServiceConfig_firebaseAppCheckServiceConfigUpdate(context map[string]interface{}, enforcementMode string) string { + context["enforcement_mode"] = enforcementMode + return acctest.Nprintf(` +resource "google_project" "default" { + provider = google-beta + project_id = "tf-test-appcheck%{random_suffix}" + name = "tf-test-appcheck%{random_suffix}" + org_id = "%{org_id}" + labels = { + "firebase" = "enabled" + } +} + +resource "google_project_service" "firebase" { + provider = google-beta + project = google_project.default.project_id + service = "firebase.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "database" { + provider = google-beta + project = google_project.default.project_id + service = "firebasedatabase.googleapis.com" + disable_on_destroy = false + depends_on = [ + google_project_service.firebase, + ] +} + +resource "google_project_service" "appcheck" { + provider = google-beta + project = google_project.default.project_id + service = "firebaseappcheck.googleapis.com" + disable_on_destroy = false + depends_on = [ + google_project_service.database, + ] +} + +resource "google_firebase_project" "default" { + provider = google-beta + project = google_project.default.project_id + + depends_on = [ + google_project_service.appcheck, + ] +} + +# It takes a while for the new project to be ready for a database +resource "time_sleep" "wait_30s" { + depends_on = [google_firebase_project.default] + create_duration = "30s" +} + +resource "google_firebase_database_instance" "default" { + provider = google-beta + project = google_firebase_project.default.project + region = "us-central1" + instance_id = "tf-test-appcheck%{random_suffix}-default-rtdb" + type = "DEFAULT_DATABASE" + + depends_on = [time_sleep.wait_30s] +} + +resource "google_firebase_app_check_service_config" "default" { + provider = google-beta + project = google_firebase_project.default.project + service_id = "firebasedatabase.googleapis.com" + 
enforcement_mode = "%{enforcement_mode}" + + depends_on = [google_firebase_database_instance.default] +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebasedatabase/go/resource_firebase_database_instance_test.go.tmpl b/mmv1/third_party/terraform/services/firebasedatabase/go/resource_firebase_database_instance_test.go.tmpl new file mode 100644 index 000000000000..524aafe661ca --- /dev/null +++ b/mmv1/third_party/terraform/services/firebasedatabase/go/resource_firebase_database_instance_test.go.tmpl @@ -0,0 +1,74 @@ +package firebasedatabase_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func testAccFirebaseDatabaseInstance_firebaseDatabaseInstanceInState(context map[string]interface{}, state string) string { + context["desired_state"] = state + return acctest.Nprintf(` +resource "google_firebase_database_instance" "updated" { + provider = google-beta + project = "%{project_id}" + region = "%{region}" + instance_id = "tf-test-state-change-db%{random_suffix}" + desired_state = "%{desired_state}" +} +`, context) +} + +func TestAccFirebaseDatabaseInstance_firebaseDatabaseInstanceStateChange(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "region": envvar.GetTestRegionFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaseDatabaseInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseDatabaseInstance_firebaseDatabaseInstanceInState(context, "ACTIVE"), + }, + { + ResourceName: 
"google_firebase_database_instance.updated", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region", "instance_id", "desired_state"}, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_firebase_database_instance.updated", "database_url"), + ), + }, + { + Config: testAccFirebaseDatabaseInstance_firebaseDatabaseInstanceInState(context, "DISABLED"), + }, + { + ResourceName: "google_firebase_database_instance.updated", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region", "instance_id", "desired_state"}, + }, + { + Config: testAccFirebaseDatabaseInstance_firebaseDatabaseInstanceInState(context, "ACTIVE"), + }, + { + ResourceName: "google_firebase_database_instance.updated", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region", "instance_id", "desired_state"}, + }, + }, + }) +} + +{{ end }} + diff --git a/mmv1/third_party/terraform/services/firebaseextensions/go/resource_firebase_extensions_instance_test.go.tmpl b/mmv1/third_party/terraform/services/firebaseextensions/go/resource_firebase_extensions_instance_test.go.tmpl new file mode 100644 index 000000000000..836db0b276d7 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebaseextensions/go/resource_firebase_extensions_instance_test.go.tmpl @@ -0,0 +1,149 @@ +package firebaseextensions_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccFirebaseExtensionsInstance_firebaseExtentionsInstanceResizeImageUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "location": "us-central1", + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaseExtensionsInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseExtensionsInstance_firebaseExtentionsInstanceResizeImageBefore(context), + }, + { + ResourceName: "google_firebase_extensions_instance.resize_image", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance_id"}, + }, + { + Config: testAccFirebaseExtensionsInstance_firebaseExtentionsInstanceResizeImageAfter(context), + }, + { + ResourceName: "google_firebase_extensions_instance.resize_image", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"instance_id"}, + }, + }, + }) +} + +func testAccFirebaseExtensionsInstance_firebaseExtentionsInstanceResizeImageBefore(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "images" { + provider = google-beta + project = "%{project_id}" + name = "tf-test-bucket-id%{random_suffix}" + location = "US" + uniform_bucket_level_access = true + + # Delete all objects when the bucket is deleted + force_destroy = true +} + +resource "google_firebase_extensions_instance" "resize_image" { + provider = google-beta + project = "%{project_id}" + instance_id = "tf-test-storage-resize-images%{random_suffix}" + config { + extension_ref = "firebase/storage-resize-images" + extension_version = "0.2.2" + + # The following params apply to the firebase/storage-resize-images extension. 
+ # Different extensions may have different params + params = { + DELETE_ORIGINAL_FILE = false + MAKE_PUBLIC = false + IMAGE_TYPE = "jpeg" + IS_ANIMATED = true + FUNCTION_MEMORY = 1024 + DO_BACKFILL = false + IMG_SIZES = "200x200" + IMG_BUCKET = google_storage_bucket.images.name + } + + system_params = { + "firebaseextensions.v1beta.function/location" = "%{location}" + "firebaseextensions.v1beta.function/maxInstances" = 3000 + "firebaseextensions.v1beta.function/minInstances" = 0 + "firebaseextensions.v1beta.function/vpcConnectorEgressSettings" = "VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED" + } + + allowed_event_types = [ + "firebase.extensions.storage-resize-images.v1.onCompletion" + ] + + eventarc_channel = "projects/%{project_id}/locations/%{location}/channels/firebase" + } +} +`, context) +} + +func testAccFirebaseExtensionsInstance_firebaseExtentionsInstanceResizeImageAfter(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "images" { + provider = google-beta + project = "%{project_id}" + name = "tf-test-bucket-id%{random_suffix}" + location = "US" + uniform_bucket_level_access = true + + # Delete all objects when the bucket is deleted + force_destroy = true +} + +resource "google_firebase_extensions_instance" "resize_image" { + provider = google-beta + project = "%{project_id}" + instance_id = "tf-test-storage-resize-images%{random_suffix}" + config { + extension_ref = "firebase/storage-resize-images" + extension_version = "0.2.2" + + # The following params apply to the firebase/storage-resize-images extension. 
+ # Different extensions may have different params + params = { + # Changed params + DELETE_ORIGINAL_FILE = true + MAKE_PUBLIC = true + IMAGE_TYPE = "jpeg" + IS_ANIMATED = true + FUNCTION_MEMORY = 512 + DO_BACKFILL = true + IMG_SIZES = "400x400" + IMG_BUCKET = google_storage_bucket.images.name + } + + system_params = { + "firebaseextensions.v1beta.function/location" = "%{location}" + # Changed params + "firebaseextensions.v1beta.function/maxInstances" = 100 + "firebaseextensions.v1beta.function/minInstances" = 0 + "firebaseextensions.v1beta.function/vpcConnectorEgressSettings" = "VPC_CONNECTOR_EGRESS_SETTINGS_UNSPECIFIED" + } + + # Disable events + } +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/firebasehosting/go/data_source_google_firebase_hosting_channel.go.tmpl b/mmv1/third_party/terraform/services/firebasehosting/go/data_source_google_firebase_hosting_channel.go.tmpl new file mode 100644 index 000000000000..38f8479410db --- /dev/null +++ b/mmv1/third_party/terraform/services/firebasehosting/go/data_source_google_firebase_hosting_channel.go.tmpl @@ -0,0 +1,47 @@ +package firebasehosting +{{- if ne $.TargetVersionName "ga" }} +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleFirebaseHostingChannel() *schema.Resource { + // Generate datasource schema from resource + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceFirebaseHostingChannel().Schema) + + // Set 'Required' schema elements + tpgresource.AddRequiredFieldsToSchema(dsSchema, "site_id", "channel_id") + + return &schema.Resource{ + Read: dataSourceGoogleFirebaseHostingChannelRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleFirebaseHostingChannelRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) 
+ + id, err := tpgresource.ReplaceVars(d, config, "sites/{{"{{"}}site_id{{"}}"}}/channels/{{"{{"}}channel_id{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + err = resourceFirebaseHostingChannelRead(d, meta) + if err != nil { + return err + } + + if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebasehosting/go/data_source_google_firebase_hosting_channel_test.go.tmpl b/mmv1/third_party/terraform/services/firebasehosting/go/data_source_google_firebase_hosting_channel_test.go.tmpl new file mode 100644 index 000000000000..0b7fa3cc6ebf --- /dev/null +++ b/mmv1/third_party/terraform/services/firebasehosting/go/data_source_google_firebase_hosting_channel_test.go.tmpl @@ -0,0 +1,60 @@ +package firebasehosting_test +{{- if ne $.TargetVersionName "ga" }} +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceGoogleFirebaseHostingChannel(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleFirebaseHostingChannel(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState( + "data.google_firebase_hosting_channel.channel", + "google_firebase_hosting_channel.channel", + ), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleFirebaseHostingChannel(context map[string]interface{}) string { + 
return acctest.Nprintf(` +resource "google_firebase_hosting_site" "default" { + project = "%{project_id}" + site_id = "tf-test-site-with-channel%{random_suffix}" +} + +resource "google_firebase_hosting_channel" "channel" { + site_id = google_firebase_hosting_site.default.site_id + channel_id = "tf-test-channel%{random_suffix}" + + labels = { + foo = "bar" + } +} + +data "google_firebase_hosting_channel" "channel" { + site_id = google_firebase_hosting_site.default.site_id + channel_id = "tf-test-channel%{random_suffix}" + + depends_on = [google_firebase_hosting_channel.channel] +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_channel_test.go.tmpl b/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_channel_test.go.tmpl new file mode 100644 index 000000000000..5be59c4fa773 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_channel_test.go.tmpl @@ -0,0 +1,196 @@ +package firebasehosting_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccFirebaseHostingChannel_firebasehostingChannelUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaseHostingChannelDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseHostingChannel_firebasehostingChannelBasic(context), + }, + { + ResourceName: "google_firebase_hosting_channel.update", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "site_id", "channel_id"}, + }, + { + Config: testAccFirebaseHostingChannel_firebasehostingChannelTtl(context, "8600s"), + }, + { + ResourceName: "google_firebase_hosting_channel.update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "site_id", "channel_id"}, + }, + { + Config: testAccFirebaseHostingChannel_firebasehostingChannelTtl(context, "86400s"), + }, + { + ResourceName: "google_firebase_hosting_channel.update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "site_id", "channel_id"}, + }, + { + Config: testAccFirebaseHostingChannel_firebasehostingChannelRetainedReleaseCount(context, 30), + }, + { + ResourceName: "google_firebase_hosting_channel.update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "site_id", "channel_id"}, + }, + { + Config: testAccFirebaseHostingChannel_firebasehostingChannelRetainedReleaseCount(context, 20), + }, + { + ResourceName: "google_firebase_hosting_channel.update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "site_id", "channel_id"}, + }, + { + Config: testAccFirebaseHostingChannel_firebasehostingChannelLabels(context), + }, + { + ResourceName: "google_firebase_hosting_channel.update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "site_id", "channel_id", "labels", "terraform_labels"}, + }, + { + Config: testAccFirebaseHostingChannel_firebasehostingChannelMultipleFields(context), + }, + { + ResourceName: "google_firebase_hosting_channel.update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "site_id", "channel_id", "labels", "terraform_labels"}, + }, + { + Config: testAccFirebaseHostingChannel_firebasehostingChannelBasic(context), + }, + { + ResourceName: "google_firebase_hosting_channel.update", + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "site_id", "channel_id"}, + }, + }, + }) +} + +func testAccFirebaseHostingChannel_firebasehostingChannelBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_hosting_site" "default" { + provider = google-beta + project = "%{project_id}" + site_id = "tf-test-site-with-channel%{random_suffix}" +} + +resource "google_firebase_hosting_channel" "update" { + provider = google-beta + site_id = google_firebase_hosting_site.default.site_id + channel_id = "tf-test-channel-update%{random_suffix}" +} +`, context) +} + +func testAccFirebaseHostingChannel_firebasehostingChannelTtl(context map[string]interface{}, ttl string) string { + context["ttl"] = ttl + return acctest.Nprintf(` +resource "google_firebase_hosting_site" "default" { + provider = google-beta + project = "%{project_id}" + site_id = "tf-test-site-with-channel%{random_suffix}" +} + +resource "google_firebase_hosting_channel" "update" { + provider = google-beta + site_id = google_firebase_hosting_site.default.site_id + channel_id = "tf-test-channel-update%{random_suffix}" + ttl = "%{ttl}" +} +`, context) +} + +func testAccFirebaseHostingChannel_firebasehostingChannelRetainedReleaseCount(context map[string]interface{}, retainedReleaseCount int) string { + context["retained_release_count"] = retainedReleaseCount + return acctest.Nprintf(` +resource "google_firebase_hosting_site" "default" { + provider = google-beta + project = "%{project_id}" + site_id = "tf-test-site-with-channel%{random_suffix}" +} + +resource "google_firebase_hosting_channel" "update" { + provider = google-beta + site_id = google_firebase_hosting_site.default.site_id + channel_id = "tf-test-channel-update%{random_suffix}" + ttl = "86400s" + retained_release_count = %{retained_release_count} +} +`, context) +} + +func testAccFirebaseHostingChannel_firebasehostingChannelLabels(context map[string]interface{}) string { 
+ return acctest.Nprintf(` +resource "google_firebase_hosting_site" "default" { + provider = google-beta + project = "%{project_id}" + site_id = "tf-test-site-with-channel%{random_suffix}" +} + +resource "google_firebase_hosting_channel" "update" { + provider = google-beta + site_id = google_firebase_hosting_site.default.site_id + channel_id = "tf-test-channel-update%{random_suffix}" + ttl = "86400s" + retained_release_count = 10 + labels = { + "some-key": "some-value" + } +} +`, context) +} + +func testAccFirebaseHostingChannel_firebasehostingChannelMultipleFields(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_hosting_site" "default" { + provider = google-beta + project = "%{project_id}" + site_id = "tf-test-site-with-channel%{random_suffix}" +} + +resource "google_firebase_hosting_channel" "update" { + provider = google-beta + site_id = google_firebase_hosting_site.default.site_id + channel_id = "tf-test-channel-update%{random_suffix}" + ttl = "86400s" + retained_release_count = 40 + labels = { + "some-key-2": "some-value-2" + } +} +`, context) +} + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_custom_domain_test.go.tmpl b/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_custom_domain_test.go.tmpl new file mode 100644 index 000000000000..e16581346c11 --- /dev/null +++ b/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_custom_domain_test.go.tmpl @@ -0,0 +1,81 @@ +package firebasehosting_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccFirebaseHostingCustomDomain_firebasehostingCustomdomainUpdate(t *testing.T) { + t.Parallel() + + context 
:= map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "site_id": envvar.GetTestProjectFromEnv(), + "custom_domain": "update.source.domain.com", + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaseHostingCustomDomainDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseHostingCustomDomain_firebasehostingCustomdomainBeforeUpdate(context), + }, + { + ResourceName: "google_firebase_hosting_custom_domain.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"site_id", "custom_domain", "wait_dns_verification"}, + }, + { + Config: testAccFirebaseHostingCustomDomain_firebasehostingCustomdomainAfterUpdate(context), + }, + { + ResourceName: "google_firebase_hosting_custom_domain.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"site_id", "custom_domain", "wait_dns_verification"}, + }, + }, + }) +} + +func testAccFirebaseHostingCustomDomain_firebasehostingCustomdomainBeforeUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_hosting_custom_domain" "default" { + provider = google-beta + + project = "%{project_id}" + site_id = "%{site_id}" + custom_domain = "%{custom_domain}" + cert_preference = "GROUPED" + redirect_target = "destination.domain.com" + + wait_dns_verification = false +} +`, context) +} + +func testAccFirebaseHostingCustomDomain_firebasehostingCustomdomainAfterUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_hosting_custom_domain" "default" { + provider = google-beta + + project = "%{project_id}" + site_id = "%{site_id}" + custom_domain = "%{custom_domain}" + cert_preference = "PROJECT_GROUPED" + redirect_target = "destination2.domain.com" 
+ + wait_dns_verification = false +} +`, context) +} + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_site_test.go.tmpl b/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_site_test.go.tmpl new file mode 100644 index 000000000000..e5a7d337eafb --- /dev/null +++ b/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_site_test.go.tmpl @@ -0,0 +1,82 @@ +package firebasehosting_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccFirebaseHostingSite_firebasehostingSiteUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "site_id": "tf-test-site-update-app", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaseHostingSiteDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseHostingSite_firebasehostingSiteBeforeUpdate(context), + }, + { + ResourceName: "google_firebase_hosting_site.update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"site_id"}, + }, + { + Config: testAccFirebaseHostingSite_firebasehostingSiteAfterUpdate(context), + }, + { + ResourceName: "google_firebase_hosting_site.update", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"site_id"}, + }, + }, + }) +} + +func testAccFirebaseHostingSite_firebasehostingSiteBeforeUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_web_app" 
"before" { + provider = google-beta + project = "%{project_id}" + display_name = "tf-test Test web app before for Firebase Hosting" +} + +resource "google_firebase_hosting_site" "update" { + provider = google-beta + project = "%{project_id}" + site_id = "%{site_id}%{random_suffix}" + app_id = google_firebase_web_app.before.app_id +} +`, context) +} + +func testAccFirebaseHostingSite_firebasehostingSiteAfterUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_web_app" "after" { + provider = google-beta + project = "%{project_id}" + display_name = "tf-test Test web app after for Firebase Hosting" +} + +resource "google_firebase_hosting_site" "update" { + provider = google-beta + project = "%{project_id}" + site_id = "%{site_id}%{random_suffix}" + app_id = google_firebase_web_app.after.app_id +} +`, context) +} + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/firestore/go/resource_firestore_database_update_test.go b/mmv1/third_party/terraform/services/firestore/go/resource_firestore_database_update_test.go new file mode 100644 index 000000000000..3477c4a19a73 --- /dev/null +++ b/mmv1/third_party/terraform/services/firestore/go/resource_firestore_database_update_test.go @@ -0,0 +1,151 @@ +package firestore_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccFirestoreDatabase_updateConcurrencyMode(t *testing.T) { + t.Parallel() + + projectId := envvar.GetTestProjectFromEnv() + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: []resource.TestStep{ + { + 
Config: testAccFirestoreDatabase_concurrencyMode(projectId, randomSuffix, "OPTIMISTIC"), + }, + { + ResourceName: "google_firestore_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "project"}, + }, + { + Config: testAccFirestoreDatabase_concurrencyMode(projectId, randomSuffix, "PESSIMISTIC"), + }, + { + ResourceName: "google_firestore_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "project"}, + }, + }, + }) +} + +func TestAccFirestoreDatabase_updatePitrEnablement(t *testing.T) { + t.Parallel() + + projectId := envvar.GetTestProjectFromEnv() + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccFirestoreDatabase_pitrEnablement(projectId, randomSuffix, "POINT_IN_TIME_RECOVERY_ENABLED"), + }, + { + ResourceName: "google_firestore_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "project"}, + }, + { + Config: testAccFirestoreDatabase_pitrEnablement(projectId, randomSuffix, "POINT_IN_TIME_RECOVERY_DISABLED"), + }, + { + ResourceName: "google_firestore_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "project"}, + }, + }, + }) +} + +func TestAccFirestoreDatabase_updateDeleteProtectionState(t *testing.T) { + t.Parallel() + + projectId := envvar.GetTestProjectFromEnv() + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: 
[]resource.TestStep{ + { + Config: testAccFirestoreDatabase_deleteProtectionState(projectId, randomSuffix, "DELETE_PROTECTION_ENABLED"), + }, + { + ResourceName: "google_firestore_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "project"}, + }, + { + Config: testAccFirestoreDatabase_deleteProtectionState(projectId, randomSuffix, "DELETE_PROTECTION_DISABLED"), + }, + { + ResourceName: "google_firestore_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "project"}, + }, + }, + }) +} + +func testAccFirestoreDatabase_concurrencyMode(projectId string, randomSuffix string, concurrencyMode string) string { + return fmt.Sprintf(` +resource "google_firestore_database" "database" { + project = "%s" + name = "tf-test-%s" + type = "DATASTORE_MODE" + location_id = "nam5" + concurrency_mode = "%s" +} +`, projectId, randomSuffix, concurrencyMode) +} + +func testAccFirestoreDatabase_pitrEnablement(projectId string, randomSuffix string, pointInTimeRecoveryEnablement string) string { + return fmt.Sprintf(` +resource "google_firestore_database" "database" { + project = "%s" + name = "tf-test-%s" + type = "DATASTORE_MODE" + location_id = "nam5" + point_in_time_recovery_enablement = "%s" +} +`, projectId, randomSuffix, pointInTimeRecoveryEnablement) +} + +func testAccFirestoreDatabase_deleteProtectionState(projectId string, randomSuffix string, deleteProtectionState string) string { + return fmt.Sprintf(` +resource "google_firestore_database" "database" { + project = "%s" + name = "tf-test-%s" + type = "DATASTORE_MODE" + location_id = "nam5" + delete_protection_state = "%s" +} +`, projectId, randomSuffix, deleteProtectionState) +} diff --git a/mmv1/third_party/terraform/services/gkebackup/go/resource_gke_backup_backup_plan_test.go b/mmv1/third_party/terraform/services/gkebackup/go/resource_gke_backup_backup_plan_test.go new file mode 100644 index 
000000000000..4c2ef81cdaca --- /dev/null +++ b/mmv1/third_party/terraform/services/gkebackup/go/resource_gke_backup_backup_plan_test.go @@ -0,0 +1,366 @@ +package gkebackup_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccGKEBackupBackupPlan_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "network_name": acctest.BootstrapSharedTestNetwork(t, "gke-cluster"), + "subnetwork_name": acctest.BootstrapSubnet(t, "gke-cluster", acctest.BootstrapSharedTestNetwork(t, "gke-cluster")), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEBackupBackupPlanDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEBackupBackupPlan_basic(context), + }, + { + ResourceName: "google_gke_backup_backup_plan.backupplan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccGKEBackupBackupPlan_permissive(context), + }, + { + ResourceName: "google_gke_backup_backup_plan.backupplan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccGKEBackupBackupPlan_full(context), + }, + { + ResourceName: "google_gke_backup_backup_plan.backupplan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccGKEBackupBackupPlan_rpo_daily_window(context), + }, + { + ResourceName: "google_gke_backup_backup_plan.backupplan", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccGKEBackupBackupPlan_rpo_weekly_window(context), + }, + { + ResourceName: "google_gke_backup_backup_plan.backupplan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccGKEBackupBackupPlan_full(context), + }, + { + ResourceName: "google_gke_backup_backup_plan.backupplan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccGKEBackupBackupPlan_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-testcluster%{random_suffix}" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = false + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" "backupplan" { + name = "tf-test-testplan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = false + include_secrets = false + all_namespaces = true + } + labels = { + "some-key-1": "some-value-1" + } +} +`, context) +} + +func testAccGKEBackupBackupPlan_permissive(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-testcluster%{random_suffix}" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = false + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" 
"backupplan" { + name = "tf-test-testplan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = false + include_secrets = false + all_namespaces = true + permissive_mode = true + } + labels = { + "some-key-1": "some-value-1" + } +} +`, context) +} + +func testAccGKEBackupBackupPlan_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-testcluster%{random_suffix}" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = false + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" "backupplan" { + name = "tf-test-testplan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + retention_policy { + backup_delete_lock_days = 30 + backup_retain_days = 180 + } + backup_schedule { + cron_schedule = "0 9 * * 1" + } + backup_config { + include_volume_data = true + include_secrets = true + selected_applications { + namespaced_names { + name = "app1" + namespace = "ns1" + } + namespaced_names { + name = "app2" + namespace = "ns2" + } + } + } + labels = { + "some-key-2": "some-value-2" + } +} +`, context) +} + +func testAccGKEBackupBackupPlan_rpo_daily_window(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-testcluster%{random_suffix}" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = false + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" 
"backupplan" { + name = "tf-test-testplan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + retention_policy { + backup_delete_lock_days = 30 + backup_retain_days = 180 + } + backup_schedule { + paused = true + rpo_config { + target_rpo_minutes=1440 + exclusion_windows { + start_time { + hours = 12 + } + duration = "7200s" + daily = true + } + exclusion_windows { + start_time { + hours = 8 + minutes = 40 + seconds = 1 + } + duration = "3600s" + single_occurrence_date { + year = 2024 + month = 3 + day = 16 + } + } + } + } + backup_config { + include_volume_data = true + include_secrets = true + selected_applications { + namespaced_names { + name = "app1" + namespace = "ns1" + } + namespaced_names { + name = "app2" + namespace = "ns2" + } + } + } + labels = { + "some-key-2": "some-value-2" + } +} +`, context) +} + +func testAccGKEBackupBackupPlan_rpo_weekly_window(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-testcluster%{random_suffix}" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = false + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" "backupplan" { + name = "tf-test-testplan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + retention_policy { + backup_delete_lock_days = 30 + backup_retain_days = 180 + } + backup_schedule { + paused = true + rpo_config { + target_rpo_minutes=1400 + exclusion_windows { + start_time { + hours = 1 + minutes = 23 + } + duration = "1800s" + days_of_week { + days_of_week = ["MONDAY", "THURSDAY"] + } + } + exclusion_windows { + start_time { + hours = 12 + } + duration = "3600s" + single_occurrence_date { + year = 2024 + month = 
3 + day = 17 + } + } + exclusion_windows { + start_time { + hours = 8 + minutes = 40 + } + duration = "600s" + single_occurrence_date { + year = 2024 + month = 3 + day = 18 + } + } + } + } + backup_config { + include_volume_data = true + include_secrets = true + selected_applications { + namespaced_names { + name = "app1" + namespace = "ns1" + } + namespaced_names { + name = "app2" + namespace = "ns2" + } + } + } + labels = { + "some-key-2": "some-value-2" + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/gkebackup/go/resource_gke_backup_restore_plan_test.go b/mmv1/third_party/terraform/services/gkebackup/go/resource_gke_backup_restore_plan_test.go new file mode 100644 index 000000000000..cf8d9dfa0c81 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkebackup/go/resource_gke_backup_restore_plan_test.go @@ -0,0 +1,207 @@ + + +package gkebackup_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + + +func TestAccGKEBackupRestorePlan_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "deletion_protection": false, + "network_name": acctest.BootstrapSharedTestNetwork(t, "gke-cluster"), + "subnetwork_name": acctest.BootstrapSubnet(t, "gke-cluster", acctest.BootstrapSharedTestNetwork(t, "gke-cluster")), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEBackupRestorePlan_full(context), + }, + { + ResourceName: "google_gke_backup_restore_plan.restore_plan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "terraform_labels"}, + }, + { 
+ Config: testAccGKEBackupRestorePlan_update(context), + }, + { + ResourceName: "google_gke_backup_restore_plan.restore_plan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "location", "terraform_labels"}, + }, + }, + }) +} + +func testAccGKEBackupRestorePlan_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-restore-plan%{random_suffix}-cluster" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = "%{deletion_protection}" + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" "basic" { + name = "tf-test-restore-plan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = true + include_secrets = true + all_namespaces = true + } +} + +resource "google_gke_backup_restore_plan" "restore_plan" { + name = "tf-test-restore-plan%{random_suffix}" + location = "us-central1" + backup_plan = google_gke_backup_backup_plan.basic.id + cluster = google_container_cluster.primary.id + restore_config { + all_namespaces = true + namespaced_resource_restore_mode = "MERGE_SKIP_ON_CONFLICT" + volume_data_restore_policy = "RESTORE_VOLUME_DATA_FROM_BACKUP" + cluster_resource_restore_scope { + all_group_kinds = true + } + cluster_resource_conflict_policy = "USE_EXISTING_VERSION" + restore_order { + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindA" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + } + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + requiring { + resource_group = 
"stable.example.com" + resource_kind = "kindC" + } + } + } + volume_data_restore_policy_bindings { + policy = "RESTORE_VOLUME_DATA_FROM_BACKUP" + volume_type = "GCE_PERSISTENT_DISK" + } + } +} +`, context) +} + +func testAccGKEBackupRestorePlan_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-restore-plan%{random_suffix}-cluster" + location = "us-central1" + initial_node_count = 1 + workload_identity_config { + workload_pool = "%{project}.svc.id.goog" + } + addons_config { + gke_backup_agent_config { + enabled = true + } + } + deletion_protection = "%{deletion_protection}" + network = "%{network_name}" + subnetwork = "%{subnetwork_name}" +} + +resource "google_gke_backup_backup_plan" "basic" { + name = "tf-test-restore-plan%{random_suffix}" + cluster = google_container_cluster.primary.id + location = "us-central1" + backup_config { + include_volume_data = true + include_secrets = true + all_namespaces = true + } +} + +resource "google_gke_backup_restore_plan" "restore_plan" { + name = "tf-test-restore-plan%{random_suffix}" + location = "us-central1" + backup_plan = google_gke_backup_backup_plan.basic.id + cluster = google_container_cluster.primary.id + restore_config { + all_namespaces = true + namespaced_resource_restore_mode = "MERGE_REPLACE_VOLUME_ON_CONFLICT" + volume_data_restore_policy = "RESTORE_VOLUME_DATA_FROM_BACKUP" + cluster_resource_restore_scope { + all_group_kinds = true + } + cluster_resource_conflict_policy = "USE_EXISTING_VERSION" + restore_order { + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindA" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + } + group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindB" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindC" + } + } + 
group_kind_dependencies { + satisfying { + resource_group = "stable.example.com" + resource_kind = "kindC" + } + requiring { + resource_group = "stable.example.com" + resource_kind = "kindD" + } + } + } + volume_data_restore_policy_bindings { + policy = "REUSE_VOLUME_HANDLE_FROM_BACKUP" + volume_type = "GCE_PERSISTENT_DISK" + } + } +} +`, context) +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl new file mode 100644 index 000000000000..76c7415e37c7 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub/go/resource_gke_hub_feature_membership_test.go.tmpl @@ -0,0 +1,1438 @@ +package gkehub_test + +import ( + "context" + "fmt" + "strings" + "testing" + + dcl "github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl" + gkehub "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/gkehub{{ $.DCLVersion }}" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccGKEHubFeatureMembership_gkehubFeatureAcmUpdate(t *testing.T) { + // Multiple fine-grained resources cause VCR to fail + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmUpdateStart(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test2%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member_1", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmMembershipUpdate(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test2%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member_2", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmAddHierarchyController(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipNotPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test2%s", context["random_suffix"])), + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", 
"configmanagement", fmt.Sprintf("tf-test3%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member_3", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmRemoveFields(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipNotPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test2%s", context["random_suffix"])), + testAccCheckGkeHubFeatureMembershipNotPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("basic1%s", context["random_suffix"])), + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test3%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member_3", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubFeatureMembership_gkehubFeatureAcmUpdateStart(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_feature_membership" "feature_member_1" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + configmanagement { + version = "1.15.1" + config_sync { + source_format = "hierarchy" + git { + sync_repo = 
"https://github.com/GoogleCloudPlatform/magic-modules" + secret_type = "none" + } + } + } +} + +resource "google_gke_hub_feature_membership" "feature_member_2" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_second.membership_id + configmanagement { + version = "1.15.1" + config_sync { + source_format = "hierarchy" + git { + sync_repo = "https://github.com/terraform-providers/terraform-provider-google" + secret_type = "none" + } + } + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_gkehubFeatureAcmMembershipUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "changed" + } + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_feature_membership" "feature_member_1" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + configmanagement { + version = "1.15.1" + config_sync { + source_format = "hierarchy" + git { + sync_repo = "https://github.com/GoogleCloudPlatform/magic-modules" + secret_type = "none" + } + } + } +} + +resource "google_gke_hub_feature_membership" "feature_member_2" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_second.membership_id + configmanagement { + version = "1.15.1" + config_sync { + source_format = "hierarchy" + git { + sync_repo = 
"https://github.com/terraform-providers/terraform-provider-google-beta" + secret_type = "none" + } + } + policy_controller { + enabled = true + audit_interval_seconds = "10" + exemptable_namespaces = ["asdf", "1234"] + template_library_installed = true + } + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_gkehubFeatureAcmAddHierarchyController(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "changed" + } + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_feature_membership" "feature_member_2" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_second.membership_id + configmanagement { + version = "1.15.1" + config_sync { + source_format = "unstructured" + git { + sync_repo = "https://github.com/terraform-providers/terraform-provider-google-beta" + secret_type = "none" + } + } + policy_controller { + enabled = true + audit_interval_seconds = "9" + exemptable_namespaces = ["different", "1234"] + template_library_installed = false + } + hierarchy_controller { + enable_hierarchical_resource_quota = true + enable_pod_tree_labels = false + enabled = true + } + } +} + +resource "google_gke_hub_feature_membership" "feature_member_3" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_third.membership_id + configmanagement { + version = "1.15.1" + config_sync { + source_format = "hierarchy" + git { + sync_repo = "https://github.com/hashicorp/terraform" 
+ secret_type = "none" + } + } + policy_controller { + enabled = false + audit_interval_seconds = "100" + exemptable_namespaces = ["onetwothree", "fourfive"] + template_library_installed = true + } + hierarchy_controller { + enable_hierarchical_resource_quota = false + enable_pod_tree_labels = true + enabled = false + } + } +} + +resource "google_gke_hub_feature_membership" "feature_member_4" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_fourth.membership_id + configmanagement { + version = "1.15.1" + policy_controller { + enabled = true + audit_interval_seconds = "100" + template_library_installed = true + mutation_enabled = true + monitoring { + backends = ["CLOUD_MONITORING", "PROMETHEUS"] + } + } + } +} + + + +`, context) +} + +func testAccGKEHubFeatureMembership_gkehubFeatureAcmRemoveFields(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "changed" + } + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_feature_membership" "feature_member_3" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_third.membership_id + configmanagement { + version = "1.15.1" + policy_controller { + enabled = true + audit_interval_seconds = "100" + exemptable_namespaces = ["onetwothree", "fourfive"] + template_library_installed = true + } + } +} +`, context) +} + +func TestAccGKEHubFeatureMembership_gkehubFeatureAcmAllFields(t *testing.T) { + // VCR fails to handle 
batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmFewFields(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmAllFields(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmFewFields(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureWithPreventDriftField(context), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubFeatureMembership_gkehubFeatureAcmAllFields(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + acctest.Nprintf(` +resource "google_container_cluster" "primary" { + project = google_project.project.project_id + name = "tf-test-cl%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_membership" "membership" { + project = google_project.project.project_id + membership_id = "tf-test1%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." 
+{{- end }} +} + +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + configmanagement { + version = "1.15.1" + config_sync { + git { + sync_repo = "https://github.com/hashicorp/terraform" + https_proxy = "https://example.com" + policy_dir = "google/" + secret_type = "none" + sync_branch = "some-branch" + sync_rev = "v3.60.0" + sync_wait_secs = "30" + } + } + policy_controller { + enabled = true + audit_interval_seconds = "100" + exemptable_namespaces = ["onetwothree", "fourfive"] + template_library_installed = true + referential_rules_enabled = true + log_denies_enabled = true + } + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_gkehubFeatureWithPreventDriftField(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + acctest.Nprintf(` +resource "google_container_cluster" "primary" { + project = google_project.project.project_id + name = "tf-test-cl%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_membership" "membership" { + project = google_project.project.project_id + membership_id = "tf-test1%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = 
"test resource." +{{- end }} +} + +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + configmanagement { + version = "1.15.1" + config_sync { + git { + sync_repo = "https://github.com/hashicorp/terraform" + https_proxy = "https://example.com" + policy_dir = "google/" + secret_type = "none" + sync_branch = "some-branch" + sync_rev = "v3.60.0" + sync_wait_secs = "30" + } + prevent_drift = true + } + policy_controller { + enabled = true + audit_interval_seconds = "100" + exemptable_namespaces = ["onetwothree", "fourfive"] + template_library_installed = true + referential_rules_enabled = true + log_denies_enabled = true + } + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_gkehubFeatureAcmFewFields(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + acctest.Nprintf(` +resource "google_container_cluster" "primary" { + project = google_project.project.project_id + name = "tf-test-cl%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub, google_project_service.acm] +} + +resource "google_gke_hub_membership" "membership" { + project = google_project.project.project_id + membership_id = "tf-test1%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + 
} +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." +{{- end }} +} + +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_service_account" "feature_sa" { + project = google_project.project.project_id + account_id = "feature-sa" +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + configmanagement { + version = "1.15.1" + config_sync { + git { + sync_repo = "https://github.com/hashicorp/terraform" + secret_type = "none" + } + } + } +} +`, context) +} + +func TestAccGKEHubFeatureMembership_gkehubFeatureAcmOci(t *testing.T) { + // Multiple fine-grained resources cause VCR to fail + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmOciStart(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + 
ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmOciUpdate(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_gkehubFeatureAcmOciRemoveFields(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "configmanagement", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubFeatureMembership_gkehubFeatureAcmOciStart(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup_ACMOCI(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.container, google_project_service.gkehub] +} + +resource "google_service_account" "feature_sa" { + project = google_project.project.project_id + account_id = "feature-sa" +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_acmoci.membership_id + configmanagement { + version = "1.15.1" + config_sync { + source_format = "unstructured" + oci { + sync_repo = "us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest" + 
policy_dir = "config-connector" + sync_wait_secs = "20" + secret_type = "gcpserviceaccount" + gcp_service_account_email = google_service_account.feature_sa.email + } + prevent_drift = true + } + policy_controller { + enabled = true + audit_interval_seconds = "100" + exemptable_namespaces = ["onetwothree", "fourfive"] + template_library_installed = true + referential_rules_enabled = true + log_denies_enabled = true + } + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_gkehubFeatureAcmOciUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup_ACMOCI(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.container, google_project_service.gkehub] +} + +resource "google_service_account" "feature_sa" { + project = google_project.project.project_id + account_id = "feature-sa" +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_acmoci.membership_id + configmanagement { + version = "1.15.1" + config_sync { + source_format = "hierarchy" + oci { + sync_repo = "us-central1-docker.pkg.dev/sample-project/config-repo/config-sync-gke:latest" + policy_dir = "config-sync" + sync_wait_secs = "15" + secret_type = "gcenode" + gcp_service_account_email = google_service_account.feature_sa.email + } + prevent_drift = true + } + policy_controller { + enabled = true + audit_interval_seconds = "100" + exemptable_namespaces = ["onetwothree", "fourfive"] + template_library_installed = true + referential_rules_enabled = true + log_denies_enabled = true + } + } +} +`, context) +} + +func 
testAccGKEHubFeatureMembership_gkehubFeatureAcmOciRemoveFields(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup_ACMOCI(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "configmanagement" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.container, google_project_service.gkehub] +} + +resource "google_service_account" "feature_sa" { + project = google_project.project.project_id + account_id = "feature-sa" +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership_acmoci.membership_id + configmanagement { + version = "1.15.1" + policy_controller { + enabled = true + audit_interval_seconds = "100" + exemptable_namespaces = ["onetwothree", "fourfive"] + template_library_installed = true + referential_rules_enabled = true + log_denies_enabled = true + } + } +} +`, context) +} + +func TestAccGKEHubFeatureMembership_gkehubFeatureMesh(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeatureMembership_meshStart(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "servicemesh", 
fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_meshUpdateManagement(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "servicemesh", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_meshUpdateControlPlane(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "servicemesh", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubFeatureMembership_meshStart(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + acctest.Nprintf(` +resource "google_container_cluster" "primary" { + project = google_project.project.project_id + name = "tf-test-cl%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + depends_on = [google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_membership" "membership" { + project = google_project.project.project_id + membership_id = "tf-test1%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." 
+{{- end }} +} + +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "servicemesh" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.container, google_project_service.gkehub, google_project_service.mesh] +} + +resource "google_service_account" "feature_sa" { + project = google_project.project.project_id + account_id = "feature-sa" +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + mesh { + management = "MANAGEMENT_AUTOMATIC" + control_plane = "AUTOMATIC" + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_meshUpdateManagement(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + acctest.Nprintf(` +resource "google_container_cluster" "primary" { + project = google_project.project.project_id + name = "tf-test-cl%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + depends_on = [google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_membership" "membership" { + project = google_project.project.project_id + membership_id = "tf-test1%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." 
+{{- end }} +} + +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "servicemesh" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.container, google_project_service.gkehub, google_project_service.mesh] +} + +resource "google_service_account" "feature_sa" { + project = google_project.project.project_id + account_id = "feature-sa" +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + mesh { + management = "MANAGEMENT_MANUAL" + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_meshUpdateControlPlane(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + acctest.Nprintf(` +resource "google_container_cluster" "primary" { + project = google_project.project.project_id + name = "tf-test-cl%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + deletion_protection = false + depends_on = [google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_membership" "membership" { + project = google_project.project.project_id + membership_id = "tf-test1%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." 
+{{- end }} +} + +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "servicemesh" + location = "global" + + labels = { + foo = "bar" + } + depends_on = [google_project_service.container, google_project_service.gkehub, google_project_service.mesh] +} + +resource "google_service_account" "feature_sa" { + project = google_project.project.project_id + account_id = "feature-sa" +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + mesh { + control_plane = "MANUAL" + } +} +`, context) +} + +func TestAccGKEHubFeatureMembership_gkehubFeaturePolicyController(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeatureMembership_policycontrollerStart(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "policycontroller", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_policycontrollerUpdateDefaultFields(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, 
fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "policycontroller", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeatureMembership_policycontrollerUpdateMaps(context), + Check: resource.ComposeTestCheckFunc( + testAccCheckGkeHubFeatureMembershipPresent(t, fmt.Sprintf("tf-test-gkehub%s", context["random_suffix"]), "global", "policycontroller", fmt.Sprintf("tf-test1%s", context["random_suffix"])), + ), + }, + { + ResourceName: "google_gke_hub_feature_membership.feature_member", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubFeatureMembership_policycontrollerStart(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "policycontroller" + location = "global" + depends_on = [google_project_service.container, google_project_service.gkehub, google_project_service.poco] +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + policycontroller { + policy_controller_hub_config { + install_spec = "INSTALL_SPEC_ENABLED" + exemptable_namespaces = ["foo"] + audit_interval_seconds = 30 + referential_rules_enabled = true + } + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_policycontrollerUpdateDefaultFields(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = 
"policycontroller" + location = "global" + depends_on = [google_project_service.container, google_project_service.gkehub, google_project_service.poco] +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature = google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + policycontroller { + policy_controller_hub_config { + install_spec = "INSTALL_SPEC_SUSPENDED" + constraint_violation_limit = 50 + referential_rules_enabled = true + log_denies_enabled = true + mutation_enabled = true + monitoring { + backends = [ + "PROMETHEUS" + ] + } + deployment_configs { + component_name = "admission" + replica_count = 3 + pod_affinity = "ANTI_AFFINITY" + container_resources { + limits { + memory = "1Gi" + cpu = "1.5" + } + requests { + memory = "500Mi" + cpu = "150m" + } + } + pod_tolerations { + key = "key1" + operator = "Equal" + value = "value1" + effect = "NoSchedule" + } + } + deployment_configs { + component_name = "mutation" + replica_count = 3 + pod_affinity = "ANTI_AFFINITY" + } + policy_content { + template_library { + installation = "ALL" + } + bundles { + bundle_name = "pci-dss-v3.2.1" + exempted_namespaces = ["sample-namespace"] + } + bundles { + bundle_name = "nist-sp-800-190" + } + } + } + version = "1.17.0" + } +} +`, context) +} + +func testAccGKEHubFeatureMembership_policycontrollerUpdateMaps(context map[string]interface{}) string { + return gkeHubFeatureProjectSetup(context) + gkeHubClusterMembershipSetup(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + project = google_project.project.project_id + name = "policycontroller" + location = "global" + depends_on = [google_project_service.container, google_project_service.gkehub, google_project_service.poco] +} + +resource "google_gke_hub_feature_membership" "feature_member" { + project = google_project.project.project_id + location = "global" + feature 
= google_gke_hub_feature.feature.name + membership = google_gke_hub_membership.membership.membership_id + policycontroller { + policy_controller_hub_config { + install_spec = "INSTALL_SPEC_SUSPENDED" + constraint_violation_limit = 50 + referential_rules_enabled = true + log_denies_enabled = true + mutation_enabled = true + monitoring { + backends = [ + "PROMETHEUS" + ] + } + deployment_configs { + component_name = "admission" + pod_affinity = "NO_AFFINITY" + } + deployment_configs { + component_name = "audit" + container_resources { + limits { + memory = "1Gi" + cpu = "1.5" + } + requests { + memory = "500Mi" + cpu = "150m" + } + } + } + } + version = "1.17.0" + } +} +`, context) +} + +func gkeHubClusterMembershipSetup(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test-cl%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + project = google_project.project.project_id + deletion_protection = false + depends_on = [google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_container_cluster" "secondary" { + name = "tf-test-cl2%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + project = google_project.project.project_id + deletion_protection = false + depends_on = [google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_container_cluster" "tertiary" { + name = "tf-test-cl3%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + project = google_project.project.project_id + deletion_protection = false + depends_on = [google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + + +resource "google_container_cluster" "quarternary" { + name = "tf-test-cl4%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + project = 
google_project.project.project_id + deletion_protection = false + depends_on = [google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_membership" "membership" { + project = google_project.project.project_id + membership_id = "tf-test1%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." +{{- end }} +} + +resource "google_gke_hub_membership" "membership_second" { + project = google_project.project.project_id + membership_id = "tf-test2%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.secondary.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." +{{- end }} +} + +resource "google_gke_hub_membership" "membership_third" { + project = google_project.project.project_id + membership_id = "tf-test3%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.tertiary.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." +{{- end }} +} + +resource "google_gke_hub_membership" "membership_fourth" { + project = google_project.project.project_id + membership_id = "tf-test4%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.quarternary.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." 
+{{- end }} +} +`, context) +} + +func gkeHubClusterMembershipSetup_ACMOCI(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_compute_network" "testnetwork" { + project = google_project.project.project_id + name = "testnetwork" + auto_create_subnetworks = true + depends_on = [google_project_service.compute] +} + +resource "google_container_cluster" "container_acmoci" { + name = "tf-test-cl%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + network = google_compute_network.testnetwork.self_link + project = google_project.project.project_id + deletion_protection = false + depends_on = [google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_membership" "membership_acmoci" { + project = google_project.project.project_id + membership_id = "tf-test1%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.container_acmoci.id}" + } + } +{{- if ne $.TargetVersionName "ga" }} + description = "test resource." 
+{{- end }} +} +`, context) +} + +func testAccCheckGkeHubFeatureMembershipPresent(t *testing.T, project, location, feature, membership string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + obj := &gkehub.FeatureMembership{ + Feature: dcl.StringOrNil(feature), + Location: dcl.StringOrNil(location), + Membership: dcl.StringOrNil(membership), + Project: dcl.String(project), + } + + _, err := transport_tpg.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) + if err != nil { + return err + } + return nil + } +} + +func testAccCheckGkeHubFeatureMembershipNotPresent(t *testing.T, project, location, feature, membership string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + obj := &gkehub.FeatureMembership{ + Feature: dcl.StringOrNil(feature), + Location: dcl.StringOrNil(location), + Membership: dcl.StringOrNil(membership), + Project: dcl.String(project), + } + + _, err := transport_tpg.NewDCLGkeHubClient(config, "", "", 0).GetFeatureMembership(context.Background(), obj) + if err == nil { + return fmt.Errorf("Did not expect to find GKE Feature Membership for projects/%s/locations/%s/features/%s/membershipId/%s", project, location, feature, membership) + } + if dcl.IsNotFound(err) { + return nil + } + return err + } +} + +// Copy this function from the package gkehub2_test to here +func gkeHubFeatureProjectSetup(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + name = "tf-test-gkehub%{random_suffix}" + project_id = "tf-test-gkehub%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} + +resource "google_project_service" "anthos" { + project = google_project.project.project_id + service = "anthos.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "mesh" { + project = 
google_project.project.project_id + service = "meshconfig.googleapis.com" +} + +resource "google_project_service" "mci" { + project = google_project.project.project_id + service = "multiclusteringress.googleapis.com" +} + +resource "google_project_service" "acm" { + project = google_project.project.project_id + service = "anthosconfigmanagement.googleapis.com" +} + +resource "google_project_service" "poco" { + project = google_project.project.project_id + service = "anthospolicycontroller.googleapis.com" +} + +resource "google_project_service" "mcsd" { + project = google_project.project.project_id + service = "multiclusterservicediscovery.googleapis.com" +} + +resource "google_project_service" "compute" { + project = google_project.project.project_id + service = "compute.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "container" { + project = google_project.project.project_id + service = "container.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "gkehub" { + project = google_project.project.project_id + service = "gkehub.googleapis.com" + disable_on_destroy = false +} +`, context) +} + +// Copy this function from the package gkehub2_test to here +func testAccCheckGKEHubFeatureDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_gke_hub_feature" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}GKEHub2BasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/features/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + 
Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("GKEHubFeature still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/gkehub2/go/iam_gke_hub_feature_test.go b/mmv1/third_party/terraform/services/gkehub2/go/iam_gke_hub_feature_test.go new file mode 100644 index 000000000000..2c8c61f8b190 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub2/go/iam_gke_hub_feature_test.go @@ -0,0 +1,316 @@ +package gkehub2_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccGKEHub2FeatureIamBindingGenerated(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + "project_id": fmt.Sprintf("tf-test-gkehub-%s", acctest.RandString(t, 10)), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2FeatureIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_gke_hub_feature_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/features/%s roles/viewer", context["project_id"], "global", fmt.Sprint("multiclusterservicediscovery")), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccGKEHub2FeatureIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_gke_hub_feature_iam_binding.foo", + ImportStateId: 
fmt.Sprintf("projects/%s/locations/%s/features/%s roles/viewer", context["project_id"], "global", fmt.Sprint("multiclusterservicediscovery")), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccGKEHub2FeatureIamMemberGenerated(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + "project_id": fmt.Sprintf("tf-test-gkehub-%s", acctest.RandString(t, 10)), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccGKEHub2FeatureIamMember_basicGenerated(context), + }, + { + ResourceName: "google_gke_hub_feature_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/features/%s roles/viewer user:admin@hashicorptest.com", context["project_id"], "global", fmt.Sprint("multiclusterservicediscovery")), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccGKEHub2FeatureIamPolicyGenerated(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + "project_id": fmt.Sprintf("tf-test-gkehub-%s", acctest.RandString(t, 10)), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: 
testAccGKEHub2FeatureIamPolicy_basicGenerated(context), + Check: resource.TestCheckResourceAttrSet("data.google_gke_hub_feature_iam_policy.foo", "policy_data"), + }, + { + ResourceName: "google_gke_hub_feature_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/features/%s", context["project_id"], "global", fmt.Sprint("multiclusterservicediscovery")), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHub2FeatureIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_gke_hub_feature_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/features/%s", context["project_id"], "global", fmt.Sprint("multiclusterservicediscovery")), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHub2FeatureIamMember_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + name = "%{project_id}" + project_id = "%{project_id}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} +resource "google_project_service" "mcsd" { + project = google_project.project.project_id + service = "multiclusterservicediscovery.googleapis.com" +} +resource "google_project_service" "gkehub" { + project = google_project.project.project_id + service = "gkehub.googleapis.com" + disable_on_destroy = false +} +resource "google_gke_hub_feature" "feature" { + name = "multiclusterservicediscovery" + location = "global" + project = google_project.project.project_id + labels = { + foo = "bar" + } + depends_on = [google_project_service.mcsd, google_project_service.gkehub] +} +resource "google_gke_hub_feature_iam_member" "foo" { + project = google_gke_hub_feature.feature.project + location = google_gke_hub_feature.feature.location + name = google_gke_hub_feature.feature.name + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccGKEHub2FeatureIamPolicy_basicGenerated(context 
map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + name = "%{project_id}" + project_id = "%{project_id}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} +resource "google_project_service" "mcsd" { + project = google_project.project.project_id + service = "multiclusterservicediscovery.googleapis.com" +} +resource "google_project_service" "gkehub" { + project = google_project.project.project_id + service = "gkehub.googleapis.com" + disable_on_destroy = false +} +resource "google_gke_hub_feature" "feature" { + name = "multiclusterservicediscovery" + location = "global" + project = google_project.project.project_id + labels = { + foo = "bar" + } + depends_on = [google_project_service.mcsd, google_project_service.gkehub] +} +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} +resource "google_gke_hub_feature_iam_policy" "foo" { + project = google_gke_hub_feature.feature.project + location = google_gke_hub_feature.feature.location + name = google_gke_hub_feature.feature.name + policy_data = data.google_iam_policy.foo.policy_data +} +data "google_gke_hub_feature_iam_policy" "foo" { + project = google_gke_hub_feature.feature.project + location = google_gke_hub_feature.feature.location + name = google_gke_hub_feature.feature.name + depends_on = [ + google_gke_hub_feature_iam_policy.foo + ] +} +`, context) +} + +func testAccGKEHub2FeatureIamPolicy_emptyBinding(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + name = "%{project_id}" + project_id = "%{project_id}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} +resource "google_project_service" "mcsd" { + project = google_project.project.project_id + service = "multiclusterservicediscovery.googleapis.com" +} +resource "google_project_service" "gkehub" { + project = google_project.project.project_id + service = 
"gkehub.googleapis.com" + disable_on_destroy = false +} +resource "google_gke_hub_feature" "feature" { + name = "multiclusterservicediscovery" + location = "global" + project = google_project.project.project_id + labels = { + foo = "bar" + } + depends_on = [google_project_service.mcsd, google_project_service.gkehub] +} +data "google_iam_policy" "foo" { +} +resource "google_gke_hub_feature_iam_policy" "foo" { + project = google_gke_hub_feature.feature.project + location = google_gke_hub_feature.feature.location + name = google_gke_hub_feature.feature.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccGKEHub2FeatureIamBinding_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + name = "%{project_id}" + project_id = "%{project_id}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} +resource "google_project_service" "mcsd" { + project = google_project.project.project_id + service = "multiclusterservicediscovery.googleapis.com" +} +resource "google_project_service" "gkehub" { + project = google_project.project.project_id + service = "gkehub.googleapis.com" + disable_on_destroy = false +} +resource "google_gke_hub_feature" "feature" { + name = "multiclusterservicediscovery" + location = "global" + project = google_project.project.project_id + labels = { + foo = "bar" + } + depends_on = [google_project_service.mcsd, google_project_service.gkehub] +} +resource "google_gke_hub_feature_iam_binding" "foo" { + project = google_gke_hub_feature.feature.project + location = google_gke_hub_feature.feature.location + name = google_gke_hub_feature.feature.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccGKEHub2FeatureIamBinding_updateGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + name = "%{project_id}" + project_id = 
"%{project_id}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} +resource "google_project_service" "mcsd" { + project = google_project.project.project_id + service = "multiclusterservicediscovery.googleapis.com" +} +resource "google_project_service" "gkehub" { + project = google_project.project.project_id + service = "gkehub.googleapis.com" + disable_on_destroy = false +} +resource "google_gke_hub_feature" "feature" { + name = "multiclusterservicediscovery" + location = "global" + project = google_project.project.project_id + labels = { + foo = "bar" + } + depends_on = [google_project_service.mcsd, google_project_service.gkehub] +} +resource "google_gke_hub_feature_iam_binding" "foo" { + project = google_gke_hub_feature.feature.project + location = google_gke_hub_feature.feature.location + name = google_gke_hub_feature.feature.name + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl new file mode 100644 index 000000000000..971de79c1dd0 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl @@ -0,0 +1,974 @@ +package gkehub2_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccGKEHubFeature_gkehubFeatureFleetObservability(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context 
:= map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeature_gkehubFeatureFleetObservability(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeature_gkehubFeatureFleetObservabilityUpdate1(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeature_gkehubFeatureFleetObservabilityUpdate2(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubFeature_gkehubFeatureFleetObservability(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "time_sleep" "wait_for_gkehub_enablement" { + create_duration = "150s" + depends_on = [google_project_service.gkehub] +} + +resource "google_gke_hub_feature" "feature" { + name = "fleetobservability" + location = "global" + project = google_project.project.project_id + spec { + fleetobservability { + logging_config { + default_config { + mode = "MOVE" + } + fleet_scope_logs_config { + mode = "COPY" + } + } + } + } + depends_on = [time_sleep.wait_for_gkehub_enablement] +} +`, context) +} + +func testAccGKEHubFeature_gkehubFeatureFleetObservabilityUpdate1(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "time_sleep" 
"wait_for_gkehub_enablement" { + create_duration = "150s" + depends_on = [google_project_service.gkehub] +} + +resource "google_gke_hub_feature" "feature" { + name = "fleetobservability" + location = "global" + project = google_project.project.project_id + spec { + fleetobservability { + logging_config { + default_config { + mode = "MOVE" + } + } + } + } + depends_on = [time_sleep.wait_for_gkehub_enablement] +} +`, context) +} + +func testAccGKEHubFeature_gkehubFeatureFleetObservabilityUpdate2(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "time_sleep" "wait_for_gkehub_enablement" { + create_duration = "150s" + depends_on = [google_project_service.gkehub] +} + +resource "google_gke_hub_feature" "feature" { + name = "fleetobservability" + location = "global" + project = google_project.project.project_id + spec { + fleetobservability { + logging_config { + fleet_scope_logs_config { + mode = "COPY" + } + } + } + } + depends_on = [time_sleep.wait_for_gkehub_enablement] +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} +func gkeHubFeatureProjectSetup(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + name = "tf-test-gkehub%{random_suffix}" + project_id = "tf-test-gkehub%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" + provider = google-beta +} + +resource "google_project_service" "mesh" { + project = google_project.project.project_id + service = "meshconfig.googleapis.com" + provider = google-beta +} + +resource "google_project_service" "mci" { + project = google_project.project.project_id + service = "multiclusteringress.googleapis.com" + provider = google-beta +} + +resource "google_project_service" "acm" { + project = google_project.project.project_id + service = "anthosconfigmanagement.googleapis.com" + provider = google-beta +} + +resource "google_project_service" "mcsd" { + project = 
google_project.project.project_id + service = "multiclusterservicediscovery.googleapis.com" + provider = google-beta +} + +resource "google_project_service" "compute" { + project = google_project.project.project_id + service = "compute.googleapis.com" + disable_on_destroy = false + provider = google-beta +} + +resource "google_project_service" "container" { + project = google_project.project.project_id + service = "container.googleapis.com" + disable_on_destroy = false + provider = google-beta +} + +resource "google_project_service" "gkehub" { + project = google_project.project.project_id + service = "gkehub.googleapis.com" + disable_on_destroy = false + provider = google-beta +} +`, context) +} +{{- end }} + +func TestAccGKEHubFeature_gkehubFeatureMciUpdate(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeature_gkehubFeatureMciUpdateStart(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"update_time"}, + }, + { + Config: testAccGKEHubFeature_gkehubFeatureMciChangeMembership(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"update_time", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccGKEHubFeature_gkehubFeatureMciUpdateStart(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + 
acctest.Nprintf(` + +resource "google_container_cluster" "primary" { + name = "tf-test%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + project = google_project.project.project_id + deletion_protection = false + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_container_cluster" "secondary" { + name = "tf-test2%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + project = google_project.project.project_id + deletion_protection = false + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_membership" "membership" { + membership_id = "tf-test%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } + project = google_project.project.project_id +} + +resource "google_gke_hub_membership" "membership_second" { + membership_id = "tf-test2%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.secondary.id}" + } + } + project = google_project.project.project_id +} + +resource "google_gke_hub_feature" "feature" { + name = "multiclusteringress" + location = "global" + spec { + multiclusteringress { + config_membership = google_gke_hub_membership.membership.id + } + } + project = google_project.project.project_id +} +`, context) +} + +func testAccGKEHubFeature_gkehubFeatureMciChangeMembership(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_container_cluster" "primary" { + name = "tf-test%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + project = google_project.project.project_id + deletion_protection = false + depends_on = [google_project_service.mci, 
google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_container_cluster" "secondary" { + name = "tf-test2%{random_suffix}" + location = "us-central1-a" + initial_node_count = 1 + project = google_project.project.project_id + deletion_protection = false + depends_on = [google_project_service.mci, google_project_service.container, google_project_service.container, google_project_service.gkehub] +} + +resource "google_gke_hub_membership" "membership" { + membership_id = "tf-test%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.primary.id}" + } + } + project = google_project.project.project_id +} + +resource "google_gke_hub_membership" "membership_second" { + membership_id = "tf-test2%{random_suffix}" + endpoint { + gke_cluster { + resource_link = "//container.googleapis.com/${google_container_cluster.secondary.id}" + } + } + project = google_project.project.project_id +} + +resource "google_gke_hub_feature" "feature" { + name = "multiclusteringress" + location = "global" + spec { + multiclusteringress { + config_membership = google_gke_hub_membership.membership_second.id + } + } + labels = { + foo = "bar" + } + project = google_project.project.project_id +} +`, context) +} + +func TestAccGKEHubFeature_FleetDefaultMemberConfigServiceMesh(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccGKEHubFeature_FleetDefaultMemberConfigServiceMesh(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project"}, + }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigServiceMesh(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "servicemesh" + location = "global" + fleet_default_member_config { + mesh { + management = "MANAGEMENT_AUTOMATIC" + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.mesh] + project = google_project.project.project_id +} +`, context) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "servicemesh" + location = "global" + fleet_default_member_config { + mesh { + management = "MANAGEMENT_MANUAL" + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.mesh] + project = google_project.project.project_id +} +`, context) +} + +func TestAccGKEHubFeature_FleetDefaultMemberConfigConfigManagement(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagement(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project"}, + }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagement(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "configmanagement" + location = "global" + fleet_default_member_config { + configmanagement { + version = "1.16.0" + config_sync { + source_format = "hierarchy" + git { + sync_repo = "https://github.com/GoogleCloudPlatform/magic-modules" + sync_branch = "master" + policy_dir = "." 
+ sync_rev = "HEAD" + secret_type = "none" + sync_wait_secs = "15" + } + } + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.acm] + project = google_project.project.project_id +} +`, context) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigConfigManagementUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "configmanagement" + location = "global" + fleet_default_member_config { + configmanagement { + version = "1.16.1" + config_sync { + prevent_drift = true + source_format = "unstructured" + oci { + sync_repo = "us-central1-docker.pkg.dev/corp-gke-build-artifacts/acm/configs:latest" + policy_dir = "/acm/nonprod-root/" + secret_type = "gcpserviceaccount" + sync_wait_secs = "15" + gcp_service_account_email = "gke-cluster@gke-foo-nonprod.iam.gserviceaccount.com" + } + } + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.acm] + project = google_project.project.project_id +} +`, context) +} + +func TestAccGKEHubFeature_Clusterupgrade(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeature_Clusterupgrade(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "update_time"}, + }, + { + Config: 
testAccGKEHubFeature_ClusterupgradeUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"update_time"}, + }, + }, + }) +} + +func testAccGKEHubFeature_Clusterupgrade(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "clusterupgrade" + location = "global" + spec { + clusterupgrade { + upstream_fleets = [] + post_conditions { + soaking = "60s" + } + } + } + depends_on = [google_project_service.gkehub] + project = google_project.project.project_id +} + +resource "google_gke_hub_feature" "feature_2" { + name = "clusterupgrade" + location = "global" + spec { + clusterupgrade { + upstream_fleets = [] + post_conditions { + soaking = "60s" + } + } + } + depends_on = [google_project_service.gkehub_2] + project = google_project.project_2.project_id +} +`, context) +} + +func testAccGKEHubFeature_ClusterupgradeUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "clusterupgrade" + location = "global" + spec { + clusterupgrade { + upstream_fleets = [google_project.project_2.number] + post_conditions { + soaking = "120s" + } + gke_upgrade_overrides { + upgrade { + name = "k8s_control_plane" + version = "1.22.1-gke.100" + } + post_conditions { + soaking = "240s" + } + } + } + } + project = google_project.project.project_id +} + +resource "google_gke_hub_feature" "feature_2" { + name = "clusterupgrade" + location = "global" + spec { + clusterupgrade { + upstream_fleets = [] + post_conditions { + soaking = "60s" + } + } + } + depends_on = [google_project_service.gkehub_2] + project = google_project.project_2.project_id +} +`, context) +} + +func TestAccGKEHubFeature_FleetDefaultMemberConfigPolicyController(t *testing.T) { + // VCR fails to 
handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigPolicyController(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "update_time"}, + }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigPolicyControllerFull(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigPolicyControllerMinimal(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigPolicyController(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "policycontroller" + location = "global" + fleet_default_member_config { + policycontroller { + policy_controller_hub_config { + install_spec = "INSTALL_SPEC_ENABLED" + exemptable_namespaces = ["foo"] + policy_content { + bundles { + bundle = "policy-essentials-v2022" + exempted_namespaces = ["foo", "bar"] + } + } + audit_interval_seconds = 30 + referential_rules_enabled = true + } + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.poco] + project = google_project.project.project_id +} +`, context) +} + +func 
testAccGKEHubFeature_FleetDefaultMemberConfigPolicyControllerFull(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "policycontroller" + location = "global" + fleet_default_member_config { + policycontroller { + policy_controller_hub_config { + install_spec = "INSTALL_SPEC_SUSPENDED" + policy_content { + bundles { + bundle = "pci-dss-v3.2.1" + exempted_namespaces = ["baz", "bar"] + } + bundles { + bundle = "nist-sp-800-190" + exempted_namespaces = [] + } + template_library { + installation = "ALL" + } + } + constraint_violation_limit = 50 + referential_rules_enabled = true + log_denies_enabled = true + mutation_enabled = true + deployment_configs { + component = "admission" + replica_count = 2 + pod_affinity = "ANTI_AFFINITY" + } + deployment_configs { + component = "audit" + container_resources { + limits { + memory = "1Gi" + cpu = "1.5" + } + requests { + memory = "500Mi" + cpu = "150m" + } + } + pod_toleration { + key = "key1" + operator = "Equal" + value = "value1" + effect = "NoSchedule" + } + } + monitoring { + backends = [ + "PROMETHEUS" + ] + } + } + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.poco] + project = google_project.project.project_id +} +`, context) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigPolicyControllerMinimal(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "policycontroller" + location = "global" + fleet_default_member_config { + policycontroller { + policy_controller_hub_config { + install_spec = "INSTALL_SPEC_ENABLED" + policy_content {} + constraint_violation_limit = 50 + referential_rules_enabled = true + log_denies_enabled = true + mutation_enabled = true + deployment_configs { + component = "admission" + } + monitoring {} + } + } 
+ } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.poco] + project = google_project.project.project_id +} +`, context) +} + +func TestAccGKEHubFeature_gkehubFeatureMcsd(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHubFeatureDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGKEHubFeature_gkehubFeatureMcsd(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "labels", "terraform_labels"}, + }, + { + Config: testAccGKEHubFeature_gkehubFeatureMcsdUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccGKEHubFeature_gkehubFeatureMcsd(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "multiclusterservicediscovery" + location = "global" + project = "projects/${google_project.project.project_id}" + labels = { + foo = "bar" + } + depends_on = [google_project_service.mcsd] +} +`, context) +} + +func testAccGKEHubFeature_gkehubFeatureMcsdUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "multiclusterservicediscovery" + location = "global" + project = 
google_project.project.project_id + labels = { + foo = "quux" + baz = "qux" + } + depends_on = [google_project_service.mcsd] +} +`, context) +} + +func gkeHubFeatureProjectSetupForGA(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + name = "tf-test-gkehub%{random_suffix}" + project_id = "tf-test-gkehub%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} + +resource "google_project_service" "mesh" { + project = google_project.project.project_id + service = "meshconfig.googleapis.com" +} + +resource "google_project_service" "mci" { + project = google_project.project.project_id + service = "multiclusteringress.googleapis.com" +} + +resource "google_project_service" "acm" { + project = google_project.project.project_id + service = "anthosconfigmanagement.googleapis.com" +} + +resource "google_project_service" "poco" { + project = google_project.project.project_id + service = "anthospolicycontroller.googleapis.com" +} + +resource "google_project_service" "mcsd" { + project = google_project.project.project_id + service = "multiclusterservicediscovery.googleapis.com" +} + +resource "google_project_service" "compute" { + project = google_project.project.project_id + service = "compute.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "container" { + project = google_project.project.project_id + service = "container.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "anthos" { + project = google_project.project.project_id + service = "anthos.googleapis.com" +} + +resource "google_project_service" "gkehub" { + project = google_project.project.project_id + service = "gkehub.googleapis.com" + disable_on_destroy = false +} + +resource "google_project" "project_2" { + name = "tf-test-gkehub%{random_suffix}-2" + project_id = "tf-test-gkehub%{random_suffix}-2" + org_id = "%{org_id}" + billing_account = "%{billing_account}" 
+} + +resource "google_project_service" "compute_2" { + project = google_project.project_2.project_id + service = "compute.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "container_2" { + project = google_project.project_2.project_id + service = "container.googleapis.com" + disable_on_destroy = false +} + +resource "google_project_service" "gkehub_2" { + project = google_project.project_2.project_id + service = "gkehub.googleapis.com" + disable_on_destroy = false +} +`, context) +} + +func testAccCheckGKEHubFeatureDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_gke_hub_feature" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}GKEHub2BasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/features/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("GKEHubFeature still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_fleet_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_fleet_test.go.tmpl new file mode 100644 index 000000000000..f3b872f24615 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_fleet_test.go.tmpl @@ -0,0 +1,180 @@ +package gkehub2_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccGKEHub2Fleet_gkehubFleetBasicExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHub2FleetDestroyProducer(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccGKEHub2Fleet_basic(context), + }, + { + ResourceName: "google_gke_hub_fleet.default", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHub2Fleet_update(context), + }, + { + ResourceName: "google_gke_hub_fleet.default", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHub2Fleet_removedDefaultClusterConfig(context), + }, + { + ResourceName: "google_gke_hub_fleet.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGKEHub2Fleet_basic(context map[string]interface{}) string { + return gkeHubFleetProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_fleet" "default" { + project = google_project.project.project_id + display_name = "my production fleet" + default_cluster_config { + binary_authorization_config { + evaluation_mode = "DISABLED" + } + security_posture_config { + mode = "DISABLED" + vulnerability_mode = "VULNERABILITY_DISABLED" + } + } + depends_on = [time_sleep.wait_for_gkehub_enablement] +} +`, 
context) +} + +func testAccGKEHub2Fleet_update(context map[string]interface{}) string { + return gkeHubFleetProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_fleet" "default" { + project = google_project.project.project_id + display_name = "my updated fleet" + default_cluster_config { + binary_authorization_config { + evaluation_mode = "POLICY_BINDINGS" + policy_bindings { + name = "projects/${google_project.project.project_id}/platforms/gke/policies/policy_id" + } + } + security_posture_config { + mode = "BASIC" + vulnerability_mode = "VULNERABILITY_BASIC" + } + } + depends_on = [time_sleep.wait_for_gkehub_enablement] +} +`, context) +} + +func testAccGKEHub2Fleet_removedDefaultClusterConfig(context map[string]interface{}) string { + return gkeHubFleetProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_fleet" "default" { + project = google_project.project.project_id + display_name = "my updated fleet" + + depends_on = [time_sleep.wait_for_gkehub_enablement] +} +`, context) +} + +func gkeHubFleetProjectSetupForGA(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + name = "tf-test-gkehub%{random_suffix}" + project_id = "tf-test-gkehub%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} + +resource "google_project_service" "gkehub" { + project = google_project.project.project_id + service = "gkehub.googleapis.com" + disable_on_destroy = false + depends_on = [google_project_service.anthos] +} + +resource "google_project_service" "anthos" { + project = google_project.project.project_id + service = "anthos.googleapis.com" + disable_on_destroy = false +} + +resource "time_sleep" "wait_for_gkehub_enablement" { + create_duration = "150s" + depends_on = [google_project_service.gkehub] +} +`, context) +} + +func testAccCheckGKEHub2FleetDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for 
name, rs := range s.RootModule().Resources { + if rs.Type != "google_gke_hub_fleet" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}GKEHub2BasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/global/fleets/default") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("GKEHub2Fleet still exists at %s", url) + } + } + + return nil + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/gkeonprem/go/gkeonprem_operation.go b/mmv1/third_party/terraform/services/gkeonprem/go/gkeonprem_operation.go new file mode 100644 index 000000000000..255f43439b96 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkeonprem/go/gkeonprem_operation.go @@ -0,0 +1,145 @@ +package gkeonprem + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" +) + +type gkeonpremOpError struct { + *cloudresourcemanager.Status +} + +func (e gkeonpremOpError) Error() string { + var validationCheck map[string]interface{} + + for _, msg := range e.Details { + detail := make(map[string]interface{}) + if err := json.Unmarshal(msg, &detail); err != nil { + continue + } + + if _, ok := detail["validationCheck"]; ok { + delete(detail, "@type") + validationCheck = detail + } + } + + if validationCheck != nil { + bytes, err := json.MarshalIndent(validationCheck, "", " ") + if err != nil { + return fmt.Sprintf("Error 
code %v message: %s validation check: %s", e.Code, e.Message, validationCheck) + } + + return fmt.Sprintf("Error code %v message: %s\n %s", e.Code, e.Message, bytes) + } + + return fmt.Sprintf("Error code %v, message: %s", e.Code, e.Message) +} + +type gkeonpremOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + Op tpgresource.CommonOperation +} + +func (w *gkeonpremOperationWaiter) State() string { + if w == nil { + return fmt.Sprintf("Operation is nil!") + } + + return fmt.Sprintf("done: %v", w.Op.Done) +} + +func (w *gkeonpremOperationWaiter) Error() error { + if w != nil && w.Op.Error != nil { + return &gkeonpremOpError{w.Op.Error} + } + return nil +} + +func (w *gkeonpremOperationWaiter) IsRetryable(error) bool { + return false +} + +func (w *gkeonpremOperationWaiter) SetOp(op interface{}) error { + if err := tpgresource.Convert(op, &w.Op); err != nil { + return err + } + return nil +} + +func (w *gkeonpremOperationWaiter) OpName() string { + if w == nil { + return "" + } + + return w.Op.Name +} + +func (w *gkeonpremOperationWaiter) PendingStates() []string { + return []string{"done: false"} +} + +func (w *gkeonpremOperationWaiter) TargetStates() []string { + return []string{"done: true"} +} + +func (w *gkeonpremOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. 
+ url := fmt.Sprintf("%s%s", w.Config.GkeonpremBasePath, w.Op.Name) + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func creategkeonpremWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*gkeonpremOperationWaiter, error) { + w := &gkeonpremOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func GkeonpremOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := creategkeonpremWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.Op.Response), response) +} + +func GkeonpremOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := creategkeonpremWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_bare_metal_cluster_test.go b/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_bare_metal_cluster_test.go new file mode 100644 index 000000000000..caa9138f058b --- /dev/null +++ b/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_bare_metal_cluster_test.go @@ -0,0 +1,575 @@ +package gkeonprem_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccGkeonpremBareMetalCluster_bareMetalClusterUpdateBasic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGkeonpremBareMetalClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateMetalLbStart(context), + }, + { + ResourceName: "google_gkeonprem_bare_metal_cluster.cluster-metallb", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + { + Config: testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateMetalLb(context), + }, + { + ResourceName: "google_gkeonprem_bare_metal_cluster.cluster-metallb", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + }, + }) +} + +func TestAccGkeonpremBareMetalCluster_bareMetalClusterUpdateManualLb(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGkeonpremBareMetalClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateManualLbStart(context), + }, + { + ResourceName: "google_gkeonprem_bare_metal_cluster.cluster-manuallb", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateManualLb(context), + }, + { + ResourceName: "google_gkeonprem_bare_metal_cluster.cluster-manuallb", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccGkeonpremBareMetalCluster_bareMetalClusterUpdateBgpLb(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGkeonpremBareMetalClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateBgpLbStart(context), + }, + { + ResourceName: "google_gkeonprem_bare_metal_cluster.cluster-bgplb", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateBgpLb(context), + }, + { + ResourceName: "google_gkeonprem_bare_metal_cluster.cluster-bgplb", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateMetalLbStart(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_bare_metal_cluster" "cluster-metallb" { + name = "cluster-metallb%{random_suffix}" + location = "us-west1" + annotations = { + env = "test" + } + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + bare_metal_version = "1.12.3" + network_config { + island_mode_cidr { 
+ service_address_cidr_blocks = ["172.26.0.0/16"] + pod_address_cidr_blocks = ["10.240.0.0/13"] + } + } + control_plane { + control_plane_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.9" + } + } + } + } + load_balancer { + port_config { + control_plane_load_balancer_port = 443 + } + vip_config { + control_plane_vip = "10.200.0.13" + ingress_vip = "10.200.0.14" + } + metal_lb_config { + address_pools { + pool = "pool1" + addresses = [ + "10.200.0.14/32", + "10.200.0.15/32", + "10.200.0.16/32", + "10.200.0.17/32", + "10.200.0.18/32", + "fd00:1::f/128", + "fd00:1::10/128", + "fd00:1::11/128", + "fd00:1::12/128" + ] + } + } + } + storage { + lvp_share_config { + lvp_config { + path = "/mnt/localpv-share" + storage_class = "local-shared" + } + shared_path_pv_count = 5 + } + lvp_node_mounts_config { + path = "/mnt/localpv-disk" + storage_class = "local-disks" + } + } + security_config { + authorization { + admin_users { + username = "admin@hashicorptest.com" + } + } + } + } +`, context) +} + +func testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateMetalLb(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_bare_metal_cluster" "cluster-metallb" { + name = "cluster-metallb%{random_suffix}" + location = "us-west1" + annotations = { + env = "test-update" + } + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + bare_metal_version = "1.12.3" + network_config { + island_mode_cidr { + service_address_cidr_blocks = ["172.26.0.0/20"] + pod_address_cidr_blocks = ["10.240.0.0/14"] + } + } + control_plane { + control_plane_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.10" + } + } + } + } + load_balancer { + port_config { + control_plane_load_balancer_port = 80 + } + vip_config { + control_plane_vip = 
"10.200.0.14" + ingress_vip = "10.200.0.15" + } + metal_lb_config { + address_pools { + pool = "pool2" + addresses = [ + "10.200.0.14/32", + "10.200.0.15/32", + "10.200.0.16/32", + "10.200.0.17/32", + "fd00:1::f/128", + "fd00:1::10/128", + "fd00:1::11/128" + ] + } + } + } + storage { + lvp_share_config { + lvp_config { + path = "/mnt/localpv-share-updated" + storage_class = "local-shared-updated" + } + shared_path_pv_count = 6 + } + lvp_node_mounts_config { + path = "/mnt/localpv-disk-updated" + storage_class = "local-disks-updated" + } + } + security_config { + authorization { + admin_users { + username = "admin-updated@hashicorptest.com" + } + } + } + } +`, context) +} + +func testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateManualLbStart(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_bare_metal_cluster" "cluster-manuallb" { + name = "cluster-manuallb%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + bare_metal_version = "1.12.3" + network_config { + island_mode_cidr { + service_address_cidr_blocks = ["172.26.0.0/20"] + pod_address_cidr_blocks = ["10.240.0.0/14"] + } + } + control_plane { + control_plane_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.10" + } + } + } + } + load_balancer { + port_config { + control_plane_load_balancer_port = 80 + } + vip_config { + control_plane_vip = "10.200.0.13" + ingress_vip = "10.200.0.14" + } + metal_lb_config { + address_pools { + pool = "pool2" + addresses = [ + "10.200.0.14/32", + "10.200.0.15/32", + "10.200.0.16/32", + "10.200.0.17/32", + "fd00:1::f/128", + "fd00:1::10/128", + "fd00:1::11/128" + ] + } + } + } + storage { + lvp_share_config { + lvp_config { + path = "/mnt/localpv-share" + storage_class = "local-shared" + } + shared_path_pv_count = 6 + } + lvp_node_mounts_config { 
+ path = "/mnt/localpv-disk" + storage_class = "local-disks" + } + } + security_config { + authorization { + admin_users { + username = "admin@hashicorptest.com" + } + } + } + binary_authorization { + evaluation_mode = "DISABLED" + } + upgrade_policy { + policy = "SERIAL" + } + } +`, context) +} + +func testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateManualLb(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_bare_metal_cluster" "cluster-manuallb" { + name = "cluster-manuallb%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + bare_metal_version = "1.12.3" + network_config { + island_mode_cidr { + service_address_cidr_blocks = ["172.26.0.0/20"] + pod_address_cidr_blocks = ["10.240.0.0/14"] + } + } + control_plane { + control_plane_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.10" + } + } + } + } + load_balancer { + port_config { + control_plane_load_balancer_port = 80 + } + vip_config { + control_plane_vip = "10.200.0.14" + ingress_vip = "10.200.0.15" + } + manual_lb_config { + enabled = true + } + } + storage { + lvp_share_config { + lvp_config { + path = "/mnt/localpv-share-updated" + storage_class = "local-shared-updated" + } + shared_path_pv_count = 6 + } + lvp_node_mounts_config { + path = "/mnt/localpv-disk-updated" + storage_class = "local-disks-updated" + } + } + security_config { + authorization { + admin_users { + username = "admin-updated@hashicorptest.com" + } + } + } + binary_authorization { + evaluation_mode = "PROJECT_SINGLETON_POLICY_ENFORCE" + } + upgrade_policy { + policy = "CONCURRENT" + } + } +`, context) +} + +func testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateBgpLbStart(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_bare_metal_cluster" 
"cluster-bgplb" { + name = "cluster-bgplb%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + bare_metal_version = "1.12.3" + network_config { + island_mode_cidr { + service_address_cidr_blocks = ["172.26.0.0/20"] + pod_address_cidr_blocks = ["10.240.0.0/14"] + } + } + control_plane { + control_plane_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.10" + } + } + } + } + load_balancer { + port_config { + control_plane_load_balancer_port = 80 + } + vip_config { + control_plane_vip = "10.200.0.13" + ingress_vip = "10.200.0.14" + } + bgp_lb_config { + asn = 123456 + bgp_peer_configs { + asn = 123457 + ip_address = "10.0.0.1" + control_plane_nodes = ["test-node"] + } + address_pools { + pool = "pool1" + addresses = [ + "10.200.0.14/32", + "fd00:1::12/128" + ] + } + load_balancer_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.9" + } + } + } + } + } + storage { + lvp_share_config { + lvp_config { + path = "/mnt/localpv-share" + storage_class = "local-shared" + } + shared_path_pv_count = 6 + } + lvp_node_mounts_config { + path = "/mnt/localpv-disk" + storage_class = "local-disks" + } + } + security_config { + authorization { + admin_users { + username = "admin@hashicorptest.com" + } + } + } + } +`, context) +} + +func testAccGkeonpremBareMetalCluster_bareMetalClusterUpdateBgpLb(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_bare_metal_cluster" "cluster-bgplb" { + name = "cluster-bgplb%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + bare_metal_version = "1.12.3" + network_config { + island_mode_cidr { + service_address_cidr_blocks = ["172.26.0.0/20"] + 
pod_address_cidr_blocks = ["10.240.0.0/14"] + } + } + control_plane { + control_plane_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.10" + } + } + } + } + load_balancer { + port_config { + control_plane_load_balancer_port = 80 + } + vip_config { + control_plane_vip = "10.200.0.14" + ingress_vip = "10.200.0.15" + } + bgp_lb_config { + asn = 123457 + bgp_peer_configs { + asn = 123458 + ip_address = "10.0.0.2" + control_plane_nodes = ["test-node-updated"] + } + address_pools { + pool = "pool2" + addresses = [ + "10.200.0.15/32", + "fd00:1::16/128" + ] + } + load_balancer_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.11" + } + } + } + } + } + storage { + lvp_share_config { + lvp_config { + path = "/mnt/localpv-share-updated" + storage_class = "local-shared-updated" + } + shared_path_pv_count = 6 + } + lvp_node_mounts_config { + path = "/mnt/localpv-disk-updated" + storage_class = "local-disks-updated" + } + } + security_config { + authorization { + admin_users { + username = "admin-updated@hashicorptest.com" + } + } + } + } +`, context) +} diff --git a/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_bare_metal_node_pool_test.go b/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_bare_metal_node_pool_test.go new file mode 100644 index 000000000000..67959eb8364f --- /dev/null +++ b/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_bare_metal_node_pool_test.go @@ -0,0 +1,226 @@ +package gkeonprem_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccGkeonpremBareMetalNodePool_bareMetalNodePoolUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } 
+ + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGkeonpremBareMetalNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGkeonpremBareMetalNodePool_bareMetalNodePoolUpdateStart(context), + }, + { + ResourceName: "google_gkeonprem_bare_metal_node_pool.nodepool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + { + Config: testAccGkeonpremBareMetalNodePool_bareMetalNodePoolUpdate(context), + }, + { + ResourceName: "google_gkeonprem_bare_metal_node_pool.nodepool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + }, + }) +} + +func testAccGkeonpremBareMetalNodePool_bareMetalNodePoolUpdateStart(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_bare_metal_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + bare_metal_version = "1.12.3" + network_config { + island_mode_cidr { + service_address_cidr_blocks = ["172.26.0.0/16"] + pod_address_cidr_blocks = ["10.240.0.0/13"] + } + } + control_plane { + control_plane_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.9" + } + } + } + } + load_balancer { + port_config { + control_plane_load_balancer_port = 443 + } + vip_config { + control_plane_vip = "10.200.0.13" + ingress_vip = "10.200.0.14" + } + metal_lb_config { + address_pools { + pool = "pool1" + addresses = [ + "10.200.0.14/32", + "10.200.0.15/32", + "10.200.0.16/32", + "10.200.0.17/32", + "10.200.0.18/32", + "fd00:1::f/128", + "fd00:1::10/128", + "fd00:1::11/128", + "fd00:1::12/128" + ] + } + } + } + storage { + 
lvp_share_config { + lvp_config { + path = "/mnt/localpv-share" + storage_class = "local-shared" + } + shared_path_pv_count = 5 + } + lvp_node_mounts_config { + path = "/mnt/localpv-disk" + storage_class = "local-disks" + } + } + security_config { + authorization { + admin_users { + username = "admin@hashicorptest.com" + } + } + } + } + + resource "google_gkeonprem_bare_metal_node_pool" "nodepool" { + name = "tf-test-nodepool-%{random_suffix}" + location = "us-west1" + bare_metal_cluster = google_gkeonprem_bare_metal_cluster.cluster.name + annotations = { + env = "test" + } + node_pool_config { + operating_system = "LINUX" + labels = {} + node_configs { + node_ip = "10.200.0.11" + labels = {} + } + } + } +`, context) +} + +func testAccGkeonpremBareMetalNodePool_bareMetalNodePoolUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_bare_metal_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + bare_metal_version = "1.12.3" + network_config { + island_mode_cidr { + service_address_cidr_blocks = ["172.26.0.0/16"] + pod_address_cidr_blocks = ["10.240.0.0/13"] + } + } + control_plane { + control_plane_node_pool_config { + node_pool_config { + labels = {} + operating_system = "LINUX" + node_configs { + labels = {} + node_ip = "10.200.0.9" + } + } + } + } + load_balancer { + port_config { + control_plane_load_balancer_port = 443 + } + vip_config { + control_plane_vip = "10.200.0.13" + ingress_vip = "10.200.0.14" + } + metal_lb_config { + address_pools { + pool = "pool1" + addresses = [ + "10.200.0.14/32", + "10.200.0.15/32", + "10.200.0.16/32", + "10.200.0.17/32", + "10.200.0.18/32", + "fd00:1::f/128", + "fd00:1::10/128", + "fd00:1::11/128", + "fd00:1::12/128" + ] + } + } + } + storage { + lvp_share_config { + lvp_config { + path = "/mnt/localpv-share" + storage_class = 
"local-shared" + } + shared_path_pv_count = 5 + } + lvp_node_mounts_config { + path = "/mnt/localpv-disk" + storage_class = "local-disks" + } + } + security_config { + authorization { + admin_users { + username = "admin@hashicorptest.com" + } + } + } + } + + resource "google_gkeonprem_bare_metal_node_pool" "nodepool" { + name = "tf-test-nodepool-%{random_suffix}" + location = "us-west1" + bare_metal_cluster = google_gkeonprem_bare_metal_cluster.cluster.name + annotations = { + env = "test-update" + } + node_pool_config { + operating_system = "LINUX" + labels = {} + node_configs { + node_ip = "10.200.0.12" + labels = {} + } + } + } +`, context) +} diff --git a/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_vmware_cluster_test.go b/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_vmware_cluster_test.go new file mode 100644 index 000000000000..0f303ba04155 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_vmware_cluster_test.go @@ -0,0 +1,484 @@ +package gkeonprem_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccGkeonpremVmwareCluster_vmwareClusterUpdateBasic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGkeonpremVmwareClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGkeonpremVmwareCluster_vmwareClusterUpdateMetalLbStart(context), + }, + { + ResourceName: "google_gkeonprem_vmware_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + { + Config: testAccGkeonpremVmwareCluster_vmwareClusterUpdateMetalLb(context), 
+ }, + { + ResourceName: "google_gkeonprem_vmware_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + }, + }) +} + +func TestAccGkeonpremVmwareCluster_vmwareClusterUpdateF5Lb(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGkeonpremVmwareClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGkeonpremVmwareCluster_vmwareClusterUpdateF5LbStart(context), + }, + { + ResourceName: "google_gkeonprem_vmware_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGkeonpremVmwareCluster_vmwareClusterUpdateF5lb(context), + }, + { + ResourceName: "google_gkeonprem_vmware_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccGkeonpremVmwareCluster_vmwareClusterUpdateManualLb(t *testing.T) { + // VCR fails to handle batched project services + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGkeonpremVmwareClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGkeonpremVmwareCluster_vmwareClusterUpdateManualLbStart(context), + }, + { + ResourceName: "google_gkeonprem_vmware_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGkeonpremVmwareCluster_vmwareClusterUpdateManualLb(context), + }, + { + ResourceName: "google_gkeonprem_vmware_cluster.cluster", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
testAccGkeonpremVmwareCluster_vmwareClusterUpdateMetalLbStart(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_vmware_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + description = "test cluster" + on_prem_version = "1.13.1-gke.35" + annotations = { + env = "test" + } + network_config { + service_address_cidr_blocks = ["10.96.0.0/12"] + pod_address_cidr_blocks = ["192.168.0.0/16"] + dhcp_ip_config { + enabled = true + } + } + control_plane_node { + cpus = 4 + memory = 8192 + replicas = 1 + } + load_balancer { + vip_config { + control_plane_vip = "10.251.133.5" + ingress_vip = "10.251.135.19" + } + metal_lb_config { + address_pools { + pool = "ingress-ip" + manual_assign = "true" + addresses = ["10.251.135.19"] + avoid_buggy_ips = true + } + address_pools { + pool = "lb-test-ip" + manual_assign = "true" + addresses = ["10.251.135.19"] + avoid_buggy_ips = true + } + } + } + } +`, context) +} + +func testAccGkeonpremVmwareCluster_vmwareClusterUpdateMetalLb(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_vmware_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + description = "test cluster updated" + on_prem_version = "1.13.1-gke.36" + annotations = { + env = "test-update" + } + network_config { + service_address_cidr_blocks = ["10.96.0.0/16"] + pod_address_cidr_blocks = ["192.168.0.0/20"] + dhcp_ip_config { + enabled = true + } + } + control_plane_node { + cpus = 5 + memory = 4098 + replicas = 3 + } + load_balancer { + vip_config { + control_plane_vip = "10.251.133.6" + ingress_vip = "10.251.135.20" + } + metal_lb_config { + address_pools { + pool = "ingress-ip-updated" + 
manual_assign = "false" + addresses = ["10.251.135.20"] + avoid_buggy_ips = false + } + address_pools { + pool = "lb-test-ip-updated" + manual_assign = "false" + addresses = ["10.251.135.20"] + avoid_buggy_ips = false + } + } + } + } +`, context) +} + +func testAccGkeonpremVmwareCluster_vmwareClusterUpdateF5LbStart(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_vmware_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + description = "test cluster" + on_prem_version = "1.13.1-gke.35" + annotations = {} + network_config { + service_address_cidr_blocks = ["10.96.0.0/12"] + pod_address_cidr_blocks = ["192.168.0.0/16"] + dhcp_ip_config { + enabled = true + } + control_plane_v2_config { + control_plane_ip_block { + ips { + hostname = "test-hostname" + ip = "10.0.0.1" + } + netmask="10.0.0.1/32" + gateway="test-gateway" + } + } + } + control_plane_node { + cpus = 4 + memory = 8192 + replicas = 1 + } + load_balancer { + vip_config { + control_plane_vip = "10.251.133.5" + ingress_vip = "10.251.135.19" + } + f5_config { + address = "10.0.0.1" + partition = "test-partition" + snat_pool = "test-snap-pool" + } + } + } +`, context) +} + +func testAccGkeonpremVmwareCluster_vmwareClusterUpdateF5lb(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_vmware_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + description = "test cluster" + on_prem_version = "1.13.1-gke.35" + annotations = {} + network_config { + service_address_cidr_blocks = ["10.96.0.0/12"] + pod_address_cidr_blocks = ["192.168.0.0/16"] + dhcp_ip_config { + enabled = true + } + control_plane_v2_config { + control_plane_ip_block { + 
ips { + hostname = "test-hostname-updated" + ip = "10.0.0.2" + } + netmask="10.0.0.2/32" + gateway="test-gateway-updated" + } + } + } + control_plane_node { + cpus = 4 + memory = 8192 + replicas = 1 + } + load_balancer { + vip_config { + control_plane_vip = "10.251.133.5" + ingress_vip = "10.251.135.19" + } + f5_config { + address = "10.0.0.2" + partition = "test-partition-updated" + snat_pool = "test-snap-pool-updated" + } + } + } +`, context) +} + +func testAccGkeonpremVmwareCluster_vmwareClusterUpdateManualLbStart(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_vmware_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + description = "test cluster" + on_prem_version = "1.13.1-gke.35" + annotations = {} + network_config { + service_address_cidr_blocks = ["10.96.0.0/12"] + pod_address_cidr_blocks = ["192.168.0.0/16"] + host_config { + dns_servers = ["10.254.41.1"] + ntp_servers = ["216.239.35.8"] + dns_search_domains = ["test-domain"] + } + static_ip_config { + ip_blocks { + netmask = "255.255.252.0" + gateway = "10.251.31.254" + ips { + ip = "10.251.30.153" + hostname = "test-hostname1" + } + ips { + ip = "10.251.31.206" + hostname = "test-hostname2" + } + ips { + ip = "10.251.31.193" + hostname = "test-hostname3" + } + ips { + ip = "10.251.30.230" + hostname = "test-hostname4" + } + } + } + } + control_plane_node { + cpus = 4 + memory = 8192 + replicas = 1 + } + load_balancer { + vip_config { + control_plane_vip = "10.251.133.5" + ingress_vip = "10.251.135.19" + } + manual_lb_config { + ingress_http_node_port = 30005 + ingress_https_node_port = 30006 + control_plane_node_port = 30007 + konnectivity_server_node_port = 30008 + } + } + vcenter { + resource_pool = "test-resource-pool" + datastore = "test-datastore" + datacenter = "test-datacenter" + cluster = "test-cluster" 
+ folder = "test-folder" + ca_cert_data = "test-ca-cert-data" + storage_policy_name = "test-storage-policy-name" + } + dataplane_v2 { + dataplane_v2_enabled = true + windows_dataplane_v2_enabled = true + advanced_networking = true + } + vm_tracking_enabled = true + enable_control_plane_v2 = true + disable_bundled_ingress = true + upgrade_policy { + control_plane_only = true + } + authorization { + admin_users { + username = "testuser@gmail.com" + } + } + anti_affinity_groups { + aag_config_disabled = true + } + auto_repair_config { + enabled = true + } + } +`, context) +} + +func testAccGkeonpremVmwareCluster_vmwareClusterUpdateManualLb(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_vmware_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + description = "test cluster" + on_prem_version = "1.13.1-gke.35" + annotations = {} + network_config { + service_address_cidr_blocks = ["10.96.0.0/12"] + pod_address_cidr_blocks = ["192.168.0.0/16"] + host_config { + dns_servers = ["10.254.41.1"] + ntp_servers = ["216.239.35.8"] + dns_search_domains = ["test-domain"] + } + static_ip_config { + ip_blocks { + netmask = "255.255.252.1" + gateway = "10.251.31.255" + ips { + ip = "10.251.30.154" + hostname = "test-hostname1-updated" + } + ips { + ip = "10.251.31.206" + hostname = "test-hostname2" + } + ips { + ip = "10.251.31.193" + hostname = "test-hostname3" + } + ips { + ip = "10.251.30.230" + hostname = "test-hostname4" + } + } + } + } + control_plane_node { + cpus = 4 + memory = 8192 + replicas = 1 + } + load_balancer { + vip_config { + control_plane_vip = "10.251.133.5" + ingress_vip = "10.251.135.19" + } + manual_lb_config { + ingress_http_node_port = 30006 + ingress_https_node_port = 30007 + control_plane_node_port = 30008 + konnectivity_server_node_port = 30009 + } + } + vcenter { 
+ resource_pool = "test-resource-pool-updated" + datastore = "test-datastore-updated" + datacenter = "test-datacenter-updated" + cluster = "test-cluster-updated" + folder = "test-folder-updated" + ca_cert_data = "test-ca-cert-data-updated" + storage_policy_name = "test-storage-policy-name-updated" + } + dataplane_v2 { + dataplane_v2_enabled = true + windows_dataplane_v2_enabled = true + advanced_networking = true + } + vm_tracking_enabled = false + enable_control_plane_v2 = false + disable_bundled_ingress = false + upgrade_policy { + control_plane_only = true + } + authorization { + admin_users { + username = "testuser-updated@gmail.com" + } + } + anti_affinity_groups { + aag_config_disabled = true + } + auto_repair_config { + enabled = true + } + } +`, context) +} diff --git a/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_vmware_node_pool_test.go b/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_vmware_node_pool_test.go new file mode 100644 index 000000000000..0be6f8fa29a0 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkeonprem/go/resource_gkeonprem_vmware_node_pool_test.go @@ -0,0 +1,212 @@ +package gkeonprem_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccGkeonpremVmwareNodePool_vmwareNodePoolUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGkeonpremVmwareNodePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccGkeonpremVmwareNodePool_vmwareNodePoolUpdateStart(context), + }, + { + ResourceName: "google_gkeonprem_vmware_node_pool.nodepool", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"annotations"}, + }, + { + Config: testAccGkeonpremVmwareNodePool_vmwareNodePoolUpdate(context), + }, + { + ResourceName: "google_gkeonprem_vmware_node_pool.nodepool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"annotations"}, + }, + }, + }) +} + +func testAccGkeonpremVmwareNodePool_vmwareNodePoolUpdateStart(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_vmware_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + description = "test cluster" + on_prem_version = "1.13.1-gke.35" + annotations = {} + network_config { + service_address_cidr_blocks = ["10.96.0.0/12"] + pod_address_cidr_blocks = ["192.168.0.0/16"] + dhcp_ip_config { + enabled = true + } + } + control_plane_node { + cpus = 4 + memory = 8192 + replicas = 1 + } + load_balancer { + vip_config { + control_plane_vip = "10.251.133.5" + ingress_vip = "10.251.135.19" + } + metal_lb_config { + address_pools { + pool = "ingress-ip" + manual_assign = "true" + addresses = ["10.251.135.19"] + avoid_buggy_ips = true + } + address_pools { + pool = "lb-test-ip" + manual_assign = "true" + addresses = ["10.251.135.19"] + avoid_buggy_ips = true + } + } + } + } + + resource "google_gkeonprem_vmware_node_pool" "nodepool" { + name = "tf-test-nodepool-%{random_suffix}" + location = "us-west1" + vmware_cluster = google_gkeonprem_vmware_cluster.cluster.name + annotations = { + env = "test" + } + config { + cpus = 4 + memory_mb = 8196 + replicas = 3 + image_type = "ubuntu_containerd" + image = "image" + boot_disk_size_gb = 10 + taints { + key = "key" + value = "value" + } + labels = {} + vsphere_config { + datastore = "test-datastore" + tags { + category = "test-category-1" + tag = "tag-1" + } + tags { + category = "test-category-2" + tag = "tag-2" + } + 
host_groups = ["host1", "host2"] + } + enable_load_balancer = true + } + node_pool_autoscaling { + min_replicas = 1 + max_replicas = 5 + } + } +`, context) +} + +func testAccGkeonpremVmwareNodePool_vmwareNodePoolUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` + + resource "google_gkeonprem_vmware_cluster" "cluster" { + name = "tf-test-cluster-%{random_suffix}" + location = "us-west1" + admin_cluster_membership = "projects/870316890899/locations/global/memberships/gkeonprem-terraform-test" + description = "test cluster" + on_prem_version = "1.13.1-gke.35" + annotations = {} + network_config { + service_address_cidr_blocks = ["10.96.0.0/12"] + pod_address_cidr_blocks = ["192.168.0.0/16"] + dhcp_ip_config { + enabled = true + } + } + control_plane_node { + cpus = 4 + memory = 8192 + replicas = 1 + } + load_balancer { + vip_config { + control_plane_vip = "10.251.133.5" + ingress_vip = "10.251.135.19" + } + metal_lb_config { + address_pools { + pool = "ingress-ip" + manual_assign = "true" + addresses = ["10.251.135.19"] + avoid_buggy_ips = true + } + address_pools { + pool = "lb-test-ip" + manual_assign = "true" + addresses = ["10.251.135.19"] + avoid_buggy_ips = true + } + } + } + } + + resource "google_gkeonprem_vmware_node_pool" "nodepool" { + name = "tf-test-nodepool-%{random_suffix}" + location = "us-west1" + vmware_cluster = google_gkeonprem_vmware_cluster.cluster.name + annotations = { + env = "test-update" + } + config { + cpus = 5 + memory_mb = 4096 + replicas = 3 + image_type = "windows" + image = "image-updated" + boot_disk_size_gb = 12 + taints { + key = "key-updated" + value = "value-updated" + } + labels = {} + vsphere_config { + datastore = "test-datastore-update" + tags { + category = "test-category-3" + tag = "tag-3" + } + tags { + category = "test-category-4" + tag = "tag-4" + } + host_groups = ["host3", "host4"] + } + enable_load_balancer = false + } + node_pool_autoscaling { + min_replicas = 2 + max_replicas = 6 + } + } +`, 
context) +} diff --git a/mmv1/third_party/terraform/services/healthcare/go/resource_healthcare_fhir_store_test.go.tmpl b/mmv1/third_party/terraform/services/healthcare/go/resource_healthcare_fhir_store_test.go.tmpl new file mode 100644 index 000000000000..ec54cd6a1326 --- /dev/null +++ b/mmv1/third_party/terraform/services/healthcare/go/resource_healthcare_fhir_store_test.go.tmpl @@ -0,0 +1,232 @@ +package healthcare_test + +import ( + "fmt" + "path" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/healthcare" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccHealthcareFhirStoreIdParsing(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + ImportId string + ExpectedError bool + ExpectedTerraformId string + ExpectedFhirStoreId string + Config *transport_tpg.Config + }{ + "id is in project/location/datasetName/fhirStoreName format": { + ImportId: "test-project/us-central1/test-dataset/test-store-name", + ExpectedError: false, + ExpectedTerraformId: "test-project/us-central1/test-dataset/test-store-name", + ExpectedFhirStoreId: "projects/test-project/locations/us-central1/datasets/test-dataset/fhirStores/test-store-name", + }, + "id is in domain:project/location/datasetName/fhirStoreName format": { + ImportId: "example.com:test-project/us-central1/test-dataset/test-store-name", + ExpectedError: false, + ExpectedTerraformId: "example.com:test-project/us-central1/test-dataset/test-store-name", + ExpectedFhirStoreId: "projects/example.com:test-project/locations/us-central1/datasets/test-dataset/fhirStores/test-store-name", + }, + "id is in location/datasetName/fhirStoreName format": { + ImportId: 
"us-central1/test-dataset/test-store-name", + ExpectedError: false, + ExpectedTerraformId: "test-project/us-central1/test-dataset/test-store-name", + ExpectedFhirStoreId: "projects/test-project/locations/us-central1/datasets/test-dataset/fhirStores/test-store-name", + Config: &transport_tpg.Config{Project: "test-project"}, + }, + "id is in location/datasetName/fhirStoreName format without project in config": { + ImportId: "us-central1/test-dataset/test-store-name", + ExpectedError: true, + Config: &transport_tpg.Config{Project: ""}, + }, + } + + for tn, tc := range cases { + fhirStoreId, err := healthcare.ParseHealthcareFhirStoreId(tc.ImportId, tc.Config) + + if tc.ExpectedError && err == nil { + t.Fatalf("bad: %s, expected an error", tn) + } + + if err != nil { + if tc.ExpectedError { + continue + } + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + if fhirStoreId.TerraformId() != tc.ExpectedTerraformId { + t.Fatalf("bad: %s, expected Terraform ID to be `%s` but is `%s`", tn, tc.ExpectedTerraformId, fhirStoreId.TerraformId()) + } + + if fhirStoreId.FhirStoreId() != tc.ExpectedFhirStoreId { + t.Fatalf("bad: %s, expected FhirStore ID to be `%s` but is `%s`", tn, tc.ExpectedFhirStoreId, fhirStoreId.FhirStoreId()) + } + } +} + +func TestAccHealthcareFhirStore_basic(t *testing.T) { + t.Parallel() + + datasetName := fmt.Sprintf("tf-test-dataset-%s", acctest.RandString(t, 10)) + fhirStoreName := fmt.Sprintf("tf-test-fhir-store-%s", acctest.RandString(t, 10)) + pubsubTopic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + resourceName := "google_healthcare_fhir_store.default" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckHealthcareFhirStoreDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleHealthcareFhirStore_basic(fhirStoreName, datasetName), + }, + { + ResourceName: resourceName, + ImportState: 
true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleHealthcareFhirStore_update(fhirStoreName, datasetName, pubsubTopic), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleHealthcareFhirStoreUpdate(t, pubsubTopic), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleHealthcareFhirStore_basic(fhirStoreName, datasetName), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testGoogleHealthcareFhirStore_basic(fhirStoreName, datasetName string) string { + return fmt.Sprintf(` +resource "google_healthcare_fhir_store" "default" { + name = "%s" + dataset = google_healthcare_dataset.dataset.id + + enable_update_create = false + disable_referential_integrity = false + disable_resource_versioning = false + enable_history_import = false + version = "R4" +{{- if ne $.TargetVersionName "ga" }} + enable_history_modifications = false +{{- end }} +} + +resource "google_healthcare_dataset" "dataset" { + name = "%s" + location = "us-central1" +} +`, fhirStoreName, datasetName) +} + +func testGoogleHealthcareFhirStore_update(fhirStoreName, datasetName, pubsubTopic string) string { + return fmt.Sprintf(` +resource "google_healthcare_fhir_store" "default" { + name = "%s" + dataset = google_healthcare_dataset.dataset.id + + enable_update_create = true + version = "R4" + + + notification_configs { + pubsub_topic = google_pubsub_topic.topic.id + send_full_resource = true + send_previous_resource_on_delete = true + } +{{- if ne $.TargetVersionName "ga" }} + enable_history_modifications = true +{{- end }} + + labels = { + label1 = "labelvalue1" + } +} + +resource "google_healthcare_dataset" "dataset" { + name = "%s" + location = "us-central1" +} + 
+resource "google_pubsub_topic" "topic" { + name = "%s" +} +`, fhirStoreName, datasetName, pubsubTopic) +} + +func testAccCheckGoogleHealthcareFhirStoreUpdate(t *testing.T, pubsubTopic string) resource.TestCheckFunc { + return func(s *terraform.State) error { + var foundResource = false + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_healthcare_fhir_store" { + continue + } + foundResource = true + + config := acctest.GoogleProviderConfig(t) + + gcpResourceUri, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}dataset{{"}}"}}/fhirStores/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + response, err := config.NewHealthcareClient(config.UserAgent).Projects.Locations.Datasets.FhirStores.Get(gcpResourceUri).Do() + if err != nil { + return fmt.Errorf("Unexpected failure while verifying 'updated' dataset: %s", err) + } + + if !response.EnableUpdateCreate { + return fmt.Errorf("fhirStore 'EnableUpdateCreate' not updated: %s", gcpResourceUri) + } + + // because the GET for the FHIR store resource does not return the "enableHistoryImport" flag, this value + // will always be false and cannot be relied upon + + //if !response.EnableHistoryImport { + // return fmt.Errorf("fhirStore 'EnableHistoryImport' not updated: %s", gcpResourceUri) + //} + + if len(response.Labels) == 0 || response.Labels["label1"] != "labelvalue1" { + return fmt.Errorf("fhirStore labels not updated: %s", gcpResourceUri) + } + + notifications := response.NotificationConfigs + if len(notifications) > 0 { + topicName := path.Base(notifications[0].PubsubTopic) + if topicName != pubsubTopic { + return fmt.Errorf("fhirStore 'NotificationConfig' not updated ('%s' != '%s'): %s", topicName, pubsubTopic, gcpResourceUri) + } + } + } + + if !foundResource { + return fmt.Errorf("google_healthcare_fhir_store resource was missing") + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/healthcare/go/resource_healthcare_hl7_v2_store_test.go.tmpl 
b/mmv1/third_party/terraform/services/healthcare/go/resource_healthcare_hl7_v2_store_test.go.tmpl new file mode 100644 index 000000000000..d68e93b16a9b --- /dev/null +++ b/mmv1/third_party/terraform/services/healthcare/go/resource_healthcare_hl7_v2_store_test.go.tmpl @@ -0,0 +1,311 @@ +package healthcare_test + +import ( + "fmt" + "path" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/healthcare" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccHealthcareHl7V2StoreIdParsing(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + ImportId string + ExpectedError bool + ExpectedTerraformId string + ExpectedHl7V2StoreId string + Config *transport_tpg.Config + }{ + "id is in project/location/datasetName/hl7V2StoreName format": { + ImportId: "test-project/us-central1/test-dataset/test-store-name", + ExpectedError: false, + ExpectedTerraformId: "test-project/us-central1/test-dataset/test-store-name", + ExpectedHl7V2StoreId: "projects/test-project/locations/us-central1/datasets/test-dataset/hl7V2Stores/test-store-name", + }, + "id is in domain:project/location/datasetName/hl7V2StoreName format": { + ImportId: "example.com:test-project/us-central1/test-dataset/test-store-name", + ExpectedError: false, + ExpectedTerraformId: "example.com:test-project/us-central1/test-dataset/test-store-name", + ExpectedHl7V2StoreId: "projects/example.com:test-project/locations/us-central1/datasets/test-dataset/hl7V2Stores/test-store-name", + }, + "id is in location/datasetName/hl7V2StoreName format": { + ImportId: "us-central1/test-dataset/test-store-name", + ExpectedError: false, + ExpectedTerraformId: 
"test-project/us-central1/test-dataset/test-store-name", + ExpectedHl7V2StoreId: "projects/test-project/locations/us-central1/datasets/test-dataset/hl7V2Stores/test-store-name", + Config: &transport_tpg.Config{Project: "test-project"}, + }, + "id is in location/datasetName/hl7V2StoreName format without project in config": { + ImportId: "us-central1/test-dataset/test-store-name", + ExpectedError: true, + Config: &transport_tpg.Config{Project: ""}, + }, + } + + for tn, tc := range cases { + hl7V2StoreId, err := healthcare.ParseHealthcareHl7V2StoreId(tc.ImportId, tc.Config) + + if tc.ExpectedError && err == nil { + t.Fatalf("bad: %s, expected an error", tn) + } + + if err != nil { + if tc.ExpectedError { + continue + } + t.Fatalf("bad: %s, err: %#v", tn, err) + } + + if hl7V2StoreId.TerraformId() != tc.ExpectedTerraformId { + t.Fatalf("bad: %s, expected Terraform ID to be `%s` but is `%s`", tn, tc.ExpectedTerraformId, hl7V2StoreId.TerraformId()) + } + + if hl7V2StoreId.Hl7V2StoreId() != tc.ExpectedHl7V2StoreId { + t.Fatalf("bad: %s, expected Hl7V2Store ID to be `%s` but is `%s`", tn, tc.ExpectedHl7V2StoreId, hl7V2StoreId.Hl7V2StoreId()) + } + } +} + +func TestAccHealthcareHl7V2Store_basic(t *testing.T) { + t.Parallel() + + datasetName := fmt.Sprintf("tf-test-dataset-%s", acctest.RandString(t, 10)) + hl7_v2StoreName := fmt.Sprintf("tf-test-hl7_v2-store-%s", acctest.RandString(t, 10)) + pubsubTopic := fmt.Sprintf("tf-test-topic-%s", acctest.RandString(t, 10)) + resourceName := "google_healthcare_hl7_v2_store.default" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckHealthcareHl7V2StoreDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleHealthcareHl7V2Store_basic(hl7_v2StoreName, datasetName), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleHealthcareHl7V2Store_update(hl7_v2StoreName, datasetName, pubsubTopic), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleHealthcareHl7V2StoreUpdate(t, pubsubTopic), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleHealthcareHl7V2Store_basic(hl7_v2StoreName, datasetName), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccHealthcareHl7V2Store_updateSchema(t *testing.T) { + t.Parallel() + + datasetName := fmt.Sprintf("tf-test-dataset-%s", acctest.RandString(t, 10)) + hl7_v2StoreName := fmt.Sprintf("tf-test-hl7_v2-store-%s", acctest.RandString(t, 10)) + resourceName := "google_healthcare_hl7_v2_store.default" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckHealthcareHl7V2StoreDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleHealthcareHl7V2Store_basicSchema(hl7_v2StoreName, datasetName), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testGoogleHealthcareHl7V2Store_updateSchema(hl7_v2StoreName, datasetName), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func testGoogleHealthcareHl7V2Store_basic(hl7_v2StoreName, datasetName string) string { + return fmt.Sprintf(` +resource "google_healthcare_hl7_v2_store" "default" { + name = "%s" + dataset = google_healthcare_dataset.dataset.id + reject_duplicate_message = true +} + +resource "google_healthcare_dataset" "dataset" { + name = "%s" + 
location = "us-central1" +} +`, hl7_v2StoreName, datasetName) +} + +func testGoogleHealthcareHl7V2Store_update(hl7_v2StoreName, datasetName, pubsubTopic string) string { + return fmt.Sprintf(` +resource "google_healthcare_hl7_v2_store" "default" { + name = "%s" + dataset = google_healthcare_dataset.dataset.id + + parser_config { + allow_null_header = true + segment_terminator = "Jw==" + } + + notification_configs { + pubsub_topic = google_pubsub_topic.topic.id + } + + labels = { + label1 = "labelvalue1" + } +} + +resource "google_healthcare_dataset" "dataset" { + name = "%s" + location = "us-central1" +} + +resource "google_pubsub_topic" "topic" { + name = "%s" +} +`, hl7_v2StoreName, datasetName, pubsubTopic) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testGoogleHealthcareHl7V2Store_basicSchema(hl7_v2StoreName, datasetName string) string { + return fmt.Sprintf(` +resource "google_healthcare_hl7_v2_store" "default" { + provider = google-beta + name = "%s" + dataset = google_healthcare_dataset.dataset.id + + parser_config { + schema = < 0 { + topicName := path.Base(notifications[0].PubsubTopic) + if topicName != pubsubTopic { + return fmt.Errorf("hl7_v2_store 'NotificationConfig' not updated ('%s' != '%s'): %s", topicName, pubsubTopic, gcpResourceUri) + } + } + } + + if !foundResource { + return fmt.Errorf("google_healthcare_hl7_v2_store resource was missing") + } + return nil + } +} diff --git a/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go b/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go new file mode 100644 index 000000000000..2cc5c3a9c80b --- /dev/null +++ b/mmv1/third_party/terraform/services/iam2/go/resource_iam_deny_policy_test.go @@ -0,0 +1,220 @@ +package iam2_test + +import ( + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + 
+func TestAccIAM2DenyPolicy_iamDenyPolicyUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAM2DenyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccIAM2DenyPolicy_iamDenyPolicyUpdate(context), + }, + { + ResourceName: "google_iam_deny_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "parent"}, + }, + { + Config: testAccIAM2DenyPolicy_iamDenyPolicyUpdate2(context), + }, + { + ResourceName: "google_iam_deny_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "parent"}, + }, + { + Config: testAccIAM2DenyPolicy_iamDenyPolicyUpdate(context), + }, + { + ResourceName: "google_iam_deny_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "parent"}, + }, + }, + }) +} + +func TestAccIAM2DenyPolicy_iamDenyPolicyFolderParent(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAM2DenyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccIAM2DenyPolicy_iamDenyPolicyFolder(context), + }, + { + ResourceName: "google_iam_deny_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "parent"}, + }, + { + Config: 
testAccIAM2DenyPolicy_iamDenyPolicyFolderUpdate(context), + }, + { + ResourceName: "google_iam_deny_policy.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "parent"}, + }, + }, + }) +} + +func testAccIAM2DenyPolicy_iamDenyPolicyUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + project_id = "tf-test%{random_suffix}" + name = "tf-test%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} + +resource "google_iam_deny_policy" "example" { + parent = urlencode("cloudresourcemanager.googleapis.com/projects/${google_project.project.project_id}") + name = "tf-test-my-deny-policy%{random_suffix}" + display_name = "A deny rule" + rules { + description = "First rule" + deny_rule { + denied_principals = ["principal://iam.googleapis.com/projects/-/serviceAccounts/${google_service_account.test-account.email}"] + denial_condition { + title = "Some expr" + expression = "!resource.matchTag('12345678/env', 'test')" + } + denied_permissions = ["cloudresourcemanager.googleapis.com/projects.update"] + } + } + rules { + description = "Second rule" + deny_rule { + denied_principals = ["principalSet://goog/public:all"] + denial_condition { + title = "Some expr" + expression = "!resource.matchTag('12345678/env', 'test')" + } + denied_permissions = ["cloudresourcemanager.googleapis.com/projects.update"] + exception_principals = ["principal://iam.googleapis.com/projects/-/serviceAccounts/${google_service_account.test-account.email}"] + } + } +} + +resource "google_service_account" "test-account" { + account_id = "tf-test-deny-account%{random_suffix}" + display_name = "Test Service Account" + project = google_project.project.project_id +} +`, context) +} + +func testAccIAM2DenyPolicy_iamDenyPolicyUpdate2(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "project" { + project_id = 
"tf-test%{random_suffix}" + name = "tf-test%{random_suffix}" + org_id = "%{org_id}" + billing_account = "%{billing_account}" +} + +resource "google_iam_deny_policy" "example" { + parent = urlencode("cloudresourcemanager.googleapis.com/projects/${google_project.project.project_id}") + name = "tf-test-my-deny-policy%{random_suffix}" + display_name = "A deny rule" + rules { + description = "Second rule" + deny_rule { + denied_principals = ["principalSet://goog/public:all"] + denial_condition { + title = "Some other expr" + expression = "!resource.matchTag('87654321/env', 'test')" + location = "/some/file" + description = "A denial condition" + } + denied_permissions = ["cloudresourcemanager.googleapis.com/projects.update"] + } + } +} + +resource "google_service_account" "test-account" { + account_id = "tf-test-deny-account%{random_suffix}" + display_name = "Test Service Account" + project = google_project.project.project_id +} +`, context) +} + +func testAccIAM2DenyPolicy_iamDenyPolicyFolder(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_deny_policy" "example" { + parent = urlencode("cloudresourcemanager.googleapis.com/${google_folder.folder.id}") + name = "tf-test-my-deny-policy%{random_suffix}" + display_name = "A deny rule" + rules { + description = "Second rule" + deny_rule { + denied_principals = ["principalSet://goog/public:all"] + denial_condition { + title = "Some expr" + expression = "!resource.matchTag('12345678/env', 'test')" + } + denied_permissions = ["cloudresourcemanager.googleapis.com/projects.delete"] + } + } +} + +resource "google_folder" "folder" { + display_name = "tf-test-%{random_suffix}" + parent = "organizations/%{org_id}" +} +`, context) +} + +func testAccIAM2DenyPolicy_iamDenyPolicyFolderUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_deny_policy" "example" { + parent = urlencode("cloudresourcemanager.googleapis.com/${google_folder.folder.id}") + name 
= "tf-test-my-deny-policy%{random_suffix}" + display_name = "A deny rule" + rules { + description = "Second rule" + deny_rule { + denied_principals = ["principalSet://goog/public:all"] + denied_permissions = ["cloudresourcemanager.googleapis.com/projects.delete"] + } + } +} + +resource "google_folder" "folder" { + display_name = "tf-test-%{random_suffix}" + parent = "organizations/%{org_id}" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool.go.tmpl b/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool.go.tmpl new file mode 100644 index 000000000000..377d473692ce --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool.go.tmpl @@ -0,0 +1,44 @@ +package iambeta + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceIAMBetaWorkloadIdentityPool() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceIAMBetaWorkloadIdentityPool().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "workload_identity_pool_id") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceIAMBetaWorkloadIdentityPoolRead, + Schema: dsSchema, + } +} + +func dataSourceIAMBetaWorkloadIdentityPoolRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/locations/global/workloadIdentityPools/{{"{{"}}workload_identity_pool_id{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + err = resourceIAMBetaWorkloadIdentityPoolRead(d, meta) + if err != nil { + 
return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_provider.go.tmpl b/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_provider.go.tmpl new file mode 100644 index 000000000000..774f9a40fa89 --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_provider.go.tmpl @@ -0,0 +1,45 @@ +package iambeta + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceIAMBetaWorkloadIdentityPoolProvider() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceIAMBetaWorkloadIdentityPoolProvider().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "workload_identity_pool_id") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "workload_identity_pool_provider_id") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceIAMBetaWorkloadIdentityPoolProviderRead, + Schema: dsSchema, + } +} + +func dataSourceIAMBetaWorkloadIdentityPoolProviderRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/locations/global/workloadIdentityPools/{{"{{"}}workload_identity_pool_id{{"}}"}}/providers/{{"{{"}}workload_identity_pool_provider_id{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + err = resourceIAMBetaWorkloadIdentityPoolProviderRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + + 
return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_provider_test.go.tmpl b/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_provider_test.go.tmpl new file mode 100644 index 000000000000..2b9b50978b1f --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_provider_test.go.tmpl @@ -0,0 +1,61 @@ +package iambeta_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceIAMBetaWorkloadIdentityPoolProvider_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolProviderDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceIAMBetaWorkloadIdentityPoolProviderBasic(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_iam_workload_identity_pool_provider.foo", "google_iam_workload_identity_pool_provider.bar"), + ), + }, + }, + }) +} + +func testAccDataSourceIAMBetaWorkloadIdentityPoolProviderBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workload_identity_pool" "pool" { + workload_identity_pool_id = "pool-%{random_suffix}" +} + +resource "google_iam_workload_identity_pool_provider" "bar" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = "bar-provider-%{random_suffix}" + display_name = "Name of provider" + description = "OIDC identity pool provider for 
automated test" + disabled = true + attribute_condition = "\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups" + attribute_mapping = { + "google.subject" = "assertion.sub" + } + oidc { + allowed_audiences = ["https://example.com/gcp-oidc-federation"] + issuer_uri = "https://sts.windows.net/azure-tenant-id" + } + } + +data "google_iam_workload_identity_pool_provider" "foo" { + workload_identity_pool_id = google_iam_workload_identity_pool.pool.workload_identity_pool_id + workload_identity_pool_provider_id = google_iam_workload_identity_pool_provider.bar.workload_identity_pool_provider_id +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_test.go.tmpl b/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_test.go.tmpl new file mode 100644 index 000000000000..b34f354a625a --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/go/data_source_iam_workload_identity_pool_test.go.tmpl @@ -0,0 +1,47 @@ +package iambeta_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccDataSourceIAMBetaWorkloadIdentityPool_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceIAMBetaWorkloadIdentityPoolBasic(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_iam_workload_identity_pool.foo", "google_iam_workload_identity_pool.bar"), + ), + }, + }, + }) +} + +func 
testAccDataSourceIAMBetaWorkloadIdentityPoolBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workload_identity_pool" "bar" { + workload_identity_pool_id = "bar-pool-%{random_suffix}" + display_name = "Name of pool" + description = "Identity pool for automated test" + disabled = true +} + +data "google_iam_workload_identity_pool" "foo" { + workload_identity_pool_id = google_iam_workload_identity_pool.bar.workload_identity_pool_id +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_id_test.go.tmpl b/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_id_test.go.tmpl new file mode 100644 index 000000000000..900961532d80 --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_id_test.go.tmpl @@ -0,0 +1,36 @@ +package iambeta_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/services/iambeta" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func TestValidateIAMBetaWorkloadIdentityPoolId(t *testing.T) { + x := []verify.StringValidationTestCase{ + // No errors + {TestName: "basic", Value: "foobar"}, + {TestName: "with numbers", Value: "foobar123"}, + {TestName: "short", Value: "foos"}, + {TestName: "long", Value: "12345678901234567890123456789012"}, + {TestName: "has a hyphen", Value: "foo-bar"}, + + // With errors + {TestName: "empty", Value: "", ExpectError: true}, + {TestName: "starts with a gcp-", Value: "gcp-foobar", ExpectError: true}, + {TestName: "with uppercase", Value: "fooBar", ExpectError: true}, + {TestName: "has an slash", Value: "foo/bar", ExpectError: true}, + {TestName: "has an backslash", Value: "foo\bar", ExpectError: true}, + {TestName: "too short", Value: "foo", ExpectError: true}, + {TestName: "too long", Value: strings.Repeat("f", 33), 
ExpectError: true}, + } + + es := verify.TestStringValidationCases(x, iambeta.ValidateWorkloadIdentityPoolId) + if len(es) > 0 { + t.Errorf("Failed to validate WorkloadIdentityPool names: %v", es) + } +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_id_test.go.tmpl b/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_id_test.go.tmpl new file mode 100644 index 000000000000..fc3c42422a6a --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_id_test.go.tmpl @@ -0,0 +1,36 @@ +package iambeta_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/services/iambeta" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func TestValidateIAMBetaWorkloadIdentityPoolProviderId(t *testing.T) { + x := []verify.StringValidationTestCase{ + // No errors + {TestName: "basic", Value: "foobar"}, + {TestName: "with numbers", Value: "foobar123"}, + {TestName: "short", Value: "foos"}, + {TestName: "long", Value: "12345678901234567890123456789012"}, + {TestName: "has a hyphen", Value: "foo-bar"}, + + // With errors + {TestName: "empty", Value: "", ExpectError: true}, + {TestName: "starts with a gcp-", Value: "gcp-foobar", ExpectError: true}, + {TestName: "with uppercase", Value: "fooBar", ExpectError: true}, + {TestName: "has an slash", Value: "foo/bar", ExpectError: true}, + {TestName: "has an backslash", Value: "foo\bar", ExpectError: true}, + {TestName: "too short", Value: "foo", ExpectError: true}, + {TestName: "too long", Value: strings.Repeat("f", 33), ExpectError: true}, + } + + es := verify.TestStringValidationCases(x, iambeta.ValidateWorkloadIdentityPoolProviderId) + if len(es) > 0 { + t.Errorf("Failed to validate WorkloadIdentityPoolProvider names: %v", es) + } +} +{{- end }} diff --git 
a/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_test.go.tmpl b/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_test.go.tmpl new file mode 100644 index 000000000000..062850fb5b6e --- /dev/null +++ b/mmv1/third_party/terraform/services/iambeta/go/resource_iam_workload_identity_pool_provider_test.go.tmpl @@ -0,0 +1,242 @@ +package iambeta_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccIAMBetaWorkloadIdentityPoolProvider_aws(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolProviderDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_aws_full(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.my_provider", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_aws_enabled(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.my_provider", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_aws_basic(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.my_provider", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccIAMBetaWorkloadIdentityPoolProvider_oidc(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, 
+ ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAMBetaWorkloadIdentityPoolProviderDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_oidc_full(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.my_provider", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_oidc_update(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.my_provider", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccIAMBetaWorkloadIdentityPoolProvider_oidc_basic(context), + }, + { + ResourceName: "google_iam_workload_identity_pool_provider.my_provider", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccIAMBetaWorkloadIdentityPoolProvider_aws_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workload_identity_pool" "my_pool" { + workload_identity_pool_id = "my-pool-%{random_suffix}" +} + +resource "google_iam_workload_identity_pool_provider" "my_provider" { + workload_identity_pool_id = google_iam_workload_identity_pool.my_pool.workload_identity_pool_id + workload_identity_pool_provider_id = "my-provider-%{random_suffix}" + display_name = "Name of provider" + description = "AWS identity pool provider for automated test" + disabled = true + attribute_condition = "attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"" + attribute_mapping = { + "google.subject" = "assertion.arn" + "attribute.aws_account" = "assertion.account" + "attribute.environment" = "assertion.arn.contains(\":instance-profile/Production\") ? 
\"prod\" : \"test\"" + } + aws { + account_id = "999999999999" + } +} +`, context) +} + +func testAccIAMBetaWorkloadIdentityPoolProvider_aws_enabled(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workload_identity_pool" "my_pool" { + workload_identity_pool_id = "my-pool-%{random_suffix}" +} + +resource "google_iam_workload_identity_pool_provider" "my_provider" { + workload_identity_pool_id = google_iam_workload_identity_pool.my_pool.workload_identity_pool_id + workload_identity_pool_provider_id = "my-provider-%{random_suffix}" + display_name = "Name of provider" + description = "AWS identity pool provider for automated test" + disabled = false + attribute_condition = "attribute.aws_role==\"arn:aws:sts::999999999999:assumed-role/stack-eu-central-1-lambdaRole\"" + attribute_mapping = { + "google.subject" = "assertion.arn" + "attribute.aws_account" = "assertion.account" + "attribute.environment" = "assertion.arn.contains(\":instance-profile/Production\") ? 
\"prod\" : \"test\"" + } + aws { + account_id = "999999999999" + } +} +`, context) +} + +func testAccIAMBetaWorkloadIdentityPoolProvider_oidc_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workload_identity_pool" "my_pool" { + workload_identity_pool_id = "my-pool-%{random_suffix}" +} + +resource "google_iam_workload_identity_pool_provider" "my_provider" { + workload_identity_pool_id = google_iam_workload_identity_pool.my_pool.workload_identity_pool_id + workload_identity_pool_provider_id = "my-provider-%{random_suffix}" + display_name = "Name of provider" + description = "OIDC identity pool provider for automated test" + disabled = true + attribute_condition = "\"e968c2ef-047c-498d-8d79-16ca1b61e77e\" in assertion.groups" + attribute_mapping = { + "google.subject" = "\"azure::\" + assertion.tid + \"::\" + assertion.sub" + "attribute.tid" = "assertion.tid" + "attribute.managed_identity_name" = < MIIDpDCCAoygAwIBAgIGAX7/5qPhMA0GCSqGSIb3DQEBCwUAMIGSMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEUMBIGA1UECwwLU1NPUHJvdmlkZXIxEzARBgNVBAMMCmRldi00NTg0MjExHDAaBgkqhkiG9w0BCQEWDWluZm9Ab2t0YS5jb20wHhcNMjIwMjE2MDAxOTEyWhcNMzIwMjE2MDAyMDEyWjCBkjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBE9rdGExFDASBgNVBAsMC1NTT1Byb3ZpZGVyMRMwEQYDVQQDDApkZXYtNDU4NDIxMRwwGgYJKoZIhvcNAQkBFg1pbmZvQG9rdGEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxrBl7GKz52cRpxF9xCsirnRuMxnhFBaUrsHqAQrLqWmdlpNYZTVg+T9iQ+aq/iE68L+BRZcZniKIvW58wqqS0ltXVvIkXuDSvnvnkkI5yMIVErR20K8jSOKQm1FmK+fgAJ4koshFiu9oLiqu0Ejc0DuL3/XRsb4RuxjktKTb1khgBBtb+7idEk0sFR0RPefAweXImJkDHDm7SxjDwGJUubbqpdTxasPr0W+AHI1VUzsUsTiHAoyb0XDkYqHfDzhj/ZdIEl4zHQ3bEZvlD984ztAnmX2SuFLLKfXeAAGHei8MMixJvwxYkkPeYZ/5h8WgBZPP4heS2CPjwYExt29L8QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQARjJFz++a9Z5IQGFzsZMrX2EDR5ML4xxUiQkbhld1S1PljOLcYFARDmUC2YYHOueU4ee8Jid9nPGEUebV/4Jok+b+oQh+dWMgiWjSLI7h5q4OYZ3VJtdlVwgMFt2iz+/4yBKMUZ50g
3Qgg36vE34us+eKitg759JgCNsibxn0qtJgSPm0sgP2L6yTaLnoEUbXBRxCwynTSkp9ZijZqEzbhN0e2dWv7Rx/nfpohpDP6vEiFImKFHpDSv3M/5de1ytQzPFrZBYt9WlzlYwE1aD9FHCxdd+rWgYMVVoRaRmndpV/Rq3QUuDuFJtaoX11bC7ExkOpg9KstZzA63i3VcfYv" + } + display_name = "Display name" + description = "A sample SAML workforce pool provider." + disabled = false + attribute_condition = "true" +} +`, context) +} + +func testAccIAMWorkforcePoolWorkforcePoolProvider_saml_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" +} + +resource "google_iam_workforce_pool_provider" "my_provider" { + workforce_pool_id = google_iam_workforce_pool.my_pool.workforce_pool_id + location = google_iam_workforce_pool.my_pool.location + provider_id = "my-provider-%{random_suffix}" + attribute_mapping = { + "google.subject": "false" + } + saml { + idp_metadata_xml = " MIIDpDCCAoygAwIBAgIGAX7/5qPhMA0GCSqGSIb3DQEBCwUAMIGSMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEUMBIGA1UECwwLU1NPUHJvdmlkZXIxEzARBgNVBAMMCmRldi00NTg0MjExHDAaBgkqhkiG9w0BCQEWDWluZm9Ab2t0YS5jb20wHhcNMjIwMjE2MDAxOTEyWhcNMzIwMjE2MDAyMDEyWjCBkjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBE9rdGExFDASBgNVBAsMC1NTT1Byb3ZpZGVyMRMwEQYDVQQDDApkZXYtNDU4NDIxMRwwGgYJKoZIhvcNAQkBFg1pbmZvQG9rdGEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxrBl7GKz52cRpxF9xCsirnRuMxnhFBaUrsHqAQrLqWmdlpNYZTVg+T9iQ+aq/iE68L+BRZcZniKIvW58wqqS0ltXVvIkXuDSvnvnkkI5yMIVErR20K8jSOKQm1FmK+fgAJ4koshFiu9oLiqu0Ejc0DuL3/XRsb4RuxjktKTb1khgBBtb+7idEk0sFR0RPefAweXImJkDHDm7SxjDwGJUubbqpdTxasPr0W+AHI1VUzsUsTiHAoyb0XDkYqHfDzhj/ZdIEl4zHQ3bEZvlD984ztAnmX2SuFLLKfXeAAGHei8MMixJvwxYkkPeYZ/5h8WgBZPP4heS2CPjwYExt29L8QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQARjJFz++a9Z5IQGFzsZMrX2EDR5ML4xxUiQkbhld1S1PljOLcYFARDmUC2YYHOueU4ee8Jid9nPGEUebV/4Jok+b+oQh+dWMgiWjSL
I7h5q4OYZ3VJtdlVwgMFt2iz+/4yBKMUZ50g3Qgg36vE34us+eKitg759JgCNsibxn0qtJgSPm0sgP2L6yTaLnoEUbXBRxCwynTSkp9ZijZqEzbhN0e2dWv7Rx/nfpohpDP6vEiFImKFHpDSv3M/5de1ytQzPFrZBYt9WlzlYwE1aD9FHCxdd+rWgYMVVoRaRmndpV/Rq3QUuDuFJtaoX11bC7ExkOpg9KstZzA63i3VcfYv" + } + display_name = "New Display name" + description = "A sample SAML workforce pool provider with updated description." + disabled = true + attribute_condition = "false" +} +`, context) +} + +func testAccIAMWorkforcePoolWorkforcePoolProvider_saml_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" +} + +resource "google_iam_workforce_pool_provider" "my_provider" { + workforce_pool_id = google_iam_workforce_pool.my_pool.workforce_pool_id + location = google_iam_workforce_pool.my_pool.location + provider_id = "my-provider-%{random_suffix}" + attribute_mapping = { + "google.subject" = "assertion.sub" + } + saml { + idp_metadata_xml = " 
MIIDpDCCAoygAwIBAgIGAX7/5qPhMA0GCSqGSIb3DQEBCwUAMIGSMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNU2FuIEZyYW5jaXNjbzENMAsGA1UECgwET2t0YTEUMBIGA1UECwwLU1NPUHJvdmlkZXIxEzARBgNVBAMMCmRldi00NTg0MjExHDAaBgkqhkiG9w0BCQEWDWluZm9Ab2t0YS5jb20wHhcNMjIwMjE2MDAxOTEyWhcNMzIwMjE2MDAyMDEyWjCBkjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFjAUBgNVBAcMDVNhbiBGcmFuY2lzY28xDTALBgNVBAoMBE9rdGExFDASBgNVBAsMC1NTT1Byb3ZpZGVyMRMwEQYDVQQDDApkZXYtNDU4NDIxMRwwGgYJKoZIhvcNAQkBFg1pbmZvQG9rdGEuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxrBl7GKz52cRpxF9xCsirnRuMxnhFBaUrsHqAQrLqWmdlpNYZTVg+T9iQ+aq/iE68L+BRZcZniKIvW58wqqS0ltXVvIkXuDSvnvnkkI5yMIVErR20K8jSOKQm1FmK+fgAJ4koshFiu9oLiqu0Ejc0DuL3/XRsb4RuxjktKTb1khgBBtb+7idEk0sFR0RPefAweXImJkDHDm7SxjDwGJUubbqpdTxasPr0W+AHI1VUzsUsTiHAoyb0XDkYqHfDzhj/ZdIEl4zHQ3bEZvlD984ztAnmX2SuFLLKfXeAAGHei8MMixJvwxYkkPeYZ/5h8WgBZPP4heS2CPjwYExt29L8QIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQARjJFz++a9Z5IQGFzsZMrX2EDR5ML4xxUiQkbhld1S1PljOLcYFARDmUC2YYHOueU4ee8Jid9nPGEUebV/4Jok+b+oQh+dWMgiWjSLI7h5q4OYZ3VJtdlVwgMFt2iz+/4yBKMUZ50g3Qgg36vE34us+eKitg759JgCNsibxn0qtJgSPm0sgP2L6yTaLnoEUbXBRxCwynTSkp9ZijZqEzbhN0e2dWv7Rx/nfpohpDP6vEiFImKFHpDSv3M/5de1ytQzPFrZBYt9WlzlYwE1aD9FHCxdd+rWgYMVVoRaRmndpV/Rq3QUuDuFJtaoX11bC7ExkOpg9KstZzA63i3VcfYv" + } +} +`, context) +} + + +func testAccIAMWorkforcePoolWorkforcePoolProvider_extraAttributesOauth2Client_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" +} + +resource "google_iam_workforce_pool_provider" "my_provider" { + workforce_pool_id = google_iam_workforce_pool.my_pool.workforce_pool_id + location = google_iam_workforce_pool.my_pool.location + provider_id = "my-provider-%{random_suffix}" + attribute_mapping = { + "google.subject" = "assertion.sub" + } + oidc { + issuer_uri = "https://sts.windows.net/826602fe-2101-470c-9d71-ee1343668989/" + client_id = 
"https://analysis.windows.net/powerbi/connector/GoogleBigQuery" + client_secret { + value { + plain_text = "client-secret" + } + } + web_sso_config { + response_type = "CODE" + assertion_claims_behavior = "MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS" + additional_scopes = ["groups", "roles"] + } + } + extra_attributes_oauth2_client { + issuer_uri = "https://login.microsoftonline.com/826602fe-2101-470c-9d71-ee1343668989/v2.0" + client_id = "client-id" + client_secret { + value { + plain_text = "client-secret" + } + } + attributes_type = "AZURE_AD_GROUPS_MAIL" + query_parameters { + filter = "mail:gcp" + } + } + display_name = "Display name" + description = "A sample OIDC workforce pool provider." + disabled = false + attribute_condition = "true" +} +`, context) +} + +func testAccIAMWorkforcePoolWorkforcePoolProvider_extraAttributesOauth2Client_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" +} + +resource "google_iam_workforce_pool_provider" "my_provider" { + workforce_pool_id = google_iam_workforce_pool.my_pool.workforce_pool_id + location = google_iam_workforce_pool.my_pool.location + provider_id = "my-provider-%{random_suffix}" + attribute_mapping = { + "google.subject" = "false" + } + oidc { + issuer_uri = "https://sts.windows.net/826602fe-2101-470c-9d71-ee1343668989/" + client_id = "https://analysis.windows.net/powerbi/connector/GoogleBigQuery" + client_secret { + value { + plain_text = "client-secret" + } + } + web_sso_config { + response_type = "CODE" + assertion_claims_behavior = "MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS" + additional_scopes = ["groups", "roles"] + } + } + extra_attributes_oauth2_client { + issuer_uri = "https://login.microsoftonline.com/826602fe-2101-470c-9d71-ee1343668989/v2.0/" + client_id = "new-client-id" + client_secret { + value { + plain_text = 
"new-client-secret" + } + } + attributes_type = "AZURE_AD_GROUPS_MAIL" + query_parameters { + filter = "displayName:gcp" + } + } + display_name = "New Display name" + description = "A sample OIDC workforce pool provider with updated description." + disabled = true + attribute_condition = "false" +} +`, context) +} + +func testAccIAMWorkforcePoolWorkforcePoolProvider_extraAttributesOauth2Client_update_clearConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" +} + +resource "google_iam_workforce_pool_provider" "my_provider" { + workforce_pool_id = google_iam_workforce_pool.my_pool.workforce_pool_id + location = google_iam_workforce_pool.my_pool.location + provider_id = "my-provider-%{random_suffix}" + attribute_mapping = { + "google.subject" = "false" + } + oidc { + issuer_uri = "https://sts.windows.net/826602fe-2101-470c-9d71-ee1343668989/" + client_id = "https://analysis.windows.net/powerbi/connector/GoogleBigQuery" + client_secret { + value { + plain_text = "client-secret" + } + } + web_sso_config { + response_type = "CODE" + assertion_claims_behavior = "MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS" + additional_scopes = ["groups", "roles"] + } + } + display_name = "New Display name" + description = "A sample OIDC workforce pool provider with updated description." 
+ disabled = true + attribute_condition = "false" +} +`, context) +} + +func testAccIAMWorkforcePoolWorkforcePoolProvider_extraAttributesOauth2Client_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" +} + +resource "google_iam_workforce_pool_provider" "my_provider" { + workforce_pool_id = google_iam_workforce_pool.my_pool.workforce_pool_id + location = google_iam_workforce_pool.my_pool.location + provider_id = "my-provider-%{random_suffix}" + attribute_mapping = { + "google.subject" = "false" + } + oidc { + issuer_uri = "https://sts.windows.net/826602fe-2101-470c-9d71-ee1343668989/" + client_id = "https://analysis.windows.net/powerbi/connector/GoogleBigQuery" + client_secret { + value { + plain_text = "client-secret" + } + } + web_sso_config { + response_type = "CODE" + assertion_claims_behavior = "MERGE_USER_INFO_OVER_ID_TOKEN_CLAIMS" + additional_scopes = ["groups", "roles"] + } + } + extra_attributes_oauth2_client { + issuer_uri = "https://login.microsoftonline.com/826602fe-2101-470c-9d71-ee1343668989/v2.0" + client_id = "client-id" + client_secret { + value { + plain_text = "client-secret" + } + } + attributes_type = "AZURE_AD_GROUPS_MAIL" + } + display_name = "New Display name" + description = "A sample OIDC workforce pool provider with updated description." 
+ disabled = true + attribute_condition = "false" +} +`, context) +} + +func testAccIAMWorkforcePoolWorkforcePoolProvider_destroy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_test.go b/mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_test.go new file mode 100644 index 000000000000..62cf132b5941 --- /dev/null +++ b/mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_test.go @@ -0,0 +1,138 @@ +package iamworkforcepool_test + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccIAMWorkforcePoolWorkforcePool_full(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAMWorkforcePoolWorkforcePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccIAMWorkforcePoolWorkforcePool_full(context), + }, + { + ResourceName: "google_iam_workforce_pool.my_pool", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccIAMWorkforcePoolWorkforcePool_full_update(context), + }, + { + ResourceName: "google_iam_workforce_pool.my_pool", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccIAMWorkforcePoolWorkforcePool_minimal(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + 
"org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckIAMWorkforcePoolWorkforcePoolDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccIAMWorkforcePoolWorkforcePool_minimal(context), + }, + { + ResourceName: "google_iam_workforce_pool.my_pool", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccIAMWorkforcePoolWorkforcePool_minimal_update(context), + }, + { + ResourceName: "google_iam_workforce_pool.my_pool", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccIAMWorkforcePoolWorkforcePool_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" + display_name = "Display name" + description = "A sample workforce pool." + disabled = false + session_duration = "7200s" + access_restrictions { + allowed_services { + domain = "backstory.chronicle.security" + } + disable_programmatic_signin = false + } +} +`, context) +} + +func testAccIAMWorkforcePoolWorkforcePool_minimal(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" +} +`, context) +} + +func testAccIAMWorkforcePoolWorkforcePool_full_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" + display_name = "New display name" + description = "A sample workforce pool with updated description." 
+ disabled = true + session_duration = "3600s" + access_restrictions { + allowed_services { + domain = "backstory.chronicle.security" + } + disable_programmatic_signin = false + } +} +`, context) +} + +func testAccIAMWorkforcePoolWorkforcePool_minimal_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_iam_workforce_pool" "my_pool" { + workforce_pool_id = "my-pool-%{random_suffix}" + parent = "organizations/%{org_id}" + location = "global" + display_name = "New display name" + description = "A sample workforce pool with updated description." + disabled = true + session_duration = "3600s" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_workforce_pool_id_test.go b/mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_workforce_pool_id_test.go new file mode 100644 index 000000000000..bb24645a4539 --- /dev/null +++ b/mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_workforce_pool_id_test.go @@ -0,0 +1,35 @@ +package iamworkforcepool_test + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func TestValidateIAMWorkforcePoolWorkforcePoolId(t *testing.T) { + x := []verify.StringValidationTestCase{ + // No errors + {TestName: "with numbers", Value: "foobar123"}, + {TestName: "short", Value: "foobar"}, + {TestName: "long", Value: strings.Repeat("f", 63)}, + {TestName: "has a hyphen", Value: "foo-bar"}, + + // With errors + {TestName: "empty", Value: "", ExpectError: true}, + {TestName: "starts with a gcp-", Value: "gcp-foobar", ExpectError: true}, + {TestName: "with uppercase", Value: "fooBar", ExpectError: true}, + {TestName: "has an slash", Value: "foo/bar", ExpectError: true}, + {TestName: "has an backslash", Value: "foo\bar", ExpectError: true}, + {TestName: 
"too short", Value: "foooo", ExpectError: true}, + {TestName: "too long", Value: strings.Repeat("f", 64), ExpectError: true}, + {TestName: "doesn't start with a lowercase letter", Value: "123foo", ExpectError: true}, + {TestName: "ends with a hyphen", Value: "foobar-", ExpectError: true}, + } + + es := verify.TestStringValidationCases(x, iamworkforcepool.ValidateWorkforcePoolId) + if len(es) > 0 { + t.Errorf("Failed to validate WorkforcePool names: %v", es) + } +} diff --git a/mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_workforce_pool_provider_id_test.go b/mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_workforce_pool_provider_id_test.go new file mode 100644 index 000000000000..c8425098357e --- /dev/null +++ b/mmv1/third_party/terraform/services/iamworkforcepool/go/resource_iam_workforce_pool_workforce_pool_provider_id_test.go @@ -0,0 +1,33 @@ +package iamworkforcepool_test + +import ( + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/services/iamworkforcepool" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func TestValidateIAMWorkforcePoolWorkforcePoolProviderId(t *testing.T) { + x := []verify.StringValidationTestCase{ + // No errors + {TestName: "with numbers", Value: "foobar123"}, + {TestName: "short", Value: "foo-"}, + {TestName: "long", Value: strings.Repeat("f", 32)}, + {TestName: "has a hyphen", Value: "foo-bar"}, + + // With errors + {TestName: "empty", Value: "", ExpectError: true}, + {TestName: "starts with a gcp-", Value: "gcp-foobar", ExpectError: true}, + {TestName: "with uppercase", Value: "fooBar", ExpectError: true}, + {TestName: "has an slash", Value: "foo/bar", ExpectError: true}, + {TestName: "has an backslash", Value: "foo\bar", ExpectError: true}, + {TestName: "too short", Value: "foo", ExpectError: true}, + {TestName: "too long", Value: strings.Repeat("f", 33), ExpectError: true}, + } + + es := 
verify.TestStringValidationCases(x, iamworkforcepool.ValidateWorkforcePoolProviderId) + if len(es) > 0 { + t.Errorf("Failed to validate WorkforcePoolProvider names: %v", es) + } +} diff --git a/mmv1/third_party/terraform/services/kms/go/data_source_google_kms_secret_asymmetric.go.tmpl b/mmv1/third_party/terraform/services/kms/go/data_source_google_kms_secret_asymmetric.go.tmpl new file mode 100644 index 000000000000..e43bd4cbcd6f --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/go/data_source_google_kms_secret_asymmetric.go.tmpl @@ -0,0 +1,156 @@ +package kms + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "context" + "encoding/base64" + "fmt" + "hash/crc32" + "regexp" + "strconv" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudkms/v1" +) + +var ( + cryptoKeyVersionRegexp = regexp.MustCompile(`^(//[^/]*/[^/]*/)?(projects/[^/]+/locations/[^/]+/keyRings/[^/]+/cryptoKeys/[^/]+/cryptoKeyVersions/[^/]+)$`) +) + +func DataSourceGoogleKmsSecretAsymmetric() *schema.Resource { + return &schema.Resource{ + ReadContext: dataSourceGoogleKmsSecretAsymmetricReadContext, + Schema: map[string]*schema.Schema{ + "crypto_key_version": { + Type: schema.TypeString, + Description: "The fully qualified KMS crypto key version name", + ValidateFunc: verify.ValidateRegexp(cryptoKeyVersionRegexp.String()), + Required: true, + }, + "ciphertext": { + Type: schema.TypeString, + Description: "The public key encrypted ciphertext in base64 encoding", + ValidateFunc: validateBase64WithWhitespaces, + Required: true, + }, + "crc32": { + Type: schema.TypeString, + Description: "The crc32 checksum of the ciphertext, hexadecimal encoding. 
If not specified, it will be computed", + ValidateFunc: validateHexadecimalUint32, + Optional: true, + }, + "plaintext": { + Type: schema.TypeString, + Computed: true, + Sensitive: true, + }, + }, + } +} + +func dataSourceGoogleKmsSecretAsymmetricReadContext(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + + err := dataSourceGoogleKmsSecretAsymmetricRead(ctx, d, meta) + if err != nil { + diags = diag.FromErr(err) + } + return diags +} + +func dataSourceGoogleKmsSecretAsymmetricRead(ctx context.Context, d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // `google_kms_crypto_key_version` returns an id with the prefix + // //cloudkms.googleapis.com/v1, which is an invalid name. To allow for the most elegant + // configuration, we will allow it as an input. + keyVersion := cryptoKeyVersionRegexp.FindStringSubmatch(d.Get("crypto_key_version").(string)) + cryptoKeyVersion := keyVersion[len(keyVersion)-1] + + base64CipherText := removeWhiteSpaceFromString(d.Get("ciphertext").(string)) + ciphertext, err := base64.StdEncoding.DecodeString(base64CipherText) + if err != nil { + return err + } + + crc32c := func(data []byte) uint32 { + t := crc32.MakeTable(crc32.Castagnoli) + return crc32.Checksum(data, t) + } + + ciphertextCRC32C := crc32c(ciphertext) + if s, ok := d.Get("crc32").(string); ok && s != "" { + u, err := strconv.ParseUint(s, 16, 32) + if err != nil { + return fmt.Errorf("failed to convert crc32 into uint32, %s", err) + } + ciphertextCRC32C = uint32(u) + } else { + if err := d.Set("crc32", fmt.Sprintf("%x", ciphertextCRC32C)); err != nil { + return fmt.Errorf("failed to set crc32, %s", err) + } + } + + req := cloudkms.AsymmetricDecryptRequest{ + Ciphertext: base64CipherText, + CiphertextCrc32c: int64(ciphertextCRC32C)} + + client := 
config.NewKmsClientWithCtx(ctx, userAgent) + if client == nil { + return fmt.Errorf("failed to get a KMS client") + } + + result, err := client.Projects.Locations.KeyRings.CryptoKeys.CryptoKeyVersions.AsymmetricDecrypt(cryptoKeyVersion, &req).Do() + if err != nil { + return fmt.Errorf("failed to decrypt ciphertext: %v", err) + } + plaintext, err := base64.StdEncoding.DecodeString(result.Plaintext) + if err != nil { + return fmt.Errorf("failed to base64 decode plaintext: %v", err) + } + + plaintextCrc32c := int64(crc32c(plaintext)) + if !result.VerifiedCiphertextCrc32c || plaintextCrc32c != result.PlaintextCrc32c { + return fmt.Errorf("asymmetricDecrypt response corrupted in-transit, got %x, expected %x", + plaintextCrc32c, result.PlaintextCrc32c) + } + + if err := d.Set("plaintext", string(plaintext)); err != nil { + return fmt.Errorf("error setting plaintext: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%x:%s", cryptoKeyVersion, ciphertextCRC32C, base64CipherText)) + return nil +} + +func removeWhiteSpaceFromString(s string) string { + whitespaceRegexp := regexp.MustCompile(`(?m)[\s]+`) + return whitespaceRegexp.ReplaceAllString(s, "") +} + +func validateBase64WithWhitespaces(i interface{}, val string) ([]string, []error) { + _, err := base64.StdEncoding.DecodeString(removeWhiteSpaceFromString(i.(string))) + if err != nil { + return nil, []error{fmt.Errorf("could not decode %q as a valid base64 value. 
Please use the terraform base64 functions such as base64encode() or filebase64() to supply a valid base64 string", val)} + } + return nil, nil +} + +func validateHexadecimalUint32(i interface{}, val string) ([]string, []error) { + _, err := strconv.ParseUint(i.(string), 16, 32) + if err != nil { + return nil, []error{fmt.Errorf("could not decode %q as a unsigned 32 bit hexadecimal integer", val)} + } + return nil, nil +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/kms/go/data_source_google_kms_secret_asymmetric_test.go.tmpl b/mmv1/third_party/terraform/services/kms/go/data_source_google_kms_secret_asymmetric_test.go.tmpl new file mode 100644 index 000000000000..e27f012190d4 --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/go/data_source_google_kms_secret_asymmetric_test.go.tmpl @@ -0,0 +1,159 @@ +package kms_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "hash/crc32" + "log" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccKmsSecretAsymmetricBasic(t *testing.T) { + // Nested tests confuse VCR + acctest.SkipIfVcr(t) + t.Parallel() + + projectOrg := envvar.GetTestOrgFromEnv(t) + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + + projectID := "tf-test-" + acctest.RandString(t, 10) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + plaintext := fmt.Sprintf("secret-%s", acctest.RandString(t, 10)) + + // The first test creates resources needed to encrypt plaintext and produce ciphertext + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: kmsCryptoKeyAsymmetricDecryptBasic(projectID, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName), + Check: func(s *terraform.State) error { + ciphertext, cryptoKeyVersionID, crc, err := testAccEncryptSecretDataAsymmetricWithPublicKey(t, s, "data.google_kms_crypto_key_version.crypto_key", plaintext) + if err != nil { + return err + } + + // The second test asserts that the data source has the correct plaintext, given the created ciphertext + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: googleKmsSecretAsymmetricDatasource(cryptoKeyVersionID, ciphertext), + Check: resource.TestCheckResourceAttr("data.google_kms_secret_asymmetric.acceptance", "plaintext", plaintext), + }, + { + Config: googleKmsSecretAsymmetricDatasourceWithCrc(cryptoKeyVersionID, ciphertext, crc), + Check: resource.TestCheckResourceAttr("data.google_kms_secret_asymmetric.acceptance_with_crc", "plaintext", plaintext), + }, + }, + }) + + return nil + }, + }, + }, + }) +} + +func testAccEncryptSecretDataAsymmetricWithPublicKey(t *testing.T, s *terraform.State, cryptoKeyResourceName, plaintext string) (string, string, uint32, error) { + rs, ok := s.RootModule().Resources[cryptoKeyResourceName] + if !ok { + return "", "", 0, fmt.Errorf("resource not found: %s", cryptoKeyResourceName) + } + + cryptoKeyVersionID := rs.Primary.Attributes["id"] + + block, _ := pem.Decode([]byte(rs.Primary.Attributes["public_key.0.pem"])) + publicKey, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return "", "", 0, fmt.Errorf("failed to parse public key: %v", err) + } + rsaKey, ok := publicKey.(*rsa.PublicKey) + if !ok { + return "", "", 0, fmt.Errorf("public key is not rsa") + } + + ciphertext, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, 
rsaKey, []byte(plaintext), nil) + if err != nil { + return "", "", 0, fmt.Errorf("rsa.EncryptOAEP: %v", err) + } + + crc := crc32.Checksum(ciphertext, crc32.MakeTable(crc32.Castagnoli)) + + result := base64.StdEncoding.EncodeToString(ciphertext) + log.Printf("[INFO] Successfully encrypted plaintext and got ciphertext: %s", result) + + return result, cryptoKeyVersionID, crc, nil +} + +func googleKmsSecretAsymmetricDatasource(cryptoKeyTerraformID, ciphertext string) string { + return fmt.Sprintf(` +data "google_kms_secret_asymmetric" "acceptance" { + crypto_key_version = "%s" + ciphertext = "%s" +} +`, cryptoKeyTerraformID, ciphertext) +} + +func googleKmsSecretAsymmetricDatasourceWithCrc(cryptoKeyTerraformID, ciphertext string, crc uint32) string { + return fmt.Sprintf(` +data "google_kms_secret_asymmetric" "acceptance_with_crc" { + crypto_key_version = "%s" + ciphertext = "%s" + crc32 = "%x" +} +`, cryptoKeyTerraformID, ciphertext, crc) +} + +func kmsCryptoKeyAsymmetricDecryptBasic(projectID, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" + depends_on = [google_project_service.acceptance] +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ASYMMETRIC_DECRYPT" + version_template { + algorithm = "RSA_DECRYPT_OAEP_4096_SHA256" + } +} + +data "google_kms_crypto_key_version" "crypto_key" { + crypto_key = google_kms_crypto_key.crypto_key.id +} +`, projectID, projectID, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) +} + +{{ end }} 
diff --git a/mmv1/third_party/terraform/services/kms/go/iam_kms_crypto_key.go.tmpl b/mmv1/third_party/terraform/services/kms/go/iam_kms_crypto_key.go.tmpl new file mode 100644 index 000000000000..9b765b23ffe9 --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/go/iam_kms_crypto_key.go.tmpl @@ -0,0 +1,110 @@ +package kms + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudkms/v1" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamKmsCryptoKeySchema = map[string]*schema.Schema{ + "crypto_key_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type KmsCryptoKeyIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewKmsCryptoKeyIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + cryptoKey := d.Get("crypto_key_id").(string) + cryptoKeyId, err := ParseKmsCryptoKeyId(cryptoKey, config) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{"{{"}}err{{"}}"}}", cryptoKey), err) + } + + return &KmsCryptoKeyIamUpdater{ + resourceId: cryptoKeyId.CryptoKeyId(), + d: d, + Config: config, + }, nil +} + +func CryptoIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + cryptoKeyId, err := ParseKmsCryptoKeyId(d.Id(), config) + if err != nil { + return err + } + if err := d.Set("crypto_key_id", cryptoKeyId.CryptoKeyId()); err != nil { + return fmt.Errorf("Error setting crypto_key_id: %s", err) + } + d.SetId(cryptoKeyId.CryptoKeyId()) + return nil +} + +func (u *KmsCryptoKeyIamUpdater) GetResourceIamPolicy() 
(*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(u.resourceId).OptionsRequestedPolicyVersion(tpgiamresource.IamPolicyVersion).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := kmsToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *KmsCryptoKeyIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + kmsPolicy, err := resourceManagerToKmsPolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + _, err = u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.CryptoKeys.SetIamPolicy(u.resourceId, &cloudkms.SetIamPolicyRequest{ + Policy: kmsPolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *KmsCryptoKeyIamUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *KmsCryptoKeyIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-kms-crypto-key-%s", u.resourceId) +} + +func (u *KmsCryptoKeyIamUpdater) DescribeResource() string { + return fmt.Sprintf("KMS CryptoKey %q", u.resourceId) +} diff --git a/mmv1/third_party/terraform/services/kms/go/iam_kms_crypto_key_test.go.tmpl b/mmv1/third_party/terraform/services/kms/go/iam_kms_crypto_key_test.go.tmpl new file mode 100644 
index 000000000000..29708dc3ba80 --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/go/iam_kms_crypto_key_test.go.tmpl @@ -0,0 +1,717 @@ +package kms_test + +import ( + "fmt" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/kms" +) + +func TestAccKmsCryptoKeyIamBinding(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyDecrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Test Iam Binding creation + Config: testAccKmsCryptoKeyIamBinding_basic(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId), + Check: testAccCheckGoogleKmsCryptoKeyIamBindingExists(t, "foo", roleId, []string{ + fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, projectId), + }), + }, + { + ResourceName: "google_kms_crypto_key_iam_binding.foo", + ImportStateId: fmt.Sprintf("%s/%s %s", keyRingId.TerraformId(), cryptoKeyName, roleId), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccKmsCryptoKeyIamBinding_update(projectId, orgId, billingAccount, 
account, keyRingName, cryptoKeyName, roleId), + Check: testAccCheckGoogleKmsCryptoKeyIamBindingExists(t, "foo", roleId, []string{ + fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, projectId), + fmt.Sprintf("serviceAccount:%s-2@%s.iam.gserviceaccount.com", account, projectId), + }), + }, + { + ResourceName: "google_kms_crypto_key_iam_binding.foo", + ImportStateId: fmt.Sprintf("%s/%s %s", keyRingId.TerraformId(), cryptoKeyName, roleId), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccKmsCryptoKeyIamBinding_withCondition(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyDecrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + conditionTitle := "expires_after_2019_12_31" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccKmsCryptoKeyIamBinding_withCondition(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId, conditionTitle), + }, + { + ResourceName: "google_kms_crypto_key_iam_binding.foo", + ImportStateId: fmt.Sprintf("%s/%s %s %s", keyRingId.TerraformId(), cryptoKeyName, roleId, conditionTitle), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccKmsCryptoKeyIamMember(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + 
billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyEncrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccKmsCryptoKeyIamMember_basic(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId), + Check: testAccCheckGoogleKmsCryptoKeyIamMemberExists(t, "foo", roleId, + fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, projectId), + ), + }, + { + ResourceName: "google_kms_crypto_key_iam_member.foo", + ImportStateId: fmt.Sprintf("%s/%s %s serviceAccount:%s@%s.iam.gserviceaccount.com", keyRingId.TerraformId(), cryptoKeyName, roleId, account, projectId), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccKmsCryptoKeyIamMember_withCondition(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyEncrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + conditionTitle := "expires_after_2019_12_31" + + acctest.VcrTest(t, resource.TestCase{ 
+ PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccKmsCryptoKeyIamMember_withCondition(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId, conditionTitle), + }, + { + ResourceName: "google_kms_crypto_key_iam_member.foo", + ImportStateId: fmt.Sprintf("%s/%s %s serviceAccount:%s@%s.iam.gserviceaccount.com %s", keyRingId.TerraformId(), cryptoKeyName, roleId, account, projectId, conditionTitle), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccKmsCryptoKeyIamPolicy(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyEncrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccKmsCryptoKeyIamPolicy_basic(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleCryptoKmsKeyIam(t, "foo", roleId, []string{ + fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, projectId), + }), + resource.TestCheckResourceAttrSet("data.google_kms_crypto_key_iam_policy.foo", "policy_data"), + ), + }, + { + ResourceName: "google_kms_crypto_key_iam_policy.foo", + ImportStateId: fmt.Sprintf("%s/%s", keyRingId.TerraformId(), cryptoKeyName), + ImportState: true, + 
ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccKmsCryptoKeyIamPolicy_withCondition(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyEncrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + conditionTitle := "expires_after_2019_12_31" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccKmsCryptoKeyIamPolicy_withCondition(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId, conditionTitle), + }, + { + ResourceName: "google_kms_crypto_key_iam_policy.foo", + ImportStateId: fmt.Sprintf("%s/%s", keyRingId.TerraformId(), cryptoKeyName), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func testAccCheckGoogleKmsCryptoKeyIamBindingExists(t *testing.T, bindingResourceName, roleId string, members []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + bindingRs, ok := s.RootModule().Resources[fmt.Sprintf("google_kms_crypto_key_iam_binding.%s", bindingResourceName)] + if !ok { + return fmt.Errorf("Not found: %s", bindingResourceName) + } + + config := acctest.GoogleProviderConfig(t) + cryptoKeyId, err := kms.ParseKmsCryptoKeyId(bindingRs.Primary.Attributes["crypto_key_id"], config) + + if err != nil { + return err + } + + p, err := 
config.NewKmsClient(config.UserAgent).Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(cryptoKeyId.CryptoKeyId()).Do() + if err != nil { + return err + } + + for _, binding := range p.Bindings { + if binding.Role == roleId { + sort.Strings(members) + sort.Strings(binding.Members) + + if reflect.DeepEqual(members, binding.Members) { + return nil + } + + return fmt.Errorf("Binding found but expected members is %v, got %v", members, binding.Members) + } + } + + return fmt.Errorf("No binding for role %q", roleId) + } +} + +func testAccCheckGoogleKmsCryptoKeyIamMemberExists(t *testing.T, n, role, member string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources["google_kms_crypto_key_iam_member."+n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + config := acctest.GoogleProviderConfig(t) + cryptoKeyId, err := kms.ParseKmsCryptoKeyId(rs.Primary.Attributes["crypto_key_id"], config) + + if err != nil { + return err + } + + p, err := config.NewKmsClient(config.UserAgent).Projects.Locations.KeyRings.GetIamPolicy(cryptoKeyId.CryptoKeyId()).Do() + if err != nil { + return err + } + + for _, binding := range p.Bindings { + if binding.Role == role { + for _, m := range binding.Members { + if m == member { + return nil + } + } + + return fmt.Errorf("Missing member %q, got %v", member, binding.Members) + } + } + + return fmt.Errorf("No binding for role %q", role) + } +} + +func testAccCheckGoogleCryptoKmsKeyIam(t *testing.T, n, role string, members []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources["google_kms_crypto_key_iam_policy."+n] + if !ok { + return fmt.Errorf("IAM policy resource not found") + } + + config := acctest.GoogleProviderConfig(t) + cryptoKeyId, err := kms.ParseKmsCryptoKeyId(rs.Primary.Attributes["crypto_key_id"], config) + + if err != nil { + return err + } + + p, err := 
config.NewKmsClient(config.UserAgent).Projects.Locations.KeyRings.GetIamPolicy(cryptoKeyId.CryptoKeyId()).Do() + if err != nil { + return err + } + + for _, binding := range p.Bindings { + if binding.Role == role { + sort.Strings(members) + sort.Strings(binding.Members) + + if reflect.DeepEqual(members, binding.Members) { + return nil + } + + return fmt.Errorf("Binding found but expected members is %v, got %v", members, binding.Members) + } else { + return fmt.Errorf("Binding found but not expected for role: %v", binding.Role) + } + } + + return fmt.Errorf("No binding for role %q", role) + } +} + +// We are using a custom role since iam_binding is authoritative on the member list and +// we want to avoid removing members from an existing role to prevent unwanted side effects. +func testAccKmsCryptoKeyIamBinding_basic(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Crypto Key Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "us-central1" + name = "%s" +} + +resource "google_kms_crypto_key" "crypto_key" { + key_ring = google_kms_key_ring.key_ring.id + name = "%s" +} + +resource "google_kms_crypto_key_iam_binding" "foo" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "%s" + members = ["serviceAccount:${google_service_account.test_account.email}"] +} +`, projectId, orgId, 
billingAccount, account, keyRingName, cryptoKeyName, roleId) +} + +func testAccKmsCryptoKeyIamBinding_update(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Crypto Key Iam Testing Account" +} + +resource "google_service_account" "test_account_2" { + project = google_project_service.iam.project + account_id = "%s-2" + display_name = "Kms Crypto Key Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "us-central1" + name = "%s" +} + +resource "google_kms_crypto_key" "crypto_key" { + key_ring = google_kms_key_ring.key_ring.id + name = "%s" +} + +resource "google_kms_crypto_key_iam_binding" "foo" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "%s" + members = [ + "serviceAccount:${google_service_account.test_account.email}", + "serviceAccount:${google_service_account.test_account_2.email}", + ] +} +`, projectId, orgId, billingAccount, account, account, keyRingName, cryptoKeyName, roleId) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccKmsCryptoKeyIamBinding_withCondition(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource 
"google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Crypto Key Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "us-central1" + name = "%s" +} + +resource "google_kms_crypto_key" "crypto_key" { + key_ring = google_kms_key_ring.key_ring.id + name = "%s" +} + +resource "google_kms_crypto_key_iam_binding" "foo" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "%s" + members = ["serviceAccount:${google_service_account.test_account.email}"] + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +`, projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId, conditionTitle) +} +{{- end }} + +func testAccKmsCryptoKeyIamMember_basic(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Crypto Key Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = 
"us-central1" + name = "%s" +} + +resource "google_kms_crypto_key" "crypto_key" { + key_ring = google_kms_key_ring.key_ring.id + name = "%s" +} + +resource "google_kms_crypto_key_iam_member" "foo" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "%s" + member = "serviceAccount:${google_service_account.test_account.email}" +} +`, projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccKmsCryptoKeyIamMember_withCondition(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Crypto Key Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "us-central1" + name = "%s" +} + +resource "google_kms_crypto_key" "crypto_key" { + key_ring = google_kms_key_ring.key_ring.id + name = "%s" +} + +resource "google_kms_crypto_key_iam_member" "foo" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "%s" + member = "serviceAccount:${google_service_account.test_account.email}" + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +`, projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId, conditionTitle) +} +{{- end }} + +func testAccKmsCryptoKeyIamPolicy_basic(projectId, 
orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Crypto Key Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "us-central1" + name = "%s" +} + +resource "google_kms_crypto_key" "crypto_key" { + key_ring = google_kms_key_ring.key_ring.id + name = "%s" +} + +data "google_iam_policy" "foo" { + binding { + role = "%s" + members = ["serviceAccount:${google_service_account.test_account.email}"] + } +} + +resource "google_kms_crypto_key_iam_policy" "foo" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + policy_data = data.google_iam_policy.foo.policy_data +} + +data "google_kms_crypto_key_iam_policy" "foo" { + crypto_key_id = google_kms_crypto_key.crypto_key.id +} +`, projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccKmsCryptoKeyIamPolicy_withCondition(projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = 
google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Crypto Key Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "us-central1" + name = "%s" +} + +resource "google_kms_crypto_key" "crypto_key" { + key_ring = google_kms_key_ring.key_ring.id + name = "%s" +} + +data "google_iam_policy" "foo" { + binding { + role = "%s" + members = ["serviceAccount:${google_service_account.test_account.email}"] + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } + } +} + +resource "google_kms_crypto_key_iam_policy" "foo" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + policy_data = data.google_iam_policy.foo.policy_data +} +`, projectId, orgId, billingAccount, account, keyRingName, cryptoKeyName, roleId, conditionTitle) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/kms/go/iam_kms_key_ring.go.tmpl b/mmv1/third_party/terraform/services/kms/go/iam_kms_key_ring.go.tmpl new file mode 100644 index 000000000000..44584b043a75 --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/go/iam_kms_key_ring.go.tmpl @@ -0,0 +1,129 @@ +package kms + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudkms/v1" + "google.golang.org/api/cloudresourcemanager/v1" +) + +var IamKmsKeyRingSchema = map[string]*schema.Schema{ + "key_ring_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, +} + +type 
KmsKeyRingIamUpdater struct { + resourceId string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func NewKmsKeyRingIamUpdater(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + keyRing := d.Get("key_ring_id").(string) + keyRingId, err := parseKmsKeyRingId(keyRing, config) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error parsing resource ID for %s: {{"{{"}}err{{"}}"}}", keyRing), err) + } + + return &KmsKeyRingIamUpdater{ + resourceId: keyRingId.KeyRingId(), + d: d, + Config: config, + }, nil +} + +func KeyRingIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + keyRingId, err := parseKmsKeyRingId(d.Id(), config) + if err != nil { + return err + } + + if err := d.Set("key_ring_id", keyRingId.KeyRingId()); err != nil { + return fmt.Errorf("Error setting key_ring_id: %s", err) + } + d.SetId(keyRingId.KeyRingId()) + return nil +} + +func (u *KmsKeyRingIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + p, err := u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.GetIamPolicy(u.resourceId).OptionsRequestedPolicyVersion(tpgiamresource.IamPolicyVersion).Do() + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + cloudResourcePolicy, err := kmsToResourceManagerPolicy(p) + + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + return cloudResourcePolicy, nil +} + +func (u *KmsKeyRingIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + kmsPolicy, err := resourceManagerToKmsPolicy(policy) + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Invalid IAM policy for %s: 
{{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = u.Config.NewKmsClient(userAgent).Projects.Locations.KeyRings.SetIamPolicy(u.resourceId, &cloudkms.SetIamPolicyRequest{ + Policy: kmsPolicy, + }).Do() + + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{"{{"}}err{{"}}"}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *KmsKeyRingIamUpdater) GetResourceId() string { + return u.resourceId +} + +func (u *KmsKeyRingIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-kms-key-ring-%s", u.resourceId) +} + +func (u *KmsKeyRingIamUpdater) DescribeResource() string { + return fmt.Sprintf("KMS KeyRing %q", u.resourceId) +} + +func resourceManagerToKmsPolicy(p *cloudresourcemanager.Policy) (*cloudkms.Policy, error) { + out := &cloudkms.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a v1 policy to a kms policy: {{"{{"}}err{{"}}"}}", err) + } + return out, nil +} + +func kmsToResourceManagerPolicy(p *cloudkms.Policy) (*cloudresourcemanager.Policy, error) { + out := &cloudresourcemanager.Policy{} + err := tpgresource.Convert(p, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a kms policy to a v1 policy: {{"{{"}}err{{"}}"}}", err) + } + return out, nil +} diff --git a/mmv1/third_party/terraform/services/kms/go/iam_kms_key_ring_test.go.tmpl b/mmv1/third_party/terraform/services/kms/go/iam_kms_key_ring_test.go.tmpl new file mode 100644 index 000000000000..24ea402262c5 --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/go/iam_kms_key_ring_test.go.tmpl @@ -0,0 +1,593 @@ +package kms_test + +import ( + "fmt" + "reflect" + "sort" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/kms" +) + +const DEFAULT_KMS_TEST_LOCATION = "us-central1" + +func TestAccKmsKeyRingIamBinding(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyDecrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Test Iam Binding creation + Config: testAccKmsKeyRingIamBinding_basic(projectId, orgId, billingAccount, account, keyRingName, roleId), + Check: testAccCheckGoogleKmsKeyRingIam(t, keyRingId.KeyRingId(), roleId, []string{ + fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, projectId), + }), + }, + { + ResourceName: "google_kms_key_ring_iam_binding.foo", + ImportStateId: fmt.Sprintf("%s %s", keyRingId.TerraformId(), roleId), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccKmsKeyRingIamBinding_update(projectId, orgId, billingAccount, account, keyRingName, roleId), + Check: testAccCheckGoogleKmsKeyRingIam(t, keyRingId.KeyRingId(), roleId, []string{ + fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, projectId), + fmt.Sprintf("serviceAccount:%s-2@%s.iam.gserviceaccount.com", account, projectId), + }), + }, + { + ResourceName: "google_kms_key_ring_iam_binding.foo", + ImportStateId: fmt.Sprintf("%s %s", 
keyRingId.TerraformId(), roleId), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccKmsKeyRingIamBinding_withCondition(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyDecrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + conditionTitle := "expires_after_2019_12_31" + + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccKmsKeyRingIamBinding_withCondition(projectId, orgId, billingAccount, account, keyRingName, roleId, conditionTitle), + }, + { + ResourceName: "google_kms_key_ring_iam_binding.foo", + ImportStateId: fmt.Sprintf("%s %s %s", keyRingId.TerraformId(), roleId, conditionTitle), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccKmsKeyRingIamMember(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyEncrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ 
+ { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccKmsKeyRingIamMember_basic(projectId, orgId, billingAccount, account, keyRingName, roleId), + Check: testAccCheckGoogleKmsKeyRingIam(t, keyRingId.KeyRingId(), roleId, []string{ + fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, projectId), + }), + }, + { + ResourceName: "google_kms_key_ring_iam_member.foo", + ImportStateId: fmt.Sprintf("%s %s serviceAccount:%s@%s.iam.gserviceaccount.com", keyRingId.TerraformId(), roleId, account, projectId), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccKmsKeyRingIamMember_withCondition(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyEncrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + conditionTitle := "expires_after_2019_12_31" + + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccKmsKeyRingIamMember_withCondition(projectId, orgId, billingAccount, account, keyRingName, roleId, conditionTitle), + }, + { + ResourceName: "google_kms_key_ring_iam_member.foo", + ImportStateId: fmt.Sprintf("%s %s serviceAccount:%s@%s.iam.gserviceaccount.com %s", keyRingId.TerraformId(), roleId, account, projectId, conditionTitle), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func TestAccKmsKeyRingIamPolicy(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := 
fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyEncrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccKmsKeyRingIamPolicy_basic(projectId, orgId, billingAccount, account, keyRingName, roleId), + Check: testAccCheckGoogleKmsKeyRingIam(t, keyRingId.KeyRingId(), roleId, []string{ + fmt.Sprintf("serviceAccount:%s@%s.iam.gserviceaccount.com", account, projectId), + }), + }, + { + ResourceName: "google_kms_key_ring_iam_policy.foo", + ImportStateId: keyRingId.TerraformId(), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccKmsKeyRingIamPolicy_withCondition(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + billingAccount := envvar.GetTestBillingAccountFromEnv(t) + account := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + roleId := "roles/cloudkms.cryptoKeyEncrypter" + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + conditionTitle := "expires_after_2019_12_31" + + keyRingId := &kms.KmsKeyRingId{ + Project: projectId, + Location: DEFAULT_KMS_TEST_LOCATION, + Name: keyRingName, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccKmsKeyRingIamPolicy_withCondition(projectId, orgId, billingAccount, account, keyRingName, roleId, conditionTitle), 
+ }, + { + ResourceName: "google_kms_key_ring_iam_policy.foo", + ImportStateId: keyRingId.TerraformId(), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +{{- end }} + +func testAccCheckGoogleKmsKeyRingIam(t *testing.T, keyRingId, role string, members []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + p, err := config.NewKmsClient(config.UserAgent).Projects.Locations.KeyRings.GetIamPolicy(keyRingId).Do() + if err != nil { + return err + } + + for _, binding := range p.Bindings { + if binding.Role == role { + sort.Strings(members) + sort.Strings(binding.Members) + + if reflect.DeepEqual(members, binding.Members) { + return nil + } + + return fmt.Errorf("Binding found but expected members is %v, got %v", members, binding.Members) + } + } + + return fmt.Errorf("No binding for role %q", role) + } +} + +// We are using a custom role since iam_binding is authoritative on the member list and +// we want to avoid removing members from an existing role to prevent unwanted side effects. 
+func testAccKmsKeyRingIamBinding_basic(projectId, orgId, billingAccount, account, keyRingName, roleId string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Key Ring Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "us-central1" + name = "%s" +} + +resource "google_kms_key_ring_iam_binding" "foo" { + key_ring_id = google_kms_key_ring.key_ring.id + role = "%s" + members = ["serviceAccount:${google_service_account.test_account.email}"] +} +`, projectId, orgId, billingAccount, account, keyRingName, roleId) +} + +func testAccKmsKeyRingIamBinding_update(projectId, orgId, billingAccount, account, keyRingName, roleId string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Key Ring Iam Testing Account" +} + +resource "google_service_account" "test_account_2" { + project = google_project_service.iam.project + account_id = "%s-2" + display_name = "Kms Key Ring Iam 
Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "%s" + name = "%s" +} + +resource "google_kms_key_ring_iam_binding" "foo" { + key_ring_id = google_kms_key_ring.key_ring.id + role = "%s" + members = [ + "serviceAccount:${google_service_account.test_account.email}", + "serviceAccount:${google_service_account.test_account_2.email}", + ] +} +`, projectId, orgId, billingAccount, account, account, DEFAULT_KMS_TEST_LOCATION, keyRingName, roleId) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccKmsKeyRingIamBinding_withCondition(projectId, orgId, billingAccount, account, keyRingName, roleId, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Key Ring Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "us-central1" + name = "%s" +} + +resource "google_kms_key_ring_iam_binding" "foo" { + key_ring_id = google_kms_key_ring.key_ring.id + role = "%s" + members = ["serviceAccount:${google_service_account.test_account.email}"] + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +`, projectId, orgId, billingAccount, account, keyRingName, roleId, conditionTitle) +} +{{- end }} + +func testAccKmsKeyRingIamMember_basic(projectId, orgId, billingAccount, account, keyRingName, roleId 
string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Key Ring Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "%s" + name = "%s" +} + +resource "google_kms_key_ring_iam_member" "foo" { + key_ring_id = google_kms_key_ring.key_ring.id + role = "%s" + member = "serviceAccount:${google_service_account.test_account.email}" +} +`, projectId, orgId, billingAccount, account, DEFAULT_KMS_TEST_LOCATION, keyRingName, roleId) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccKmsKeyRingIamMember_withCondition(projectId, orgId, billingAccount, account, keyRingName, roleId, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Key Ring Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "%s" + name = "%s" +} + +resource "google_kms_key_ring_iam_member" "foo" { + 
key_ring_id = google_kms_key_ring.key_ring.id + role = "%s" + member = "serviceAccount:${google_service_account.test_account.email}" + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +`, projectId, orgId, billingAccount, account, DEFAULT_KMS_TEST_LOCATION, keyRingName, roleId, conditionTitle) +} +{{- end }} + +func testAccKmsKeyRingIamPolicy_basic(projectId, orgId, billingAccount, account, keyRingName, roleId string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Key Ring Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "%s" + name = "%s" +} + +data "google_iam_policy" "foo" { + binding { + role = "%s" + + members = ["serviceAccount:${google_service_account.test_account.email}"] + } +} + +resource "google_kms_key_ring_iam_policy" "foo" { + key_ring_id = google_kms_key_ring.key_ring.id + policy_data = data.google_iam_policy.foo.policy_data +} +`, projectId, orgId, billingAccount, account, DEFAULT_KMS_TEST_LOCATION, keyRingName, roleId) +} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccKmsKeyRingIamPolicy_withCondition(projectId, orgId, billingAccount, account, keyRingName, roleId, conditionTitle string) string { + return fmt.Sprintf(` +resource "google_project" "test_project" { + name = "Test project" + project_id = "%s" + org_id = "%s" + billing_account = 
"%s" +} + +resource "google_project_service" "kms" { + project = google_project.test_project.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_project_service" "iam" { + project = google_project_service.kms.project + service = "iam.googleapis.com" +} + +resource "google_service_account" "test_account" { + project = google_project_service.iam.project + account_id = "%s" + display_name = "Kms Key Ring Iam Testing Account" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.iam.project + location = "%s" + name = "%s" +} + +data "google_iam_policy" "foo" { + binding { + role = "%s" + + members = ["serviceAccount:${google_service_account.test_account.email}"] + condition { + title = "%s" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } + } +} + +resource "google_kms_key_ring_iam_policy" "foo" { + key_ring_id = google_kms_key_ring.key_ring.id + policy_data = data.google_iam_policy.foo.policy_data +} +`, projectId, orgId, billingAccount, account, DEFAULT_KMS_TEST_LOCATION, keyRingName, roleId, conditionTitle) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/managedkafka/go/resource_managed_kafka_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/managedkafka/go/resource_managed_kafka_cluster_test.go.tmpl new file mode 100644 index 000000000000..3b4edebc8d56 --- /dev/null +++ b/mmv1/third_party/terraform/services/managedkafka/go/resource_managed_kafka_cluster_test.go.tmpl @@ -0,0 +1,110 @@ +package managedkafka_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccManagedKafkaCluster_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + 
PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckManagedKafkaClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccManagedKafkaCluster_basic(context), + }, + { + ResourceName: "google_managed_kafka_cluster.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster_id", "labels", "location", "terraform_labels"}, + }, + { + Config: testAccManagedKafkaCluster_update(context), + }, + { + ResourceName: "google_managed_kafka_cluster.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster_id", "labels", "location", "terraform_labels"}, + }, + }, + }) +} + +func testAccManagedKafkaCluster_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_managed_kafka_cluster" "example" { + cluster_id = "tf-test-my-cluster%{random_suffix}" + location = "us-central1" + capacity_config { + vcpu_count = 3 + memory_bytes = 3221225472 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + rebalance_config { + mode = "NO_REBALANCE" + } + labels = { + key = "value" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} +`, context) +} + +func testAccManagedKafkaCluster_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_managed_kafka_cluster" "example" { + cluster_id = "tf-test-my-cluster%{random_suffix}" + location = "us-central1" + capacity_config { + vcpu_count = 4 + memory_bytes = 4512135122 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + rebalance_config { + mode = "AUTO_REBALANCE_ON_SCALE_UP" + } + labels = { + key = 
"new-value" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} +`, context) +} +{{- else }} +// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. +{{- end }} diff --git a/mmv1/third_party/terraform/services/managedkafka/go/resource_managed_kafka_topic_test.go.tmpl b/mmv1/third_party/terraform/services/managedkafka/go/resource_managed_kafka_topic_test.go.tmpl new file mode 100644 index 000000000000..b739ada2c4d6 --- /dev/null +++ b/mmv1/third_party/terraform/services/managedkafka/go/resource_managed_kafka_topic_test.go.tmpl @@ -0,0 +1,124 @@ +package managedkafka_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccManagedKafkaTopic_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckManagedKafkaTopicDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccManagedKafkaTopic_basic(context), + }, + { + ResourceName: "google_managed_kafka_topic.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "location", "topic_id"}, + }, + { + Config: testAccManagedKafkaTopic_update(context), + }, + { + ResourceName: "google_managed_kafka_topic.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"cluster", "location", "topic_id"}, + }, + }, + }) +} + +func testAccManagedKafkaTopic_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_managed_kafka_cluster" "example" { + cluster_id = "tf-test-my-cluster%{random_suffix}" + 
location = "us-central1" + capacity_config { + vcpu_count = 3 + memory_bytes = 3221225472 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + + provider = google-beta +} + +resource "google_managed_kafka_topic" "example" { + cluster = google_managed_kafka_cluster.example.cluster_id + topic_id = "tf-test-my-topic%{random_suffix}" + location = "us-central1" + partition_count = 2 + replication_factor = 3 + configs = { + "cleanup.policy" = "compact" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} +`, context) +} + +func testAccManagedKafkaTopic_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_managed_kafka_cluster" "example" { + cluster_id = "tf-test-my-cluster%{random_suffix}" + location = "us-central1" + capacity_config { + vcpu_count = 3 + memory_bytes = 3221225472 + } + gcp_config { + access_config { + network_configs { + subnet = "projects/${data.google_project.project.number}/regions/us-central1/subnetworks/default" + } + } + } + + provider = google-beta +} + +resource "google_managed_kafka_topic" "example" { + cluster = google_managed_kafka_cluster.example.cluster_id + topic_id = "tf-test-my-topic%{random_suffix}" + location = "us-central1" + partition_count = 3 + replication_factor = 3 + configs = { + "cleanup.policy" = "compact" + } + + provider = google-beta +} + +data "google_project" "project" { + provider = google-beta +} +`, context) +} +{{- else }} +// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. 
+{{- end }} diff --git a/mmv1/third_party/terraform/services/networkconnectivity/go/resource_network_connectivity_hub_sweeper.go.tmpl b/mmv1/third_party/terraform/services/networkconnectivity/go/resource_network_connectivity_hub_sweeper.go.tmpl new file mode 100644 index 000000000000..2477809b4a93 --- /dev/null +++ b/mmv1/third_party/terraform/services/networkconnectivity/go/resource_network_connectivity_hub_sweeper.go.tmpl @@ -0,0 +1,126 @@ +package networkconnectivity + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkConnectivityHub", testSweepNetworkConnectivityHub) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkConnectivityHub(region string) error { + resourceName := "NetworkConnectivityHub" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := 
strings.Split("https://networkconnectivity.googleapis.com/v1/projects/{{"{{"}}project{{"}}"}}/locations/global/hubs", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["hubs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networkconnectivity.googleapis.com/v1/projects/{{"{{"}}project{{"}}"}}/locations/global/hubs/{{"{{"}}name{{"}}"}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/networkconnectivity/go/resource_network_connectivity_spoke_sweeper.go.tmpl b/mmv1/third_party/terraform/services/networkconnectivity/go/resource_network_connectivity_spoke_sweeper.go.tmpl new file mode 100644 index 000000000000..2ab46c9eef8d --- /dev/null +++ b/mmv1/third_party/terraform/services/networkconnectivity/go/resource_network_connectivity_spoke_sweeper.go.tmpl @@ -0,0 +1,126 @@ +package networkconnectivity + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("NetworkConnectivitySpoke", testSweepNetworkConnectivitySpoke) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepNetworkConnectivitySpoke(region string) error { + resourceName := "NetworkConnectivitySpoke" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := 
envvar.GetTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://networkconnectivity.googleapis.com/v1/projects/{{"{{"}}project{{"}}"}}/locations/global/spokes", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["spokes"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://networkconnectivity.googleapis.com/v1/projects/{{"{{"}}project{{"}}"}}/locations/global/spokes/{{"{{"}}name{{"}}"}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_authorization_policy_test.go.tmpl b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_authorization_policy_test.go.tmpl new file mode 100644 index 000000000000..808c5ba38b57 --- /dev/null +++ b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_authorization_policy_test.go.tmpl @@ -0,0 +1,83 @@ +package networksecurity_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNetworkSecurityAuthorizationPolicy_update(t *testing.T) { + t.Parallel() + + authorizationPolicyName := fmt.Sprintf("tf-test-authorization-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkSecurityAuthorizationPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecurityAuthorizationPolicy_basic(authorizationPolicyName), + }, + { + ResourceName: "google_network_security_authorization_policy.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkSecurityAuthorizationPolicy_update(authorizationPolicyName), + }, + { + ResourceName: "google_network_security_authorization_policy.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkSecurityAuthorizationPolicy_basic(authorizationPolicyName string) string { + return fmt.Sprintf(` + resource "google_network_security_authorization_policy" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "my description" + action = "ALLOW" + rules { + sources { + principals = ["namespace/*"] + ip_blocks = ["1.2.3.0/24"] + } + } + } +`, authorizationPolicyName) +} + +func testAccNetworkSecurityAuthorizationPolicy_update(authorizationPolicyName string) string { + return fmt.Sprintf(` + resource "google_network_security_authorization_policy" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "updated description" + action = "DENY" + rules { + sources { + principals = ["namespace1/*"] + ip_blocks = ["1.2.3.0/24"] + } + } + } +`, 
authorizationPolicyName) +} + +{{ end }} + diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl new file mode 100644 index 000000000000..0cda14504a41 --- /dev/null +++ b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_client_tls_policy_test.go.tmpl @@ -0,0 +1,96 @@ +package networksecurity_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNetworkSecurityClientTlsPolicy_update(t *testing.T) { + t.Parallel() + + clientTlsPolicyName := fmt.Sprintf("tf-test-client-tls-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkSecurityClientTlsPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecurityClientTlsPolicy_basic(clientTlsPolicyName), + }, + { + ResourceName: "google_network_security_client_tls_policy.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkSecurityClientTlsPolicy_update(clientTlsPolicyName), + }, + { + ResourceName: "google_network_security_client_tls_policy.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkSecurityClientTlsPolicy_basic(clientTlsPolicyName string) string { + return fmt.Sprintf(` + resource "google_network_security_client_tls_policy" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "my description" + sni 
= "secure.example.com" + client_certificate { + certificate_provider_instance { + plugin_instance = "google_cloud_private_spiffe" + } + } + server_validation_ca { + grpc_endpoint { + target_uri = "unix:mypath" + } + } + } +`, clientTlsPolicyName) +} + +func testAccNetworkSecurityClientTlsPolicy_update(clientTlsPolicyName string) string { + return fmt.Sprintf(` + resource "google_network_security_client_tls_policy" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "updated description" + sni = "secure1.example.com" + client_certificate { + certificate_provider_instance { + plugin_instance = "google_cloud" + } + } + server_validation_ca { + grpc_endpoint { + target_uri = "unix:mypath1" + } + } + server_validation_ca { + grpc_endpoint { + target_uri = "unix:mypath2" + } + } + } +`, clientTlsPolicyName) +} + +{{ end }} + diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_firewall_endpoint_association_test.go.tmpl b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_firewall_endpoint_association_test.go.tmpl new file mode 100644 index 000000000000..0cc684a4ce64 --- /dev/null +++ b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_firewall_endpoint_association_test.go.tmpl @@ -0,0 +1,214 @@ +package networksecurity_test + +import ( + "fmt" + "strings" + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func TestAccNetworkSecurityFirewallEndpointAssociations_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "orgId": 
envvar.GetTestOrgFromEnv(t), + "randomSuffix": acctest.RandString(t, 10), + "billingProjectId": envvar.GetTestProjectFromEnv(), + "disabled": strconv.FormatBool(false), + } + + testResourceName := "google_network_security_firewall_endpoint_association.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkSecurityFirewallEndpointDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecurityFirewallEndpointAssociation_basic(context), + }, + { + ResourceName: testResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkSecurityFirewallEndpointAssociation_update(context), + }, + { + ResourceName: testResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccNetworkSecurityFirewallEndpointAssociations_disabled(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "orgId": envvar.GetTestOrgFromEnv(t), + "randomSuffix": acctest.RandString(t, 10), + "billingProjectId": envvar.GetTestProjectFromEnv(), + } + + testResourceName := "google_network_security_firewall_endpoint_association.foobar" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkSecurityFirewallEndpointDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecurityFirewallEndpointAssociation_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(testResourceName, "disabled", "false"), + ), + }, + { + ResourceName: testResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", 
"terraform_labels"}, + }, + { + Config: testAccNetworkSecurityFirewallEndpointAssociation_update(testContextMapDisabledField(context, true)), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(testResourceName, "disabled", "true"), + ), + }, + { + ResourceName: testResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkSecurityFirewallEndpointAssociation_update(testContextMapDisabledField(context, false)), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(testResourceName, "disabled", "false"), + ), + }, + { + ResourceName: testResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testContextMapDisabledField(context map[string]interface{}, disabled bool) map[string]interface{} { + context["disabled"] = strconv.FormatBool(disabled) + return context +} + +func testAccNetworkSecurityFirewallEndpointAssociation_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "foobar" { + name = "tf-test-my-vpc%{randomSuffix}" + auto_create_subnetworks = false +} + +resource "google_network_security_firewall_endpoint" "foobar" { + name = "tf-test-my-firewall-endpoint%{randomSuffix}" + parent = "organizations/%{orgId}" + location = "us-central1-a" + billing_project_id = "%{billingProjectId}" +} + +# TODO: add tlsInspectionPolicy once resource is ready +resource "google_network_security_firewall_endpoint_association" "foobar" { + name = "tf-test-my-firewall-endpoint-association%{randomSuffix}" + parent = "projects/%{billingProjectId}" + location = "us-central1-a" + firewall_endpoint = google_network_security_firewall_endpoint.foobar.id + network = google_compute_network.foobar.id + + labels = { + foo = "bar" + } +} +`, context) +} + +func 
testAccNetworkSecurityFirewallEndpointAssociation_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "foobar" { + name = "tf-test-my-vpc%{randomSuffix}" + auto_create_subnetworks = false +} + +resource "google_network_security_firewall_endpoint" "foobar" { + name = "tf-test-my-firewall-endpoint%{randomSuffix}" + parent = "organizations/%{orgId}" + location = "us-central1-a" + billing_project_id = "%{billingProjectId}" +} + +# TODO: add tlsInspectionPolicy once resource is ready +resource "google_network_security_firewall_endpoint_association" "foobar" { + name = "tf-test-my-firewall-endpoint-association%{randomSuffix}" + parent = "projects/%{billingProjectId}" + location = "us-central1-a" + firewall_endpoint = google_network_security_firewall_endpoint.foobar.id + network = google_compute_network.foobar.id + disabled = "%{disabled}" + + labels = { + foo = "bar-updated" + } +} +`, context) +} + +func testAccCheckNetworkSecurityFirewallEndpointAssociationDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_network_security_firewall_endpoint_association" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}NetworkSecurityBasePath{{"}}"}}{{"{{"}}parent{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/firewallEndpointAssociations/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("NetworkSecurityFirewallEndpointAssociation still 
exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_firewall_endpoint_test.go.tmpl b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_firewall_endpoint_test.go.tmpl new file mode 100644 index 000000000000..e50b644a141a --- /dev/null +++ b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_firewall_endpoint_test.go.tmpl @@ -0,0 +1,119 @@ +package networksecurity_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func TestAccNetworkSecurityFirewallEndpoints_basic(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + billingProjectId := envvar.GetTestProjectFromEnv() + orgId := envvar.GetTestOrgFromEnv(t) + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkSecurityFirewallEndpointDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecurityFirewallEndpoints_basic(orgId, billingProjectId, randomSuffix), + }, + { + ResourceName: "google_network_security_firewall_endpoint.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkSecurityFirewallEndpoints_update(orgId, billingProjectId, randomSuffix), + }, + { + ResourceName: "google_network_security_firewall_endpoint.foobar", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkSecurityFirewallEndpoints_basic(orgId string, billingProjectId string, randomSuffix string) string { + return fmt.Sprintf(` +resource "google_network_security_firewall_endpoint" "foobar" { + name = "tf-test-my-firewall-endpoint%[1]s" + parent = "organizations/%[2]s" + location = "us-central1-a" + billing_project_id = "%[3]s" + + labels = { + foo = "bar" + } +} +`, randomSuffix, orgId, billingProjectId) +} + +func testAccNetworkSecurityFirewallEndpoints_update(orgId string, billingProjectId string, randomSuffix string) string { + return fmt.Sprintf(` +resource "google_network_security_firewall_endpoint" "foobar" { + name = "tf-test-my-firewall-endpoint%[1]s" + parent = "organizations/%[2]s" + location = "us-central1-a" + billing_project_id = "%[3]s" + + labels = { + foo = "bar-updated" + } +} +`, randomSuffix, orgId, billingProjectId) +} + +func testAccCheckNetworkSecurityFirewallEndpointDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_network_security_firewall_endpoint" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}NetworkSecurityBasePath{{"}}"}}{{"{{"}}parent{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/firewallEndpoints/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("NetworkSecurityFirewallEndpoint still exists at %s", url) + } + } + 
+ return nil + } +} diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_security_profile_group_test.go b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_security_profile_group_test.go new file mode 100644 index 000000000000..f7eddcee9826 --- /dev/null +++ b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_security_profile_group_test.go @@ -0,0 +1,96 @@ +package networksecurity_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccNetworkSecuritySecurityProfileGroups_update(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkSecuritySecurityProfileGroupDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecuritySecurityProfileGroups_basic(orgId, randomSuffix), + }, + { + ResourceName: "google_network_security_security_profile_group.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkSecuritySecurityProfileGroups_update(orgId, randomSuffix), + }, + { + ResourceName: "google_network_security_security_profile_group.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkSecuritySecurityProfileGroups_basic(orgId string, randomSuffix string) string { + return fmt.Sprintf(` +resource "google_network_security_security_profile" "foobar" { + name = "tf-test-my-security-profile%s" + type = 
"THREAT_PREVENTION" + parent = "organizations/%s" + location = "global" +} + +resource "google_network_security_security_profile_group" "foobar" { + name = "tf-test-my-security-profile-group%s" + parent = "organizations/%s" + location = "global" + description = "My security profile group." + threat_prevention_profile = google_network_security_security_profile.foobar.id + + labels = { + foo = "bar" + } +} +`, randomSuffix, orgId, randomSuffix, orgId) +} + +func testAccNetworkSecuritySecurityProfileGroups_update(orgId string, randomSuffix string) string { + return fmt.Sprintf(` +resource "google_network_security_security_profile" "foobar" { + name = "tf-test-my-security-profile%s" + type = "THREAT_PREVENTION" + parent = "organizations/%s" + location = "global" +} + +resource "google_network_security_security_profile" "foobar_updated" { + name = "tf-test-my-security-profile-updated%s" + type = "THREAT_PREVENTION" + parent = "organizations/%s" + location = "global" +} + +resource "google_network_security_security_profile_group" "foobar" { + name = "tf-test-my-security-profile-group%s" + parent = "organizations/%s" + location = "global" + description = "My security profile group. 
Update" + threat_prevention_profile = google_network_security_security_profile.foobar_updated.id + + labels = { + foo = "foo" + } +} +`, randomSuffix, orgId, randomSuffix, orgId, randomSuffix, orgId) +} diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_security_profile_test.go b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_security_profile_test.go new file mode 100644 index 000000000000..70afec07b0e6 --- /dev/null +++ b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_security_profile_test.go @@ -0,0 +1,87 @@ +package networksecurity_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccNetworkSecuritySecurityProfiles_update(t *testing.T) { + t.Parallel() + + orgId := envvar.GetTestOrgFromEnv(t) + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkSecuritySecurityProfileDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecuritySecurityProfiles_basic(orgId, randomSuffix), + }, + { + ResourceName: "google_network_security_security_profile.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkSecuritySecurityProfiles_update(orgId, randomSuffix), + }, + { + ResourceName: "google_network_security_security_profile.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkSecuritySecurityProfiles_basic(orgId string, randomSuffix string) string { + return 
fmt.Sprintf(` +resource "google_network_security_security_profile" "foobar" { + name = "tf-test-my-security-profile%s" + parent = "organizations/%s" + location = "global" + description = "My security profile." + type = "THREAT_PREVENTION" + + labels = { + foo = "bar" + } +} +`, randomSuffix, orgId) +} + +func testAccNetworkSecuritySecurityProfiles_update(orgId string, randomSuffix string) string { + return fmt.Sprintf(` +resource "google_network_security_security_profile" "foobar" { + name = "tf-test-my-security-profile%s" + parent = "organizations/%s" + location = "global" + description = "My security profile. Update" + type = "THREAT_PREVENTION" + + labels = { + foo = "foo" + } + + threat_prevention_profile { + severity_overrides { + action = "ALLOW" + severity = "INFORMATIONAL" + } + + severity_overrides { + action = "DENY" + severity = "HIGH" + } + } +} +`, randomSuffix, orgId) +} diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_server_tls_policy_test.go.tmpl b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_server_tls_policy_test.go.tmpl new file mode 100644 index 000000000000..838d84ccd3b9 --- /dev/null +++ b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_server_tls_policy_test.go.tmpl @@ -0,0 +1,81 @@ +package networksecurity_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNetworkSecurityServerTlsPolicy_update(t *testing.T) { + t.Parallel() + + serverTlsPolicyName := fmt.Sprintf("tf-test-server-tls-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkSecurityServerTlsPolicyDestroyProducer(t), + 
Steps: []resource.TestStep{ + { + Config: testAccNetworkSecurityServerTlsPolicy_basic(serverTlsPolicyName), + }, + { + ResourceName: "google_network_security_server_tls_policy.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkSecurityServerTlsPolicy_update(serverTlsPolicyName), + }, + { + ResourceName: "google_network_security_server_tls_policy.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkSecurityServerTlsPolicy_basic(serverTlsPolicyName string) string { + return fmt.Sprintf(` + resource "google_network_security_server_tls_policy" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "my description" + allow_open = "false" + server_certificate { + certificate_provider_instance { + plugin_instance = "google_cloud_private_spiffe" + } + } + } +`, serverTlsPolicyName) +} + +func testAccNetworkSecurityServerTlsPolicy_update(serverTlsPolicyName string) string { + return fmt.Sprintf(` + resource "google_network_security_server_tls_policy" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "updated description" + allow_open = "false" + server_certificate { + certificate_provider_instance { + plugin_instance = "google_cloud_private_spiffe" + } + } + } +`, serverTlsPolicyName) +} + +{{ end }} + diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_tls_inspection_policy_test.go.tmpl b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_tls_inspection_policy_test.go.tmpl new file mode 100644 index 000000000000..51820210982d --- /dev/null +++ b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_tls_inspection_policy_test.go.tmpl @@ -0,0 +1,380 @@ +package networksecurity_test +{{- if ne $.TargetVersionName "ga" }} + 
+import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccNetworkSecurityTlsInspectionPolicy_update(t *testing.T){ + t.Parallel() + + context := map[string]interface{}{ + "projectNumber": envvar.GetTestProjectNumberFromEnv(), + "randomSuffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckNetworkSecurityTlsInspectionPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkSecurityTlsInspectionPolicy_basic(context), + }, + { + ResourceName: "google_network_security_tls_inspection_policy.default", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNetworkSecurityTlsInspectionPolicy_update(context), + }, + { + ResourceName: "google_network_security_tls_inspection_policy.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccNetworkSecurityTlsInspectionPolicy_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_privateca_ca_pool" "default" { + provider = google-beta + name = "tf-test-cap-%{randomSuffix}" + location = "us-central1" + tier = "DEVOPS" + + publishing_options { + publish_ca_cert = false + publish_crl = false + } + + issuance_policy { + maximum_lifetime = "1209600s" + baseline_values { + ca_options { + is_ca = false + } + key_usage { + base_key_usage {} + extended_key_usage { + server_auth = true + } + } + } + } +} + +resource "google_privateca_certificate_authority" "default" { + provider = google-beta + pool = google_privateca_ca_pool.default.name + certificate_authority_id = "tf-test-ca-%{randomSuffix}" + location = "us-central1" + lifetime = "86400s" + type = "SELF_SIGNED" + 
deletion_protection = false + skip_grace_period = true + ignore_active_certificates_on_deletion = true + + config { + subject_config { + subject { + organization = "Test LLC" + common_name = "my-ca" + } + } + x509_config { + ca_options { + is_ca = true + } + key_usage { + base_key_usage { + cert_sign = true + crl_sign = true + } + extended_key_usage { + server_auth = false + } + } + } + } + + key_spec { + algorithm = "RSA_PKCS1_4096_SHA256" + } +} + +resource "google_project_service_identity" "default" { + provider = google-beta + service = "networksecurity.googleapis.com" +} + +resource "google_privateca_ca_pool_iam_member" "default" { + provider = google-beta + ca_pool = google_privateca_ca_pool.default.id + role = "roles/privateca.certificateManager" + member = "serviceAccount:${google_project_service_identity.default.email}" +} + +resource "google_certificate_manager_trust_config" "default" { + provider = google-beta + name = "tf-test-tc-%{randomSuffix}" + description = "sample trust config description" + location = "us-central1" + + trust_stores { + trust_anchors { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + } +} + +resource "google_network_security_tls_inspection_policy" "default" { + provider = google-beta + name = "tf-test-tip-%{randomSuffix}" + location = "us-central1" + ca_pool = google_privateca_ca_pool.default.id + exclude_public_ca_set = false + min_tls_version = "TLS_1_0" + trust_config = google_certificate_manager_trust_config.default.id + tls_feature_profile = "PROFILE_CUSTOM" + + custom_tls_features = [ + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", + 
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", + "TLS_RSA_WITH_3DES_EDE_CBC_SHA", + "TLS_RSA_WITH_AES_128_CBC_SHA", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_256_CBC_SHA", + "TLS_RSA_WITH_AES_256_GCM_SHA384" + ] + + depends_on = [ + google_privateca_certificate_authority.default, + google_privateca_ca_pool_iam_member.default + ] +} +`, context) +} + +func testAccNetworkSecurityTlsInspectionPolicy_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_privateca_ca_pool" "default" { + provider = google-beta + name = "tf-test-cap-%{randomSuffix}" + location = "us-central1" + tier = "DEVOPS" + + publishing_options { + publish_ca_cert = false + publish_crl = false + } + + issuance_policy { + maximum_lifetime = "1209600s" + baseline_values { + ca_options { + is_ca = false + } + key_usage { + base_key_usage {} + extended_key_usage { + server_auth = true + } + } + } + } +} + +resource "google_privateca_ca_pool" "default_updated" { + provider = google-beta + name = "tf-test-cap-updated-%{randomSuffix}" + location = "us-central1" + tier = "DEVOPS" + + publishing_options { + publish_ca_cert = false + publish_crl = false + } + + issuance_policy { + maximum_lifetime = "1209600s" + baseline_values { + ca_options { + is_ca = false + } + key_usage { + base_key_usage {} + extended_key_usage { + server_auth = true + } + } + } + } +} + +resource "google_privateca_certificate_authority" "default" { + provider = google-beta + pool = google_privateca_ca_pool.default.name + certificate_authority_id = "tf-test-ca-%{randomSuffix}" + location = "us-central1" + lifetime = "86400s" + type = "SELF_SIGNED" + deletion_protection = false + skip_grace_period = true + ignore_active_certificates_on_deletion = true + + config { + subject_config { + subject { + organization = "Test LLC" + common_name = "my-ca" + } + } + x509_config { + ca_options { + is_ca = true + } + key_usage { + base_key_usage { + 
cert_sign = true + crl_sign = true + } + extended_key_usage { + server_auth = false + } + } + } + } + + key_spec { + algorithm = "RSA_PKCS1_4096_SHA256" + } +} + +resource "google_privateca_certificate_authority" "default_updated" { + provider = google-beta + pool = google_privateca_ca_pool.default_updated.name + certificate_authority_id = "tf-test-ca-%{randomSuffix}" + location = "us-central1" + lifetime = "86400s" + type = "SELF_SIGNED" + deletion_protection = false + skip_grace_period = true + ignore_active_certificates_on_deletion = true + + config { + subject_config { + subject { + organization = "Test LLC" + common_name = "my-ca" + } + } + x509_config { + ca_options { + is_ca = true + } + key_usage { + base_key_usage { + cert_sign = true + crl_sign = true + } + extended_key_usage { + server_auth = false + } + } + } + } + + key_spec { + algorithm = "RSA_PKCS1_4096_SHA256" + } +} + +resource "google_project_service_identity" "default" { + provider = google-beta + service = "networksecurity.googleapis.com" +} + +resource "google_privateca_ca_pool_iam_member" "default" { + provider = google-beta + ca_pool = google_privateca_ca_pool.default.id + role = "roles/privateca.certificateManager" + member = "serviceAccount:${google_project_service_identity.default.email}" +} + +resource "google_privateca_ca_pool_iam_member" "default_updated" { + provider = google-beta + ca_pool = google_privateca_ca_pool.default_updated.id + role = "roles/privateca.certificateManager" + member = "serviceAccount:${google_project_service_identity.default.email}" +} + +resource "google_certificate_manager_trust_config" "default" { + provider = google-beta + name = "tf-test-tc-%{randomSuffix}" + description = "sample trust config description" + location = "us-central1" + + trust_stores { + trust_anchors { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + } +} + +resource 
"google_certificate_manager_trust_config" "default_updated" { + provider = google-beta + name = "tf-test-tc-updated-%{randomSuffix}" + description = "another sample trust config description" + location = "us-central1" + + trust_stores { + trust_anchors { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + intermediate_cas { + pem_certificate = file("test-fixtures/ca_cert.pem") + } + } +} + +resource "google_network_security_tls_inspection_policy" "default" { + provider = google-beta + name = "tf-test-tip-%{randomSuffix}" + location = "us-central1" + description = "my tls inspection policy updated" + ca_pool = google_privateca_ca_pool.default_updated.id + exclude_public_ca_set = true + min_tls_version = "TLS_1_2" + trust_config = google_certificate_manager_trust_config.default_updated.id + + depends_on = [ + google_privateca_certificate_authority.default_updated, + google_privateca_ca_pool_iam_member.default_updated + ] +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_endpoint_policy_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_endpoint_policy_test.go.tmpl new file mode 100644 index 000000000000..d3aa003ac92b --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_endpoint_policy_test.go.tmpl @@ -0,0 +1,92 @@ +package networkservices_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNetworkServicesEndpointPolicy_update(t *testing.T) { + t.Parallel() + + endpointPolicyName := fmt.Sprintf("tf-test-endpoint-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckNetworkServicesEndpointPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesEndpointPolicy_basic(endpointPolicyName), + }, + { + ResourceName: "google_network_services_endpoint_policy.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesEndpointPolicy_update(endpointPolicyName), + }, + { + ResourceName: "google_network_services_endpoint_policy.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesEndpointPolicy_basic(endpointPolicyName string) string { + return fmt.Sprintf(` +resource "google_network_services_endpoint_policy" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "my description" + type = "SIDECAR_PROXY" + traffic_port_selector { + ports = ["8081"] + } + endpoint_matcher { + metadata_label_matcher { + metadata_label_match_criteria = "MATCH_ANY" + metadata_labels { + label_name = "foo" + label_value = "bar" + } + } + } +} +`, endpointPolicyName) +} + +func testAccNetworkServicesEndpointPolicy_update(endpointPolicyName string) string { + return fmt.Sprintf(` +resource "google_network_services_endpoint_policy" "foobar" { + name = "%s" + labels = { + foo = "barbar" + baz = "qux" + } + description = "update description" + type = "GRPC_SERVER" + endpoint_matcher { + metadata_label_matcher { + metadata_label_match_criteria = "MATCH_ALL" + metadata_labels { + label_name = "baz" + label_value = "bux" + } + } + } +} +`, endpointPolicyName) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_grpc_route_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_grpc_route_test.go.tmpl new file mode 100644 index 000000000000..f37749eb11e8 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_grpc_route_test.go.tmpl @@ -0,0 +1,133 @@ +package networkservices_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNetworkServicesGrpcRoute_update(t *testing.T) { + t.Parallel() + + grpcRouteName := fmt.Sprintf("tf-test-grpc-route-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesGrpcRouteDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesGrpcRoute_basic(grpcRouteName), + }, + { + ResourceName: "google_network_services_grpc_route.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesGrpcRoute_update(grpcRouteName), + }, + { + ResourceName: "google_network_services_grpc_route.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesGrpcRoute_basic(grpcRouteName string) string { + return fmt.Sprintf(` + resource "google_network_services_grpc_route" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "my description" + hostnames = ["example"] + rules { + matches { + headers { + key = "key" + value = "value" + } + } + action { + retry_policy { + retry_conditions = ["cancelled"] + num_retries = 1 + } + } + } + rules { + matches { + headers { + key = "key" + value = "value" + } + } + action { + fault_injection_policy { + delay { + fixed_delay = "1s" + percentage = 1 + } + abort { + http_status = 500 + percentage = 1 + } + } + } + } + } +`, grpcRouteName) +} + 
+func testAccNetworkServicesGrpcRoute_update(grpcRouteName string) string { + return fmt.Sprintf(` + resource "google_network_services_grpc_route" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "updated description" + hostnames = ["example"] + rules { + matches { + headers { + key = "key" + value = "value" + } + } + action { + retry_policy { + retry_conditions = ["cancelled"] + num_retries = 2 + } + } + } + rules { + matches { + headers { + key = "key1" + value = "value1" + } + } + action { + retry_policy { + retry_conditions = ["connect-failure"] + num_retries = 1 + } + } + } + } +`, grpcRouteName) +} + +{{ end }} + diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_http_route_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_http_route_test.go.tmpl new file mode 100644 index 000000000000..678d10758e8f --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_http_route_test.go.tmpl @@ -0,0 +1,85 @@ +package networkservices_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNetworkServicesHttpRoute_update(t *testing.T) { + t.Parallel() + + httpRouteName := fmt.Sprintf("tf-test-http-route-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesHttpRouteDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesHttpRoute_basic(httpRouteName), + }, + { + ResourceName: "google_network_services_http_route.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + 
Config: testAccNetworkServicesHttpRoute_update(httpRouteName), + }, + { + ResourceName: "google_network_services_http_route.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesHttpRoute_basic(httpRouteName string) string { + return fmt.Sprintf(` +resource "google_network_services_http_route" "foobar" { + name = "%s" + description = "my description" + hostnames = ["example"] + rules { + matches { + query_parameters { + query_parameter = "key" + exact_match = "value" + } + full_path_match = "example" + } + } +} +`, httpRouteName) +} + +func testAccNetworkServicesHttpRoute_update(httpRouteName string) string { + return fmt.Sprintf(` +resource "google_network_services_http_route" "foobar" { + name = "%s" + description = "update description" + labels = { + foo = "bar" + } + hostnames = ["example"] + rules { + matches { + query_parameters { + query_parameter = "key" + exact_match = "value" + } + full_path_match = "example" + } + } +} +`, httpRouteName) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_mesh_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_mesh_test.go.tmpl new file mode 100644 index 000000000000..8db8475759c6 --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_mesh_test.go.tmpl @@ -0,0 +1,65 @@ +package networkservices_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNetworkServicesMesh_update(t *testing.T) { + t.Parallel() + + meshName := fmt.Sprintf("tf-test-mesh-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesMeshDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesMesh_basic(meshName), + }, + { + ResourceName: "google_network_services_mesh.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesMesh_update(meshName), + }, + { + ResourceName: "google_network_services_mesh.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesMesh_basic(meshName string) string { + return fmt.Sprintf(` +resource "google_network_services_mesh" "foobar" { + name = "%s" + description = "my description" +} +`, meshName) +} + +func testAccNetworkServicesMesh_update(meshName string) string { + return fmt.Sprintf(` +resource "google_network_services_mesh" "foobar" { + name = "%s" + description = "update description" + labels = { + foo = "bar" + } +} +`, meshName) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_service_binding_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_service_binding_test.go.tmpl new file mode 100644 index 000000000000..b9ad131be383 --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_service_binding_test.go.tmpl @@ -0,0 +1,60 @@ +package networkservices_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNetworkServicesServiceBinding_update(t *testing.T) { + t.Parallel() + + serviceNamespace := fmt.Sprintf("tf-test-service-namespace-%s", acctest.RandString(t, 10)) + serviceName := 
fmt.Sprintf("tf-test-service-%s", acctest.RandString(t, 10)) + serviceBindingName := fmt.Sprintf("tf-test-service-binding-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesServiceBindingDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesServiceBinding_create(serviceNamespace, serviceName, serviceBindingName), + }, + { + ResourceName: "google_network_services_service_binding.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccNetworkServicesServiceBinding_create(serviceNamespace string, serviceName string, serviceBindingName string) string { + return fmt.Sprintf(` + + resource "google_service_directory_namespace" "foo" { + namespace_id = "%s" + location = "us-central1" + } + resource "google_service_directory_service" "bar" { + service_id = "%s" + namespace = google_service_directory_namespace.foo.id + + metadata = { + stage = "prod" + region = "us-central1" + } + } + resource "google_network_services_service_binding" "foobar" { + name = "%s" + description = "my description" + service = google_service_directory_service.bar.id + } +`, serviceNamespace, serviceName, serviceBindingName) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_service_lb_policies_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_service_lb_policies_test.go.tmpl new file mode 100644 index 000000000000..e592e521843f --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_service_lb_policies_test.go.tmpl @@ -0,0 +1,78 @@ +package networkservices_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNetworkServicesLBPolicies_update(t *testing.T) { + t.Parallel() + + policyName := fmt.Sprintf("tf-test-lb-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesServiceLbPoliciesDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesLBPolicies_basic(policyName), + }, + { + ResourceName: "google_network_services_service_lb_policies.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesLBPolicies_update(policyName), + }, + { + ResourceName: "google_network_services_service_lb_policies.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesLBPolicies_basic(policyName string) string { + return fmt.Sprintf(` +resource "google_network_services_service_lb_policies" "foobar" { + name = "%s" + location = "global" + description = "my description" +} +`, policyName) +} + +func testAccNetworkServicesLBPolicies_update(policyName string) string { + return fmt.Sprintf(` +resource "google_network_services_service_lb_policies" "foobar" { + name = "%s" + location = "global" + description = "my description" + load_balancing_algorithm = "SPRAY_TO_REGION" + + auto_capacity_drain { + enable = true + } + + failover_config { + failover_health_threshold = 70 + } + + labels = { + foo = "bar" + } +} +`, policyName) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl new file mode 100644 index 
000000000000..672cec100e97 --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tcp_route_test.go.tmpl @@ -0,0 +1,120 @@ +package networkservices_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNetworkServicesTcpRoute_update(t *testing.T) { + t.Parallel() + + tcpServiceName := fmt.Sprintf("tf-test-tcp-service-%s", acctest.RandString(t, 10)) + tcpHealthCheckName := fmt.Sprintf("tf-test-tcp-healthcheck-%s", acctest.RandString(t, 10)) + tcpRouteName := fmt.Sprintf("tf-test-tcp-route-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesTcpRouteDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesTcpRoute_basic(tcpServiceName, tcpHealthCheckName, tcpRouteName), + }, + { + ResourceName: "google_network_services_tcp_route.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNetworkServicesTcpRoute_update(tcpServiceName, tcpHealthCheckName, tcpRouteName), + }, + { + ResourceName: "google_network_services_tcp_route.foobar", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetworkServicesTcpRoute_basic(tcpServiceName string, tcpHealthCheckName string, tcpRouteName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foo" { + name = "%s" + health_checks = [google_compute_http_health_check.bar.id] +} + +resource "google_compute_http_health_check" "bar" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + 
timeout_sec = 1 +} + +resource "google_network_services_tcp_route" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "my description" + rules { + matches { + address = "10.0.0.1/32" + port = "8081" + } + action { + destinations { + service_name = google_compute_backend_service.foo.id + weight = 1 + } + original_destination = false + } + } +} +`, tcpServiceName, tcpHealthCheckName, tcpRouteName) +} + +func testAccNetworkServicesTcpRoute_update(tcpServiceName string, tcpHealthCheckName string, tcpRouteName string) string { + return fmt.Sprintf(` + resource "google_compute_backend_service" "foo" { + name = "%s" + health_checks = [google_compute_http_health_check.bar.id] +} + +resource "google_compute_http_health_check" "bar" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_network_services_tcp_route" "foobar" { + name = "%s" + labels = { + foo = "bar" + } + description = "update description" + rules { + matches { + address = "10.0.0.1/32" + port = "8081" + } + action { + destinations { + service_name = google_compute_backend_service.foo.id + weight = 1 + } + original_destination = false + } + } +} +`, tcpServiceName, tcpHealthCheckName, tcpRouteName) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tls_route_test.go.tmpl b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tls_route_test.go.tmpl new file mode 100644 index 000000000000..11896ecb4b40 --- /dev/null +++ b/mmv1/third_party/terraform/services/networkservices/go/resource_network_services_tls_route_test.go.tmpl @@ -0,0 +1,110 @@ +package networkservices_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNetworkServicesTlsRoute_update(t *testing.T) { + t.Parallel() + + 
tlsServiceName := fmt.Sprintf("tf-test-tls-service-%s", acctest.RandString(t, 10)) + tlsHealthCheckName := fmt.Sprintf("tf-test-tls-healthcheck-%s", acctest.RandString(t, 10)) + tlsRouteName := fmt.Sprintf("tf-test-tls-route-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNetworkServicesTlsRouteDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNetworkServicesTlsRoute_basic(tlsServiceName, tlsHealthCheckName, tlsRouteName), + }, + { + ResourceName: "google_network_services_tls_route.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccNetworkServicesTlsRoute_update(tlsServiceName, tlsHealthCheckName, tlsRouteName), + }, + { + ResourceName: "google_network_services_tls_route.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccNetworkServicesTlsRoute_basic(tlsServiceName string, tlsHealthCheckName string, tlsRouteName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foo" { + name = "%s" + health_checks = [google_compute_http_health_check.bar.id] +} + +resource "google_compute_http_health_check" "bar" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_network_services_tls_route" "foobar" { + name = "%s" + description = "my description" + rules { + matches { + sni_host = ["example.com"] + alpn = ["http/1.1"] + } + action { + destinations { + service_name = google_compute_backend_service.foo.id + weight = 1 + } + } + } +} +`, tlsServiceName, tlsHealthCheckName, tlsRouteName) +} + +func testAccNetworkServicesTlsRoute_update(tlsServiceName string, tlsHealthCheckName string, tlsRouteName string) string { + return fmt.Sprintf(` + resource "google_compute_backend_service" "foo" { + name = "%s" + health_checks = 
[google_compute_http_health_check.bar.id] + } + + resource "google_compute_http_health_check" "bar" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 + } + + resource "google_network_services_tls_route" "foobar" { + name = "%s" + description = "update description" + rules { + matches { + sni_host = ["example.com"] + alpn = ["http/1.1"] + } + action { + destinations { + service_name = google_compute_backend_service.foo.id + weight = 1 + } + } + } + } +`, tlsServiceName, tlsHealthCheckName, tlsRouteName) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_environment_test.go.tmpl b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_environment_test.go.tmpl new file mode 100644 index 000000000000..984d4fd05686 --- /dev/null +++ b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_environment_test.go.tmpl @@ -0,0 +1,46 @@ +package notebooks_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNotebooksEnvironment_create(t *testing.T) { + t.Parallel() + + prefix := fmt.Sprintf("%d", acctest.RandInt(t)) + name := fmt.Sprintf("tf-env-%s", prefix) + + acctest.VcrTest(t, resource.TestCase{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksEnvironment_create(name), + }, + { + ResourceName: "google_notebooks_environment.test", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccNotebooksEnvironment_create(name string) string { + return fmt.Sprintf(` + +resource "google_notebooks_environment" "test" { + name = "%s" + location = "us-west1-a" + container_image { + repository = "gcr.io/deeplearning-platform-release/base-cpu" + } +} +`, name) +} + +{{ end }} diff --git 
a/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_container_test.go.tmpl b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_container_test.go.tmpl new file mode 100644 index 000000000000..44f23aeb8e79 --- /dev/null +++ b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_container_test.go.tmpl @@ -0,0 +1,53 @@ +package notebooks_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNotebooksInstance_create_container(t *testing.T) { + t.Parallel() + + prefix := fmt.Sprintf("%d", acctest.RandInt(t)) + name := fmt.Sprintf("tf-%s", prefix) + + acctest.VcrTest(t, resource.TestCase{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksInstance_create_container(name), + }, + { + ResourceName: "google_notebooks_instance.test", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image"}, + }, + }, + }) +} + +func testAccNotebooksInstance_create_container(name string) string { + return fmt.Sprintf(` + +resource "google_notebooks_instance" "test" { + name = "%s" + location = "us-west1-a" + machine_type = "e2-medium" + metadata = { + proxy-mode = "service_account" + terraform = "true" + } + container_image { + repository = "gcr.io/deeplearning-platform-release/base-cpu" + tag = "latest" + } +} +`, name) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_gpu_test.go.tmpl b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_gpu_test.go.tmpl new file mode 100644 index 000000000000..ba61e606fd29 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_gpu_test.go.tmpl @@ -0,0 +1,58 @@ +package notebooks_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNotebooksInstance_create_gpu(t *testing.T) { + t.Parallel() + + prefix := fmt.Sprintf("%d", acctest.RandInt(t)) + name := fmt.Sprintf("tf-%s", prefix) + + acctest.VcrTest(t, resource.TestCase{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksInstance_create_gpu(name), + }, + { + ResourceName: "google_notebooks_instance.test", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image"}, + }, + }, + }) +} + +func testAccNotebooksInstance_create_gpu(name string) string { + return fmt.Sprintf(` + +resource "google_notebooks_instance" "test" { + name = "%s" + location = "us-west1-a" + machine_type = "n1-standard-1" // can't be e2 because of accelerator + metadata = { + proxy-mode = "service_account" + terraform = "true" + } + vm_image { + project = "deeplearning-platform-release" + image_family = "tf-latest-gpu" + } + install_gpu_driver = true + accelerator_config { + type = "NVIDIA_TESLA_T4" + core_count = 1 + } +} +`, name) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_state_test.go.tmpl b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_state_test.go.tmpl new file mode 100644 index 000000000000..a935c50910e9 --- /dev/null +++ b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_state_test.go.tmpl @@ -0,0 +1,84 @@ +package notebooks_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "testing" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNotebooksInstance_state(t *testing.T) { + t.Parallel() + + prefix := fmt.Sprintf("%d", acctest.RandInt(t)) + name := fmt.Sprintf("tf-%s", prefix) + + acctest.VcrTest(t, resource.TestCase{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksInstance_basic_active(name), + }, + { + ResourceName: "google_notebooks_instance.test", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image","desired_state", "update_time"}, + }, + { + Config: testAccNotebooksInstance_basic_stopped(name), + }, + { + ResourceName: "google_notebooks_instance.test", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image","desired_state", "update_time"}, + }, + { + Config: testAccNotebooksInstance_basic_active(name), + }, + { + ResourceName: "google_notebooks_instance.test", + ImportState: true, + ImportStateVerify: true, + ExpectNonEmptyPlan: true, + ImportStateVerifyIgnore: []string{"container_image", "metadata", "vm_image","desired_state", "update_time"}, + }, + }, + }) +} + +func testAccNotebooksInstance_basic_active(name string) string { + return fmt.Sprintf(` +resource "google_notebooks_instance" "test" { + name = "%s" + location = "us-west1-a" + machine_type = "e2-medium" + vm_image { + project = "deeplearning-platform-release" + image_family = "tf-latest-cpu" + } + desired_state = "ACTIVE" +} +`, name) +} + +func testAccNotebooksInstance_basic_stopped(name string) string { + return fmt.Sprintf(` +resource "google_notebooks_instance" "test" { + name = "%s" + location = "us-west1-a" + machine_type = "e2-medium" + vm_image { + project = "deeplearning-platform-release" + image_family = 
"tf-latest-cpu" + } + desired_state = "STOPPED" +} +`, name) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_test.go b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_test.go new file mode 100644 index 000000000000..376cd5cb3465 --- /dev/null +++ b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_instance_test.go @@ -0,0 +1,152 @@ +package notebooks_test + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNotebooksInstance_create_vm_image(t *testing.T) { + t.Parallel() + + prefix := fmt.Sprintf("%d", acctest.RandInt(t)) + name := fmt.Sprintf("tf-%s", prefix) + + acctest.VcrTest(t, resource.TestCase{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksInstance_create_vm_image(name), + }, + { + ResourceName: "google_notebooks_instance.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"vm_image", "metadata"}, + }, + }, + }) +} + +func TestAccNotebooksInstance_update(t *testing.T) { + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksInstance_basic(context), + }, + { + ResourceName: "google_notebooks_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"vm_image", "metadata", "update_time", "proxy_uri", "state"}, + }, + { + Config: testAccNotebooksInstance_update(context, true), + }, + { + ResourceName: "google_notebooks_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"vm_image", "metadata", "labels", 
"terraform_labels", "update_time", "proxy_uri", "state"}, + }, + { + Config: testAccNotebooksInstance_update(context, false), + }, + { + ResourceName: "google_notebooks_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"vm_image", "metadata", "labels", "terraform_labels", "update_time", "proxy_uri", "state"}, + }, + }, + }) +} + +func testAccNotebooksInstance_create_vm_image(name string) string { + return fmt.Sprintf(` + +resource "google_notebooks_instance" "test" { + name = "%s" + location = "us-west1-a" + machine_type = "e2-medium" + metadata = { + proxy-mode = "service_account" + terraform = "true" + } + + nic_type = "VIRTIO_NET" + + reservation_affinity { + consume_reservation_type = "NO_RESERVATION" + } + + vm_image { + project = "deeplearning-platform-release" + image_family = "tf-latest-cpu" + } +} +`, name) +} + +func testAccNotebooksInstance_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_notebooks_instance" "instance" { + name = "tf-test-notebooks-instance%{random_suffix}" + location = "us-central1-a" + machine_type = "e2-medium" + + vm_image { + project = "deeplearning-platform-release" + image_family = "tf-latest-cpu" + } + + metadata = { + proxy-mode = "service_account" + terraform = "true" + } + + lifecycle { + prevent_destroy = true + } +} +`, context) +} + +func testAccNotebooksInstance_update(context map[string]interface{}, preventDestroy bool) string { + context["prevent_destroy"] = strconv.FormatBool(preventDestroy) + + return acctest.Nprintf(` +resource "google_notebooks_instance" "instance" { + name = "tf-test-notebooks-instance%{random_suffix}" + location = "us-central1-a" + machine_type = "e2-medium" + + vm_image { + project = "deeplearning-platform-release" + image_family = "tf-latest-cpu" + } + + metadata = { + proxy-mode = "service_account" + terraform = "true" + notebook-upgrade-schedule = "0 * * * *" + } + + labels = { + key = "value" + } + 
+ lifecycle { + prevent_destroy = %{prevent_destroy} + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_runtime_test.go b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_runtime_test.go new file mode 100644 index 000000000000..b87ce5088e13 --- /dev/null +++ b/mmv1/third_party/terraform/services/notebooks/go/resource_notebooks_runtime_test.go @@ -0,0 +1,111 @@ +package notebooks_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccNotebooksRuntime_update(t *testing.T) { + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckNotebooksRuntimeDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccNotebooksRuntime_basic(context), + }, + { + ResourceName: "google_notebooks_runtime.runtime", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNotebooksRuntime_update(context), + }, + { + ResourceName: "google_notebooks_runtime.runtime", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testAccNotebooksRuntime_basic(context), + }, + { + ResourceName: "google_notebooks_runtime.runtime", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + + +func testAccNotebooksRuntime_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_notebooks_runtime" "runtime" { + name = "tf-test-notebooks-runtime%{random_suffix}" + location = "us-central1" + access_config { + 
access_type = "SINGLE_USER" + runtime_owner = "admin@hashicorptest.com" + } + software_config {} + virtual_machine { + virtual_machine_config { + machine_type = "n1-standard-4" + data_disk { + initialize_params { + disk_size_gb = "100" + disk_type = "PD_STANDARD" + } + } + reserved_ip_range = "192.168.255.0/24" + } + } +} +`, context) +} + +func testAccNotebooksRuntime_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_notebooks_runtime" "runtime" { + name = "tf-test-notebooks-runtime%{random_suffix}" + location = "us-central1" + access_config { + access_type = "SINGLE_USER" + runtime_owner = "admin@hashicorptest.com" + } + software_config { + idle_shutdown_timeout = "80" + } + virtual_machine { + virtual_machine_config { + machine_type = "n1-standard-8" + data_disk { + initialize_params { + disk_size_gb = "100" + disk_type = "PD_STANDARD" + } + } + accelerator_config { + core_count = "1" + type = "NVIDIA_TESLA_V100" + } + reserved_ip_range = "192.168.255.0/24" + } + } + labels = { + k = "val" + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/orgpolicy/go/resource_org_policy_custom_constraint_test.go b/mmv1/third_party/terraform/services/orgpolicy/go/resource_org_policy_custom_constraint_test.go new file mode 100644 index 000000000000..e355de3a0714 --- /dev/null +++ b/mmv1/third_party/terraform/services/orgpolicy/go/resource_org_policy_custom_constraint_test.go @@ -0,0 +1,77 @@ +package orgpolicy_test + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccOrgPolicyCustomConstraint_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckOrgPolicyCustomConstraintDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyCustomConstraint_v1(context), + }, + { + ResourceName: "google_org_policy_custom_constraint.constraint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parent"}, + }, + { + Config: testAccOrgPolicyCustomConstraint_v2(context), + }, + { + ResourceName: "google_org_policy_custom_constraint.constraint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parent"}, + }, + }, + }) +} + +func testAccOrgPolicyCustomConstraint_v1(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_custom_constraint" "constraint" { + name = "custom.tfTest%{random_suffix}" + parent = "organizations/%{org_id}" + display_name = "Disable GKE auto upgrade" + description = "Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced." 
+ + action_type = "ALLOW" + condition = "resource.management.autoUpgrade == false" + method_types = ["CREATE", "UPDATE"] + resource_types = ["container.googleapis.com/NodePool"] +} +`, context) +} + +func testAccOrgPolicyCustomConstraint_v2(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_custom_constraint" "constraint" { + name = "custom.tfTest%{random_suffix}" + parent = "organizations/%{org_id}" + display_name = "Updated" + description = "Updated" + + action_type = "DENY" + condition = "resource.management.autoUpgrade == true" + method_types = ["CREATE"] + resource_types = ["container.googleapis.com/NodePool"] +} +`, context) +} + diff --git a/mmv1/third_party/terraform/services/osconfig/go/resource_os_config_os_policy_assignment_test.go.tmpl b/mmv1/third_party/terraform/services/osconfig/go/resource_os_config_os_policy_assignment_test.go.tmpl new file mode 100644 index 000000000000..3fdf2ab6dfd0 --- /dev/null +++ b/mmv1/third_party/terraform/services/osconfig/go/resource_os_config_os_policy_assignment_test.go.tmpl @@ -0,0 +1,256 @@ + +package osconfig_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccOSConfigOSPolicyAssignment_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckOSConfigOSPolicyAssignmentDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccOSConfigOSPolicyAssignment_basic(context), + }, + { + ResourceName: "google_os_config_os_policy_assignment.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"rollout.0.min_wait_duration"}, + }, + { + Config: testAccOSConfigOSPolicyAssignment_update(context), + }, + { + ResourceName: "google_os_config_os_policy_assignment.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"rollout.0.min_wait_duration"}, + }, + }, + }) +} + +func testAccOSConfigOSPolicyAssignment_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_os_config_os_policy_assignment" "primary" { + instance_filter { + all = false + exclusion_labels { + labels = { + label-two = "value-two" + } + } + inclusion_labels { + labels = { + label-one = "value-one" + } + } + inventories { + os_short_name = "centos" + os_version = "8.*" + } + } + + location = "us-central1-a" + name = "tf-test-policy-assignment%{random_suffix}" + + os_policies { + id = "policy" + mode = "VALIDATION" + + resource_groups { + resources { + id = "apt-to-yum" + + repository { + apt { + archive_type = "DEB" + components = ["doc"] + distribution = "debian" + uri = "https://atl.mirrors.clouvider.net/debian" + gpg_key = ".gnupg/pubring.kbx" + } + } + } + inventory_filters { + os_short_name = "centos" + os_version = "8.*" + } + + resources { + id = "exec1" + exec { + validate { + interpreter = "SHELL" + args = ["arg1"] + file { + local_path = "$HOME/script.sh" + } + output_file_path = "$HOME/out" + } + enforce { + interpreter = "SHELL" + args = ["arg1"] + file { + allow_insecure = true + remote { + uri = "https://www.example.com/script.sh" + sha256_checksum = "c7938fed83afdccbb0e86a2a2e4cad7d5035012ca3214b4a61268393635c3063" + } + } + output_file_path = "$HOME/out" + } + } + } + } + allow_no_resource_group_match = false + description = "A test os policy" + } + + rollout { + disruption_budget { + percent = 100 + } 
+ + min_wait_duration = "3.2s" + } + + description = "A test os policy assignment" +} +`, context) +} + +func testAccOSConfigOSPolicyAssignment_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_os_config_os_policy_assignment" "primary" { + instance_filter { + all = false + inventories { + os_short_name = "centos" + os_version = "9.*" + } + } + + location = "us-central1-a" + name = "tf-test-policy-assignment%{random_suffix}" + + os_policies { + id = "policy" + mode = "ENFORCEMENT" + + resource_groups { + resources { + id = "apt-to-yum" + + repository { + yum { + id = "new-yum" + display_name = "new-yum" + base_url = "http://mirrors.rcs.alaska.edu/centos/" + gpg_keys = ["RPM-GPG-KEY-CentOS-Debug-7"] + } + } + } + inventory_filters { + os_short_name = "centos" + os_version = "8.*" + } + + resources { + id = "new-exec1" + exec { + validate { + interpreter = "POWERSHELL" + args = ["arg2"] + file { + local_path = "$HOME/script.bat" + } + output_file_path = "$HOME/out" + } + enforce { + interpreter = "POWERSHELL" + args = ["arg2"] + file { + allow_insecure = false + remote { + uri = "https://www.example.com/script.bat" + sha256_checksum = "9f8e5818ccb47024d01000db713c0a333679b64678ff5fe2d9bea0a23014dd54" + } + } + output_file_path = "$HOME/out" + } + } + } + } + allow_no_resource_group_match = true + description = "An updated test os policy" + } + + rollout { + disruption_budget { + percent = 90 + } + + min_wait_duration = "3.1s" + } + + description = "An updated test os policy assignment" +} +`, context) +} + +func testAccCheckOSConfigOSPolicyAssignmentDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_os_config_os_policy_assignment" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := 
tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}OSConfigBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/osPolicyAssignments/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("OSConfigOSPolicyAssignment still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl b/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl new file mode 100644 index 000000000000..0a632c109ff0 --- /dev/null +++ b/mmv1/third_party/terraform/services/parallelstore/go/resource_parallelstore_instance_test.go.tmpl @@ -0,0 +1,137 @@ +package parallelstore_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccParallelstoreInstance_parallelstoreInstanceBasicExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckParallelstoreInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccParallelstoreInstance_parallelstoreInstanceBasicExample_basic(context), + }, + { + ResourceName: "google_parallelstore_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "instance_id", 
"labels", "terraform_labels"}, + }, + { + Config: testAccParallelstoreInstance_parallelstoreInstanceBasicExample_update(context), + }, + { + ResourceName: "google_parallelstore_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "instance_id", "labels", "terraform_labels"}, + }, + + }, + }) +} + +func testAccParallelstoreInstance_parallelstoreInstanceBasicExample_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_parallelstore_instance" "instance" { + instance_id = "instance%{random_suffix}" + location = "us-central1-a" + description = "test instance" + capacity_gib = 12000 + network = google_compute_network.network.name + reserved_ip_range = google_compute_global_address.private_ip_alloc.name + labels = { + test = "value" + } + provider = google-beta + depends_on = [google_service_networking_connection.default] +} + +resource "google_compute_network" "network" { + name = "network%{random_suffix}" + auto_create_subnetworks = true + mtu = 8896 + provider = google-beta +} + + + +# Create an IP address +resource "google_compute_global_address" "private_ip_alloc" { + name = "address%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = google_compute_network.network.id + provider = google-beta +} + +# Create a private connection +resource "google_service_networking_connection" "default" { + network = google_compute_network.network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] + provider = google-beta +} +`, context) +} + +func testAccParallelstoreInstance_parallelstoreInstanceBasicExample_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_parallelstore_instance" "instance" { + instance_id = "instance%{random_suffix}" + location = "us-central1-a" + description = "test instance updated" + 
capacity_gib = 12000 + network = google_compute_network.network.name + + labels = { + test = "value23" + } + provider = google-beta + depends_on = [google_service_networking_connection.default] +} + +resource "google_compute_network" "network" { + name = "network%{random_suffix}" + auto_create_subnetworks = true + mtu = 8896 + provider = google-beta +} + + + +# Create an IP address +resource "google_compute_global_address" "private_ip_alloc" { + name = "address%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = google_compute_network.network.id + provider = google-beta +} + +# Create a private connection +resource "google_service_networking_connection" "default" { + network = google_compute_network.network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] + provider = google-beta +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/privilegedaccessmanager/go/resource_privileged_access_manager_entitlement_test.go.tmpl b/mmv1/third_party/terraform/services/privilegedaccessmanager/go/resource_privileged_access_manager_entitlement_test.go.tmpl new file mode 100644 index 000000000000..f09292a7a948 --- /dev/null +++ b/mmv1/third_party/terraform/services/privilegedaccessmanager/go/resource_privileged_access_manager_entitlement_test.go.tmpl @@ -0,0 +1,136 @@ +package privilegedaccessmanager_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccPrivilegedAccessManagerEntitlement_privilegedAccessManagerEntitlementProjectExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "project_name": 
envvar.GetTestProjectFromEnv(), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckPrivilegedAccessManagerEntitlementDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccPrivilegedAccessManagerEntitlement_privilegedAccessManagerEntitlementBasicExample_basic(context), + }, + { + ResourceName: "google_privileged_access_manager_entitlement.tfentitlement", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "entitlement_id", "parent"}, + }, + { + Config: testAccPrivilegedAccessManagerEntitlement_privilegedAccessManagerEntitlementBasicExample_update(context), + }, + { + ResourceName: "google_privileged_access_manager_entitlement.tfentitlement", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "entitlement_id", "parent"}, + }, + }, + }) +} + +func testAccPrivilegedAccessManagerEntitlement_privilegedAccessManagerEntitlementBasicExample_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_privileged_access_manager_entitlement" "tfentitlement" { + provider = google-beta + entitlement_id = "tf-test-example-entitlement%{random_suffix}" + location = "global" + max_request_duration = "43200s" + parent = "projects/%{project_name}" + requester_justification_config { + unstructured{} + } + eligible_users { + principals = ["group:test@google.com"] + } + privileged_access{ + gcp_iam_access{ + role_bindings{ + role = "roles/storage.admin" + condition_expression = "request.time < timestamp(\"2024-04-23T18:30:00.000Z\")" + } + resource = "//cloudresourcemanager.googleapis.com/projects/%{project_name}" + resource_type = "cloudresourcemanager.googleapis.com/Project" + } + } + additional_notification_targets { + admin_email_recipients = ["user@example.com"] + requester_email_recipients = 
["user@example.com"] + } + approval_workflow { + manual_approvals { + require_approver_justification = true + steps { + approvals_needed = 1 + approver_email_recipients = ["user@example.com"] + approvers { + principals = ["group:test@google.com"] + } + } + } + } +} +`, context) +} + +func testAccPrivilegedAccessManagerEntitlement_privilegedAccessManagerEntitlementBasicExample_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_privileged_access_manager_entitlement" "tfentitlement" { + provider = google-beta + entitlement_id = "tf-test-example-entitlement%{random_suffix}" + location = "global" + max_request_duration = "4300s" + parent = "projects/%{project_name}" + requester_justification_config { + not_mandatory{} + } + eligible_users { + principals = ["group:test@google.com"] + } + privileged_access{ + gcp_iam_access{ + role_bindings{ + role = "roles/storage.admin" + condition_expression = "request.time < timestamp(\"2024-04-23T18:30:00.000Z\")" + } + resource = "//cloudresourcemanager.googleapis.com/projects/%{project_name}" + resource_type = "cloudresourcemanager.googleapis.com/Project" + } + } + additional_notification_targets { + admin_email_recipients = ["user1@example.com"] + requester_email_recipients = ["user2@example.com"] + } + approval_workflow { + manual_approvals { + require_approver_justification = false + steps { + approvals_needed = 1 + approver_email_recipients = ["user3@example.com"] + approvers { + principals = ["group:test@google.com"] + } + } + } + } +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl new file mode 100644 index 000000000000..968e13c777e6 --- /dev/null +++ b/mmv1/third_party/terraform/services/redis/go/resource_redis_cluster_test.go.tmpl @@ -0,0 +1,299 @@ +package redis_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" 
+ "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccRedisCluster_createClusterWithNodeType(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckRedisClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster with replica count 1 + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // clean up the resource + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, nodeType: "REDIS_STANDARD_SMALL", zoneDistributionMode: "MULTI_ZONE"}), + }, + }, + }) +} + + +// Validate zone distribution for the cluster. 
+func TestAccRedisCluster_createClusterWithZoneDistribution(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckRedisClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster with replica count 1 + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: "SINGLE_ZONE", zone: "us-central1-b"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // clean up the resource + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: "SINGLE_ZONE", zone: "us-central1-b"}), + }, + }, + }) +} + +// Validate that replica count is updated for the cluster +func TestAccRedisCluster_updateReplicaCount(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckRedisClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster with replica count 1 + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // update replica count to 2 + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 2, shardCount: 3, preventDestroy: true, zoneDistributionMode: 
"MULTI_ZONE"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // clean up the resource + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE"}), + }, + { + // update replica count to 0 + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // clean up the resource + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 0, shardCount: 3, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE"}), + }, + }, + }) +} + +// Validate that shard count is updated for the cluster +func TestAccRedisCluster_updateShardCount(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckRedisClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster with shard count 3 + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 3, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // update shard count to 5 + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: true, zoneDistributionMode: "MULTI_ZONE"}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + 
ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // clean up the resource + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, replicaCount: 1, shardCount: 5, preventDestroy: false, zoneDistributionMode: "MULTI_ZONE"}), + }, + }, + }) +} + +// Validate that redisConfigs is updated for the cluster +func TestAccRedisCluster_updateRedisConfigs(t *testing.T) { + t.Parallel() + + name := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckRedisClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + // create cluster + Config: createOrUpdateRedisCluster(&ClusterParams{ + name: name, + shardCount: 3, + zoneDistributionMode: "MULTI_ZONE", + redisConfigs: map[string]string{ + "maxmemory-policy": "volatile-ttl", + }}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // add a new redis config key-value pair and update existing redis config + Config: createOrUpdateRedisCluster(&ClusterParams{ + name: name, + shardCount: 3, + zoneDistributionMode: "MULTI_ZONE", + redisConfigs: map[string]string{ + "maxmemory-policy": "allkeys-lru", + "maxmemory-clients": "90%", + }}), + }, + { + ResourceName: "google_redis_cluster.test", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"psc_configs"}, + }, + { + // remove all redis configs + Config: createOrUpdateRedisCluster(&ClusterParams{name: name, shardCount: 3, zoneDistributionMode: "MULTI_ZONE"}), + }, + + }, + }) +} + +type ClusterParams struct { + name string + replicaCount int + shardCount int + preventDestroy bool + nodeType string + redisConfigs map[string]string + zoneDistributionMode string + zone string +} + +func 
createOrUpdateRedisCluster(params *ClusterParams) string { + lifecycleBlock := "" + if params.preventDestroy { + lifecycleBlock = ` + lifecycle { + prevent_destroy = true + }` + } + var strBuilder strings.Builder + for key, value := range params.redisConfigs { + strBuilder.WriteString(fmt.Sprintf("%s = \"%s\"\n", key, value)) + } + + zoneDistributionConfigBlock := `` + if params.zoneDistributionMode != "" { + zoneDistributionConfigBlock = fmt.Sprintf(` + zone_distribution_config { + mode = "%s" + zone = "%s" + } + `, params.zoneDistributionMode, params.zone) + } + + return fmt.Sprintf(` +resource "google_redis_cluster" "test" { + provider = google-beta + name = "%s" + replica_count = %d + shard_count = %d + node_type = "%s" + region = "us-central1" + psc_configs { + network = google_compute_network.producer_net.id + } + redis_configs = { + %s + } + %s + depends_on = [ + google_network_connectivity_service_connection_policy.default + ] + %s +} + +resource "google_network_connectivity_service_connection_policy" "default" { + provider = google-beta + name = "%s" + location = "us-central1" + service_class = "gcp-memorystore-redis" + description = "my basic service connection policy" + network = google_compute_network.producer_net.id + psc_config { + subnetworks = [google_compute_subnetwork.producer_subnet.id] + } +} + +resource "google_compute_subnetwork" "producer_subnet" { + provider = google-beta + name = "%s" + ip_cidr_range = "10.0.0.248/29" + region = "us-central1" + network = google_compute_network.producer_net.id +} + +resource "google_compute_network" "producer_net" { + provider = google-beta + name = "%s" + auto_create_subnetworks = false +} +`, params.name, params.replicaCount, params.shardCount, params.nodeType, strBuilder.String(), zoneDistributionConfigBlock, lifecycleBlock, params.name, params.name, params.name) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_config.go.tmpl 
b/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_config.go.tmpl new file mode 100644 index 000000000000..ed5b54a9663d --- /dev/null +++ b/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_config.go.tmpl @@ -0,0 +1,43 @@ +package runtimeconfig + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleRuntimeconfigConfig() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceRuntimeconfigConfig().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleRuntimeconfigConfigRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleRuntimeconfigConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/configs/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + err = resourceRuntimeconfigConfigRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_config_test.go.tmpl b/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_config_test.go.tmpl new file mode 100644 index 000000000000..6f6d76568e12 --- /dev/null +++ b/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_config_test.go.tmpl @@ -0,0 +1,43 @@ +package runtimeconfig_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "fmt" + 
"testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccRuntimeconfigConfigDatasource_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccRuntimeconfigDatasourceConfig(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_runtimeconfig_config.default", "google_runtimeconfig_config.default"), + ), + }, + }, + }) +} + +func testAccRuntimeconfigDatasourceConfig(suffix string) string { + return fmt.Sprintf(` +resource "google_runtimeconfig_config" "default" { + name = "runtime-%s" + description = "runtime-%s" +} + +data "google_runtimeconfig_config" "default" { + name = google_runtimeconfig_config.default.name +} +`, suffix, suffix) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_variable.go.tmpl b/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_variable.go.tmpl new file mode 100644 index 000000000000..e4be153f05c6 --- /dev/null +++ b/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_variable.go.tmpl @@ -0,0 +1,46 @@ +package runtimeconfig + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "fmt" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func DataSourceGoogleRuntimeconfigVariable() *schema.Resource { + + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceRuntimeconfigVariable().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + 
tpgresource.AddRequiredFieldsToSchema(dsSchema, "parent") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleRuntimeconfigVariableRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleRuntimeconfigVariableRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/configs/{{"{{"}}parent{{"}}"}}/variables/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + err = resourceRuntimeconfigVariableRead(d, meta) + if err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_variable_test.go.tmpl b/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_variable_test.go.tmpl new file mode 100644 index 000000000000..a61f996d87a0 --- /dev/null +++ b/mmv1/third_party/terraform/services/runtimeconfig/go/data_source_runtimeconfig_variable_test.go.tmpl @@ -0,0 +1,50 @@ +package runtimeconfig_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccRuntimeconfigVariableDatasource_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccRuntimeconfigDatasourceVariable(acctest.RandString(t, 10), acctest.RandString(t, 10), acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_runtimeconfig_variable.default", 
"google_runtimeconfig_variable.default"), + ), + }, + }, + }) +} + +func testAccRuntimeconfigDatasourceVariable(suffix string, name string, text string) string { + return fmt.Sprintf(` + resource "google_runtimeconfig_config" "default" { + name = "runtime-%s" + description = "runtime-%s" + } + + resource "google_runtimeconfig_variable" "default" { + parent = google_runtimeconfig_config.default.name + name = "%s" + text = "%s" + } + + data "google_runtimeconfig_variable" "default" { + name = google_runtimeconfig_variable.default.name + parent = google_runtimeconfig_config.default.name + } +`, suffix, suffix, name, text) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_config.go.tmpl b/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_config.go.tmpl new file mode 100644 index 000000000000..956ee320b37a --- /dev/null +++ b/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_config.go.tmpl @@ -0,0 +1,207 @@ +package runtimeconfig + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "regexp" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + runtimeconfig "google.golang.org/api/runtimeconfig/v1beta1" +) + +var runtimeConfigFullName *regexp.Regexp = regexp.MustCompile("^projects/([^/]+)/configs/(.+)$") + +func ResourceRuntimeconfigConfig() *schema.Resource { + return &schema.Resource{ + Create: resourceRuntimeconfigConfigCreate, + Read: resourceRuntimeconfigConfigRead, + Update: resourceRuntimeconfigConfigUpdate, + Delete: resourceRuntimeconfigConfigDelete, + + Importer: &schema.ResourceImporter{ + State: resourceRuntimeconfigConfigImport, + }, + + CustomizeDiff: 
customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateRegexp("[0-9A-Za-z](?:[_.A-Za-z0-9-]{0,62}[_.A-Za-z0-9])?"), + Description: `The name of the runtime config.`, + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: `The description to associate with the runtime config.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceRuntimeconfigConfigCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + fullName := resourceRuntimeconfigFullName(project, name) + runtimeConfig := runtimeconfig.RuntimeConfig{ + Name: fullName, + } + + if val, ok := d.GetOk("description"); ok { + runtimeConfig.Description = val.(string) + } + + _, err = config.NewRuntimeconfigClient(userAgent).Projects.Configs.Create("projects/"+project, &runtimeConfig).Do() + + if err != nil { + return err + } + d.SetId(fullName) + + return nil +} + +func resourceRuntimeconfigConfigRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + fullName := d.Id() + runConfig, err := config.NewRuntimeconfigClient(userAgent).Projects.Configs.Get(fullName).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("RuntimeConfig %q", d.Id())) + } + + 
project, name, err := resourceRuntimeconfigParseFullName(runConfig.Name) + if err != nil { + return err + } + // Check to see if project matches our current defined value - if it doesn't, we'll explicitly set it + curProject, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + if project != curProject { + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + } + + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("description", runConfig.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + + return nil +} + +func resourceRuntimeconfigConfigUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Update works more like an 'overwrite' method - we build a new runtimeconfig.RuntimeConfig struct and it becomes + // the new config. This means our Update logic looks an awful lot like Create (and hence, doesn't use + // schema.ResourceData.hasChange()). 
+	fullName := d.Id()
+	runtimeConfig := runtimeconfig.RuntimeConfig{
+		Name: fullName,
+	}
+	if v, ok := d.GetOk("description"); ok {
+		runtimeConfig.Description = v.(string)
+	}
+
+	_, err = config.NewRuntimeconfigClient(userAgent).Projects.Configs.Update(fullName, &runtimeConfig).Do()
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func resourceRuntimeconfigConfigDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	fullName := d.Id()
+
+	_, err = config.NewRuntimeconfigClient(userAgent).Projects.Configs.Delete(fullName).Do()
+	if err != nil {
+		return err
+	}
+	d.SetId("")
+	return nil
+}
+
+func resourceRuntimeconfigConfigImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+	if err := tpgresource.ParseImportId([]string{"projects/(?P<project>[^/]+)/configs/(?P<name>[^/]+)", "(?P<name>[^/]+)"}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/configs/{{"{{"}}name{{"}}"}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+// resourceRuntimeconfigFullName turns a given project and a 'short name' for a runtime config into a full name
+// (e.g. projects/my-project/configs/my-config).
+func resourceRuntimeconfigFullName(project, name string) string {
+	return fmt.Sprintf("projects/%s/configs/%s", project, name)
+}
+
+// resourceRuntimeconfigParseFullName parses a full name (e.g. projects/my-project/configs/my-config) by parsing out the
+// project and the short name. Returns "", "", err upon error. 
+func resourceRuntimeconfigParseFullName(fullName string) (project, name string, err error) { + matches := runtimeConfigFullName.FindStringSubmatch(fullName) + if matches == nil { + return "", "", fmt.Errorf("Given full name doesn't match expected regexp; fullname = '%s'", fullName) + } + return matches[1], matches[2], nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_config_test.go.tmpl b/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_config_test.go.tmpl new file mode 100644 index 000000000000..24d55ab4bbda --- /dev/null +++ b/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_config_test.go.tmpl @@ -0,0 +1,179 @@ +package runtimeconfig_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + runtimeconfig "google.golang.org/api/runtimeconfig/v1beta1" +) + +func TestAccRuntimeconfigConfig_basic(t *testing.T) { + t.Parallel() + + var runtimeConfig runtimeconfig.RuntimeConfig + configName := fmt.Sprintf("runtimeconfig-test-%s", acctest.RandString(t, 10)) + description := "my test description" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRuntimeconfigConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRuntimeconfigConfig_basicDescription(configName, description), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuntimeConfigExists( + t, "google_runtimeconfig_config.foobar", &runtimeConfig), + testAccCheckRuntimeConfigDescription(&runtimeConfig, description), + ), + }, + { + ResourceName: "google_runtimeconfig_config.foobar", + ImportState: true, + ImportStateVerify: true, + }, 
+ }, + }) +} + +func TestAccRuntimeconfig_update(t *testing.T) { + t.Parallel() + + var runtimeConfig runtimeconfig.RuntimeConfig + configName := fmt.Sprintf("runtimeconfig-test-%s", acctest.RandString(t, 10)) + firstDescription := "my test description" + secondDescription := "my updated test description" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRuntimeconfigConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRuntimeconfigConfig_basicDescription(configName, firstDescription), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuntimeConfigExists( + t, "google_runtimeconfig_config.foobar", &runtimeConfig), + testAccCheckRuntimeConfigDescription(&runtimeConfig, firstDescription), + ), + }, { + Config: testAccRuntimeconfigConfig_basicDescription(configName, secondDescription), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuntimeConfigExists( + t, "google_runtimeconfig_config.foobar", &runtimeConfig), + testAccCheckRuntimeConfigDescription(&runtimeConfig, secondDescription), + ), + }, + }, + }) +} + +func TestAccRuntimeconfig_updateEmptyDescription(t *testing.T) { + t.Parallel() + + var runtimeConfig runtimeconfig.RuntimeConfig + configName := fmt.Sprintf("runtimeconfig-test-%s", acctest.RandString(t, 10)) + description := "my test description" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRuntimeconfigConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRuntimeconfigConfig_basicDescription(configName, description), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuntimeConfigExists( + t, "google_runtimeconfig_config.foobar", &runtimeConfig), + testAccCheckRuntimeConfigDescription(&runtimeConfig, description), + ), + }, { + 
Config: testAccRuntimeconfigConfig_emptyDescription(configName), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuntimeConfigExists( + t, "google_runtimeconfig_config.foobar", &runtimeConfig), + testAccCheckRuntimeConfigDescription(&runtimeConfig, ""), + ), + }, + }, + }) +} + +func testAccCheckRuntimeConfigDescription(runtimeConfig *runtimeconfig.RuntimeConfig, description string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if runtimeConfig.Description != description { + return fmt.Errorf("On runtime config '%s', expected description '%s', but found '%s'", + runtimeConfig.Name, description, runtimeConfig.Description) + } + return nil + } +} + +func testAccCheckRuntimeConfigExists(t *testing.T, resourceName string, runtimeConfig *runtimeconfig.RuntimeConfig) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewRuntimeconfigClient(config.UserAgent).Projects.Configs.Get(rs.Primary.ID).Do() + if err != nil { + return err + } + + *runtimeConfig = *found + + return nil + } +} + +func testAccCheckRuntimeconfigConfigDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_runtimeconfig_config" { + continue + } + + _, err := config.NewRuntimeconfigClient(config.UserAgent).Projects.Configs.Get(rs.Primary.ID).Do() + + if err == nil { + return fmt.Errorf("Runtimeconfig still exists") + } + } + + return nil + } +} + +func testAccRuntimeconfigConfig_basicDescription(name, description string) string { + return fmt.Sprintf(` +resource "google_runtimeconfig_config" "foobar" { + name = "%s" + description = "%s" +} +`, 
name, description) +} + +func testAccRuntimeconfigConfig_emptyDescription(name string) string { + return fmt.Sprintf(` +resource "google_runtimeconfig_config" "foobar" { + name = "%s" +} +`, name) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_variable.go.tmpl b/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_variable.go.tmpl new file mode 100644 index 000000000000..78fb3b12229f --- /dev/null +++ b/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_variable.go.tmpl @@ -0,0 +1,253 @@ +package runtimeconfig + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "regexp" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + runtimeconfig "google.golang.org/api/runtimeconfig/v1beta1" +) + +func ResourceRuntimeconfigVariable() *schema.Resource { + return &schema.Resource{ + Create: resourceRuntimeconfigVariableCreate, + Read: resourceRuntimeconfigVariableRead, + Update: resourceRuntimeconfigVariableUpdate, + Delete: resourceRuntimeconfigVariableDelete, + + Importer: &schema.ResourceImporter{ + State: resourceRuntimeconfigVariableImport, + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the variable to manage. Note that variable names can be hierarchical using slashes (e.g. 
"prod-variables/hostname").`, + }, + + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the RuntimeConfig resource containing this variable.`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. If it is not provided, the provider project is used.`, + }, + + "value": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ExactlyOneOf: []string{"text", "value"}, + }, + + "text": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + ExactlyOneOf: []string{"text", "value"}, + }, + + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `The timestamp in RFC3339 UTC "Zulu" format, accurate to nanoseconds, representing when the variable was last updated. Example: "2016-10-09T12:33:37.578138407Z".`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceRuntimeconfigVariableCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + variable, parent, err := newRuntimeconfigVariableFromResourceData(d, project) + if err != nil { + return err + } + + createdVariable, err := config.NewRuntimeconfigClient(userAgent).Projects.Configs.Variables.Create(resourceRuntimeconfigFullName(project, parent), variable).Do() + if err != nil { + return err + } + d.SetId(createdVariable.Name) + + return setRuntimeConfigVariableToResourceData(d, *createdVariable) +} + +func resourceRuntimeconfigVariableRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + fullName := d.Id() + 
createdVariable, err := config.NewRuntimeconfigClient(userAgent).Projects.Configs.Variables.Get(fullName).Do()
+	if err != nil {
+		return err
+	}
+
+	return setRuntimeConfigVariableToResourceData(d, *createdVariable)
+}
+
+func resourceRuntimeconfigVariableUpdate(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+	project, err := tpgresource.GetProject(d, config)
+	if err != nil {
+		return err
+	}
+
+	// Update works more like an 'overwrite' method - we build a new runtimeconfig.Variable struct and it becomes the
+	// new config. This means our Update logic looks an awful lot like Create (and hence, doesn't use
+	// schema.ResourceData.hasChange()).
+
+	variable, _, err := newRuntimeconfigVariableFromResourceData(d, project)
+	if err != nil {
+		return err
+	}
+
+	createdVariable, err := config.NewRuntimeconfigClient(userAgent).Projects.Configs.Variables.Update(variable.Name, variable).Do()
+	if err != nil {
+		return err
+	}
+
+	return setRuntimeConfigVariableToResourceData(d, *createdVariable)
+}
+
+func resourceRuntimeconfigVariableDelete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*transport_tpg.Config)
+	userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent)
+	if err != nil {
+		return err
+	}
+
+	fullName := d.Id()
+
+	_, err = config.NewRuntimeconfigClient(userAgent).Projects.Configs.Variables.Delete(fullName).Do()
+	if err != nil {
+		return err
+	}
+	d.SetId("")
+
+	return nil
+}
+
+func resourceRuntimeconfigVariableImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*transport_tpg.Config)
+	if err := tpgresource.ParseImportId([]string{"projects/(?P<project>[^/]+)/configs/(?P<parent>[^/]+)/variables/(?P<name>[^/]+)", "(?P<parent>[^/]+)/(?P<name>[^/]+)"}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err 
:= tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/configs/{{"{{"}}parent{{"}}"}}/variables/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +// resourceRuntimeconfigVariableFullName turns a given project, runtime config name, and a 'short name' for a runtime +// config variable into a full name (e.g. projects/my-project/configs/my-config/variables/my-variable). +func resourceRuntimeconfigVariableFullName(project, config, name string) string { + return fmt.Sprintf("projects/%s/configs/%s/variables/%s", project, config, name) +} + +// resourceRuntimeconfigVariableParseFullName parses a full name +// (e.g. projects/my-project/configs/my-config/variables/my-variable) by parsing out the +// project, runtime config name, and the short name. Returns "", "", "", err upon error. +func resourceRuntimeconfigVariableParseFullName(fullName string) (project, config, name string, err error) { + re := regexp.MustCompile("^projects/([^/]+)/configs/([^/]+)/variables/(.+)$") + matches := re.FindStringSubmatch(fullName) + if matches == nil { + return "", "", "", fmt.Errorf("Given full name doesn't match expected regexp; fullname = '%s'", fullName) + } + return matches[1], matches[2], matches[3], nil +} + +// newRuntimeconfigVariableFromResourceData builds a new runtimeconfig.Variable struct from the data stored in a +// schema.ResourceData. Also returns the full name of the parent. Returns nil, "", err upon error. +func newRuntimeconfigVariableFromResourceData(d *schema.ResourceData, project string) (variable *runtimeconfig.Variable, parent string, err error) { + + text := d.Get("text") + value := d.Get("value") + + // TODO(selmanj) here we assume it's a simple name, not a full name. 
Should probably support full name as well + parent = d.Get("parent").(string) + name := d.Get("name").(string) + + fullName := resourceRuntimeconfigVariableFullName(project, parent, name) + + variable = &runtimeconfig.Variable{ + Name: fullName, + } + + if text != "" { + variable.Text = text.(string) + } else { + variable.Value = value.(string) + } + + return variable, parent, nil +} + +// setRuntimeConfigVariableToResourceData stores a provided runtimeconfig.Variable struct inside a schema.ResourceData. +func setRuntimeConfigVariableToResourceData(d *schema.ResourceData, variable runtimeconfig.Variable) error { + varProject, parent, name, err := resourceRuntimeconfigVariableParseFullName(variable.Name) + if err != nil { + return err + } + if err := d.Set("name", name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + if err := d.Set("parent", parent); err != nil { + return fmt.Errorf("Error setting parent: %s", err) + } + if err := d.Set("project", varProject); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("value", variable.Value); err != nil { + return fmt.Errorf("Error setting value: %s", err) + } + if err := d.Set("text", variable.Text); err != nil { + return fmt.Errorf("Error setting text: %s", err) + } + if err := d.Set("update_time", variable.UpdateTime); err != nil { + return fmt.Errorf("Error setting update_time: %s", err) + } + + return nil +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_variable_test.go.tmpl b/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_variable_test.go.tmpl new file mode 100644 index 000000000000..191290c86cb6 --- /dev/null +++ b/mmv1/third_party/terraform/services/runtimeconfig/go/resource_runtimeconfig_variable_test.go.tmpl @@ -0,0 +1,243 @@ +package runtimeconfig_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "fmt" + "testing" + "time" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + runtimeconfig "google.golang.org/api/runtimeconfig/v1beta1" +) + +func TestAccRuntimeconfigVariable_basic(t *testing.T) { + t.Parallel() + + var variable runtimeconfig.Variable + + varName := fmt.Sprintf("variable-test-%s", acctest.RandString(t, 10)) + varText := "this is my test value" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRuntimeconfigVariableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRuntimeconfigVariable_basicText(acctest.RandString(t, 10), varName, varText), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuntimeconfigVariableExists( + t, "google_runtimeconfig_variable.foobar", &variable), + testAccCheckRuntimeconfigVariableText(&variable, varText), + testAccCheckRuntimeconfigVariableUpdateTime("google_runtimeconfig_variable.foobar"), + ), + }, + { + ResourceName: "google_runtimeconfig_variable.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccRuntimeconfigVariable_basicUpdate(t *testing.T) { + t.Parallel() + + var variable runtimeconfig.Variable + + configName := fmt.Sprintf("some-name-%s", acctest.RandString(t, 10)) + varName := fmt.Sprintf("variable-test-%s", acctest.RandString(t, 10)) + varText := "this is my test value" + varText2 := "this is my updated value" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRuntimeconfigVariableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRuntimeconfigVariable_basicTextUpdate(configName, varName, varText), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckRuntimeconfigVariableExists( + t, "google_runtimeconfig_variable.foobar", &variable), + testAccCheckRuntimeconfigVariableText(&variable, varText), + ), + }, { + Config: testAccRuntimeconfigVariable_basicTextUpdate(configName, varName, varText2), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuntimeconfigVariableExists( + t, "google_runtimeconfig_variable.foobar", &variable), + testAccCheckRuntimeconfigVariableText(&variable, varText2), + ), + }, + }, + }) +} + +func TestAccRuntimeconfigVariable_basicValue(t *testing.T) { + t.Parallel() + + var variable runtimeconfig.Variable + + varName := fmt.Sprintf("variable-test-%s", acctest.RandString(t, 10)) + varValue := "Zm9vYmFyCg==" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckRuntimeconfigVariableDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccRuntimeconfigVariable_basicValue(acctest.RandString(t, 10), varName, varValue), + Check: resource.ComposeTestCheckFunc( + testAccCheckRuntimeconfigVariableExists( + t, "google_runtimeconfig_variable.foobar", &variable), + testAccCheckRuntimeconfigVariableValue(&variable, varValue), + testAccCheckRuntimeconfigVariableUpdateTime("google_runtimeconfig_variable.foobar"), + ), + }, + { + ResourceName: "google_runtimeconfig_variable.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccCheckRuntimeconfigVariableExists(t *testing.T, resourceName string, variable *runtimeconfig.Variable) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := 
config.NewRuntimeconfigClient(config.UserAgent).Projects.Configs.Variables.Get(rs.Primary.ID).Do() + if err != nil { + return err + } + + *variable = *found + + return nil + } +} + +func testAccCheckRuntimeconfigVariableUpdateTime(resourceName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + updateTime := rs.Primary.Attributes["update_time"] + if updateTime == "" { + return fmt.Errorf("No update time set for resource %s", resourceName) + } + + // Make sure it's a valid rfc 3339 date + _, err := time.Parse(time.RFC3339, updateTime) + if err != nil { + return fmt.Errorf("Error while parsing update time for resource %s: %s", resourceName, err.Error()) + } + + return nil + } +} + +func testAccCheckRuntimeconfigVariableText(variable *runtimeconfig.Variable, text string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if variable.Text != text { + return fmt.Errorf("Variable %s had incorrect text: expected '%s' but found '%s'", variable.Name, + text, variable.Text) + } + + return nil + } +} + +func testAccCheckRuntimeconfigVariableValue(variable *runtimeconfig.Variable, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if variable.Value != value { + return fmt.Errorf("Variable %s had incorrect value: expected '%s' but found '%s'", variable.Name, + value, variable.Value) + } + + return nil + } +} + +func testAccCheckRuntimeconfigVariableDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_runtimeconfig_variable" { + continue + } + + _, err := config.NewRuntimeconfigClient(config.UserAgent).Projects.Configs.Variables.Get(rs.Primary.ID).Do() + + if err == nil { + return fmt.Errorf("Runtimeconfig variable still 
exists") + } + } + + return nil + } +} + +func testAccRuntimeconfigVariable_basicText(suffix, name, text string) string { + return fmt.Sprintf(` +resource "google_runtimeconfig_config" "foobar" { + name = "some-config-%s" +} + +resource "google_runtimeconfig_variable" "foobar" { + parent = google_runtimeconfig_config.foobar.name + name = "%s" + text = "%s" +} +`, suffix, name, text) +} + +func testAccRuntimeconfigVariable_basicTextUpdate(configName, name, text string) string { + return fmt.Sprintf(` +resource "google_runtimeconfig_config" "foobar" { + name = "%s" +} + +resource "google_runtimeconfig_variable" "foobar" { + parent = google_runtimeconfig_config.foobar.name + name = "%s" + text = "%s" +} +`, configName, name, text) +} + +func testAccRuntimeconfigVariable_basicValue(suffix, name, value string) string { + return fmt.Sprintf(` +resource "google_runtimeconfig_config" "foobar" { + name = "some-config-%s" +} + +resource "google_runtimeconfig_variable" "foobar" { + parent = google_runtimeconfig_config.foobar.name + name = "%s" + value = "%s" +} +`, suffix, name, value) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/secretmanager/go/iam_secret_manager_secret_test.go b/mmv1/third_party/terraform/services/secretmanager/go/iam_secret_manager_secret_test.go new file mode 100644 index 000000000000..ee567f13d1d0 --- /dev/null +++ b/mmv1/third_party/terraform/services/secretmanager/go/iam_secret_manager_secret_test.go @@ -0,0 +1,118 @@ +package secretmanager_test + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccSecretManagerSecretIam_iamMemberConditionUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/secretmanager.secretAccessor", + } + + acctest.VcrTest(t, 
resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecretIam_iamMemberCondition_basic(context), + }, + { + ResourceName: "google_secret_manager_secret_iam_member.default", + ImportStateId: fmt.Sprintf("projects/%s/secrets/%s %s serviceAccount:%s %s", envvar.GetTestProjectFromEnv(), fmt.Sprintf("tf-test-secret-%s", context["random_suffix"]), context["role"], fmt.Sprintf("tf-test-sa-%s@%s.iam.gserviceaccount.com", context["random_suffix"], envvar.GetTestProjectFromEnv()), fmt.Sprintf("tf-test-condition-%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSecretManagerSecretIam_iamMemberCondition_update(context), + }, + { + ResourceName: "google_secret_manager_secret_iam_member.default", + ImportStateId: fmt.Sprintf("projects/%s/secrets/%s %s serviceAccount:%s %s", envvar.GetTestProjectFromEnv(), fmt.Sprintf("tf-test-secret-%s", context["random_suffix"]), context["role"], fmt.Sprintf("tf-test-sa-%s@%s.iam.gserviceaccount.com", context["random_suffix"], envvar.GetTestProjectFromEnv()), fmt.Sprintf("tf-test-condition-new-%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccSecretManagerSecretIam_iamMemberCondition_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "default" { + account_id = "tf-test-sa-%{random_suffix}" + display_name = "Secret manager IAM testing account" +} + +resource "google_secret_manager_secret" "default" { + secret_id = "tf-test-secret-%{random_suffix}" + ttl = "3600s" + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} + +resource "google_secret_manager_secret_iam_member" "default" { + 
secret_id = google_secret_manager_secret.default.id + role = "%{role}" + member = "serviceAccount:${google_service_account.default.email}" + condition { + title = "tf-test-condition-%{random_suffix}" + description = "test condition" + expression = "request.time < timestamp(\"2022-03-01T00:00:00Z\")" + } +} +`, context) +} + +func testAccSecretManagerSecretIam_iamMemberCondition_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_service_account" "default" { + account_id = "tf-test-sa-%{random_suffix}" + display_name = "Secret manager IAM testing account" +} + +resource "google_secret_manager_secret" "default" { + secret_id = "tf-test-secret-%{random_suffix}" + ttl = "3600s" + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} + +resource "google_secret_manager_secret_iam_member" "default" { + secret_id = google_secret_manager_secret.default.id + role = "%{role}" + member = "serviceAccount:${google_service_account.default.email}" + condition { + title = "tf-test-condition-new-%{random_suffix}" + description = "test new condition" + expression = "request.time < timestamp(\"2024-03-01T00:00:00Z\")" + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/secretmanager/go/resource_secret_manager_secret_test.go b/mmv1/third_party/terraform/services/secretmanager/go/resource_secret_manager_secret_test.go new file mode 100644 index 000000000000..c0ce2fcda347 --- /dev/null +++ b/mmv1/third_party/terraform/services/secretmanager/go/resource_secret_manager_secret_test.go @@ -0,0 +1,1222 @@ +package secretmanager_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccSecretManagerSecret_import(t *testing.T) { + t.Parallel() + + context := 
map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecret_basic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccSecretManagerSecret_cmek(t *testing.T) { + t.Parallel() + + kmscentral := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + kmseast := acctest.BootstrapKMSKeyInLocation(t, "us-east1") + context1 := map[string]interface{}{ + "pid": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "kms_key_name_central": kmscentral.CryptoKey.Name, + "kms_key_name_east": kmseast.CryptoKey.Name, + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretMangerSecret_cmek(context1), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccSecretManagerSecret_annotationsUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccSecretManagerSecret_annotationsBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-with-annotations", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccSecretManagerSecret_annotationsUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-with-annotations", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels", "annotations"}, + }, + { + Config: testAccSecretManagerSecret_annotationsBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-with-annotations", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels", "annotations"}, + }, + }, + }) +} + +func TestAccSecretManagerSecret_versionAliasesUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecret_basicWithSecretVersions(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_versionAliasesBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_versionAliasesUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_basicWithSecretVersions(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccSecretManagerSecret_userManagedCmekUpdate(t *testing.T) { + t.Parallel() + + kmscentral := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us-central1", "tf-secret-manager-managed-central-key1") + kmseast := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us-east1", "tf-secret-manager-managed-east-key1") + kmscentralother := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "us-central1", "tf-secret-manager-managed-central-key2") + context := map[string]interface{}{ + "pid": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "kms_key_name_central": kmscentral.CryptoKey.Name, + "kms_key_name_east": kmseast.CryptoKey.Name, + "kms_key_name_central_other": kmscentralother.CryptoKey.Name, + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretMangerSecret_userManagedCmekBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretMangerSecret_userManagedCmekUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", 
"terraform_labels"}, + }, + { + Config: testAccSecretMangerSecret_userManagedCmekUpdate2(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretMangerSecret_userManagedCmekBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccSecretManagerSecret_automaticCmekUpdate(t *testing.T) { + t.Parallel() + + suffix := acctest.RandString(t, 10) + key1 := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "global", "tf-secret-manager-automatic-key1") + key2 := acctest.BootstrapKMSKeyWithPurposeInLocationAndName(t, "ENCRYPT_DECRYPT", "global", "tf-secret-manager-automatic-key2") + context := map[string]interface{}{ + "pid": envvar.GetTestProjectFromEnv(), + "random_suffix": suffix, + "kms_key_name_1": key1.CryptoKey.Name, + "kms_key_name_2": key2.CryptoKey.Name, + } + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretMangerSecret_automaticCmekBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretMangerSecret_automaticCmekUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: 
testAccSecretMangerSecret_automaticCmekUpdate2(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretMangerSecret_automaticCmekBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccSecretManagerSecret_rotationPeriodUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "timestamp": "2122-11-26T19:58:16Z", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecret_withoutRotationPeriod(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretManagerSecret_rotationPeriodBasic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretManagerSecret_rotationPeriodUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + { + Config: testAccSecretManagerSecret_withoutRotationPeriod(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl"}, + }, + }, + }) +} + +func 
TestAccSecretManagerSecret_ttlUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecret_withoutTtl(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_basic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_ttlUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_withoutTtl(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccSecretManagerSecret_versionDestroyTtlUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecret_withoutVersionDestroyTtl(context), + }, + { + ResourceName: 
"google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_versionDestroyTtlUpdate(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_withoutVersionDestroyTtl(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccSecretManagerSecret_updateBetweenTtlAndExpireTime(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecret_basic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_expireTime(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + { + Config: testAccSecretManagerSecret_basic(context), + }, + { + ResourceName: "google_secret_manager_secret.secret-basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ttl", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccSecretManagerSecret_basic(context 
map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } + + ttl = "3600s" + +} +`, context) +} + +func testAccSecretMangerSecret_cmek(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_project_iam_member" "kms-secret-binding" { + project = data.google_project.project.project_id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + user_managed { + replicas { + location = "us-central1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_central}" + } + } + replicas { + location = "us-east1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_east}" + } + } + + } + } + project = google_project_iam_member.kms-secret-binding.project +} +`, context) +} + +func testAccSecretManagerSecret_annotationsBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-with-annotations" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + annotations = { + key1 = "someval" + key2 = "someval2" + key3 = "someval3" + key4 = "someval4" + key5 = "someval5" + } + + replication { + auto {} + } +} +`, context) +} + +func testAccSecretManagerSecret_annotationsUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-with-annotations" { + secret_id = 
"tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + annotations = { + key1 = "someval" + key2update = "someval2" + key3 = "someval3update" + key4update = "someval4update" + } + + replication { + auto {} + } +} +`, context) +} + +func testAccSecretManagerSecret_basicWithSecretVersions(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} + +resource "google_secret_manager_secret_version" "secret-version-1" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-1" +} + +resource "google_secret_manager_secret_version" "secret-version-2" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-2" +} + +resource "google_secret_manager_secret_version" "secret-version-3" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-3" +} + +resource "google_secret_manager_secret_version" "secret-version-4" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-4" +} +`, context) +} + +func testAccSecretManagerSecret_versionAliasesBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + version_aliases = { + firstalias = "1", + secondalias = "2", + thirdalias = "3", + otheralias = "2", + somealias = "3" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} + +resource "google_secret_manager_secret_version" 
"secret-version-1" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-1" +} + +resource "google_secret_manager_secret_version" "secret-version-2" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-2" +} + +resource "google_secret_manager_secret_version" "secret-version-3" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-3" +} + +resource "google_secret_manager_secret_version" "secret-version-4" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-4" +} +`, context) +} + +func testAccSecretManagerSecret_versionAliasesUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + version_aliases = { + firstalias = "1", + secondaliasupdated = "2", + otheralias = "1", + somealias = "3", + fourthalias = "4" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} + +resource "google_secret_manager_secret_version" "secret-version-1" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-1" +} + +resource "google_secret_manager_secret_version" "secret-version-2" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-2" +} + +resource "google_secret_manager_secret_version" "secret-version-3" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-3" +} + +resource "google_secret_manager_secret_version" "secret-version-4" { + secret = google_secret_manager_secret.secret-basic.id + + secret_data = "some-secret-data-%{random_suffix}-4" +} 
+`, context) +} + +func testAccSecretMangerSecret_userManagedCmekBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-1" { + crypto_key_id = "%{kms_key_name_central}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-2" { + crypto_key_id = "%{kms_key_name_central_other}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-east-binding" { + crypto_key_id = "%{kms_key_name_east}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } + depends_on = [ + google_kms_crypto_key_iam_member.kms-central-binding-1, + google_kms_crypto_key_iam_member.kms-central-binding-2, + google_kms_crypto_key_iam_member.kms-east-binding, + ] +} +`, context) +} + +func testAccSecretMangerSecret_userManagedCmekUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-1" { + crypto_key_id = "%{kms_key_name_central}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = 
"serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-2" { + crypto_key_id = "%{kms_key_name_central_other}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-east-binding" { + crypto_key_id = "%{kms_key_name_east}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + user_managed { + replicas { + location = "us-central1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_central}" + } + } + replicas { + location = "us-east1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_east}" + } + } + } + } + depends_on = [ + google_kms_crypto_key_iam_member.kms-central-binding-1, + google_kms_crypto_key_iam_member.kms-central-binding-2, + google_kms_crypto_key_iam_member.kms-east-binding, + ] +} +`, context) +} + +func testAccSecretMangerSecret_userManagedCmekUpdate2(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-1" { + crypto_key_id = "%{kms_key_name_central}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-central-binding-2" { + crypto_key_id = "%{kms_key_name_central_other}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = 
"serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-east-binding" { + crypto_key_id = "%{kms_key_name_east}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + user_managed { + replicas { + location = "us-central1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_central_other}" + } + } + replicas { + location = "us-east1" + customer_managed_encryption { + kms_key_name = "%{kms_key_name_east}" + } + } + } + } + depends_on = [ + google_kms_crypto_key_iam_member.kms-central-binding-1, + google_kms_crypto_key_iam_member.kms-central-binding-2, + google_kms_crypto_key_iam_member.kms-east-binding, + ] +} +`, context) +} + +func testAccSecretMangerSecret_automaticCmekBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_kms_crypto_key_iam_member" "kms-secret-binding-1" { + crypto_key_id = "%{kms_key_name_1}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-secret-binding-2" { + crypto_key_id = "%{kms_key_name_2}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + auto {} + } + depends_on = [ + 
google_kms_crypto_key_iam_member.kms-secret-binding-1, + google_kms_crypto_key_iam_member.kms-secret-binding-2, + ] +} +`, context) +} + +func testAccSecretMangerSecret_automaticCmekUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_kms_crypto_key_iam_member" "kms-secret-binding-1" { + crypto_key_id = "%{kms_key_name_1}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-secret-binding-2" { + crypto_key_id = "%{kms_key_name_2}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + auto { + customer_managed_encryption { + kms_key_name = "%{kms_key_name_1}" + } + } + } + depends_on = [ + google_kms_crypto_key_iam_member.kms-secret-binding-1, + google_kms_crypto_key_iam_member.kms-secret-binding-2, + ] +} +`, context) +} + +func testAccSecretMangerSecret_automaticCmekUpdate2(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + project_id = "%{pid}" +} +resource "google_kms_crypto_key_iam_member" "kms-secret-binding-1" { + crypto_key_id = "%{kms_key_name_1}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_kms_crypto_key_iam_member" "kms-secret-binding-2" { + crypto_key_id = "%{kms_key_name_2}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = 
"serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" +} +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + replication { + auto { + customer_managed_encryption { + kms_key_name = "%{kms_key_name_2}" + } + } + } + depends_on = [ + google_kms_crypto_key_iam_member.kms-secret-binding-1, + google_kms_crypto_key_iam_member.kms-secret-binding-2, + ] +} +`, context) +} + +func testAccSecretManagerSecret_withoutRotationPeriod(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_pubsub_topic_iam_member" "secrets_manager_access" { + topic = google_pubsub_topic.topic.name + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" + role = "roles/pubsub.publisher" +} + +resource "google_pubsub_topic" "topic" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } + + depends_on = [ + google_pubsub_topic_iam_member.secrets_manager_access, + ] +} +`, context) +} + +func testAccSecretManagerSecret_rotationPeriodBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_pubsub_topic_iam_member" "secrets_manager_access" { + topic = google_pubsub_topic.topic.name + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" + role = "roles/pubsub.publisher" +} + +resource "google_pubsub_topic" "topic" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + topics { + name = 
google_pubsub_topic.topic.id + } + + rotation { + rotation_period = "3600s" + next_rotation_time = "%{timestamp}" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } + + depends_on = [ + google_pubsub_topic_iam_member.secrets_manager_access, + ] +} +`, context) +} + +func testAccSecretManagerSecret_rotationPeriodUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" {} + +resource "google_pubsub_topic_iam_member" "secrets_manager_access" { + topic = google_pubsub_topic.topic.name + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-secretmanager.iam.gserviceaccount.com" + role = "roles/pubsub.publisher" +} + +resource "google_pubsub_topic" "topic" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + topics { + name = google_pubsub_topic.topic.id + } + + rotation { + rotation_period = "3700s" + next_rotation_time = "%{timestamp}" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + } + } + + depends_on = [ + google_pubsub_topic_iam_member.secrets_manager_access, + ] +} +`, context) +} + +func testAccSecretManagerSecret_withoutTtl(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} +`, context) +} + +func testAccSecretManagerSecret_ttlUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + replication { + user_managed { + replicas { + location = "us-central1" + 
} + replicas { + location = "us-east1" + } + } + } + + ttl = "7200s" + +} +`, context) +} + +func testAccSecretManagerSecret_withoutVersionDestroyTtl(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } +} +`, context) +} + +func testAccSecretManagerSecret_versionDestroyTtlUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } + + version_destroy_ttl = "86400s" + +} +`, context) +} + +func testAccSecretManagerSecret_expireTime(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-%{random_suffix}" + + labels = { + label = "my-label" + } + + replication { + user_managed { + replicas { + location = "us-central1" + } + replicas { + location = "us-east1" + } + } + } + + expire_time = "2122-09-26T10:55:55.163240682Z" + +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/secretmanager/go/resource_secret_manager_secret_version_test.go b/mmv1/third_party/terraform/services/secretmanager/go/resource_secret_manager_secret_version_test.go new file mode 100644 index 000000000000..3a89b8473a2e --- /dev/null +++ b/mmv1/third_party/terraform/services/secretmanager/go/resource_secret_manager_secret_version_test.go @@ -0,0 +1,97 @@ +package secretmanager_test + +import ( + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccSecretManagerSecretVersion_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecretManagerSecretVersionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecretManagerSecretVersion_basic(context), + }, + { + ResourceName: "google_secret_manager_secret_version.secret-version-basic", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSecretManagerSecretVersion_disable(context), + }, + { + ResourceName: "google_secret_manager_secret_version.secret-version-basic", + ImportState: true, + ImportStateVerify: true, + // at this point the secret data is disabled and so reading the data on import will + // give an empty string + ImportStateVerifyIgnore: []string{"secret_data"}, + }, + { + Config: testAccSecretManagerSecretVersion_basic(context), + }, + { + ResourceName: "google_secret_manager_secret_version.secret-version-basic", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccSecretManagerSecretVersion_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + secret_id = "tf-test-secret-version-%{random_suffix}" + + labels = { + label = "my-label" + } + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "secret-version-basic" { + secret = google_secret_manager_secret.secret-basic.name + + secret_data = "my-tf-test-secret%{random_suffix}" + enabled = true +} +`, context) +} + +func testAccSecretManagerSecretVersion_disable(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_secret_manager_secret" "secret-basic" { + 
secret_id = "tf-test-secret-version-%{random_suffix}" + + labels = { + label = "my-label" + } + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "secret-version-basic" { + secret = google_secret_manager_secret.secret-basic.name + + secret_data = "my-tf-test-secret%{random_suffix}" + enabled = false +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/securityscanner/go/resource_security_scanner_scan_config_test.go.tmpl b/mmv1/third_party/terraform/services/securityscanner/go/resource_security_scanner_scan_config_test.go.tmpl new file mode 100644 index 000000000000..2dcca59bf305 --- /dev/null +++ b/mmv1/third_party/terraform/services/securityscanner/go/resource_security_scanner_scan_config_test.go.tmpl @@ -0,0 +1,76 @@ +package securityscanner_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccSecurityScannerScanConfig_scanConfigUpdate(t *testing.T) { + t.Parallel() + + firstAddressSuffix := acctest.RandString(t, 10) + secondAddressSuffix := acctest.RandString(t, 10) + context := map[string]interface{}{ + "random_suffix": firstAddressSuffix, + "random_suffix2": secondAddressSuffix, + "static_address_name": "scanner_static_ip", + "user_agent": "CHROME_LINUX", + "export": "ENABLED", + "max_qps": 10, + } + updateContext := map[string]interface{}{ + "random_suffix": firstAddressSuffix, + "random_suffix2": secondAddressSuffix, + "static_address_name": "scanner_static_ip_update", + "user_agent": "CHROME_ANDROID", + "export": "DISABLED", + "max_qps": 20, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSecurityScannerScanConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccSecurityScannerScanConfig(context), + }, + { + ResourceName: "google_security_scanner_scan_config.terraform-scan-config", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccSecurityScannerScanConfig(updateContext), + }, + }, + }) +} + +func testAccSecurityScannerScanConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_address" "scanner_static_ip" { + name = "tf-test-scan-static-ip-%{random_suffix}" +} + +resource "google_compute_address" "scanner_static_ip_update" { + name = "tf-test-scan-static-ip-%{random_suffix2}" +} + +resource "google_security_scanner_scan_config" "terraform-scan-config" { + display_name = "terraform-scan-config-%{random_suffix}" + max_qps = %{max_qps} + starting_urls = ["http://${google_compute_address.%{static_address_name}.address}"] + target_platforms = ["COMPUTE"] + user_agent = "%{user_agent}" + export_to_security_command_center = "%{export}" +} +`, context) +} +{{- else }} +// Magic Modules doesn't let us remove files - blank out beta-only common-compile files for now. 
+{{- end }} + diff --git a/mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_endpoint_test.go.tmpl b/mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_endpoint_test.go.tmpl new file mode 100644 index 000000000000..337184b0e14d --- /dev/null +++ b/mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_endpoint_test.go.tmpl @@ -0,0 +1,105 @@ +package servicedirectory_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccServiceDirectoryEndpoint_serviceDirectoryEndpointUpdateExample(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + location := "us-central1" + testId := fmt.Sprintf("tf-test-example-endpoint%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckServiceDirectoryEndpointDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceDirectoryEndpoint_basic(location, testId), + }, + { + ResourceName: "google_service_directory_endpoint.example", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_service_directory_endpoint.example", + // {{"{{"}}project{{"}}"}}/{{"{{"}}location{{"}}"}}/{{"{{"}}namespace_id{{"}}"}}/{{"{{"}}service_id{{"}}"}}/{{"{{"}}endpoint_id{{"}}"}} + ImportStateId: fmt.Sprintf("%s/%s/%s/%s/%s", project, location, testId, testId, testId), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_service_directory_endpoint.example", + // {{"{{"}}location{{"}}"}}/{{"{{"}}namespace_id{{"}}"}}/{{"{{"}}service_id{{"}}"}}/{{"{{"}}endpoint_id{{"}}"}} + 
ImportStateId: fmt.Sprintf("%s/%s/%s/%s", location, testId, testId, testId), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccServiceDirectoryEndpoint_update(location, testId), + }, + { + ResourceName: "google_service_directory_endpoint.example", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccServiceDirectoryEndpoint_basic(location, testId string) string { + return fmt.Sprintf(` +resource "google_service_directory_namespace" "example" { + namespace_id = "%s" + location = "%s" +} + +resource "google_service_directory_service" "example" { + service_id = "%s" + namespace = google_service_directory_namespace.example.id +} + +resource "google_service_directory_endpoint" "example" { + endpoint_id = "%s" + service = google_service_directory_service.example.id +} +`, testId, location, testId, testId) +} + +func testAccServiceDirectoryEndpoint_update(location, testId string) string { + return fmt.Sprintf(` +resource "google_service_directory_namespace" "example" { + namespace_id = "%s" + location = "%s" +} + +resource "google_service_directory_service" "example" { + service_id = "%s" + namespace = google_service_directory_namespace.example.id +} + +resource "google_service_directory_endpoint" "example" { + endpoint_id = "%s" + service = google_service_directory_service.example.id + + metadata = { + stage = "prod" + region = "us-central1" + } + + address = "1.2.3.4" + port = 5353 +} +`, testId, location, testId, testId) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_namespace_test.go.tmpl b/mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_namespace_test.go.tmpl new file mode 100644 index 000000000000..4880ece2ac8f --- /dev/null +++ b/mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_namespace_test.go.tmpl @@ -0,0 +1,83 @@ +package servicedirectory_test +{{- if ne $.TargetVersionName "ga" 
}} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccServiceDirectoryNamespace_serviceDirectoryNamespaceUpdateExample(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + location := "us-central1" + testId := fmt.Sprintf("tf-test-example-namespace%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckServiceDirectoryNamespaceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceDirectoryNamespace_basic(location, testId), + }, + { + ResourceName: "google_service_directory_namespace.example", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_service_directory_namespace.example", + // {{"{{"}}project{{"}}"}}/{{"{{"}}location{{"}}"}}/{{"{{"}}namespace_id{{"}}"}} + ImportStateId: fmt.Sprintf("%s/%s/%s", project, location, testId), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_service_directory_namespace.example", + // {{"{{"}}location{{"}}"}}/{{"{{"}}namespace_id{{"}}"}} + ImportStateId: fmt.Sprintf("%s/%s", location, testId), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccServiceDirectoryNamespace_update(location, testId), + }, + { + ResourceName: "google_service_directory_namespace.example", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccServiceDirectoryNamespace_basic(location, testId string) string { + return fmt.Sprintf(` +resource "google_service_directory_namespace" "example" { + namespace_id = "%s" + location = "%s" +} +`, testId, location) +} + +func 
testAccServiceDirectoryNamespace_update(location, testId string) string { + return fmt.Sprintf(` +resource "google_service_directory_namespace" "example" { + namespace_id = "%s" + location = "%s" + + labels = { + key = "value" + foo = "bar" + } +} +`, testId, location) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_service_test.go.tmpl b/mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_service_test.go.tmpl new file mode 100644 index 000000000000..50889cc56357 --- /dev/null +++ b/mmv1/third_party/terraform/services/servicedirectory/go/resource_service_directory_service_test.go.tmpl @@ -0,0 +1,92 @@ +package servicedirectory_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccServiceDirectoryService_serviceDirectoryServiceUpdateExample(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + location := "us-central1" + testId := fmt.Sprintf("tf-test-example-service%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckServiceDirectoryServiceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceDirectoryService_basic(location, testId), + }, + { + ResourceName: "google_service_directory_service.example", + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_service_directory_service.example", + // {{"{{"}}project{{"}}"}}/{{"{{"}}location{{"}}"}}/{{"{{"}}namespace_id{{"}}"}}/{{"{{"}}service_id{{"}}"}} + ImportStateId: fmt.Sprintf("%s/%s/%s/%s", project, location, testId, testId), + ImportState: true, + 
ImportStateVerify: true, + }, + { + ResourceName: "google_service_directory_service.example", + // {{"{{"}}location{{"}}"}}/{{"{{"}}namespace_id{{"}}"}}/{{"{{"}}service_id{{"}}"}} + ImportStateId: fmt.Sprintf("%s/%s/%s", location, testId, testId), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccServiceDirectoryService_update(location, testId), + }, + { + ResourceName: "google_service_directory_service.example", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccServiceDirectoryService_basic(location, testId string) string { + return fmt.Sprintf(` +resource "google_service_directory_namespace" "example" { + namespace_id = "%s" + location = "%s" +} + +resource "google_service_directory_service" "example" { + service_id = "%s" + namespace = google_service_directory_namespace.example.id +} +`, testId, location, testId) +} + +func testAccServiceDirectoryService_update(location, testId string) string { + return fmt.Sprintf(` +resource "google_service_directory_namespace" "example" { + namespace_id = "%s" + location = "%s" +} + +resource "google_service_directory_service" "example" { + service_id = "%s" + namespace = google_service_directory_namespace.example.id + + metadata = { + stage = "prod" + region = "us-central1" + } +} +`, testId, location, testId) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/serviceusage/go/resource_service_usage_consumer_quota_override_test.go.tmpl b/mmv1/third_party/terraform/services/serviceusage/go/resource_service_usage_consumer_quota_override_test.go.tmpl new file mode 100644 index 000000000000..3edd4118ea91 --- /dev/null +++ b/mmv1/third_party/terraform/services/serviceusage/go/resource_service_usage_consumer_quota_override_test.go.tmpl @@ -0,0 +1,55 @@ +package serviceusage_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "regexp" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + 
"github.com/hashicorp/terraform-provider-google/google/envvar" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccServiceUsageConsumerQuotaOverride_consumerQuotaOverrideCustomIncorrectLimitFormat(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckServiceUsageConsumerQuotaOverrideDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccServiceUsageConsumerQuotaOverride_consumerQuotaOverrideCustomIncorrectLimitFormat(context), + ExpectError: regexp.MustCompile("No quota limit with limitId"), + }, + }, + }) +} + +func testAccServiceUsageConsumerQuotaOverride_consumerQuotaOverrideCustomIncorrectLimitFormat(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "my_project" { + provider = google-beta + name = "tf-test-project" + project_id = "quota%{random_suffix}" + org_id = "%{org_id}" +} + +resource "google_service_usage_consumer_quota_override" "override" { + provider = google-beta + project = google_project.my_project.project_id + service = urlencode("bigquery.googleapis.com") + metric = urlencode("bigquery.googleapis.com/quota/query/usage") + limit = urlencode("1/d/{project}/{user}") # Incorrect format for the API the provider uses, correct format for the gcloud CLI + override_value = "1" + force = true +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/spanner/go/resource_spanner_database_test.go.tmpl b/mmv1/third_party/terraform/services/spanner/go/resource_spanner_database_test.go.tmpl new file mode 100644 index 000000000000..7fc2cecbf66e --- /dev/null +++ b/mmv1/third_party/terraform/services/spanner/go/resource_spanner_database_test.go.tmpl @@ -0,0 
+1,608 @@ +package spanner_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccSpannerDatabase_basic(t *testing.T) { + t.Parallel() + + project := envvar.GetTestProjectFromEnv() + rnd := acctest.RandString(t, 10) + instanceName := fmt.Sprintf("tf-test-%s", rnd) + databaseName := fmt.Sprintf("tfgen_%s", rnd) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSpannerDatabaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSpannerDatabase_virtualUpdate(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "1h"), // default set by API + ), + }, + { + Config: testAccSpannerDatabase_basic(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "1h"), // default set by API + ), + }, + { + // Test import with default Terraform ID + ResourceName: "google_spanner_database.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "deletion_protection"}, + }, + { + Config: testAccSpannerDatabase_basicUpdate(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "2d"), + ), + }, + { + // Test import with default Terraform ID 
+ ResourceName: "google_spanner_database.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "deletion_protection"}, + }, + { + ResourceName: "google_spanner_database.basic", + ImportStateId: fmt.Sprintf("projects/%s/instances/%s/databases/%s", project, instanceName, databaseName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "deletion_protection"}, + }, + { + ResourceName: "google_spanner_database.basic", + ImportStateId: fmt.Sprintf("instances/%s/databases/%s", instanceName, databaseName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "deletion_protection"}, + }, + { + ResourceName: "google_spanner_database.basic", + ImportStateId: fmt.Sprintf("%s/%s", instanceName, databaseName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "deletion_protection"}, + }, + }, + }) +} + +func testAccSpannerDatabase_basic(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName) +} + +func testAccSpannerDatabase_basicUpdate(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + version_retention_period = "2d" # increase from default 1h + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT 
NULL,) PRIMARY KEY(t1)", + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", + "CREATE TABLE t3 (t3 INT64 NOT NULL,) PRIMARY KEY(t3)", + "CREATE TABLE t4 (t4 INT64 NOT NULL,) PRIMARY KEY(t4)", + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName) +} + +func testAccSpannerDatabase_virtualUpdate(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", + ] + deletion_protection = true +} +`, instanceName, instanceName, databaseName) +} + +func TestAccSpannerDatabase_postgres(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + instanceName := fmt.Sprintf("tf-test-%s", rnd) + databaseName := fmt.Sprintf("tfgen_%s", rnd) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSpannerDatabaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSpannerDatabase_postgres(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic_spangres", "state"), + ), + }, + { + // Test import with default Terraform ID + ResourceName: "google_spanner_database.basic_spangres", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "deletion_protection"}, + }, + { + Config: testAccSpannerDatabase_postgresUpdate(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic_spangres", "state"), + ), + }, + { + // Test import with 
default Terraform ID + ResourceName: "google_spanner_database.basic_spangres", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "deletion_protection"}, + }, + }, + }) +} + +func testAccSpannerDatabase_postgres(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic_spangres" { + instance = google_spanner_instance.basic.name + name = "%s-spangres" + database_dialect = "POSTGRESQL" + // Confirm that DDL can be run at creation time for POSTGRESQL + version_retention_period = "2h" + ddl = [ + "CREATE TABLE t1 (t1 bigint NOT NULL PRIMARY KEY)", + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName) +} + +func testAccSpannerDatabase_postgresUpdate(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic_spangres" { + instance = google_spanner_instance.basic.name + name = "%s-spangres" + database_dialect = "POSTGRESQL" + version_retention_period = "4d" + ddl = [ + "CREATE TABLE t2 (t2 bigint NOT NULL PRIMARY KEY)", + "CREATE TABLE t3 (t3 bigint NOT NULL PRIMARY KEY)", + "CREATE TABLE t4 (t4 bigint NOT NULL PRIMARY KEY)", + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName) +} + +func TestAccSpannerDatabase_versionRetentionPeriod(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + instanceName := fmt.Sprintf("tf-test-%s", rnd) + databaseName := fmt.Sprintf("tfgen_%s", rnd) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckSpannerDatabaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + // Test creating a database with `version_retention_period` set + Config: testAccSpannerDatabase_versionRetentionPeriod(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "2h"), + ), + }, + { + // Test removing `version_retention_period` and setting retention period to a new value with a DDL statement in `ddl` + Config: testAccSpannerDatabase_versionRetentionPeriodUpdate1(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "4h"), + ), + }, + { + // Test that adding `version_retention_period` controls retention time, regardless of any previous statements in `ddl` + Config: testAccSpannerDatabase_versionRetentionPeriodUpdate2(instanceName, databaseName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "2h"), + ), + }, + { + // Test that changing the retention value via DDL when `version_retention_period` is set: + // - changes the value (from 2h to 8h) + // - is unstable; non-empty plan afterwards due to conflict + Config: testAccSpannerDatabase_versionRetentionPeriodUpdate3(instanceName, databaseName), + ExpectNonEmptyPlan: true, // is unstable + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "8h"), + ), + }, + { + // Test that when the above config is reapplied: + // - changes the value 
(reverts to set value of `version_retention_period`, 2h) + // - is stable; no further conflict + Config: testAccSpannerDatabase_versionRetentionPeriodUpdate3(instanceName, databaseName), //same as previous step + ExpectNonEmptyPlan: false, // is stable + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("google_spanner_database.basic", "state"), + resource.TestCheckResourceAttr("google_spanner_database.basic", "version_retention_period", "2h"), + ), + }, + }, + }) +} + +func testAccSpannerDatabase_versionRetentionPeriod(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + version_retention_period = "2h" + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName) +} + +func testAccSpannerDatabase_versionRetentionPeriodUpdate1(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + // Change 1/2 : deleted version_retention_period argument + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "ALTER DATABASE %s SET OPTIONS (version_retention_period=\"4h\")", // Change 2/2 : set retention with new DDL + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName, databaseName) +} + +func testAccSpannerDatabase_versionRetentionPeriodUpdate2(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = 
"regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + version_retention_period = "2h" // Change : added version_retention_period argument + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "ALTER DATABASE %s SET OPTIONS (version_retention_period=\"4h\")", + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName, databaseName) +} + +func testAccSpannerDatabase_versionRetentionPeriodUpdate3(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + version_retention_period = "2h" + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "ALTER DATABASE %s SET OPTIONS (version_retention_period=\"4h\")", + "ALTER DATABASE %s SET OPTIONS (version_retention_period=\"8h\")", // Change : set retention with new DDL + ] + deletion_protection = false +} +`, instanceName, instanceName, databaseName, databaseName, databaseName) +} + +func TestAccSpannerDatabase_enableDropProtection(t *testing.T) { + t.Parallel() + + rnd := acctest.RandString(t, 10) + instanceName := fmt.Sprintf("tf-test-%s", rnd) + databaseName := fmt.Sprintf("tfgen_%s", rnd) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSpannerDatabaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSpannerDatabase_enableDropProtection(instanceName, databaseName), + }, + { + ResourceName: "google_spanner_database.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", 
"deletion_protection"}, + }, + { + Config: testAccSpannerDatabase_enableDropProtectionUpdate(instanceName, databaseName), + }, + { + ResourceName: "google_spanner_database.basic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "deletion_protection"}, + }, + }, + }) +} + +func testAccSpannerDatabase_enableDropProtection(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + enable_drop_protection = true + deletion_protection = false + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + ] +} +`, instanceName, instanceName, databaseName) +} + +func testAccSpannerDatabase_enableDropProtectionUpdate(instanceName, databaseName string) string { + return fmt.Sprintf(` +resource "google_spanner_instance" "basic" { + name = "%s" + config = "regional-us-central1" + display_name = "%s-display" + num_nodes = 1 +} + +resource "google_spanner_database" "basic" { + instance = google_spanner_instance.basic.name + name = "%s" + enable_drop_protection = false + deletion_protection = false + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + ] +} +`, instanceName, instanceName, databaseName) +} + +func TestAccSpannerDatabase_deletionProtection(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckSpannerDatabaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSpannerDatabase_deletionProtection(context), + }, + { + ResourceName: 
"google_spanner_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "instance", "deletion_protection"}, + }, + { + Config: testAccSpannerDatabase_deletionProtection(context), + Destroy: true, + ExpectError: regexp.MustCompile("deletion_protection"), + }, + { + Config: testAccSpannerDatabase_spannerDatabaseBasicExample(context), + }, + }, + }) +} + +func testAccSpannerDatabase_deletionProtection(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_spanner_instance" "main" { + config = "regional-europe-west1" + display_name = "main-instance" + num_nodes = 1 +} + +resource "google_spanner_database" "database" { + instance = google_spanner_instance.main.name + name = "tf-test-my-database%{random_suffix}" + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", + ] +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} +{{/* Field is not beta, but google_project_service_identity dependency is */ -}} +func TestAccSpannerDatabase_cmek(t *testing.T) { + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckSpannerDatabaseDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSpannerDatabase_cmek(context), + }, + { + ResourceName: "google_spanner_database.database", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"ddl", "deletion_protection"}, + }, + }, + }) +} + +func testAccSpannerDatabase_cmek(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_spanner_instance" "main" { + provider = google-beta + config = "regional-europe-west1" + display_name = "main-instance1" + 
num_nodes = 1 +} + +resource "google_spanner_database" "database" { + provider = google-beta + instance = google_spanner_instance.main.name + name = "tf-test-cmek-db%{random_suffix}" + ddl = [ + "CREATE TABLE t1 (t1 INT64 NOT NULL,) PRIMARY KEY(t1)", + "CREATE TABLE t2 (t2 INT64 NOT NULL,) PRIMARY KEY(t2)", + ] + + encryption_config { + kms_key_name = google_kms_crypto_key.example-key.id + } + + deletion_protection = false + + depends_on = [google_kms_crypto_key_iam_member.crypto-key-binding] +} + +resource "google_kms_key_ring" "keyring" { + provider = google-beta + name = "tf-test-ring%{random_suffix}" + location = "europe-west1" +} + +resource "google_kms_crypto_key" "example-key" { + provider = google-beta + name = "tf-test-key%{random_suffix}" + key_ring = google_kms_key_ring.keyring.id + rotation_period = "100000s" +} + +resource "google_kms_crypto_key_iam_member" "crypto-key-binding" { + provider = google-beta + crypto_key_id = google_kms_crypto_key.example-key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + + member = "serviceAccount:${google_project_service_identity.ck_sa.email}" +} + +data "google_project" "project" { + provider = google-beta +} + +resource "google_project_service_identity" "ck_sa" { + provider = google-beta + project = data.google_project.project.project_id + service = "spanner.googleapis.com" +} + +`, context) +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go new file mode 100644 index 000000000000..905f4da3d07e --- /dev/null +++ b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket.go @@ -0,0 +1,1903 @@ +package storage + +import ( + "bytes" + "context" + "errors" + "fmt" + "log" + "math" + "regexp" + "runtime" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg 
"github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/gammazero/workerpool" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + + "google.golang.org/api/googleapi" + "google.golang.org/api/storage/v1" +) + +func ResourceStorageBucket() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageBucketCreate, + Read: resourceStorageBucketRead, + Update: resourceStorageBucketUpdate, + Delete: resourceStorageBucketDelete, + Importer: &schema.ResourceImporter{ + State: resourceStorageBucketStateImporter, + }, + CustomizeDiff: customdiff.All( + customdiff.ForceNewIfChange("retention_policy.0.is_locked", isPolicyLocked), + tpgresource.SetLabelsDiff, + ), + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Update: schema.DefaultTimeout(4 * time.Minute), + Read: schema.DefaultTimeout(4 * time.Minute), + }, + + SchemaVersion: 1, + StateUpgraders: []schema.StateUpgrader{ + { + Type: resourceStorageBucketV0().CoreConfigSchema().ImpliedType(), + Upgrade: ResourceStorageBucketStateUpgradeV0, + Version: 0, + }, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The name of the bucket.`, + ValidateFunc: verify.ValidateGCSName, + }, + + "encryption": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "default_kms_key_name": { + Type: schema.TypeString, + Required: true, + Description: `A Cloud KMS key that will be used to encrypt objects inserted into this bucket, if no encryption method is specified. 
You must pay attention to whether the crypto key is available in the location that this bucket is created in. See the docs for more details.`, + }, + }, + }, + Description: `The bucket's encryption configuration.`, + }, + + "requester_pays": { + Type: schema.TypeBool, + Optional: true, + Description: `Enables Requester Pays on a storage bucket.`, + }, + + "force_destroy": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `When deleting a bucket, this boolean option will delete all contained objects. If you try to delete a bucket that contains objects, Terraform will fail that run.`, + }, + + "labels": { + Type: schema.TypeMap, + ValidateFunc: labelKeyValidator, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `A set of key/value label pairs to assign to the bucket.`, + }, + + "terraform_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `The combination of labels configured directly on the resource and default labels configured on the provider.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "effective_labels": { + Type: schema.TypeMap, + Computed: true, + Description: `All of labels (key/value pairs) present on the resource in GCP, including the labels configured through Terraform, other clients and services.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: func(s interface{}) string { + return strings.ToUpper(s.(string)) + }, + Description: `The Google Cloud Storage location`, + }, + + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The ID of the project in which the resource belongs. 
If it is not provided, the provider project is used.`, + }, + + "project_number": { + Type: schema.TypeInt, + Computed: true, + Description: `The project number of the project in which the resource belongs.`, + }, + + "self_link": { + Type: schema.TypeString, + Computed: true, + Description: `The URI of the created resource.`, + }, + + "url": { + Type: schema.TypeString, + Computed: true, + Description: `The base URL of the bucket, in the format gs://.`, + }, + + "storage_class": { + Type: schema.TypeString, + Optional: true, + Default: "STANDARD", + Description: `The Storage Class of the new bucket. Supported values include: STANDARD, MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE.`, + }, + + "lifecycle_rule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 100, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "action": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 1, + Set: resourceGCSBucketLifecycleRuleActionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": { + Type: schema.TypeString, + Required: true, + Description: `The type of the action of this Lifecycle Rule. Supported values include: Delete, SetStorageClass and AbortIncompleteMultipartUpload.`, + }, + "storage_class": { + Type: schema.TypeString, + Optional: true, + Description: `The target Storage Class of objects affected by this Lifecycle Rule. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE.`, + }, + }, + }, + Description: `The Lifecycle Rule's action configuration. 
A single block of this type is supported.`, + }, + "condition": { + Type: schema.TypeSet, + Required: true, + MinItems: 1, + MaxItems: 1, + Set: resourceGCSBucketLifecycleRuleConditionHash, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "age": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum age of an object in days to satisfy this condition.`, + }, + "created_before": { + Type: schema.TypeString, + Optional: true, + Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, + }, + "custom_time_before": { + Type: schema.TypeString, + Optional: true, + Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, + }, + "days_since_custom_time": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of days elapsed since the user-specified timestamp set on an object.`, + }, + "days_since_noncurrent_time": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of days elapsed since the noncurrent timestamp of an object. This + condition is relevant only for versioned objects.`, + }, + "noncurrent_time_before": { + Type: schema.TypeString, + Optional: true, + Description: `Creation date of an object in RFC 3339 (e.g. 2017-06-13) to satisfy this condition.`, + }, + "no_age": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, age value will be omitted.Required to set true when age is unset in the config file.`, + }, + "with_state": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"LIVE", "ARCHIVED", "ANY", ""}, false), + Description: `Match to live and/or archived objects. Unversioned buckets have only live objects. 
Supported values include: "LIVE", "ARCHIVED", "ANY".`, + }, + "matches_storage_class": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `Storage Class of objects to satisfy this condition. Supported values include: MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD, DURABLE_REDUCED_AVAILABILITY.`, + }, + "num_newer_versions": { + Type: schema.TypeInt, + Optional: true, + Description: `Relevant only for versioned objects. The number of newer versions of an object to satisfy this condition.`, + }, + "matches_prefix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name prefixes to satisfy this condition.`, + }, + "matches_suffix": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Description: `One or more matching name suffixes to satisfy this condition.`, + }, + "send_days_since_noncurrent_time_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, days_since_noncurrent_time value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the days_since_noncurrent_time field. It can be used alone or together with days_since_noncurrent_time.`, + }, + "send_days_since_custom_time_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, days_since_custom_time value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the days_since_custom_time field. It can be used alone or together with days_since_custom_time.`, + }, + "send_num_newer_versions_if_zero": { + Type: schema.TypeBool, + Optional: true, + Description: `While set true, num_newer_versions value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the num_newer_versions field. 
It can be used alone or together with num_newer_versions.`, + }, + }, + }, + Description: `The Lifecycle Rule's condition configuration.`, + }, + }, + }, + Description: `The bucket's Lifecycle Rules configuration.`, + }, + + "enable_object_retention": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: `Enables each object in the bucket to have its own retention policy, which prevents deletion until stored for a specific length of time.`, + }, + + "versioning": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `While set to true, versioning is fully enabled for this bucket.`, + }, + }, + }, + Description: `The bucket's Versioning configuration.`, + }, + + "autoclass": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: `While set to true, autoclass automatically transitions objects in your bucket to appropriate storage classes based on each object's access pattern.`, + }, + "terminal_storage_class": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The storage class that objects in the bucket eventually transition to if they are not read for a certain length of time. 
Supported values include: NEARLINE, ARCHIVE.`, + }, + }, + }, + Description: `The bucket's autoclass configuration.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + _, n := d.GetChange(strings.TrimSuffix(k, ".#")) + if !strings.HasSuffix(k, ".#") { + return false + } + var l []interface{} + if new == "1" && old == "0" { + l = n.([]interface{}) + contents, ok := l[0].(map[string]interface{}) + if !ok { + return false + } + if contents["enabled"] == false { + return true + } + } + if new == "0" && old == "1" { + n := d.Get(strings.TrimSuffix(k, ".#")) + l = n.([]interface{}) + contents := l[0].(map[string]interface{}) + if contents["enabled"] == false { + return true + } + } + return false + }, + }, + "website": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "main_page_suffix": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.not_found_page", "website.0.main_page_suffix"}, + Description: `Behaves as the bucket's directory index where missing objects are treated as potential directories.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return old != "" && new == "" + }, + }, + "not_found_page": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: []string{"website.0.main_page_suffix", "website.0.not_found_page"}, + Description: `The custom object to return when a requested resource is not found.`, + DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { + return old != "" && new == "" + }, + }, + }, + }, + Description: `Configuration if the bucket acts as a website.`, + }, + + "retention_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "is_locked": { + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: `If set to true, the bucket will be locked 
and permanently restrict edits to the bucket's retention policy. Caution: Locking a bucket is an irreversible action.`, + }, + "retention_period": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, math.MaxInt32), + Description: `The period of time, in seconds, that objects in the bucket must be retained and cannot be deleted, overwritten, or archived. The value must be less than 3,155,760,000 seconds.`, + }, + }, + }, + Description: `Configuration of the bucket's data retention policy for how long objects in the bucket should be retained.`, + }, + + "cors": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "origin": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The list of Origins eligible to receive CORS response headers. Note: "*" is permitted in the list of origins, and means "any Origin".`, + }, + "method": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method".`, + }, + "response_header": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Description: `The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.`, + }, + "max_age_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.`, + }, + }, + }, + Description: `The bucket's Cross-Origin Resource Sharing (CORS) configuration.`, + }, + + "default_event_based_hold": { + Type: schema.TypeBool, + Optional: true, + Description: `Whether or not to automatically apply an eventBasedHold to new objects 
added to the bucket.`, + }, + + "logging": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "log_bucket": { + Type: schema.TypeString, + Required: true, + Description: `The bucket that will receive log objects.`, + }, + "log_object_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The object prefix for log objects. If it's not provided, by default Google Cloud Storage sets this to this bucket's name.`, + }, + }, + }, + Description: `The bucket's Access & Storage Logs configuration.`, + }, + "uniform_bucket_level_access": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + Description: `Enables uniform bucket-level access on a bucket.`, + }, + "custom_placement_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "data_locations": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + MaxItems: 2, + MinItems: 2, + Elem: &schema.Schema{ + Type: schema.TypeString, + StateFunc: func(s interface{}) string { + return strings.ToUpper(s.(string)) + }, + }, + Description: `The list of individual regions that comprise a dual-region bucket. See the docs for a list of acceptable regions. Note: If any of the data_locations changes, it will recreate the bucket.`, + }, + }, + }, + Description: `The bucket's custom location configuration, which specifies the individual regions that comprise a dual-region bucket. If the bucket is designated a single or multi-region, the parameters are empty.`, + }, + "rpo": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Specifies the RPO setting of bucket. If set 'ASYNC_TURBO', The Turbo Replication will be enabled for the dual-region bucket. Value 'DEFAULT' will set RPO setting to default. 
Turbo Replication is only for buckets in dual-regions.See the docs for more details.`, + }, + "public_access_prevention": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `Prevents public access to a bucket.`, + }, + "soft_delete_policy": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Computed: true, + Description: `The bucket's soft delete policy, which defines the period of time that soft-deleted objects will be retained, and cannot be permanently deleted. If it is not provided, by default Google Cloud Storage sets this to default soft delete policy`, + Elem : &schema.Resource{ + Schema: map[string]*schema.Schema{ + "retention_duration_seconds": { + Type: schema.TypeInt, + Default: 604800, + Optional: true, + Description: `The duration in seconds that soft-deleted objects in the bucket will be retained and cannot be permanently deleted. Default value is 604800.`, + }, + "effective_time": { + Type: schema.TypeString, + Computed: true, + Description: `Server-determined value that indicates the time from which the policy, or one with a greater retention, was effective. This value is in RFC 3339 format.`, + }, + }, + }, + }, + }, + UseJSONNumber: true, + } +} + +const resourceDataplexGoogleLabelPrefix = "goog-dataplex" +const resourceDataplexGoogleProvidedLabelPrefix = "labels." + resourceDataplexGoogleLabelPrefix + +var labelKeyRegex = regexp.MustCompile(`^[a-z0-9_-]{1,63}$`) + +func labelKeyValidator(val interface{}, key string) (warns []string, errs []error) { + if val == nil { + return + } + + m := val.(map[string]interface{}) + for k := range m { + if !labelKeyRegex.MatchString(k) { + errs = append(errs, fmt.Errorf("%q is an invalid label key. 
See https://cloud.google.com/storage/docs/tags-and-labels#bucket-labels", k)) + } + } + return +} + +func resourceDataplexLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if strings.HasPrefix(k, resourceDataplexGoogleProvidedLabelPrefix) && new == "" { + return true + } + + // Let diff be determined by labels (above) + if strings.HasPrefix(k, "labels.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} + +// Is the old bucket retention policy locked? +func isPolicyLocked(_ context.Context, old, new, _ interface{}) bool { + if old == nil || new == nil { + return false + } + + // if the old policy is locked, but the new policy is not + if old.(bool) && !new.(bool) { + return true + } + + return false +} + +func resourceStorageBucketCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + // Get the bucket and location + bucket := d.Get("name").(string) + location := d.Get("location").(string) + + // Create a bucket, setting the labels, location and name. 
+ sb := &storage.Bucket{ + Name: bucket, + Labels: tpgresource.ExpandEffectiveLabels(d), + Location: location, + IamConfiguration: expandIamConfiguration(d), + } + + if v, ok := d.GetOk("storage_class"); ok { + sb.StorageClass = v.(string) + } + + lifecycle, err := expandStorageBucketLifecycle(d.Get("lifecycle_rule")) + if err != nil { + return err + } + sb.Lifecycle = lifecycle + + if v, ok := d.GetOk("versioning"); ok { + sb.Versioning = expandBucketVersioning(v) + } + + if v, ok := d.GetOk("autoclass"); ok { + sb.Autoclass = expandBucketAutoclass(v) + } + + if v, ok := d.GetOk("website"); ok { + sb.Website = expandBucketWebsite(v.([]interface{})) + } + + if v, ok := d.GetOk("retention_policy"); ok { + // Not using expandBucketRetentionPolicy() here because `is_locked` cannot be set on creation. + retention_policies := v.([]interface{}) + + if len(retention_policies) > 0 { + sb.RetentionPolicy = &storage.BucketRetentionPolicy{} + + retentionPolicy := retention_policies[0].(map[string]interface{}) + + if v, ok := retentionPolicy["retention_period"]; ok { + sb.RetentionPolicy.RetentionPeriod = int64(v.(int)) + } + } + } + + if v, ok := d.GetOk("default_event_based_hold"); ok { + sb.DefaultEventBasedHold = v.(bool) + } + + if v, ok := d.GetOk("cors"); ok { + sb.Cors = expandCors(v.([]interface{})) + } + + if v, ok := d.GetOk("logging"); ok { + sb.Logging = expandBucketLogging(v.([]interface{})) + } + + if v, ok := d.GetOk("encryption"); ok { + sb.Encryption = expandBucketEncryption(v.([]interface{})) + } + + if v, ok := d.GetOk("requester_pays"); ok { + sb.Billing = &storage.BucketBilling{ + RequesterPays: v.(bool), + } + } + + if v, ok := d.GetOk("custom_placement_config"); ok { + sb.CustomPlacementConfig = expandBucketCustomPlacementConfig(v.([]interface{})) + } + + if v, ok := d.GetOk("rpo"); ok{ + sb.Rpo = v.(string) + } + + if v, ok := d.GetOk("soft_delete_policy"); ok { + sb.SoftDeletePolicy = expandBucketSoftDeletePolicy(v.([]interface{})) + } + + var res 
*storage.Bucket + + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + insertCall := config.NewStorageClient(userAgent).Buckets.Insert(project, sb) + if d.Get("enable_object_retention").(bool) { + insertCall.EnableObjectRetention(true) + } + res, err = insertCall.Do() + return err + }, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + + if err != nil { + fmt.Printf("Error creating bucket %s: %v", bucket, err) + return err + } + + log.Printf("[DEBUG] Created bucket %v at location %v\n\n", res.Name, res.SelfLink) + d.SetId(res.Id) + + // There seems to be some eventual consistency errors in some cases, so we want to check a few times + // to make sure it exists before moving on + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + _, retryErr := config.NewStorageClient(userAgent).Buckets.Get(res.Name).Do() + return retryErr + }, + Timeout: d.Timeout(schema.TimeoutCreate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsNotFoundRetryableError("bucket creation")}, + }) + + if err != nil { + return fmt.Errorf("Error reading bucket after creation: %s", err) + } + + // If the retention policy is not already locked, check if it + // needs to be locked. 
+ if v, ok := d.GetOk("retention_policy"); ok && !res.RetentionPolicy.IsLocked { + retention_policies := v.([]interface{}) + + sb.RetentionPolicy = &storage.BucketRetentionPolicy{} + + retentionPolicy := retention_policies[0].(map[string]interface{}) + + if locked, ok := retentionPolicy["is_locked"]; ok && locked.(bool) { + err = lockRetentionPolicy(config.NewStorageClient(userAgent).Buckets, bucket, res.Metageneration) + if err != nil { + return err + } + + log.Printf("[DEBUG] Locked bucket %v at location %v\n\n", res.Name, res.SelfLink) + } + } + + return resourceStorageBucketRead(d, meta) +} + +func resourceStorageBucketUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + sb := &storage.Bucket{} + + if detectLifecycleChange(d) { + lifecycle, err := expandStorageBucketLifecycle(d.Get("lifecycle_rule")) + if err != nil { + return err + } + sb.Lifecycle = lifecycle + } + + if d.HasChange("requester_pays") { + v := d.Get("requester_pays") + sb.Billing = &storage.BucketBilling{ + RequesterPays: v.(bool), + ForceSendFields: []string{"RequesterPays"}, + } + } + + if d.HasChange("versioning") { + if v, ok := d.GetOk("versioning"); ok { + sb.Versioning = expandBucketVersioning(v) + } + } + + if d.HasChange("autoclass") { + if v, ok := d.GetOk("autoclass"); ok { + sb.Autoclass = expandBucketAutoclass(v) + } else { + sb.Autoclass = &storage.BucketAutoclass{ + Enabled: false, + ForceSendFields: []string{"Enabled"}, + } + } + } + + if d.HasChange("website") { + sb.Website = expandBucketWebsite(d.Get("website")) + } + + if d.HasChange("retention_policy") { + if v, ok := d.GetOk("retention_policy"); ok { + sb.RetentionPolicy = expandBucketRetentionPolicy(v.([]interface{})) + } else { + sb.NullFields = append(sb.NullFields, "RetentionPolicy") + } + } + + if d.HasChange("cors") { + if v, ok := d.GetOk("cors"); ok { + 
sb.Cors = expandCors(v.([]interface{})) + } else { + sb.NullFields = append(sb.NullFields, "Cors") + } + } + + if d.HasChange("default_event_based_hold") { + v := d.Get("default_event_based_hold") + sb.DefaultEventBasedHold = v.(bool) + sb.ForceSendFields = append(sb.ForceSendFields, "DefaultEventBasedHold") + } + + if d.HasChange("logging") { + if v, ok := d.GetOk("logging"); ok { + sb.Logging = expandBucketLogging(v.([]interface{})) + } else { + sb.NullFields = append(sb.NullFields, "Logging") + } + } + + if d.HasChange("encryption") { + if v, ok := d.GetOk("encryption"); ok { + sb.Encryption = expandBucketEncryption(v.([]interface{})) + } else { + sb.NullFields = append(sb.NullFields, "Encryption") + } + } + + if d.HasChange("effective_labels") { + sb.Labels = tpgresource.ExpandEffectiveLabels(d) + if len(sb.Labels) == 0 { + sb.NullFields = append(sb.NullFields, "Labels") + } + + // To delete a label using PATCH, we have to explicitly set its value + // to null. + old, _ := d.GetChange("effective_labels") + for k := range old.(map[string]interface{}) { + if _, ok := sb.Labels[k]; !ok { + sb.NullFields = append(sb.NullFields, fmt.Sprintf("Labels.%s", k)) + } + } + } + + if d.HasChange("storage_class") { + if v, ok := d.GetOk("storage_class"); ok { + sb.StorageClass = v.(string) + } + } + + if d.HasChange("uniform_bucket_level_access") || d.HasChange("public_access_prevention") { + sb.IamConfiguration = expandIamConfiguration(d) + } + + if d.HasChange("rpo") { + if v,ok := d.GetOk("rpo"); ok{ + sb.Rpo = v.(string) + } else { + sb.NullFields = append(sb.NullFields, "Rpo") + } + } + + if d.HasChange("soft_delete_policy") { + if v, ok := d.GetOk("soft_delete_policy"); ok { + sb.SoftDeletePolicy = expandBucketSoftDeletePolicy(v.([]interface{})) + } + } + + res, err := config.NewStorageClient(userAgent).Buckets.Patch(d.Get("name").(string), sb).Do() + if err != nil { + return err + } + + // Assign the bucket ID as the resource ID + if err := d.Set("self_link", 
res.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + + // There seems to be some eventual consistency errors in some cases, so we want to check a few times + // to make sure it exists before moving on + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + _, retryErr := config.NewStorageClient(userAgent).Buckets.Get(res.Name).Do() + return retryErr + }, + Timeout: d.Timeout(schema.TimeoutUpdate), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsNotFoundRetryableError("bucket update")}, + }) + + if err != nil { + return fmt.Errorf("Error reading bucket after update: %s", err) + } + + if d.HasChange("retention_policy") { + if v, ok := d.GetOk("retention_policy"); ok { + retention_policies := v.([]interface{}) + + sb.RetentionPolicy = &storage.BucketRetentionPolicy{} + + retentionPolicy := retention_policies[0].(map[string]interface{}) + + if locked, ok := retentionPolicy["is_locked"]; ok && locked.(bool) && d.HasChange("retention_policy.0.is_locked") { + err = lockRetentionPolicy(config.NewStorageClient(userAgent).Buckets, d.Get("name").(string), res.Metageneration) + if err != nil { + return err + } + } + } + } + + log.Printf("[DEBUG] Patched bucket %v at location %v\n\n", res.Name, res.SelfLink) + + d.SetId(res.Id) + + return nil +} + +func resourceStorageBucketRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Get the bucket and acl + bucket := d.Get("name").(string) + + var res *storage.Bucket + // There seems to be some eventual consistency errors in some cases, so we want to check a few times + // to make sure it exists before moving on + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (operr error) { + var retryErr error + res, retryErr = 
config.NewStorageClient(userAgent).Buckets.Get(bucket).Do() + return retryErr + }, + Timeout: d.Timeout(schema.TimeoutRead), + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsNotFoundRetryableError("bucket read")}, + }) + + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Storage Bucket %q", d.Get("name").(string))) + } + log.Printf("[DEBUG] Read bucket %v at location %v\n\n", res.Name, res.SelfLink) + + return setStorageBucket(d, config, res, bucket, userAgent) +} + +func resourceStorageBucketDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + // Get the bucket + bucket := d.Get("name").(string) + + var listError, deleteObjectError error + for deleteObjectError == nil { + res, err := config.NewStorageClient(userAgent).Objects.List(bucket).Versions(true).Do() + if err != nil { + log.Printf("Error listing contents of bucket %s: %v", bucket, err) + // If we can't list the contents, try deleting the bucket anyway in case it's empty + listError = err + break + } + + if len(res.Items) == 0 { + break // 0 items, bucket empty + } + + if d.Get("retention_policy.0.is_locked").(bool) { + for _, item := range res.Items { + expiration, err := time.Parse(time.RFC3339, item.RetentionExpirationTime) + if err != nil { + return err + } + if expiration.After(time.Now()) { + deleteErr := errors.New("Bucket '" + d.Get("name").(string) + "' contains objects that have not met the retention period yet and cannot be deleted.") + log.Printf("Error! %s : %s\n\n", bucket, deleteErr) + return deleteErr + } + } + } + + if !d.Get("force_destroy").(bool) { + deleteErr := fmt.Errorf("Error trying to delete bucket %s containing objects without `force_destroy` set to true", bucket) + log.Printf("Error! 
%s : %s\n\n", bucket, deleteErr) + return deleteErr + } + // GCS requires that a bucket be empty (have no objects or object + // versions) before it can be deleted. + log.Printf("[DEBUG] GCS Bucket attempting to forceDestroy\n\n") + + // Create a workerpool for parallel deletion of resources. In the + // future, it would be great to expose Terraform's global parallelism + // flag here, but that's currently reserved for core use. Testing + // shows that NumCPUs-1 is the most performant on average networks. + // + // The challenge with making this user-configurable is that the + // configuration would reside in the Terraform configuration file, + // decreasing its portability. Ideally we'd want this to connect to + // Terraform's top-level -parallelism flag, but that's not plumbed nor + // is it scheduled to be plumbed to individual providers. + wp := workerpool.New(runtime.NumCPU() - 1) + + for _, object := range res.Items { + log.Printf("[DEBUG] Found %s", object.Name) + object := object + + wp.Submit(func() { + log.Printf("[TRACE] Attempting to delete %s", object.Name) + if err := config.NewStorageClient(userAgent).Objects.Delete(bucket, object.Name).Generation(object.Generation).Do(); err != nil { + deleteObjectError = err + log.Printf("[ERR] Failed to delete storage object %s: %s", object.Name, err) + } else { + log.Printf("[TRACE] Successfully deleted %s", object.Name) + } + }) + } + + // Wait for everything to finish. 
+ wp.StopWait() + } + + // remove empty bucket + err = retry.Retry(1*time.Minute, func() *retry.RetryError { + err := config.NewStorageClient(userAgent).Buckets.Delete(bucket).Do() + if err == nil { + return nil + } + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 429 { + return retry.RetryableError(gerr) + } + return retry.NonRetryableError(err) + }) + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 && strings.Contains(gerr.Message, "not empty") && listError != nil { + return fmt.Errorf("could not delete non-empty bucket due to error when listing contents: %v", listError) + } + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 409 && strings.Contains(gerr.Message, "not empty") && deleteObjectError != nil { + return fmt.Errorf("could not delete non-empty bucket due to error when deleting contents: %v", deleteObjectError) + } + if err != nil { + log.Printf("Error deleting bucket %s: %v", bucket, err) + return err + } + log.Printf("[DEBUG] Deleted bucket %v\n\n", bucket) + + return nil +} + +func resourceStorageBucketStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // We need to support project/bucket_name and bucket_name formats. This will allow + // importing a bucket that is in a different project than the provider default. 
+ // ParseImportID can't be used because having no project will cause an error but it + // is a valid state as the project_id will be retrieved in READ + parts := strings.Split(d.Id(), "/") + if len(parts) == 1 { + if err := d.Set("name", parts[0]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } else if len(parts) > 1 { + if err := d.Set("project", parts[0]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("name", parts[1]); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + } + + if err := d.Set("force_destroy", false); err != nil { + return nil, fmt.Errorf("Error setting force_destroy: %s", err) + } + return []*schema.ResourceData{d}, nil +} + +func expandCors(configured []interface{}) []*storage.BucketCors { + if len(configured) == 0 { + return nil + } + corsRules := make([]*storage.BucketCors, 0, len(configured)) + for _, raw := range configured { + data := raw.(map[string]interface{}) + corsRule := storage.BucketCors{ + Origin: tpgresource.ConvertStringArr(data["origin"].([]interface{})), + Method: tpgresource.ConvertStringArr(data["method"].([]interface{})), + ResponseHeader: tpgresource.ConvertStringArr(data["response_header"].([]interface{})), + MaxAgeSeconds: int64(data["max_age_seconds"].(int)), + } + + corsRules = append(corsRules, &corsRule) + } + return corsRules +} + +func flattenCors(corsRules []*storage.BucketCors) []map[string]interface{} { + corsRulesSchema := make([]map[string]interface{}, 0, len(corsRules)) + for _, corsRule := range corsRules { + data := map[string]interface{}{ + "origin": corsRule.Origin, + "method": corsRule.Method, + "response_header": corsRule.ResponseHeader, + "max_age_seconds": corsRule.MaxAgeSeconds, + } + + corsRulesSchema = append(corsRulesSchema, data) + } + return corsRulesSchema +} + +func expandBucketEncryption(configured interface{}) *storage.BucketEncryption { + encs := configured.([]interface{}) + if len(encs) == 
0 || encs[0] == nil { + return nil + } + enc := encs[0].(map[string]interface{}) + keyname := enc["default_kms_key_name"] + if keyname == nil || keyname.(string) == "" { + return nil + } + bucketenc := &storage.BucketEncryption{ + DefaultKmsKeyName: keyname.(string), + } + return bucketenc +} + +func flattenBucketEncryption(enc *storage.BucketEncryption) []map[string]interface{} { + encryption := make([]map[string]interface{}, 0, 1) + + if enc == nil { + return encryption + } + + encryption = append(encryption, map[string]interface{}{ + "default_kms_key_name": enc.DefaultKmsKeyName, + }) + + return encryption +} + +func expandBucketCustomPlacementConfig(configured interface{}) *storage.BucketCustomPlacementConfig { + cfcs := configured.([]interface{}) + if len(cfcs) == 0 || cfcs[0] == nil { + return nil + } + cfc := cfcs[0].(map[string]interface{}) + bucketcfc := &storage.BucketCustomPlacementConfig{ + DataLocations: expandBucketDataLocations(cfc["data_locations"]), + } + return bucketcfc +} + +func flattenBucketCustomPlacementConfig(cfc *storage.BucketCustomPlacementConfig) []map[string]interface{} { + customPlacementConfig := make([]map[string]interface{}, 0, 1) + + if cfc == nil { + return customPlacementConfig + } + + customPlacementConfig = append(customPlacementConfig, map[string]interface{}{ + "data_locations": cfc.DataLocations, + }) + + return customPlacementConfig +} + +func expandBucketDataLocations(configured interface{}) []string { + l := configured.(*schema.Set).List() + + // Since we only want uppercase values to prevent unnecessary diffs, we can do a comparison + // to determine whether or not to include the value as part of the request. + + // This extra check comes from the limitations of both DiffStateFunc and StateFunc towards types of Sets,Lists, and Maps. 
+ req := make([]string, 0, len(l)) + for _, raw := range l { + if raw.(string) == strings.ToUpper(raw.(string)) { + req = append(req, raw.(string)) + } + } + return req +} + +func expandBucketLogging(configured interface{}) *storage.BucketLogging { + loggings := configured.([]interface{}) + if len(loggings) == 0 || loggings[0] == nil { + return nil + } + + logging := loggings[0].(map[string]interface{}) + + bucketLogging := &storage.BucketLogging{ + LogBucket: logging["log_bucket"].(string), + LogObjectPrefix: logging["log_object_prefix"].(string), + } + + return bucketLogging +} + +func flattenBucketLogging(bucketLogging *storage.BucketLogging) []map[string]interface{} { + loggings := make([]map[string]interface{}, 0, 1) + + if bucketLogging == nil { + return loggings + } + + logging := map[string]interface{}{ + "log_bucket": bucketLogging.LogBucket, + "log_object_prefix": bucketLogging.LogObjectPrefix, + } + + loggings = append(loggings, logging) + return loggings +} + +func expandBucketRetentionPolicy(configured interface{}) *storage.BucketRetentionPolicy { + retentionPolicies := configured.([]interface{}) + if len(retentionPolicies) == 0 { + return nil + } + retentionPolicy := retentionPolicies[0].(map[string]interface{}) + + bucketRetentionPolicy := &storage.BucketRetentionPolicy{ + IsLocked: retentionPolicy["is_locked"].(bool), + RetentionPeriod: int64(retentionPolicy["retention_period"].(int)), + } + + return bucketRetentionPolicy +} + +func flattenBucketRetentionPolicy(bucketRetentionPolicy *storage.BucketRetentionPolicy) []map[string]interface{} { + bucketRetentionPolicies := make([]map[string]interface{}, 0, 1) + + if bucketRetentionPolicy == nil { + return bucketRetentionPolicies + } + + retentionPolicy := map[string]interface{}{ + "is_locked": bucketRetentionPolicy.IsLocked, + "retention_period": bucketRetentionPolicy.RetentionPeriod, + } + + bucketRetentionPolicies = append(bucketRetentionPolicies, retentionPolicy) + return bucketRetentionPolicies +} + 
+func flattenBucketObjectRetention(bucketObjectRetention *storage.BucketObjectRetention) bool { + if bucketObjectRetention == nil { + return false + } + if bucketObjectRetention.Mode == "Enabled" { + return true + } + return false +} + +func expandBucketSoftDeletePolicy(configured interface{}) *storage.BucketSoftDeletePolicy{ + configuredSoftDeletePolicies := configured.([]interface{}) + if len(configuredSoftDeletePolicies) == 0 { + return nil + } + configuredSoftDeletePolicy := configuredSoftDeletePolicies[0].(map[string]interface{}) + softDeletePolicy := &storage.BucketSoftDeletePolicy{ + RetentionDurationSeconds: int64(configuredSoftDeletePolicy["retention_duration_seconds"].(int)), + } + softDeletePolicy.ForceSendFields=append(softDeletePolicy.ForceSendFields,"RetentionDurationSeconds") + return softDeletePolicy +} + +func flattenBucketSoftDeletePolicy(softDeletePolicy *storage.BucketSoftDeletePolicy) []map[string]interface{} { + policies := make([]map[string]interface{}, 0, 1) + if softDeletePolicy == nil { + return policies + } + policy := map[string]interface{}{ + "retention_duration_seconds": softDeletePolicy.RetentionDurationSeconds, + "effective_time": softDeletePolicy.EffectiveTime, + } + policies = append(policies, policy) + return policies +} + +func expandBucketVersioning(configured interface{}) *storage.BucketVersioning { + versionings := configured.([]interface{}) + if len(versionings) == 0 { + return nil + } + + versioning := versionings[0].(map[string]interface{}) + + bucketVersioning := &storage.BucketVersioning{} + + bucketVersioning.Enabled = versioning["enabled"].(bool) + bucketVersioning.ForceSendFields = append(bucketVersioning.ForceSendFields, "Enabled") + + return bucketVersioning +} + +func expandBucketAutoclass(configured interface{}) *storage.BucketAutoclass { + autoclassList := configured.([]interface{}) + if len(autoclassList) == 0 { + return nil + } + + autoclass := autoclassList[0].(map[string]interface{}) + + bucketAutoclass := 
&storage.BucketAutoclass{} + + bucketAutoclass.Enabled = autoclass["enabled"].(bool) + if autoclass["terminal_storage_class"] != "" { + bucketAutoclass.TerminalStorageClass = autoclass["terminal_storage_class"].(string) + } + bucketAutoclass.ForceSendFields = append(bucketAutoclass.ForceSendFields, "Enabled") + + return bucketAutoclass +} + +func flattenBucketVersioning(bucketVersioning *storage.BucketVersioning) []map[string]interface{} { + versionings := make([]map[string]interface{}, 0, 1) + + if bucketVersioning == nil { + return versionings + } + + versioning := map[string]interface{}{ + "enabled": bucketVersioning.Enabled, + } + versionings = append(versionings, versioning) + return versionings +} + +func flattenBucketAutoclass(bucketAutoclass *storage.BucketAutoclass) []map[string]interface{} { + autoclassList := make([]map[string]interface{}, 0, 1) + + if bucketAutoclass == nil { + return autoclassList + } + + autoclass := map[string]interface{}{ + "enabled": bucketAutoclass.Enabled, + "terminal_storage_class": bucketAutoclass.TerminalStorageClass, + } + autoclassList = append(autoclassList, autoclass) + return autoclassList +} + +func flattenBucketLifecycle(d *schema.ResourceData, lifecycle *storage.BucketLifecycle) []map[string]interface{} { + if lifecycle == nil || lifecycle.Rule == nil { + return []map[string]interface{}{} + } + + rules := make([]map[string]interface{}, 0, len(lifecycle.Rule)) + + for index, rule := range lifecycle.Rule { + rules = append(rules, map[string]interface{}{ + "action": schema.NewSet(resourceGCSBucketLifecycleRuleActionHash, []interface{}{flattenBucketLifecycleRuleAction(rule.Action)}), + "condition": schema.NewSet(resourceGCSBucketLifecycleRuleConditionHash, []interface{}{flattenBucketLifecycleRuleCondition(index, d, rule.Condition)}), + }) + } + + return rules +} + +func flattenBucketLifecycleRuleAction(action *storage.BucketLifecycleRuleAction) map[string]interface{} { + return map[string]interface{}{ + "type": 
action.Type, + "storage_class": action.StorageClass, + } +} + +func flattenBucketLifecycleRuleCondition(index int, d *schema.ResourceData, condition *storage.BucketLifecycleRuleCondition) map[string]interface{} { + ruleCondition := map[string]interface{}{ + "created_before": condition.CreatedBefore, + "matches_storage_class": tpgresource.ConvertStringArrToInterface(condition.MatchesStorageClass), + "num_newer_versions": int(condition.NumNewerVersions), + "custom_time_before": condition.CustomTimeBefore, + "days_since_custom_time": int(condition.DaysSinceCustomTime), + "days_since_noncurrent_time": int(condition.DaysSinceNoncurrentTime), + "noncurrent_time_before": condition.NoncurrentTimeBefore, + "matches_prefix": tpgresource.ConvertStringArrToInterface(condition.MatchesPrefix), + "matches_suffix": tpgresource.ConvertStringArrToInterface(condition.MatchesSuffix), + } + if condition.Age != nil { + ruleCondition["age"] = int(*condition.Age) + } + if condition.IsLive == nil { + ruleCondition["with_state"] = "ANY" + } else { + if *condition.IsLive { + ruleCondition["with_state"] = "LIVE" + } else { + ruleCondition["with_state"] = "ARCHIVED" + } + } + // setting no_age value from state config since it is terraform only variable and not getting value from backend. 
+ if v, ok := d.GetOk(fmt.Sprintf("lifecycle_rule.%d.condition",index)); ok{ + state_condition := v.(*schema.Set).List()[0].(map[string]interface{}) + ruleCondition["no_age"] = state_condition["no_age"].(bool) + ruleCondition["send_days_since_noncurrent_time_if_zero"] = state_condition["send_days_since_noncurrent_time_if_zero"].(bool) + ruleCondition["send_days_since_custom_time_if_zero"] = state_condition["send_days_since_custom_time_if_zero"].(bool) + ruleCondition["send_num_newer_versions_if_zero"] = state_condition["send_num_newer_versions_if_zero"].(bool) + } + + return ruleCondition +} + +func flattenBucketWebsite(website *storage.BucketWebsite) []map[string]interface{} { + if website == nil { + return nil + } + websites := make([]map[string]interface{}, 0, 1) + websites = append(websites, map[string]interface{}{ + "main_page_suffix": website.MainPageSuffix, + "not_found_page": website.NotFoundPage, + }) + + return websites +} + +func expandBucketWebsite(v interface{}) *storage.BucketWebsite { + if v == nil { + return nil + } + vs := v.([]interface{}) + + if len(vs) < 1 || vs[0] == nil { + return nil + } + + website := vs[0].(map[string]interface{}) + w := &storage.BucketWebsite{} + + if v := website["not_found_page"]; v != "" { + w.NotFoundPage = v.(string) + } + + if v := website["main_page_suffix"]; v != "" { + w.MainPageSuffix = v.(string) + } + return w +} + +func expandIamConfiguration(d *schema.ResourceData) *storage.BucketIamConfiguration { + cfg := &storage.BucketIamConfiguration{ + ForceSendFields: []string{"UniformBucketLevelAccess"}, + UniformBucketLevelAccess: &storage.BucketIamConfigurationUniformBucketLevelAccess{ + Enabled: d.Get("uniform_bucket_level_access").(bool), + ForceSendFields: []string{"Enabled"}, + }, + } + + if v, ok := d.GetOk("public_access_prevention"); ok { + cfg.PublicAccessPrevention = v.(string) + } + + return cfg +} + +func expandStorageBucketLifecycle(v interface{}) (*storage.BucketLifecycle, error) { + if v == nil { + 
return &storage.BucketLifecycle{ + ForceSendFields: []string{"Rule"}, + }, nil + } + lifecycleRules := v.([]interface{}) + transformedRules := make([]*storage.BucketLifecycleRule, 0, len(lifecycleRules)) + + for _, v := range lifecycleRules { + rule, err := expandStorageBucketLifecycleRule(v) + if err != nil { + return nil, err + } + transformedRules = append(transformedRules, rule) + } + + if len(transformedRules) == 0 { + return &storage.BucketLifecycle{ + ForceSendFields: []string{"Rule"}, + }, nil + } + + return &storage.BucketLifecycle{ + Rule: transformedRules, + }, nil +} + +func expandStorageBucketLifecycleRule(v interface{}) (*storage.BucketLifecycleRule, error) { + if v == nil { + return nil, nil + } + + rule := v.(map[string]interface{}) + transformed := &storage.BucketLifecycleRule{} + + if v, ok := rule["action"]; ok { + action, err := expandStorageBucketLifecycleRuleAction(v) + if err != nil { + return nil, err + } + transformed.Action = action + } else { + return nil, fmt.Errorf("exactly one action is required for lifecycle_rule") + } + + if v, ok := rule["condition"]; ok { + cond, err := expandStorageBucketLifecycleRuleCondition(v) + if err != nil { + return nil, err + } + transformed.Condition = cond + } + + return transformed, nil +} + +func expandStorageBucketLifecycleRuleAction(v interface{}) (*storage.BucketLifecycleRuleAction, error) { + if v == nil { + return nil, fmt.Errorf("exactly one action is required for lifecycle_rule") + } + + actions := v.(*schema.Set).List() + if len(actions) != 1 { + return nil, fmt.Errorf("exactly one action is required for lifecycle_rule") + } + + action := actions[0].(map[string]interface{}) + transformed := &storage.BucketLifecycleRuleAction{} + + if v, ok := action["type"]; ok { + transformed.Type = v.(string) + } + + if v, ok := action["storage_class"]; ok { + transformed.StorageClass = v.(string) + } + + return transformed, nil +} + +func expandStorageBucketLifecycleRuleCondition(v interface{}) 
(*storage.BucketLifecycleRuleCondition, error) { + if v == nil { + return nil, nil + } + conditions := v.(*schema.Set).List() + if len(conditions) != 1 { + return nil, fmt.Errorf("One and only one condition can be provided per lifecycle_rule") + } + + condition := conditions[0].(map[string]interface{}) + transformed := &storage.BucketLifecycleRuleCondition{} + // Setting high precedence of no_age over age when both used together. + // Only sets age value when no_age is not present or no_age is present and has false value + if v, ok := condition["no_age"]; !ok || !(v.(bool)) { + if v, ok := condition["age"]; ok { + age := int64(v.(int)) + transformed.Age = &age + transformed.ForceSendFields = append(transformed.ForceSendFields, "Age") + } + } + + if v, ok := condition["created_before"]; ok { + transformed.CreatedBefore = v.(string) + } + + withStateV, withStateOk := condition["with_state"] + // Because TF schema, withStateOk currently will always be true, + // do the check just in case. + if withStateOk { + switch withStateV.(string) { + case "LIVE": + transformed.IsLive = googleapi.Bool(true) + case "ARCHIVED": + transformed.IsLive = googleapi.Bool(false) + case "ANY", "": + // This is unnecessary, but set explicitly to nil for readability. 
+ transformed.IsLive = nil + default: + return nil, fmt.Errorf("unexpected value %q for condition.with_state", withStateV.(string)) + } + } + + if v, ok := condition["matches_storage_class"]; ok { + classes := v.([]interface{}) + transformedClasses := make([]string, 0, len(classes)) + + for _, v := range classes { + transformedClasses = append(transformedClasses, v.(string)) + } + transformed.MatchesStorageClass = transformedClasses + } + + if v, ok := condition["num_newer_versions"]; ok { + transformed.NumNewerVersions = int64(v.(int)) + if u, ok := condition["send_num_newer_versions_if_zero"]; ok && u.(bool) { + transformed.ForceSendFields = append(transformed.ForceSendFields, "NumNewerVersions") + } + } + + if v, ok := condition["custom_time_before"]; ok { + transformed.CustomTimeBefore = v.(string) + } + + if v, ok := condition["days_since_custom_time"]; ok { + transformed.DaysSinceCustomTime = int64(v.(int)) + if u, ok := condition["send_days_since_custom_time_if_zero"]; ok && u.(bool) { + transformed.ForceSendFields = append(transformed.ForceSendFields, "DaysSinceCustomTime") + } + } + + if v, ok := condition["days_since_noncurrent_time"]; ok { + transformed.DaysSinceNoncurrentTime = int64(v.(int)) + if u, ok := condition["send_days_since_noncurrent_time_if_zero"]; ok && u.(bool) { + transformed.ForceSendFields = append(transformed.ForceSendFields, "DaysSinceNoncurrentTime") + } + } + + if v, ok := condition["noncurrent_time_before"]; ok { + transformed.NoncurrentTimeBefore = v.(string) + } + + if v, ok := condition["matches_prefix"]; ok { + prefixes := v.([]interface{}) + transformedPrefixes := make([]string, 0, len(prefixes)) + + for _, v := range prefixes { + transformedPrefixes = append(transformedPrefixes, v.(string)) + } + transformed.MatchesPrefix = transformedPrefixes + } + if v, ok := condition["matches_suffix"]; ok { + suffixes := v.([]interface{}) + transformedSuffixes := make([]string, 0, len(suffixes)) + + for _, v := range suffixes { + 
transformedSuffixes = append(transformedSuffixes, v.(string)) + } + transformed.MatchesSuffix = transformedSuffixes + } + + return transformed, nil +} + +func resourceGCSBucketLifecycleRuleActionHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) + + if v, ok := m["storage_class"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + return tpgresource.Hashcode(buf.String()) +} + +func resourceGCSBucketLifecycleRuleConditionHash(v interface{}) int { + if v == nil { + return 0 + } + + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if v, ok := m["no_age"]; ok && v.(bool){ + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } else { + if v, ok := m["age"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + } + + if v, ok := m["days_since_custom_time"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + + if v, ok := m["days_since_noncurrent_time"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + + if v, ok := m["created_before"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["custom_time_before"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + if v, ok := m["noncurrent_time_before"]; ok { + buf.WriteString(fmt.Sprintf("%s-", v.(string))) + } + + withStateV, withStateOk := m["with_state"] + if withStateOk { + switch withStateV.(string) { + case "LIVE": + buf.WriteString(fmt.Sprintf("%t-", true)) + case "ARCHIVED": + buf.WriteString(fmt.Sprintf("%t-", false)) + } + } + + if v, ok := m["matches_storage_class"]; ok { + matches_storage_classes := v.([]interface{}) + for _, matches_storage_class := range matches_storage_classes { + buf.WriteString(fmt.Sprintf("%s-", matches_storage_class)) + } + } + + if v, ok := m["num_newer_versions"]; ok { + buf.WriteString(fmt.Sprintf("%d-", v.(int))) + } + + if v, ok := m["send_days_since_noncurrent_time_if_zero"]; ok { + 
buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } + + if v, ok := m["send_days_since_custom_time_if_zero"]; ok { + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } + + if v, ok := m["send_num_newer_versions_if_zero"]; ok { + buf.WriteString(fmt.Sprintf("%t-", v.(bool))) + } + + if v, ok := m["matches_prefix"]; ok { + matches_prefixes := v.([]interface{}) + for _, matches_prefix := range matches_prefixes { + buf.WriteString(fmt.Sprintf("%s-", matches_prefix)) + } + } + if v, ok := m["matches_suffix"]; ok { + matches_suffixes := v.([]interface{}) + for _, matches_suffix := range matches_suffixes { + buf.WriteString(fmt.Sprintf("%s-", matches_suffix)) + } + } + + return tpgresource.Hashcode(buf.String()) +} + +func lockRetentionPolicy(bucketsService *storage.BucketsService, bucketName string, metageneration int64) error { + lockPolicyCall := bucketsService.LockRetentionPolicy(bucketName, metageneration) + if _, err := lockPolicyCall.Do(); err != nil { + return err + } + + return nil +} + +// d.HasChange("lifecycle_rule") always returns true, giving false positives. This function detects changes +// to the list size or the actions/conditions of rules directly. 
+func detectLifecycleChange(d *schema.ResourceData) bool { + if d.HasChange("lifecycle_rule.#") { + return true + } + + if l, ok := d.GetOk("lifecycle_rule"); ok { + lifecycleRules := l.([]interface{}) + for i := range lifecycleRules { + if d.HasChange(fmt.Sprintf("lifecycle_rule.%d.action", i)) || d.HasChange(fmt.Sprintf("lifecycle_rule.%d.condition", i)) { + return true + } + } + } + + return false +} + +// Resource Read and DataSource Read both need to set attributes, but Data Sources don't support Timeouts +// so we pulled this portion out separately (https://github.com/hashicorp/terraform-provider-google/issues/11264) +func setStorageBucket(d *schema.ResourceData, config *transport_tpg.Config, res *storage.Bucket, bucket, userAgent string) error { + // We are trying to support several different use cases for bucket. Buckets are globally + // unique but they are associated with projects internally, but some users want to use + // buckets in a project agnostic way. Thus we will check to see if the project ID has been + // explicitly set and use that first. However if no project is explicitly set, such as during + // import, we will look up the ID from the compute API using the project Number from the + // bucket API response. 
+ // If you are working in a project-agnostic way and have not set the project ID in the provider + // block, or the resource or an environment variable, we use the compute API to lookup the projectID + // from the projectNumber which is included in the bucket API response + if d.Get("project") == "" { + project, _ := tpgresource.GetProject(d, config) + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + } + if d.Get("project") == "" { + proj, err := config.NewComputeClient(userAgent).Projects.Get(strconv.FormatUint(res.ProjectNumber, 10)).Do() + if err != nil { + return err + } + log.Printf("[DEBUG] Bucket %v is in project number %v, which is project ID %s.\n", res.Name, res.ProjectNumber, proj.Name) + if err := d.Set("project", proj.Name); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + } + + // Update the bucket ID according to the resource ID + if err := d.Set("self_link", res.SelfLink); err != nil { + return fmt.Errorf("Error setting self_link: %s", err) + } + if err := d.Set("url", fmt.Sprintf("gs://%s", bucket)); err != nil { + return fmt.Errorf("Error setting url: %s", err) + } + if err := d.Set("project_number", res.ProjectNumber); err != nil { + return fmt.Errorf("Error setting project_number: %s", err) + } + if err := d.Set("storage_class", res.StorageClass); err != nil { + return fmt.Errorf("Error setting storage_class: %s", err) + } + if err := d.Set("encryption", flattenBucketEncryption(res.Encryption)); err != nil { + return fmt.Errorf("Error setting encryption: %s", err) + } + if err := d.Set("location", res.Location); err != nil { + return fmt.Errorf("Error setting location: %s", err) + } + if err := d.Set("cors", flattenCors(res.Cors)); err != nil { + return fmt.Errorf("Error setting cors: %s", err) + } + if err := d.Set("default_event_based_hold", res.DefaultEventBasedHold); err != nil { + return fmt.Errorf("Error setting default_event_based_hold: %s", err) + } + 
if err := d.Set("logging", flattenBucketLogging(res.Logging)); err != nil { + return fmt.Errorf("Error setting logging: %s", err) + } + if err := d.Set("enable_object_retention", flattenBucketObjectRetention(res.ObjectRetention)); err != nil { + return fmt.Errorf("Error setting object retention: %s", err) + } + if err := d.Set("versioning", flattenBucketVersioning(res.Versioning)); err != nil { + return fmt.Errorf("Error setting versioning: %s", err) + } + if err := d.Set("autoclass", flattenBucketAutoclass(res.Autoclass)); err != nil { + return fmt.Errorf("Error setting autoclass: %s", err) + } + // lifecycle_rule contains terraform only variable no_age. + // Passing config("d") to flattener function to set no_age separately. + if err := d.Set("lifecycle_rule", flattenBucketLifecycle(d, res.Lifecycle)); err != nil { + return fmt.Errorf("Error setting lifecycle_rule: %s", err) + } + if err := tpgresource.SetLabels(res.Labels, d, "labels"); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := tpgresource.SetLabels(res.Labels, d, "terraform_labels"); err != nil { + return fmt.Errorf("Error setting terraform_labels: %s", err) + } + if err := d.Set("effective_labels", res.Labels); err != nil { + return fmt.Errorf("Error setting labels: %s", err) + } + if err := d.Set("website", flattenBucketWebsite(res.Website)); err != nil { + return fmt.Errorf("Error setting website: %s", err) + } + if err := d.Set("retention_policy", flattenBucketRetentionPolicy(res.RetentionPolicy)); err != nil { + return fmt.Errorf("Error setting retention_policy: %s", err) + } + if err := d.Set("custom_placement_config", flattenBucketCustomPlacementConfig(res.CustomPlacementConfig)); err != nil { + return fmt.Errorf("Error setting custom_placement_config: %s", err) + } + // Needs to hide rpo field for single-region buckets. + // Check the Rpo field from API response to determine whether bucket is in single region config or not. 
+ if res.Rpo != "" { + if err := d.Set("rpo", res.Rpo); err != nil { + return fmt.Errorf("Error setting RPO setting : %s", err) + } + } + if err := d.Set("soft_delete_policy", flattenBucketSoftDeletePolicy(res.SoftDeletePolicy)); err != nil { + return fmt.Errorf("Error setting soft_delete_policy: %s", err) + } + if res.IamConfiguration != nil && res.IamConfiguration.UniformBucketLevelAccess != nil { + if err := d.Set("uniform_bucket_level_access", res.IamConfiguration.UniformBucketLevelAccess.Enabled); err != nil { + return fmt.Errorf("Error setting uniform_bucket_level_access: %s", err) + } + } else { + if err := d.Set("uniform_bucket_level_access", false); err != nil { + return fmt.Errorf("Error setting uniform_bucket_level_access: %s", err) + } + } + + if res.IamConfiguration != nil && res.IamConfiguration.PublicAccessPrevention != "" { + if err := d.Set("public_access_prevention", res.IamConfiguration.PublicAccessPrevention); err != nil { + return fmt.Errorf("Error setting public_access_prevention: %s", err) + } + } + + if res.Billing == nil { + if err := d.Set("requester_pays", nil); err != nil { + return fmt.Errorf("Error setting requester_pays: %s", err) + } + } else { + if err := d.Set("requester_pays", res.Billing.RequesterPays); err != nil { + return fmt.Errorf("Error setting requester_pays: %s", err) + } + } + + d.SetId(res.Id) + return nil +} diff --git a/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go new file mode 100644 index 000000000000..caf628772936 --- /dev/null +++ b/mmv1/third_party/terraform/services/storage/go/resource_storage_bucket_test.go @@ -0,0 +1,2636 @@ +package storage_test + +import ( + "bytes" + "fmt" + "log" + "regexp" + "testing" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + + "google.golang.org/api/googleapi" + "google.golang.org/api/storage/v1" +) + +func TestAccStorageBucket_basic(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "false"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "project", envvar.GetTestProjectFromEnv()), + resource.TestCheckResourceAttrSet( + "google_storage_bucket.bucket", "project_number"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportStateId: fmt.Sprintf("%s/%s", envvar.GetTestProjectFromEnv(), bucketName), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_basicWithAutoclass(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + var updated storage.Bucket + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basicWithAutoclass(bucketName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, 
&bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basicWithAutoclass_update(bucketName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &updated), + testAccCheckStorageBucketWasUpdated(&updated, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basicWithAutoclass(bucketName, false), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_AutoclassDiffSupress(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basicWithAutoclass(bucketName,false), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + 
}, + { + Config: testAccStorageBucket_basicWithAutoclass(bucketName,true), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_requesterPays(t *testing.T) { + t.Parallel() + + bucketName := fmt.Sprintf("tf-test-requester-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_requesterPays(bucketName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "requester_pays", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_lowercaseLocation(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_lowercaseLocation(bucketName), + }, + { + ResourceName: 
"google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_dualLocation(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_dualLocation(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_dualLocation_lowercase(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_dualLocation_lowercase(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_dualLocation_versionChange(t *testing.T) { + // Test is not parallel because ENVs are set. 
+ // Need to skip VCR as this test downloads providers from the Terraform Registry + acctest.SkipIfVcr(t) + + creds := envvar.GetTestCredsFromEnv() + project := envvar.GetTestProjectFromEnv() + t.Setenv("GOOGLE_CREDENTIALS", creds) + t.Setenv("GOOGLE_PROJECT", project) + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_dualLocation(bucketName), + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "5.30.0", + Source: "hashicorp/google", + }, + }, + }, + { + ResourceName: "google_storage_bucket.bucket", + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "5.30.0", + Source: "hashicorp/google", + }, + }, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_dualLocation(bucketName), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + }, + { + ResourceName: "google_storage_bucket.bucket", + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_dualLocation_rpo(t *testing.T) { + t.Parallel() + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_dualLocation(bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "DEFAULT"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_dualLocation_rpo(bucketName,"ASYNC_TURBO"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "ASYNC_TURBO"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_dualLocation_rpo(bucketName,"DEFAULT"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "DEFAULT"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_multiLocation_rpo(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "DEFAULT"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_multiLocation_rpo(bucketName,"DEFAULT"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "rpo", "DEFAULT"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func 
TestAccStorageBucket_customAttributes(t *testing.T) { + t.Parallel() + + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_customAttributes(bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_lifecycleRulesMultiple(t *testing.T) { + // multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt(t)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_lifecycleRulesMultiple(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_lifecycleRulesMultiple_update(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_lifecycleRuleStateLive(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_lifecycleRule_withStateLive(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(true), &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_lifecycleRuleStateArchived(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_lifecycleRule_emptyArchived(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionState(nil, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_lifecycleRule_withStateArchived(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(false), &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func 
TestAccStorageBucket_lifecycleRuleStateAny(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_lifecycleRule_withStateArchived(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(false), &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_lifecycleRule_withStateLive(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(true), &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_lifecycleRule_withStateAny(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionState(nil, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_lifecycleRule_withStateArchived(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, 
&bucket), + testAccCheckStorageBucketLifecycleConditionState(googleapi.Bool(false), &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_lifecycleRulesVirtualFields(t *testing.T) { + t.Parallel() + var bucket storage.Bucket + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_customAttributes_withLifecycle1(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_customAttributes_withLifecycleVirtualFieldsUpdate1(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionNoAge(nil, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy","lifecycle_rule.1.condition.0.no_age","lifecycle_rule.1.condition.0.send_days_since_noncurrent_time_if_zero","lifecycle_rule.2.condition.0.send_days_since_noncurrent_time_if_zero","lifecycle_rule.1.condition.0.send_days_since_custom_time_if_zero","lifecycle_rule.2.condition.0.send_days_since_custom_time_if_zero","lifecycle_rule.1.condition.0.send_num_newer_versions_if_zero","lifecycle_rule.2.condition.0.send_num_newer_versions_if_zero"}, + }, + { + Config: 
testAccStorageBucket_customAttributes_withLifecycleVirtualFieldsUpdate2(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketLifecycleConditionNoAge(nil, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy","lifecycle_rule.1.condition.0.no_age","lifecycle_rule.0.condition.0.send_days_since_noncurrent_time_if_zero","lifecycle_rule.0.condition.0.send_days_since_custom_time_if_zero","lifecycle_rule.0.condition.0.send_num_newer_versions_if_zero"}, + }, + { + Config: testAccStorageBucket_customAttributes_withLifecycle1(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_storageClass(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + var updated storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_storageClass(bucketName, "MULTI_REGIONAL", "US"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_storageClass(bucketName, "NEARLINE", 
"US"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &updated), + // storage_class-only change should not recreate + testAccCheckStorageBucketWasUpdated(&updated, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_storageClass(bucketName, "REGIONAL", "US-CENTRAL1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &updated), + // Location change causes recreate + testAccCheckStorageBucketWasRecreated(&updated, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_update_requesterPays(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + var updated storage.Bucket + bucketName := fmt.Sprintf("tf-test-requester-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_requesterPays(bucketName, true), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_requesterPays(bucketName, false), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &updated), + 
testAccCheckStorageBucketWasUpdated(&updated, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_update(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + var recreated storage.Bucket + var updated storage.Bucket + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "false"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_customAttributes(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &recreated), + testAccCheckStorageBucketWasRecreated(&recreated, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_customAttributes_withLifecycle1(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &updated), + testAccCheckStorageBucketWasUpdated(&updated, &recreated), + 
resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_customAttributes_withLifecycle2(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &updated), + testAccCheckStorageBucketWasUpdated(&updated, &recreated), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_customAttributes_withLifecycle1Update(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &updated), + testAccCheckStorageBucketWasUpdated(&updated, &recreated), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_customAttributes(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &updated), + testAccCheckStorageBucketWasUpdated(&updated, &recreated), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "force_destroy", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_forceDestroy(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName 
:= fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_customAttributes(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + Config: testAccStorageBucket_customAttributes(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketPutItem(t, bucketName), + ), + }, + { + Config: testAccStorageBucket_customAttributes(fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t))), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketMissing(t, bucketName), + ), + }, + }, + }) +} + +func TestAccStorageBucket_forceDestroyWithVersioning(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_forceDestroyWithVersioning(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + Config: testAccStorageBucket_forceDestroyWithVersioning(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketPutItem(t, bucketName), + ), + }, + { + Config: testAccStorageBucket_forceDestroyWithVersioning(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketPutItem(t, bucketName), + ), + }, + }, + }) +} + +func TestAccStorageBucket_forceDestroyObjectDeleteError(t *testing.T) { + 
t.Parallel() + + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_forceDestroyWithRetentionPolicy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketPutItem(t, bucketName), + ), + }, + { + Config: testAccStorageBucket_forceDestroyWithRetentionPolicy(bucketName), + Destroy: true, + ExpectError: regexp.MustCompile("could not delete non-empty bucket due to error when deleting contents"), + }, + { + Config: testAccStorageBucket_forceDestroy(bucketName), + }, + }, + }) +} + +func TestAccStorageBucket_enable_object_retention(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := acctest.TestBucketName(t) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_enable_object_retention(bucketName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_enable_object_retention(bucketName, "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func 
TestAccStorageBucket_versioning(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_versioning(bucketName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "versioning.#", "1"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "versioning.0.enabled", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_versioning_empty(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "versioning.#", "1"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "versioning.0.enabled", "true"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_versioning(bucketName, "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "versioning.#", "1"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "versioning.0.enabled", "false"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + 
ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_versioning_empty(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "versioning.#", "1"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "versioning.0.enabled", "false"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_logging(t *testing.T) { + t.Parallel() + + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_logging(bucketName, "log-bucket"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "logging.#", "1"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "logging.0.log_bucket", "log-bucket"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "logging.0.log_object_prefix", bucketName), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_loggingWithPrefix(bucketName, "another-log-bucket", "object-prefix"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "logging.#", "1"), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "logging.0.log_bucket", "another-log-bucket"), + 
resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "logging.0.log_object_prefix", "object-prefix"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "logging.#", "0"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_cors(t *testing.T) { + t.Parallel() + + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testGoogleStorageBucketsCors(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_basic(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_defaultEventBasedHold(t *testing.T) { + t.Parallel() + + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_defaultEventBasedHold(bucketName), + }, + { + ResourceName: 
"google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_encryption(t *testing.T) { + // when rotation is set, next rotation time is set using time.Now + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "organization": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + "random_int": acctest.RandInt(t), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_encryption(context), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_publicAccessPrevention(t *testing.T) { + t.Parallel() + + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_publicAccessPrevention(bucketName, "enforced"), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_uniformBucketAccessOnly(t *testing.T) { + t.Parallel() + + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_uniformBucketAccessOnly(bucketName, true), 
+ }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_uniformBucketAccessOnly(bucketName, false), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_labels(t *testing.T) { + t.Parallel() + + bucketName := fmt.Sprintf("tf-test-acl-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + // Going from two labels + { + Config: testAccStorageBucket_updateLabels(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "labels", "terraform_labels"}, + }, + // Down to only one label (test single label deletion) + { + Config: testAccStorageBucket_labels(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "labels", "terraform_labels"}, + }, + // And make sure deleting all labels work + { + Config: testAccStorageBucket_basic(bucketName), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccStorageBucket_retentionPolicy(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_retentionPolicy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketRetentionPolicy(t, bucketName), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_website(t *testing.T) { + t.Parallel() + + bucketSuffix := fmt.Sprintf("tf-website-test-%d", acctest.RandInt(t)) + errRe := regexp.MustCompile("one of\n`website.0.main_page_suffix,website.0.not_found_page` must be specified") + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_websiteNoAttributes(bucketSuffix), + ExpectError: errRe, + }, + { + Config: testAccStorageBucket_websiteOneAttribute(bucketSuffix), + }, + { + ResourceName: "google_storage_bucket.website", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_websiteOneAttributeUpdate(bucketSuffix), + }, + { + ResourceName: "google_storage_bucket.website", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_website(bucketSuffix), + }, + { + ResourceName: "google_storage_bucket.website", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_websiteRemoved(bucketSuffix), + }, + { + ResourceName: "google_storage_bucket.website", + ImportState: 
true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func TestAccStorageBucket_retentionPolicyLocked(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + var newBucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_lockedRetentionPolicy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + testAccCheckStorageBucketRetentionPolicy(t, bucketName), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_retentionPolicy(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &newBucket), + testAccCheckStorageBucketWasRecreated(&newBucket, &bucket), + ), + }, + }, + }) +} + +func TestAccStorageBucket_SoftDeletePolicy(t *testing.T) { + t.Parallel() + + var bucket storage.Bucket + bucketName := fmt.Sprintf("tf-test-acc-bucket-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccStorageBucketDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageBucket_basic(bucketName), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", 
"soft_delete_policy.0.retention_duration_seconds", "604800"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_SoftDeletePolicy(bucketName,7776000), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "soft_delete_policy.0.retention_duration_seconds", "7776000"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + { + Config: testAccStorageBucket_SoftDeletePolicy(bucketName,0), + Check: resource.ComposeTestCheckFunc( + testAccCheckStorageBucketExists( + t, "google_storage_bucket.bucket", bucketName, &bucket), + resource.TestCheckResourceAttr( + "google_storage_bucket.bucket", "soft_delete_policy.0.retention_duration_seconds", "0"), + ), + }, + { + ResourceName: "google_storage_bucket.bucket", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"force_destroy"}, + }, + }, + }) +} + +func testAccCheckStorageBucketExists(t *testing.T, n string, bucketName string, bucket *storage.Bucket) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Project_ID is set") + } + + config := acctest.GoogleProviderConfig(t) + + found, err := config.NewStorageClient(config.UserAgent).Buckets.Get(rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Id != rs.Primary.ID { + return fmt.Errorf("Bucket not found") + } + + if found.Name != bucketName { + return fmt.Errorf("expected name %s, got %s", bucketName, found.Name) + } + + *bucket = *found + return nil + } +} + 
+func testAccCheckStorageBucketWasUpdated(newBucket *storage.Bucket, b *storage.Bucket) resource.TestCheckFunc { + return func(s *terraform.State) error { + if newBucket.TimeCreated != b.TimeCreated { + return fmt.Errorf("expected storage bucket to have been updated (had same creation time), instead was recreated - old creation time %s, new creation time %s", newBucket.TimeCreated, b.TimeCreated) + } + return nil + } +} + +func testAccCheckStorageBucketWasRecreated(newBucket *storage.Bucket, b *storage.Bucket) resource.TestCheckFunc { + return func(s *terraform.State) error { + if newBucket.TimeCreated == b.TimeCreated { + return fmt.Errorf("expected storage bucket to have been recreated, instead had same creation time (%s)", b.TimeCreated) + } + return nil + } +} + +func testAccCheckStorageBucketPutItem(t *testing.T, bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + data := bytes.NewBufferString("test") + dataReader := bytes.NewReader(data.Bytes()) + object := &storage.Object{Name: "bucketDestroyTestFile"} + + // This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails + if res, err := config.NewStorageClient(config.UserAgent).Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil { + log.Printf("[INFO] Created object %v at location %v\n\n", res.Name, res.SelfLink) + } else { + return fmt.Errorf("Objects.Insert failed: %v", err) + } + + return nil + } +} + +func testAccCheckStorageBucketRetentionPolicy(t *testing.T, bucketName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + data := bytes.NewBufferString("test") + dataReader := bytes.NewReader(data.Bytes()) + object := &storage.Object{Name: "bucketDestroyTestFile"} + + // This needs to use Media(io.Reader) call, otherwise it does not go to /upload API and fails + if res, err := 
config.NewStorageClient(config.UserAgent).Objects.Insert(bucketName, object).Media(dataReader).Do(); err == nil {
+			log.Printf("[INFO] Created object %v at location %v\n\n", res.Name, res.SelfLink)
+		} else {
+			return fmt.Errorf("Objects.Insert failed: %v", err)
+		}
+
+		// Test deleting immediately, this should fail because of the 10 second retention
+		if err := config.NewStorageClient(config.UserAgent).Objects.Delete(bucketName, object.Name).Do(); err == nil {
+			return fmt.Errorf("Objects.Delete succeeded: %v", object.Name)
+		}
+
+		// Wait 10 seconds and delete again
+		time.Sleep(10000 * time.Millisecond)
+
+		if err := config.NewStorageClient(config.UserAgent).Objects.Delete(bucketName, object.Name).Do(); err == nil {
+			log.Printf("[INFO] Deleted object %v at location %v\n\n", object.Name, object.SelfLink)
+		} else {
+			return fmt.Errorf("Objects.Delete failed: %v", err)
+		}
+
+		return nil
+	}
+}
+
+func testAccCheckStorageBucketMissing(t *testing.T, bucketName string) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		config := acctest.GoogleProviderConfig(t)
+
+		_, err := config.NewStorageClient(config.UserAgent).Buckets.Get(bucketName).Do()
+		if err == nil {
+			return fmt.Errorf("Found %s", bucketName)
+		}
+
+		if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 {
+			return nil
+		}
+
+		return err
+	}
+}
+
+func testAccCheckStorageBucketLifecycleConditionState(expected *bool, b *storage.Bucket) resource.TestCheckFunc {
+	return func(s *terraform.State) error {
+		actual := b.Lifecycle.Rule[0].Condition.IsLive
+		if expected == nil && b.Lifecycle.Rule[0].Condition.IsLive == nil {
+			return nil
+		}
+		if expected == nil {
+			return fmt.Errorf("expected condition isLive to be unset, instead got %t", *actual)
+		}
+		if actual == nil {
+			return fmt.Errorf("expected condition isLive to be %t, instead got nil (unset)", *expected)
+		}
+		if *expected != *actual {
+			return fmt.Errorf("expected condition isLive to be %t, instead got %t", *expected, 
*actual) + } + return nil + } +} + +func testAccCheckStorageBucketLifecycleConditionNoAge(expected *int64, b *storage.Bucket) resource.TestCheckFunc { + return func(s *terraform.State) error { + actual := b.Lifecycle.Rule[1].Condition.Age + if expected == nil && b.Lifecycle.Rule[1].Condition.Age == nil { + return nil + } + if expected == nil { + return fmt.Errorf("expected condition Age to be unset, instead got %d", *actual) + } + if actual == nil { + return fmt.Errorf("expected condition Age to be %d, instead got nil (unset)", *expected) + } + if *expected != *actual { + return fmt.Errorf("expected condition Age to be %d, instead got %d", *expected, *actual) + } + return nil + } +} + +func testAccStorageBucketDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + config := acctest.GoogleProviderConfig(t) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "google_storage_bucket" { + continue + } + + _, err := config.NewStorageClient(config.UserAgent).Buckets.Get(rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Bucket still exists") + } + } + + return nil + } +} + +func testAccStorageBucket_basic(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" +} +`, bucketName) +} + +func testAccStorageBucket_basicWithAutoclass(bucketName string, autoclass bool) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + autoclass { + enabled = %t + } +} +`, bucketName, autoclass) +} + +func testAccStorageBucket_basicWithAutoclass_update(bucketName string, autoclass bool) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + autoclass { + enabled = %t + terminal_storage_class = "ARCHIVE" + } +} +`, bucketName, autoclass) +} + +func testAccStorageBucket_requesterPays(bucketName 
string, pays bool) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + requester_pays = %t + force_destroy = true +} +`, bucketName, pays) +} + +func testAccStorageBucket_lowercaseLocation(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "eu" + force_destroy = true +} +`, bucketName) +} + +func testAccStorageBucket_dualLocation(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "ASIA" + force_destroy = true + custom_placement_config { + data_locations = ["ASIA-EAST1", "ASIA-SOUTHEAST1"] + } +} +`, bucketName) +} + +func testAccStorageBucket_dualLocation_lowercase(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "ASIA" + force_destroy = true + custom_placement_config { + data_locations = ["asia-east1", "asia-southeast1"] + } +} +`, bucketName) +} + +func testAccStorageBucket_dualLocation_rpo(bucketName string,rpo string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "ASIA" + force_destroy = true + custom_placement_config { + data_locations = ["ASIA-EAST1", "ASIA-SOUTHEAST1"] + } + rpo = "%s" +} +`, bucketName,rpo) +} + +func testAccStorageBucket_multiLocation_rpo(bucketName string,rpo string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "ASIA" + force_destroy = true + rpo = "%s" +} +`, bucketName,rpo) +} + +func testAccStorageBucket_customAttributes(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "EU" + force_destroy = "true" +} +`, bucketName) +} + +func testAccStorageBucket_customAttributes_withLifecycle1(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + 
name = "%s" + location = "EU" + force_destroy = "true" + lifecycle_rule { + action { + type = "Delete" + } + condition { + age = 10 + } + } +} +`, bucketName) +} + +func testAccStorageBucket_customAttributes_withLifecycle1Update(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "EU" + force_destroy = "true" + lifecycle_rule { + action { + type = "Delete" + } + condition { + age = 0 + } + } +} +`, bucketName) +} + +func testAccStorageBucket_customAttributes_withLifecycle2(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "EU" + force_destroy = "true" + lifecycle_rule { + action { + type = "SetStorageClass" + storage_class = "NEARLINE" + } + condition { + age = 2 + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + age = 10 + num_newer_versions = 2 + } + } +} +`, bucketName) +} + +func testAccStorageBucket_customAttributes_withLifecycleVirtualFieldsUpdate1(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "EU" + force_destroy = "true" + lifecycle_rule { + action { + type = "Delete" + } + condition { + age = 10 + no_age = false + days_since_noncurrent_time = 0 + send_days_since_noncurrent_time_if_zero = false + days_since_custom_time = 0 + send_days_since_custom_time_if_zero = false + num_newer_versions = 0 + send_num_newer_versions_if_zero = false + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + no_age = true + days_since_noncurrent_time = 0 + send_days_since_noncurrent_time_if_zero = true + days_since_custom_time = 0 + send_days_since_custom_time_if_zero = true + num_newer_versions = 0 + send_num_newer_versions_if_zero = true + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + send_days_since_noncurrent_time_if_zero = true + send_days_since_custom_time_if_zero = true + 
send_num_newer_versions_if_zero = true + } + } +} +`, bucketName) +} + +func testAccStorageBucket_customAttributes_withLifecycleVirtualFieldsUpdate2(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "EU" + force_destroy = "true" + lifecycle_rule { + action { + type = "Delete" + } + condition { + age = 10 + no_age = false + days_since_noncurrent_time = 0 + send_days_since_noncurrent_time_if_zero = true + days_since_custom_time = 0 + send_days_since_custom_time_if_zero = true + num_newer_versions = 0 + send_num_newer_versions_if_zero = true + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + age = 10 + no_age = true + custom_time_before = "2022-09-01" + days_since_noncurrent_time = 0 + send_days_since_noncurrent_time_if_zero = false + days_since_custom_time = 0 + send_days_since_custom_time_if_zero = false + num_newer_versions = 0 + send_num_newer_versions_if_zero = false + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + custom_time_before = "2022-09-01" + send_days_since_noncurrent_time_if_zero = false + send_days_since_custom_time_if_zero = false + send_num_newer_versions_if_zero = false + } + } +} +`, bucketName) +} + +func testAccStorageBucket_storageClass(bucketName, storageClass, location string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + storage_class = "%s" + location = "%s" + force_destroy = true +} +`, bucketName, storageClass, location) +} + +func testGoogleStorageBucketsCors(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + cors { + origin = ["abc", "def"] + method = ["a1a"] + response_header = ["123", "456", "789"] + max_age_seconds = 10 + } + + cors { + origin = ["ghi", "jkl"] + method = ["z9z"] + response_header = ["000"] + max_age_seconds = 5 + } +} +`, bucketName) +} + +func 
testAccStorageBucket_defaultEventBasedHold(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + default_event_based_hold = true + force_destroy = true +} +`, bucketName) +} + +func testAccStorageBucket_forceDestroyWithVersioning(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = "true" + versioning { + enabled = "true" + } +} +`, bucketName) +} + +func testAccStorageBucket_enable_object_retention(bucketName string, enabled string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = "true" + enable_object_retention = "%s" +} +`, bucketName, enabled) +} + +func testAccStorageBucket_versioning(bucketName, enabled string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + versioning { + enabled = "%s" + } +} +`, bucketName, enabled) +} + +func testAccStorageBucket_versioning_empty(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true +} +`, bucketName) +} + +func testAccStorageBucket_logging(bucketName string, logBucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + logging { + log_bucket = "%s" + } +} +`, bucketName, logBucketName) +} + +func testAccStorageBucket_loggingWithPrefix(bucketName string, logBucketName string, prefix string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + logging { + log_bucket = "%s" + log_object_prefix = "%s" + } +} +`, bucketName, logBucketName, prefix) +} + +func testAccStorageBucket_lifecycleRulesMultiple(bucketName string) string { + 
return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + lifecycle_rule { + action { + type = "SetStorageClass" + storage_class = "NEARLINE" + } + condition { + matches_storage_class = ["COLDLINE"] + age = 2 + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_storage_class = [] + age = 10 + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + custom_time_before = "2019-01-01" + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + noncurrent_time_before = "2019-01-01" + } + } + lifecycle_rule { + action { + type = "SetStorageClass" + storage_class = "NEARLINE" + } + condition { + created_before = "2019-01-01" + days_since_custom_time = 3 + } + } + lifecycle_rule { + action { + type = "SetStorageClass" + storage_class = "NEARLINE" + } + condition { + num_newer_versions = 10 + } + } + lifecycle_rule { + action { + type = "SetStorageClass" + storage_class = "ARCHIVE" + } + condition { + with_state = "ARCHIVED" + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_prefix = ["test"] + age = 2 + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_suffix = ["test"] + age = 2 + } + } + lifecycle_rule { + action { + type = "AbortIncompleteMultipartUpload" + } + condition { + age = 1 + } + } +} +`, bucketName) +} + +func testAccStorageBucket_lifecycleRulesMultiple_update(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + lifecycle_rule { + action { + type = "SetStorageClass" + storage_class = "NEARLINE" + } + condition { + matches_storage_class = ["COLDLINE"] + age = 2 + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_storage_class = [] + age = 10 + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + custom_time_before = 
"2019-01-12" + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + noncurrent_time_before = "2019-01-12" + } + } + lifecycle_rule { + action { + type = "SetStorageClass" + storage_class = "NEARLINE" + } + condition { + created_before = "2019-01-01" + days_since_custom_time = 5 + } + } + lifecycle_rule { + action { + type = "SetStorageClass" + storage_class = "NEARLINE" + } + condition { + num_newer_versions = 10 + } + } + lifecycle_rule { + action { + type = "SetStorageClass" + storage_class = "ARCHIVE" + } + condition { + with_state = "ARCHIVED" + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_prefix = ["test"] + age = 2 + } + } + lifecycle_rule { + action { + type = "Delete" + } + condition { + matches_suffix = ["test"] + age = 2 + } + } +} +`, bucketName) +} + +func testAccStorageBucket_lifecycleRule_emptyArchived(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + lifecycle_rule { + action { + type = "Delete" + } + + condition { + age = 10 + } + } +} +`, bucketName) +} + +func testAccStorageBucket_lifecycleRule_withStateArchived(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + lifecycle_rule { + action { + type = "Delete" + } + + condition { + age = 10 + with_state = "ARCHIVED" + } + } +} +`, bucketName) +} + +func testAccStorageBucket_lifecycleRule_withStateLive(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + lifecycle_rule { + action { + type = "Delete" + } + + condition { + age = 10 + with_state = "LIVE" + days_since_noncurrent_time = 5 + } + } + lifecycle_rule { + action { + type = "Delete" + } + + condition { + age = 2 + noncurrent_time_before = "2019-01-01" + } + } +} +`, bucketName) +} + +func 
testAccStorageBucket_lifecycleRule_withStateAny(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + lifecycle_rule { + action { + type = "Delete" + } + + condition { + age = 10 + with_state = "ANY" + } + } +} +`, bucketName) +} + +func testAccStorageBucket_labels(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + labels = { + my-label = "my-label-value" + } +} +`, bucketName) +} + +func testAccStorageBucket_uniformBucketAccessOnly(bucketName string, enabled bool) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = %t + force_destroy = true +} +`, bucketName, enabled) +} + +func testAccStorageBucket_publicAccessPrevention(bucketName string, prevention string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + public_access_prevention = "%s" + force_destroy = true +} +`, bucketName, prevention) +} + +func testAccStorageBucket_encryption(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_project" "acceptance" { + name = "tf-test-%{random_suffix}" + project_id = "tf-test-%{random_suffix}" + org_id = "%{organization}" + billing_account = "%{billing_account}" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + name = "tf-test-%{random_suffix}" + project = google_project_service.acceptance.project + location = "us" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "tf-test-%{random_suffix}" + key_ring = google_kms_key_ring.key_ring.id + rotation_period = "1000000s" +} + +data "google_storage_project_service_account" "gcs_account" { +} + 
+resource "google_kms_crypto_key_iam_member" "iam" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:${data.google_storage_project_service_account.gcs_account.email_address}" +} + +resource "google_storage_bucket" "bucket" { + name = "tf-test-crypto-bucket-%{random_int}" + location = "US" + force_destroy = true + encryption { + default_kms_key_name = google_kms_crypto_key.crypto_key.id + } + + depends_on = [google_kms_crypto_key_iam_member.iam] +} +`, context) +} + +func testAccStorageBucket_updateLabels(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + labels = { + my-label = "my-updated-label-value" + a-new-label = "a-new-label-value" + } +} +`, bucketName) +} + +func testAccStorageBucket_website(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "website" { + name = "%s.gcp.tfacc.hashicorptest.com" + location = "US" + storage_class = "STANDARD" + force_destroy = true + + website { + main_page_suffix = "index.html" + not_found_page = "404.html" + } +} +`, bucketName) +} + +func testAccStorageBucket_retentionPolicy(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + + retention_policy { + retention_period = 10 + } +} +`, bucketName) +} + +func testAccStorageBucket_lockedRetentionPolicy(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + + retention_policy { + is_locked = true + retention_period = 10 + } +} +`, bucketName) +} + +func testAccStorageBucket_SoftDeletePolicy(bucketName string, duration int) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + + 
soft_delete_policy { + retention_duration_seconds = %d + } +} +`, bucketName, duration) +} + +func testAccStorageBucket_websiteNoAttributes(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "website" { + name = "%s.gcp.tfacc.hashicorptest.com" + location = "US" + storage_class = "STANDARD" + force_destroy = true + + website { + } +} +`, bucketName) +} + +func testAccStorageBucket_websiteRemoved(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "website" { + name = "%s.gcp.tfacc.hashicorptest.com" + location = "US" + storage_class = "STANDARD" + force_destroy = true +} +`, bucketName) +} + +func testAccStorageBucket_websiteOneAttribute(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "website" { + name = "%s.gcp.tfacc.hashicorptest.com" + location = "US" + storage_class = "STANDARD" + force_destroy = true + + website { + main_page_suffix = "index.html" + } +} +`, bucketName) +} + +func testAccStorageBucket_websiteOneAttributeUpdate(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "website" { + name = "%s.gcp.tfacc.hashicorptest.com" + location = "US" + storage_class = "STANDARD" + force_destroy = true + + website { + main_page_suffix = "default.html" + } +} +`, bucketName) +} + +func testAccStorageBucket_forceDestroy(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true +} +`, bucketName) +} + +func testAccStorageBucket_forceDestroyWithRetentionPolicy(bucketName string) string { + return fmt.Sprintf(` +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + force_destroy = true + + retention_policy { + retention_period = 3600 + } +} +`, bucketName) +} diff --git a/mmv1/third_party/terraform/services/storagetransfer/go/resource_storage_transfer_job.go.tmpl 
b/mmv1/third_party/terraform/services/storagetransfer/go/resource_storage_transfer_job.go.tmpl new file mode 100644 index 000000000000..a901c6885194 --- /dev/null +++ b/mmv1/third_party/terraform/services/storagetransfer/go/resource_storage_transfer_job.go.tmpl @@ -0,0 +1,1313 @@ +package storagetransfer + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "google.golang.org/api/storagetransfer/v1" +) + +var ( + objectConditionsKeys = []string{ + "transfer_spec.0.object_conditions.0.min_time_elapsed_since_last_modification", + "transfer_spec.0.object_conditions.0.max_time_elapsed_since_last_modification", + "transfer_spec.0.object_conditions.0.include_prefixes", + "transfer_spec.0.object_conditions.0.exclude_prefixes", + "transfer_spec.0.object_conditions.0.last_modified_since", + "transfer_spec.0.object_conditions.0.last_modified_before", + } + + transferOptionsKeys = []string{ + "transfer_spec.0.transfer_options.0.overwrite_objects_already_existing_in_sink", + "transfer_spec.0.transfer_options.0.delete_objects_unique_in_sink", + "transfer_spec.0.transfer_options.0.delete_objects_from_source_after_transfer", + "transfer_spec.0.transfer_options.0.overwrite_when", + } + + transferSpecDataSourceKeys = []string{ + "transfer_spec.0.gcs_data_source", + "transfer_spec.0.aws_s3_data_source", + "transfer_spec.0.http_data_source", + "transfer_spec.0.azure_blob_storage_data_source", + "transfer_spec.0.posix_data_source", + } + transferSpecDataSinkKeys = []string{ + 
"transfer_spec.0.gcs_data_sink", + "transfer_spec.0.posix_data_sink", + } + awsS3AuthKeys = []string{ + "transfer_spec.0.aws_s3_data_source.0.aws_access_key", + "transfer_spec.0.aws_s3_data_source.0.role_arn", + } + {{- if ne $.TargetVersionName "ga" }} + azureOptionCredentials = []string{ + "transfer_spec.0.azure_blob_storage_data_source.0.azure_credentials", + "transfer_spec.0.azure_blob_storage_data_source.0.credentials_secret", + } + {{- end }} +) + +func ResourceStorageTransferJob() *schema.Resource { + return &schema.Resource{ + Create: resourceStorageTransferJobCreate, + Read: resourceStorageTransferJobRead, + Update: resourceStorageTransferJobUpdate, + Delete: resourceStorageTransferJobDelete, + Importer: &schema.ResourceImporter{ + State: resourceStorageTransferJobStateImporter, + }, + + CustomizeDiff: customdiff.All( + tpgresource.DefaultProviderProject, + ), + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The name of the Transfer Job.`, + }, + "description": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(0, 1024), + Description: `Unique description to identify the Transfer Job.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `The project in which the resource belongs. 
If it is not provided, the provider project is used.`, + }, + "event_stream": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"schedule"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Specifies a unique name of the resource such as AWS SQS ARN in the form 'arn:aws:sqs:region:account_id:queue_name', or Pub/Sub subscription resource name in the form 'projects/{project}/subscriptions/{sub}'", + }, + "event_stream_start_time": { + Type: schema.TypeString, + Optional: true, + Description: "Specifies the date and time that Storage Transfer Service starts listening for events from this stream. If no start time is specified or start time is in the past, Storage Transfer Service starts listening immediately", + ValidateFunc: validation.IsRFC3339Time, + }, + "event_stream_expiration_time": { + Type: schema.TypeString, + Optional: true, + Description: "Specifies the date and time at which Storage Transfer Service stops listening for events from this stream. After this time, any transfers in progress will complete, but no new transfers are initiated", + ValidateFunc: validation.IsRFC3339Time, + }, + }, + }, + }, + "transfer_spec": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "object_conditions": objectConditionsSchema(), + "transfer_options": transferOptionsSchema(), + "source_agent_pool_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Specifies the agent pool name associated with the posix data source. When unspecified, the default name is used.`, + }, + "sink_agent_pool_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + Description: `Specifies the agent pool name associated with the posix data sink. 
When unspecified, the default name is used.`, + }, + "gcs_data_sink": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: gcsDataSchema(), + ExactlyOneOf: transferSpecDataSinkKeys, + Description: `A Google Cloud Storage data sink.`, + }, + "posix_data_sink": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: posixDataSchema(), + ExactlyOneOf: transferSpecDataSinkKeys, + Description: `A POSIX filesystem data sink.`, + }, + "gcs_data_source": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: gcsDataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, + Description: `A Google Cloud Storage data source.`, + }, + "aws_s3_data_source": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: awsS3DataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, + Description: `An AWS S3 data source.`, + }, + "http_data_source": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: httpDataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, + Description: `A HTTP URL data source.`, + }, + "posix_data_source": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: posixDataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, + Description: `A POSIX filesystem data source.`, + }, + "azure_blob_storage_data_source": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: azureBlobStorageDataSchema(), + ExactlyOneOf: transferSpecDataSourceKeys, + Description: `An Azure Blob Storage data source.`, + }, + }, + }, + Description: `Transfer specification.`, + }, + "notification_config": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pubsub_topic": { + Type: schema.TypeString, + Required: true, + Description: `The Topic.name of the Pub/Sub topic to which to publish notifications.`, + }, + "event_types": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + 
ValidateFunc: validation.StringInSlice([]string{"TRANSFER_OPERATION_SUCCESS", "TRANSFER_OPERATION_FAILED", "TRANSFER_OPERATION_ABORTED"}, false), + }, + Description: `Event types for which a notification is desired. If empty, send notifications for all event types. The valid types are "TRANSFER_OPERATION_SUCCESS", "TRANSFER_OPERATION_FAILED", "TRANSFER_OPERATION_ABORTED".`, + }, + "payload_format": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice([]string{"NONE", "JSON"}, false), + Description: `The desired format of the notification message payloads. One of "NONE" or "JSON".`, + }, + }, + }, + Description: `Notification configuration.`, + }, + "schedule": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + ConflictsWith: []string{"event_stream"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "schedule_start_date": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: dateObjectSchema(), + Description: `The first day the recurring transfer is scheduled to run. If schedule_start_date is in the past, the transfer will run for the first time on the following day.`, + }, + "schedule_end_date": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: dateObjectSchema(), + Description: `The last day the recurring transfer will be run. If schedule_end_date is the same as schedule_start_date, the transfer will be executed only once.`, + }, + "start_time_of_day": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: timeObjectSchema(), + DiffSuppressFunc: diffSuppressEmptyStartTimeOfDay, + Description: `The time in UTC at which the transfer will be scheduled to start in a day. Transfers may start later than this time. If not specified, recurring and one-time transfers that are scheduled to run today will run immediately; recurring transfers that are scheduled to run on a future date will start at approximately midnight UTC on that date. 
Note that when configuring a transfer with the Cloud Platform Console, the transfer's start time in a day is specified in your local timezone.`, + }, + "repeat_interval": { + Type: schema.TypeString, + ValidateFunc: verify.ValidateDuration(), + Optional: true, + Description: `Interval between the start of each scheduled transfer. If unspecified, the default value is 24 hours. This value may not be less than 1 hour. A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + Default: "86400s", + }, + }, + }, + Description: `Schedule specification defining when the Transfer Job should be scheduled to start, end and what time to run.`, + }, + "status": { + Type: schema.TypeString, + Optional: true, + Default: "ENABLED", + ValidateFunc: validation.StringInSlice([]string{"ENABLED", "DISABLED", "DELETED"}, false), + Description: `Status of the job. Default: ENABLED. NOTE: The effect of the new job status takes place during a subsequent job run. For example, if you change the job status from ENABLED to DISABLED, and an operation spawned by the transfer is running, the status change would not affect the current operation.`, + }, + "creation_time": { + Type: schema.TypeString, + Computed: true, + Description: `When the Transfer Job was created.`, + }, + "last_modification_time": { + Type: schema.TypeString, + Computed: true, + Description: `When the Transfer Job was last modified.`, + }, + "deletion_time": { + Type: schema.TypeString, + Computed: true, + Description: `When the Transfer Job was deleted.`, + }, + }, + UseJSONNumber: true, + } +} + +func objectConditionsSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "min_time_elapsed_since_last_modification": { + Type: schema.TypeString, + ValidateFunc: verify.ValidateDuration(), + Optional: true, + AtLeastOneOf: objectConditionsKeys, + Description: `A duration in 
seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + "max_time_elapsed_since_last_modification": { + Type: schema.TypeString, + ValidateFunc: verify.ValidateDuration(), + Optional: true, + AtLeastOneOf: objectConditionsKeys, + Description: `A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s".`, + }, + "include_prefixes": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: objectConditionsKeys, + Elem: &schema.Schema{ + MaxItems: 1000, + Type: schema.TypeString, + }, + Description: `If include_prefixes is specified, objects that satisfy the object conditions must have names that start with one of the include_prefixes and that do not start with any of the exclude_prefixes. If include_prefixes is not specified, all objects except those that have names starting with one of the exclude_prefixes must satisfy the object conditions.`, + }, + "exclude_prefixes": { + Type: schema.TypeList, + Optional: true, + AtLeastOneOf: objectConditionsKeys, + Elem: &schema.Schema{ + MaxItems: 1000, + Type: schema.TypeString, + }, + Description: `exclude_prefixes must follow the requirements described for include_prefixes.`, + }, + "last_modified_since": { + Type: schema.TypeString, + ValidateFunc: verify.ValidateRFC3339Date, + Optional: true, + AtLeastOneOf: objectConditionsKeys, + Description: `If specified, only objects with a "last modification time" on or after this timestamp and objects that don't have a "last modification time" are transferred. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "last_modified_before": { + Type: schema.TypeString, + ValidateFunc: verify.ValidateRFC3339Date, + Optional: true, + AtLeastOneOf: objectConditionsKeys, + Description: `If specified, only objects with a "last modification time" before this timestamp and objects that don't have a "last modification time" are transferred. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + }, + }, + Description: `Only objects that satisfy these object conditions are included in the set of data source and data sink objects. Object conditions based on objects' last_modification_time do not exclude objects in a data sink.`, + } +} + +func transferOptionsSchema() *schema.Schema { + return &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "overwrite_objects_already_existing_in_sink": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: transferOptionsKeys, + Description: `Whether overwriting objects that already exist in the sink is allowed.`, + }, + "delete_objects_unique_in_sink": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: transferOptionsKeys, + ConflictsWith: []string{"transfer_spec.transfer_options.delete_objects_from_source_after_transfer"}, + Description: `Whether objects that exist only in the sink should be deleted. Note that this option and delete_objects_from_source_after_transfer are mutually exclusive.`, + }, + "delete_objects_from_source_after_transfer": { + Type: schema.TypeBool, + Optional: true, + AtLeastOneOf: transferOptionsKeys, + ConflictsWith: []string{"transfer_spec.transfer_options.delete_objects_unique_in_sink"}, + Description: `Whether objects should be deleted from the source after they are transferred to the sink. 
Note that this option and delete_objects_unique_in_sink are mutually exclusive.`, + }, + "overwrite_when": { + Type: schema.TypeString, + Optional: true, + AtLeastOneOf: transferOptionsKeys, + ValidateFunc: validation.StringInSlice([]string{"DIFFERENT", "NEVER", "ALWAYS"}, false), + Description: `When to overwrite objects that already exist in the sink. If not set, overwrite behavior is determined by overwriteObjectsAlreadyExistingInSink.`, + }, + }, + }, + Description: `Characteristics of how to treat files from datasource and sink during job. If the option delete_objects_unique_in_sink is true, object conditions based on objects' last_modification_time are ignored and do not exclude objects in a data source or a data sink.`, + } +} + +func timeObjectSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hours": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 24), + Description: `Hours of day in 24 hour format. Should be from 0 to 23.`, + }, + "minutes": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 59), + Description: `Minutes of hour of day. Must be from 0 to 59.`, + }, + "seconds": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 60), + Description: `Seconds of minutes of the time. Must normally be from 0 to 59.`, + }, + "nanos": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 999999999), + Description: `Fractions of seconds in nanoseconds. Must be from 0 to 999,999,999.`, + }, + }, + } +} + +func dateObjectSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "year": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 9999), + Description: `Year of date. 
Must be from 1 to 9999.`, + }, + + "month": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(1, 12), + Description: `Month of year. Must be from 1 to 12.`, + }, + + "day": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 31), + Description: `Day of month. Must be from 1 to 31 and valid for the year and month.`, + }, + }, + } +} + +func gcsDataSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_name": { + Required: true, + Type: schema.TypeString, + Description: `Google Cloud Storage bucket name.`, + }, + "path": { + Optional: true, + Computed: true, + Type: schema.TypeString, + Description: `Google Cloud Storage path in bucket to transfer`, + }, + }, + } +} + +func awsS3DataSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_name": { + Required: true, + Type: schema.TypeString, + Description: `S3 Bucket name.`, + }, + "path": { + Optional: true, + Type: schema.TypeString, + Description: `S3 Bucket path in bucket to transfer.`, + }, + "aws_access_key": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "access_key_id": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + Description: `AWS Key ID.`, + }, + "secret_access_key": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + Description: `AWS Secret Access Key.`, + }, + }, + }, + ExactlyOneOf: awsS3AuthKeys, + Description: `AWS credentials block.`, + }, + "role_arn": { + Type: schema.TypeString, + Optional: true, + ExactlyOneOf: awsS3AuthKeys, + Description: `The Amazon Resource Name (ARN) of the role to support temporary credentials via 'AssumeRoleWithWebIdentity'. For more information about ARNs, see [IAM ARNs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-arns). 
When a role ARN is provided, Transfer Service fetches temporary credentials for the session using a 'AssumeRoleWithWebIdentity' call for the provided role using the [GoogleServiceAccount][] for this project.`, + }, + }, + } +} + +func httpDataSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "list_url": { + Type: schema.TypeString, + Required: true, + Description: `The URL that points to the file that stores the object list entries. This file must allow public access. Currently, only URLs with HTTP and HTTPS schemes are supported.`, + }, + }, + } +} + +func posixDataSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "root_directory": { + Type: schema.TypeString, + Required: true, + Description: `Root directory path to the filesystem.`, + }, + }, + } +} + +func azureBlobStorageDataSchema() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "storage_account": { + Required: true, + Type: schema.TypeString, + Description: `The name of the Azure Storage account.`, + }, + "container": { + Required: true, + Type: schema.TypeString, + Description: `The container to transfer from the Azure Storage account.`, + }, + "path": { + Optional: true, + Computed: true, + Type: schema.TypeString, + Description: `Root path to transfer objects. Must be an empty string or full path name that ends with a '/'. This field is treated as an object prefix. 
As such, it should generally not begin with a '/'.`, + }, + "azure_credentials": { + Type: schema.TypeList, + {{- if ne $.TargetVersionName "ga" }} + Optional: true, + ExactlyOneOf: azureOptionCredentials, + {{- else }} + Required: true, + {{- end }} + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "sas_token": { + Type: schema.TypeString, + Required: true, + Sensitive: true, + Description: `Azure shared access signature.`, + }, + }, + }, + Description: ` Credentials used to authenticate API requests to Azure.`, + }, + {{- if ne $.TargetVersionName "ga" }} + "credentials_secret": { + Optional: true, + Type: schema.TypeString, + Description: `The Resource name of a secret in Secret Manager containing SAS Credentials in JSON form. Service Agent must have permissions to access secret. If credentials_secret is specified, do not specify azure_credentials.`, + ExactlyOneOf: azureOptionCredentials, + }, + {{- end }} + }, + } +} + +func diffSuppressEmptyStartTimeOfDay(k, old, new string, d *schema.ResourceData) bool { + return k == "schedule.0.start_time_of_day.#" && old == "1" && new == "0" +} + +func resourceStorageTransferJobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + transferJob := &storagetransfer.TransferJob{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + ProjectId: project, + Status: d.Get("status").(string), + Schedule: expandTransferSchedules(d.Get("schedule").([]interface{})), + EventStream: expandEventStream(d.Get("event_stream").([]interface{})), + TransferSpec: expandTransferSpecs(d.Get("transfer_spec").([]interface{})), + NotificationConfig: expandTransferJobNotificationConfig(d.Get("notification_config").([]interface{})), + } + + var res 
*storagetransfer.TransferJob + + err = transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() error { + res, err = config.NewStorageTransferClient(userAgent).TransferJobs.Create(transferJob).Do() + return err + }, + }) + + if err != nil { + fmt.Printf("Error creating transfer job %v: %v", transferJob, err) + return err + } + + if err := d.Set("name", res.Name); err != nil { + return fmt.Errorf("Error setting name: %s", err) + } + + name := tpgresource.GetResourceNameFromSelfLink(res.Name) + d.SetId(fmt.Sprintf("%s/%s", project, name)) + + return resourceStorageTransferJobRead(d, meta) +} + +func resourceStorageTransferJobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + name := d.Get("name").(string) + res, err := config.NewStorageTransferClient(userAgent).TransferJobs.Get(name, project).Do() + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("Transfer Job %q", name)) + } + + if res.Status == "DELETED" { + d.SetId("") + return nil + } + + if err := d.Set("project", res.ProjectId); err != nil { + return fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("description", res.Description); err != nil { + return fmt.Errorf("Error setting description: %s", err) + } + if err := d.Set("status", res.Status); err != nil { + return fmt.Errorf("Error setting status: %s", err) + } + if err := d.Set("last_modification_time", res.LastModificationTime); err != nil { + return fmt.Errorf("Error setting last_modification_time: %s", err) + } + if err := d.Set("creation_time", res.CreationTime); err != nil { + return fmt.Errorf("Error setting creation_time: %s", err) + } + if err := d.Set("deletion_time", res.DeletionTime); err != nil { + return fmt.Errorf("Error setting 
deletion_time: %s", err) + } + + err = d.Set("schedule", flattenTransferSchedule(res.Schedule)) + if err != nil { + return err + } + + err = d.Set("event_stream", flattenTransferEventStream(res.EventStream)) + if err != nil { + return err + } + + err = d.Set("transfer_spec", flattenTransferSpec(res.TransferSpec, d)) + if err != nil { + return err + } + + err = d.Set("notification_config", flattenTransferJobNotificationConfig(res.NotificationConfig)) + if err != nil { + return err + } + + return nil +} + +func resourceStorageTransferJobUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + transferJob := &storagetransfer.TransferJob{} + fieldMask := []string{} + + if d.HasChange("event_stream") { + fieldMask = append(fieldMask, "event_stream") + if v, ok := d.GetOk("event_stream"); ok { + transferJob.EventStream = expandEventStream(v.([]interface{})) + } + } + + if d.HasChange("description") { + fieldMask = append(fieldMask, "description") + if v, ok := d.GetOk("description"); ok { + transferJob.Description = v.(string) + } + } + + if d.HasChange("status") { + fieldMask = append(fieldMask, "status") + if v, ok := d.GetOk("status"); ok { + transferJob.Status = v.(string) + } + } + + if d.HasChange("schedule") { + fieldMask = append(fieldMask, "schedule") + if v, ok := d.GetOk("schedule"); ok { + transferJob.Schedule = expandTransferSchedules(v.([]interface{})) + } + } + + if d.HasChange("transfer_spec") { + fieldMask = append(fieldMask, "transfer_spec") + if v, ok := d.GetOk("transfer_spec"); ok { + transferJob.TransferSpec = expandTransferSpecs(v.([]interface{})) + } + } + + if d.HasChange("notification_config") { + fieldMask = append(fieldMask, "notification_config") + if v, ok := d.GetOk("notification_config"); ok { + 
transferJob.NotificationConfig = expandTransferJobNotificationConfig(v.([]interface{})) + } else { + transferJob.NotificationConfig = nil + } + } + + if len(fieldMask) == 0 { + return nil + } + + updateRequest := &storagetransfer.UpdateTransferJobRequest{ + ProjectId: project, + TransferJob: transferJob, + } + + updateRequest.UpdateTransferJobFieldMask = strings.Join(fieldMask, ",") + + res, err := config.NewStorageTransferClient(userAgent).TransferJobs.Patch(d.Get("name").(string), updateRequest).Do() + if err != nil { + return err + } + + log.Printf("[DEBUG] Patched transfer job: %v\n\n", res.Name) + return nil +} + +func resourceStorageTransferJobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + transferJobName := d.Get("name").(string) + + transferJob := &storagetransfer.TransferJob{ + Status: "DELETED", + } + + fieldMask := "status" + + updateRequest := &storagetransfer.UpdateTransferJobRequest{ + ProjectId: project, + TransferJob: transferJob, + } + + updateRequest.UpdateTransferJobFieldMask = fieldMask + + // Update transfer job with status set to DELETE + log.Printf("[DEBUG] Setting status to DELETE for: %v\n\n", transferJobName) + err = retry.Retry(1*time.Minute, func() *retry.RetryError { + _, err := config.NewStorageTransferClient(userAgent).TransferJobs.Patch(transferJobName, updateRequest).Do() + if err != nil { + return retry.RetryableError(err) + } + + return nil + }) + + if err != nil { + fmt.Printf("Error deleting transfer job %v: %v\n\n", transferJob, err) + return err + } + + log.Printf("[DEBUG] Deleted transfer job %v\n\n", transferJob) + + return nil +} + +func resourceStorageTransferJobStateImporter(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + parts := 
strings.Split(d.Id(), "/") + switch len(parts) { + case 2: + if err := d.Set("project", parts[0]); err != nil { + return nil, fmt.Errorf("Error setting project: %s", err) + } + if err := d.Set("name", fmt.Sprintf("transferJobs/%s", parts[1])); err != nil { + return nil, fmt.Errorf("Error setting name: %s", err) + } + default: + return nil, fmt.Errorf("Invalid transfer job specifier. Expecting {projectId}/{transferJobName}") + } + return []*schema.ResourceData{d}, nil +} + +func expandDates(dates []interface{}) *storagetransfer.Date { + if len(dates) == 0 || dates[0] == nil { + return nil + } + + dateMap := dates[0].(map[string]interface{}) + date := &storagetransfer.Date{} + if v, ok := dateMap["day"]; ok { + date.Day = int64(v.(int)) + } + + if v, ok := dateMap["month"]; ok { + date.Month = int64(v.(int)) + } + + if v, ok := dateMap["year"]; ok { + date.Year = int64(v.(int)) + } + + log.Printf("[DEBUG] not nil date: %#v", dates) + return date +} + +func flattenDate(date *storagetransfer.Date) []map[string]interface{} { + data := map[string]interface{}{ + "year": date.Year, + "month": date.Month, + "day": date.Day, + } + + return []map[string]interface{}{data} +} + +func expandTimeOfDays(times []interface{}) *storagetransfer.TimeOfDay { + if len(times) == 0 || times[0] == nil { + return nil + } + + timeMap := times[0].(map[string]interface{}) + time := &storagetransfer.TimeOfDay{} + if v, ok := timeMap["hours"]; ok { + time.Hours = int64(v.(int)) + } + + if v, ok := timeMap["minutes"]; ok { + time.Minutes = int64(v.(int)) + } + + if v, ok := timeMap["seconds"]; ok { + time.Seconds = int64(v.(int)) + } + + if v, ok := timeMap["nanos"]; ok { + time.Nanos = int64(v.(int)) + } + + return time +} + +func flattenTimeOfDay(timeOfDay *storagetransfer.TimeOfDay) []map[string]interface{} { + data := map[string]interface{}{ + "hours": timeOfDay.Hours, + "minutes": timeOfDay.Minutes, + "seconds": timeOfDay.Seconds, + "nanos": timeOfDay.Nanos, + } + + return 
[]map[string]interface{}{data} +} + +func expandTransferSchedules(transferSchedules []interface{}) *storagetransfer.Schedule { + if len(transferSchedules) == 0 || transferSchedules[0] == nil { + return nil + } + + schedule := transferSchedules[0].(map[string]interface{}) + return &storagetransfer.Schedule{ + ScheduleStartDate: expandDates(schedule["schedule_start_date"].([]interface{})), + ScheduleEndDate: expandDates(schedule["schedule_end_date"].([]interface{})), + StartTimeOfDay: expandTimeOfDays(schedule["start_time_of_day"].([]interface{})), + RepeatInterval: schedule["repeat_interval"].(string), + } +} + +func flattenTransferSchedule(transferSchedule *storagetransfer.Schedule) []map[string]interface{} { + if transferSchedule == nil || reflect.DeepEqual(transferSchedule, &storagetransfer.Schedule{}) { + return nil + } + + data := map[string]interface{}{ + "schedule_start_date": flattenDate(transferSchedule.ScheduleStartDate), + } + + if transferSchedule.ScheduleEndDate != nil { + data["schedule_end_date"] = flattenDate(transferSchedule.ScheduleEndDate) + } + + if transferSchedule.StartTimeOfDay != nil { + data["start_time_of_day"] = flattenTimeOfDay(transferSchedule.StartTimeOfDay) + } + + if transferSchedule.RepeatInterval != "" { + data["repeat_interval"] = transferSchedule.RepeatInterval + } + + return []map[string]interface{}{data} +} + +func expandEventStream(e []interface{}) *storagetransfer.EventStream { + if len(e) == 0 || e[0] == nil { + return nil + } + + eventStream := e[0].(map[string]interface{}) + return &storagetransfer.EventStream{ + Name: eventStream["name"].(string), + EventStreamStartTime: eventStream["event_stream_start_time"].(string), + EventStreamExpirationTime: eventStream["event_stream_expiration_time"].(string), + } +} + +func flattenTransferEventStream(eventStream *storagetransfer.EventStream) []map[string]interface{} { + if eventStream == nil || reflect.DeepEqual(eventStream, &storagetransfer.EventStream{}) { + return nil + } + + 
data := map[string]interface{}{ + "name": eventStream.Name, + } + + if eventStream.EventStreamStartTime != "" { + data["event_stream_start_time"] = eventStream.EventStreamStartTime + } + + if eventStream.EventStreamExpirationTime != "" { + data["event_stream_expiration_time"] = eventStream.EventStreamExpirationTime + } + + return []map[string]interface{}{data} +} + +func expandGcsData(gcsDatas []interface{}) *storagetransfer.GcsData { + if len(gcsDatas) == 0 || gcsDatas[0] == nil { + return nil + } + + gcsData := gcsDatas[0].(map[string]interface{}) + var apiData = &storagetransfer.GcsData{ + BucketName: gcsData["bucket_name"].(string), + } + var path = gcsData["path"].(string) + apiData.Path = path + + return apiData +} + +func flattenGcsData(gcsData *storagetransfer.GcsData) []map[string]interface{} { + data := map[string]interface{}{ + "bucket_name": gcsData.BucketName, + "path": gcsData.Path, + } + return []map[string]interface{}{data} +} + +func expandAwsAccessKeys(awsAccessKeys []interface{}) *storagetransfer.AwsAccessKey { + if len(awsAccessKeys) == 0 || awsAccessKeys[0] == nil { + return nil + } + + awsAccessKey := awsAccessKeys[0].(map[string]interface{}) + return &storagetransfer.AwsAccessKey{ + AccessKeyId: awsAccessKey["access_key_id"].(string), + SecretAccessKey: awsAccessKey["secret_access_key"].(string), + } +} + +func flattenAwsAccessKeys(d *schema.ResourceData) []map[string]interface{} { + data := map[string]interface{}{ + "access_key_id": d.Get("transfer_spec.0.aws_s3_data_source.0.aws_access_key.0.access_key_id"), + "secret_access_key": d.Get("transfer_spec.0.aws_s3_data_source.0.aws_access_key.0.secret_access_key"), + } + + return []map[string]interface{}{data} +} + +func expandAwsS3Data(awsS3Datas []interface{}) *storagetransfer.AwsS3Data { + if len(awsS3Datas) == 0 || awsS3Datas[0] == nil { + return nil + } + + awsS3Data := awsS3Datas[0].(map[string]interface{}) + return &storagetransfer.AwsS3Data{ + BucketName: 
awsS3Data["bucket_name"].(string),
		AwsAccessKey: expandAwsAccessKeys(awsS3Data["aws_access_key"].([]interface{})),
		RoleArn:      awsS3Data["role_arn"].(string),
		Path:         awsS3Data["path"].(string),
	}
}

// flattenAwsS3Data converts an API AwsS3Data object back into its schema
// representation. The API never returns AWS access keys, so they are re-read
// from state (via flattenAwsAccessKeys) and only emitted when the
// configuration actually set an aws_access_key block.
func flattenAwsS3Data(awsS3Data *storagetransfer.AwsS3Data, d *schema.ResourceData) []map[string]interface{} {
	data := map[string]interface{}{
		"bucket_name": awsS3Data.BucketName,
		"path":        awsS3Data.Path,
		"role_arn":    awsS3Data.RoleArn,
	}
	if _, exist := d.GetOkExists("transfer_spec.0.aws_s3_data_source.0.aws_access_key"); exist {
		data["aws_access_key"] = flattenAwsAccessKeys(d)
	}
	return []map[string]interface{}{data}
}

// expandHttpData converts the singleton http_data_source schema list into an
// API HttpData object. Returns nil when the block is absent.
func expandHttpData(httpDatas []interface{}) *storagetransfer.HttpData {
	if len(httpDatas) == 0 || httpDatas[0] == nil {
		return nil
	}

	httpData := httpDatas[0].(map[string]interface{})
	return &storagetransfer.HttpData{
		ListUrl: httpData["list_url"].(string),
	}
}

// flattenHttpData converts an API HttpData object back into its schema
// representation.
func flattenHttpData(httpData *storagetransfer.HttpData) []map[string]interface{} {
	data := map[string]interface{}{
		"list_url": httpData.ListUrl,
	}

	return []map[string]interface{}{data}
}

// expandPosixData converts a singleton posix_data_source/posix_data_sink
// schema list into an API PosixFilesystem object. Returns nil when the block
// is absent.
func expandPosixData(posixDatas []interface{}) *storagetransfer.PosixFilesystem {
	if len(posixDatas) == 0 || posixDatas[0] == nil {
		return nil
	}

	posixData := posixDatas[0].(map[string]interface{})
	return &storagetransfer.PosixFilesystem{
		RootDirectory: posixData["root_directory"].(string),
	}
}

// flattenPosixData converts an API PosixFilesystem object back into its
// schema representation.
func flattenPosixData(posixData *storagetransfer.PosixFilesystem) []map[string]interface{} {
	data := map[string]interface{}{
		"root_directory": posixData.RootDirectory,
	}

	return []map[string]interface{}{data}
}

// expandAzureCredentials converts the singleton azure_credentials schema list
// into an API AzureCredentials object. Returns nil when the block is absent.
func expandAzureCredentials(azureCredentials []interface{}) *storagetransfer.AzureCredentials {
	if len(azureCredentials) == 0 || azureCredentials[0] == nil {
		return nil
	}

	azureCredential := azureCredentials[0].(map[string]interface{})
	return &storagetransfer.AzureCredentials{
		SasToken:
azureCredential["sas_token"].(string), + } +} + +func flattenAzureCredentials(d *schema.ResourceData) []map[string]interface{} { + {{- if ne $.TargetVersionName "ga" }} + if d.Get("transfer_spec.0.azure_blob_storage_data_source.0.azure_credentials.0.sas_token") == "" { + return []map[string]interface{}{} + } + {{- end }} + data := map[string]interface{}{ + "sas_token": d.Get("transfer_spec.0.azure_blob_storage_data_source.0.azure_credentials.0.sas_token"), + } + + return []map[string]interface{}{data} +} + +func expandAzureBlobStorageData(azureBlobStorageDatas []interface{}) *storagetransfer.AzureBlobStorageData { + if len(azureBlobStorageDatas) == 0 || azureBlobStorageDatas[0] == nil { + return nil + } + + azureBlobStorageData := azureBlobStorageDatas[0].(map[string]interface{}) + + return &storagetransfer.AzureBlobStorageData{ + Container: azureBlobStorageData["container"].(string), + Path: azureBlobStorageData["path"].(string), + StorageAccount: azureBlobStorageData["storage_account"].(string), + AzureCredentials: expandAzureCredentials(azureBlobStorageData["azure_credentials"].([]interface{})), + {{- if ne $.TargetVersionName "ga" }} + CredentialsSecret: azureBlobStorageData["credentials_secret"].(string), + {{- end }} + } +} + +func flattenAzureBlobStorageData(azureBlobStorageData *storagetransfer.AzureBlobStorageData, d *schema.ResourceData) []map[string]interface{} { + data := map[string]interface{}{ + "container": azureBlobStorageData.Container, + "path": azureBlobStorageData.Path, + "storage_account": azureBlobStorageData.StorageAccount, + "azure_credentials": flattenAzureCredentials(d), + {{- if ne $.TargetVersionName "ga" }} + "credentials_secret": azureBlobStorageData.CredentialsSecret, + {{- end }} + } + + return []map[string]interface{}{data} +} + +func expandObjectConditions(conditions []interface{}) *storagetransfer.ObjectConditions { + if len(conditions) == 0 || conditions[0] == nil { + return nil + } + + condition := 
conditions[0].(map[string]interface{}) + return &storagetransfer.ObjectConditions{ + ExcludePrefixes: tpgresource.ConvertStringArr(condition["exclude_prefixes"].([]interface{})), + IncludePrefixes: tpgresource.ConvertStringArr(condition["include_prefixes"].([]interface{})), + MaxTimeElapsedSinceLastModification: condition["max_time_elapsed_since_last_modification"].(string), + MinTimeElapsedSinceLastModification: condition["min_time_elapsed_since_last_modification"].(string), + LastModifiedSince: condition["last_modified_since"].(string), + LastModifiedBefore: condition["last_modified_before"].(string), + } +} + +func flattenObjectCondition(condition *storagetransfer.ObjectConditions) []map[string]interface{} { + data := map[string]interface{}{ + "exclude_prefixes": condition.ExcludePrefixes, + "include_prefixes": condition.IncludePrefixes, + "max_time_elapsed_since_last_modification": condition.MaxTimeElapsedSinceLastModification, + "min_time_elapsed_since_last_modification": condition.MinTimeElapsedSinceLastModification, + "last_modified_since": condition.LastModifiedSince, + "last_modified_before": condition.LastModifiedBefore, + } + return []map[string]interface{}{data} +} + +func expandTransferOptions(options []interface{}) *storagetransfer.TransferOptions { + if len(options) == 0 || options[0] == nil { + return nil + } + + option := options[0].(map[string]interface{}) + return &storagetransfer.TransferOptions{ + DeleteObjectsFromSourceAfterTransfer: option["delete_objects_from_source_after_transfer"].(bool), + DeleteObjectsUniqueInSink: option["delete_objects_unique_in_sink"].(bool), + OverwriteObjectsAlreadyExistingInSink: option["overwrite_objects_already_existing_in_sink"].(bool), + OverwriteWhen: option["overwrite_when"].(string), + } +} + +func flattenTransferOption(option *storagetransfer.TransferOptions) []map[string]interface{} { + data := map[string]interface{}{ + "delete_objects_from_source_after_transfer": 
option.DeleteObjectsFromSourceAfterTransfer, + "delete_objects_unique_in_sink": option.DeleteObjectsUniqueInSink, + "overwrite_objects_already_existing_in_sink": option.OverwriteObjectsAlreadyExistingInSink, + "overwrite_when": option.OverwriteWhen, + } + + return []map[string]interface{}{data} +} + +func expandTransferSpecs(transferSpecs []interface{}) *storagetransfer.TransferSpec { + if len(transferSpecs) == 0 || transferSpecs[0] == nil { + return nil + } + + transferSpec := transferSpecs[0].(map[string]interface{}) + return &storagetransfer.TransferSpec{ + SourceAgentPoolName: transferSpec["source_agent_pool_name"].(string), + SinkAgentPoolName: transferSpec["sink_agent_pool_name"].(string), + GcsDataSink: expandGcsData(transferSpec["gcs_data_sink"].([]interface{})), + PosixDataSink: expandPosixData(transferSpec["posix_data_sink"].([]interface{})), + ObjectConditions: expandObjectConditions(transferSpec["object_conditions"].([]interface{})), + TransferOptions: expandTransferOptions(transferSpec["transfer_options"].([]interface{})), + GcsDataSource: expandGcsData(transferSpec["gcs_data_source"].([]interface{})), + AwsS3DataSource: expandAwsS3Data(transferSpec["aws_s3_data_source"].([]interface{})), + HttpDataSource: expandHttpData(transferSpec["http_data_source"].([]interface{})), + AzureBlobStorageDataSource: expandAzureBlobStorageData(transferSpec["azure_blob_storage_data_source"].([]interface{})), + PosixDataSource: expandPosixData(transferSpec["posix_data_source"].([]interface{})), + } +} + +func flattenTransferSpec(transferSpec *storagetransfer.TransferSpec, d *schema.ResourceData) []map[string]interface{} { + + data := map[string]interface{}{} + + data["sink_agent_pool_name"] = transferSpec.SinkAgentPoolName + data["source_agent_pool_name"] = transferSpec.SourceAgentPoolName + + if transferSpec.GcsDataSink != nil { + data["gcs_data_sink"] = flattenGcsData(transferSpec.GcsDataSink) + } + if transferSpec.PosixDataSink != nil { + data["posix_data_sink"] = 
flattenPosixData(transferSpec.PosixDataSink) + } + + if transferSpec.ObjectConditions != nil { + data["object_conditions"] = flattenObjectCondition(transferSpec.ObjectConditions) + } + if transferSpec.TransferOptions != nil && + (usingPosix(transferSpec) == false || + (usingPosix(transferSpec) == true && reflect.DeepEqual(transferSpec.TransferOptions, &storagetransfer.TransferOptions{}) == false)) { + data["transfer_options"] = flattenTransferOption(transferSpec.TransferOptions) + } + if transferSpec.GcsDataSource != nil { + data["gcs_data_source"] = flattenGcsData(transferSpec.GcsDataSource) + } else if transferSpec.AwsS3DataSource != nil { + data["aws_s3_data_source"] = flattenAwsS3Data(transferSpec.AwsS3DataSource, d) + } else if transferSpec.HttpDataSource != nil { + data["http_data_source"] = flattenHttpData(transferSpec.HttpDataSource) + } else if transferSpec.AzureBlobStorageDataSource != nil { + data["azure_blob_storage_data_source"] = flattenAzureBlobStorageData(transferSpec.AzureBlobStorageDataSource, d) + } else if transferSpec.PosixDataSource != nil { + data["posix_data_source"] = flattenPosixData(transferSpec.PosixDataSource) + } + + return []map[string]interface{}{data} +} + +func usingPosix(transferSpec *storagetransfer.TransferSpec) bool { + return transferSpec.PosixDataSource != nil || transferSpec.PosixDataSink != nil +} + +func expandTransferJobNotificationConfig(notificationConfigs []interface{}) *storagetransfer.NotificationConfig { + if len(notificationConfigs) == 0 || notificationConfigs[0] == nil { + return nil + } + + notificationConfig := notificationConfigs[0].(map[string]interface{}) + var apiData = &storagetransfer.NotificationConfig{ + PayloadFormat: notificationConfig["payload_format"].(string), + PubsubTopic: notificationConfig["pubsub_topic"].(string), + } + + if notificationConfig["event_types"] != nil { + apiData.EventTypes = tpgresource.ConvertStringArr(notificationConfig["event_types"].(*schema.Set).List()) + } + + 
log.Printf("[DEBUG] apiData: %v\n\n", apiData) + return apiData +} + +func flattenTransferJobNotificationConfig(notificationConfig *storagetransfer.NotificationConfig) []map[string]interface{} { + if notificationConfig == nil { + return nil + } + + data := map[string]interface{}{ + "payload_format": notificationConfig.PayloadFormat, + "pubsub_topic": notificationConfig.PubsubTopic, + } + + if notificationConfig.EventTypes != nil { + data["event_types"] = tpgresource.ConvertStringArrToInterface(notificationConfig.EventTypes) + } + + return []map[string]interface{}{data} +} diff --git a/mmv1/third_party/terraform/services/tags/go/resource_tags_location_tag_bindings.go.tmpl b/mmv1/third_party/terraform/services/tags/go/resource_tags_location_tag_bindings.go.tmpl new file mode 100644 index 000000000000..6a68145bbe7a --- /dev/null +++ b/mmv1/third_party/terraform/services/tags/go/resource_tags_location_tag_bindings.go.tmpl @@ -0,0 +1,388 @@ +package tags + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func ResourceTagsLocationTagBinding() *schema.Resource { + return &schema.Resource{ + Create: resourceTagsLocationTagBindingCreate, + Read: resourceTagsLocationTagBindingRead, + Delete: resourceTagsLocationTagBindingDelete, + + Importer: &schema.ResourceImporter{ + State: resourceTagsLocationTagBindingImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "parent": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The full resource name of the resource the TagValue is bound to. E.g. 
//cloudresourcemanager.googleapis.com/projects/123`, + }, + "tag_value": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The TagValue of the TagBinding. Must be of the form tagValues/456.`, + }, + "location": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + Description: `The geographic location where the transfer config should reside. +Examples: US, EU, asia-northeast1. The default value is US.`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `The generated id for the TagBinding. This is a string of the form: 'tagBindings/{full-resource-name}/{tag-value-name}'`, + }, + }, + UseJSONNumber: true, + } +} + +func resourceTagsLocationTagBindingCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + parentProp, err := expandNestedTagsLocationTagBindingParent(d.Get("parent"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("parent"); !tpgresource.IsEmptyValue(reflect.ValueOf(parentProp)) && (ok || !reflect.DeepEqual(v, parentProp)) { + obj["parent"] = parentProp + } + tagValueProp, err := expandNestedTagsLocationTagBindingTagValue(d.Get("tag_value"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("tag_value"); !tpgresource.IsEmptyValue(reflect.ValueOf(tagValueProp)) && (ok || !reflect.DeepEqual(v, tagValueProp)) { + obj["tagValue"] = tagValueProp + } + + lockName, err := tpgresource.ReplaceVars(d, config, "tagBindings/{{"{{"}}parent{{"}}"}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}TagsLocationBasePath{{"}}"}}tagBindings") + log.Printf("url for TagsLocation: %s", url) + if err != nil { + return err 
+ } + + log.Printf("[DEBUG] Creating new LocationTagBinding: %#v", obj) + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return fmt.Errorf("Error creating LocationTagBinding: %s", err) + } + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + + var opRes map[string]interface{} + err = TagsLocationOperationWaitTimeWithResponse( + config, res, &opRes, "Creating LocationTagBinding", userAgent, + d.Timeout(schema.TimeoutCreate)) + + if err != nil { + d.SetId("") + return fmt.Errorf("Error waiting to create LocationTagBinding: %s", err) + } + + if _, ok := opRes["tagBindings"]; ok { + opRes, err = flattenNestedTagsLocationTagBinding(d, meta, opRes) + if err != nil { + return fmt.Errorf("Error getting nested object from operation response: %s", err) + } + if opRes == nil { + // Object isn't there any more - remove it from the state. 
+ d.SetId("") + return fmt.Errorf("Error decoding response from operation, could not find nested object") + } + } + if err := d.Set("name", flattenNestedTagsLocationTagBindingName(opRes["name"], d, config)); err != nil { + return err + } + + id, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}location{{"}}"}}/{{"{{"}}name{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating LocationTagBinding %q: %#v", d.Id(), res) + + return resourceTagsLocationTagBindingRead(d, meta) +} + +func resourceTagsLocationTagBindingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}TagsLocationBasePath{{"}}"}}tagBindings/?parent={{"{{"}}parent{{"}}"}}&pageSize=300") + if err != nil { + return err + } + + billingProject := "" + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) + } + log.Printf("[DEBUG] Skipping res with name for import = %#v,)", res) + + p, ok := res["tagBindings"] + if !ok || p == nil { + return nil + } + pView := p.([]interface{}) + + //if there are more than 300 bindings - handling pagination over here + if pageToken, ok := res["nextPageToken"].(string); ok { + for pageToken != "" { + url, err = transport_tpg.AddQueryParams(url, map[string]string{"pageToken": fmt.Sprintf("%s", res["nextPageToken"])}) + if err != nil { + return 
transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) + } + resp, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("TagsLocationTagBinding %q", d.Id())) + } + if resp == nil { + d.SetId("") + return nil + } + v, ok := resp["tagBindings"] + if !ok || v == nil { + return nil + } + pView = append(pView, v.([]interface{})...) + if token, ok := res["nextPageToken"]; ok { + pageToken = token.(string) + } else { + pageToken = "" + } + } + } + + newMap := make(map[string]interface{}, 1) + newMap["tagBindings"] = pView + + res, err = flattenNestedTagsLocationTagBinding(d, meta, newMap) + if err != nil { + return err + } + + if err := d.Set("name", flattenNestedTagsLocationTagBindingName(res["name"], d, config)); err != nil { + return fmt.Errorf("Error reading LocationTagBinding: %s", err) + } + if err := d.Set("parent", flattenNestedTagsLocationTagBindingParent(res["parent"], d, config)); err != nil { + return fmt.Errorf("Error reading LocationTagBinding: %s", err) + } + if err := d.Set("tag_value", flattenNestedTagsLocationTagBindingTagValue(res["tagValue"], d, config)); err != nil { + return fmt.Errorf("Error reading LocationTagBinding: %s", err) + } + + return nil +} + +func resourceTagsLocationTagBindingDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + lockName, err := tpgresource.ReplaceVars(d, config, "tagBindings/{{"{{"}}parent{{"}}"}}") + if err != nil { + return err + } + transport_tpg.MutexStore.Lock(lockName) + defer transport_tpg.MutexStore.Unlock(lockName) + + url, err := tpgresource.ReplaceVars(d, config, 
"{{"{{"}}TagsLocationBasePath{{"}}"}}{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting LocationTagBinding %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutDelete), + }) + if err != nil { + return transport_tpg.HandleNotFoundError(err, d, "LocationTagBinding") + } + + err = TagsLocationOperationWaitTime( + config, res, "Deleting LocationTagBinding", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting LocationTagBinding %q: %#v", d.Id(), res) + return nil +} + +func resourceTagsLocationTagBindingImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*transport_tpg.Config) + if err := tpgresource.ParseImportId([]string{"(?P[^/]+)/tagBindings/(?P[^/]+)/tagValues/(?P[^/]+)"}, d, config); err != nil { + return nil, err + } + + parent := d.Get("parent").(string) + parentProper := strings.ReplaceAll(parent, "%2F", "/") + d.Set("parent", parentProper) + d.Set("name", fmt.Sprintf("tagBindings/%s/tagValues/%s", parent, d.Get("tag_value").(string))) + id, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}location{{"}}"}}/{{"{{"}}name{{"}}"}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenNestedTagsLocationTagBindingName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenNestedTagsLocationTagBindingParent(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + return v +} + +func flattenNestedTagsLocationTagBindingTagValue(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func expandNestedTagsLocationTagBindingParent(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandNestedTagsLocationTagBindingTagValue(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func flattenNestedTagsLocationTagBinding(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { + var v interface{} + var ok bool + + v, ok = res["tagBindings"] + if !ok || v == nil { + return nil, nil + } + + switch v.(type) { + case []interface{}: + log.Printf("[DEBUG] Hey it's in break = %#v,)", v) + break + case map[string]interface{}: + // Construct list out of single nested resource + v = []interface{}{v} + default: + return nil, fmt.Errorf("expected list or map for value tagBindings. Actual value: %v", v) + } + + _, item, err := resourceTagsLocationTagBindingFindNestedObjectInList(d, meta, v.([]interface{})) + if err != nil { + return nil, err + } + return item, nil +} + +func resourceTagsLocationTagBindingFindNestedObjectInList(d *schema.ResourceData, meta interface{}, items []interface{}) (index int, item map[string]interface{}, err error) { + expectedName := d.Get("name") + expectedFlattenedName := flattenNestedTagsLocationTagBindingName(expectedName, d, meta.(*transport_tpg.Config)) + + // Search list for this resource. 
+ for idx, itemRaw := range items { + if itemRaw == nil { + continue + } + + item := itemRaw.(map[string]interface{}) + itemName := flattenNestedTagsLocationTagBindingName(item["name"], d, meta.(*transport_tpg.Config)) + // IsEmptyValue check so that if one is nil and the other is "", that's considered a match + if !(tpgresource.IsEmptyValue(reflect.ValueOf(itemName)) && tpgresource.IsEmptyValue(reflect.ValueOf(expectedFlattenedName))) && !reflect.DeepEqual(itemName, expectedFlattenedName) { + log.Printf("[DEBUG] Skipping item with name= %#v, looking for %#v)", itemName, expectedFlattenedName) + continue + } + return idx, item, nil + } + return -1, nil, nil +} diff --git a/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_accelerator_types.go.tmpl b/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_accelerator_types.go.tmpl new file mode 100644 index 000000000000..a0bc2d1cde43 --- /dev/null +++ b/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_accelerator_types.go.tmpl @@ -0,0 +1,95 @@ +package tpuv2 + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceTpuV2AcceleratorTypes() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTpuV2AcceleratorTypesRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceTpuV2AcceleratorTypesRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := 
tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}TpuV2BasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}zone{{"}}"}}/acceleratorTypes") + if err != nil { + return err + } + + typesRaw, err := tpgresource.PaginatedListRequest(project, url, userAgent, config, flattenTpuV2AcceleratorTypes) + if err != nil { + return fmt.Errorf("error listing TPU v2 accelerator types: %s", err) + } + + types := make([]string, len(typesRaw)) + for i, typeRaw := range typesRaw { + types[i] = typeRaw.(string) + } + sort.Strings(types) + + log.Printf("[DEBUG] Received Google TPU v2 accelerator types: %q", types) + + if err := d.Set("types", types); err != nil { + return fmt.Errorf("error setting types: %s", err) + } + if err := d.Set("zone", zone); err != nil { + return fmt.Errorf("error setting zone: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s", project, zone)) + + return nil +} + +func flattenTpuV2AcceleratorTypes(resp map[string]interface{}) []interface{} { + typeObjList := resp["acceleratorTypes"].([]interface{}) + types := make([]interface{}, len(typeObjList)) + for i, typ := range typeObjList { + typeObj := typ.(map[string]interface{}) + types[i] = typeObj["type"] + } + return types +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_accelerator_types_test.go.tmpl b/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_accelerator_types_test.go.tmpl new file mode 100644 index 000000000000..48a3b43f8e82 --- /dev/null +++ 
b/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_accelerator_types_test.go.tmpl @@ -0,0 +1,72 @@ +package tpuv2_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccTpuV2AcceleratorTypes_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccTpuV2AcceleratorTypesConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckTpuV2AcceleratorTypes("data.google_tpu_v2_accelerator_types.available"), + ), + }, + }, + }) +} + +func testAccCheckTpuV2AcceleratorTypes(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("can't find TPU v2 accelerator types data source: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("data source id not set") + } + + count, ok := rs.Primary.Attributes["types.#"] + if !ok { + return errors.New("can't find 'types' attribute") + } + + cnt, err := strconv.Atoi(count) + if err != nil { + return errors.New("failed to read number of types") + } + if cnt < 2 { + return fmt.Errorf("expected at least 2 types, received %d, this is most likely a bug", cnt) + } + + for i := 0; i < cnt; i++ { + idx := fmt.Sprintf("types.%d", i) + _, ok := rs.Primary.Attributes[idx] + if !ok { + return fmt.Errorf("expected %q, type not found", idx) + } + } + return nil + } +} + +const testAccTpuV2AcceleratorTypesConfig = ` +data "google_tpu_v2_accelerator_types" "available" { + provider = google-beta +} +` +{{- end }} \ No newline at end of file diff --git 
a/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_runtime_versions.go.tmpl b/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_runtime_versions.go.tmpl new file mode 100644 index 000000000000..2a365a0f00d2 --- /dev/null +++ b/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_runtime_versions.go.tmpl @@ -0,0 +1,95 @@ +package tpuv2 + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "fmt" + "log" + "sort" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceTpuV2RuntimeVersions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTpuV2RuntimeVersionsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "zone": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceTpuV2RuntimeVersionsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return err + } + + zone, err := tpgresource.GetZone(d, config) + if err != nil { + return err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}TpuV2BasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}zone{{"}}"}}/runtimeVersions") + if err != nil { + return err + } + + versionsRaw, err := tpgresource.PaginatedListRequest(project, url, userAgent, config, flattenTpuV2RuntimeVersions) + if err != nil { + return fmt.Errorf("error listing TPU v2 runtime versions: %s", err) + } + + versions := make([]string, 
len(versionsRaw)) + for i, ver := range versionsRaw { + versions[i] = ver.(string) + } + sort.Strings(versions) + + log.Printf("[DEBUG] Received Google TPU v2 runtime versions: %q", versions) + + if err := d.Set("versions", versions); err != nil { + return fmt.Errorf("error setting versions: %s", err) + } + if err := d.Set("zone", zone); err != nil { + return fmt.Errorf("error setting zone: %s", err) + } + if err := d.Set("project", project); err != nil { + return fmt.Errorf("error setting project: %s", err) + } + d.SetId(fmt.Sprintf("projects/%s/zones/%s", project, zone)) + + return nil +} + +func flattenTpuV2RuntimeVersions(resp map[string]interface{}) []interface{} { + verObjList := resp["runtimeVersions"].([]interface{}) + versions := make([]interface{}, len(verObjList)) + for i, v := range verObjList { + verObj := v.(map[string]interface{}) + versions[i] = verObj["version"] + } + return versions +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_runtime_versions_test.go.tmpl b/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_runtime_versions_test.go.tmpl new file mode 100644 index 000000000000..cf73db89e4c4 --- /dev/null +++ b/mmv1/third_party/terraform/services/tpuv2/go/data_source_tpu_v2_runtime_versions_test.go.tmpl @@ -0,0 +1,72 @@ +package tpuv2_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "errors" + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccTpuV2RuntimeVersions_basic(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccTpuV2RuntimeVersionsConfig, + Check: resource.ComposeTestCheckFunc( + 
testAccCheckTpuV2RuntimeVersions("data.google_tpu_v2_runtime_versions.available"), + ), + }, + }, + }) +} + +func testAccCheckTpuV2RuntimeVersions(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("can't find TPU v2 runtime versions data source: %s", n) + } + + if rs.Primary.ID == "" { + return errors.New("data source id not set") + } + + count, ok := rs.Primary.Attributes["versions.#"] + if !ok { + return errors.New("can't find 'versions' attribute") + } + + cnt, err := strconv.Atoi(count) + if err != nil { + return errors.New("failed to read number of versions") + } + if cnt < 2 { + return fmt.Errorf("expected at least 2 versions, received %d, this is most likely a bug", cnt) + } + + for i := 0; i < cnt; i++ { + idx := fmt.Sprintf("versions.%d", i) + _, ok := rs.Primary.Attributes[idx] + if !ok { + return fmt.Errorf("expected %q, version not found", idx) + } + } + return nil + } +} + +const testAccTpuV2RuntimeVersionsConfig = ` +data "google_tpu_v2_runtime_versions" "available" { + provider = google-beta +} +` +{{- end }} diff --git a/mmv1/third_party/terraform/services/tpuv2/go/resource_tpu_v2_vm_test.go.tmpl b/mmv1/third_party/terraform/services/tpuv2/go/resource_tpu_v2_vm_test.go.tmpl new file mode 100644 index 000000000000..db3405ed03c0 --- /dev/null +++ b/mmv1/third_party/terraform/services/tpuv2/go/resource_tpu_v2_vm_test.go.tmpl @@ -0,0 +1,168 @@ +package tpuv2_test + +{{ if ne $.TargetVersionName `ga` -}} +import ( + "testing" + "strconv" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccTpuV2Vm_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckTpuV2VmDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccTpuV2Vm_full(context), + }, + { + ResourceName: "google_tpu_v2_vm.tpu", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "zone"}, + }, + { + Config: testAccTpuV2Vm_update(context, true), + }, + { + ResourceName: "google_tpu_v2_vm.tpu", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "zone"}, + }, + { + Config: testAccTpuV2Vm_update(context, false), + }, + { + ResourceName: "google_tpu_v2_vm.tpu", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels", "zone"}, + }, + }, + }) +} + +func testAccTpuV2Vm_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_tpu_v2_vm" "tpu" { + provider = google-beta + + name = "tf-test-tpu-%{random_suffix}" + zone = "us-central1-c" + description = "Text description of the TPU." 
+ + runtime_version = "tpu-vm-tf-2.13.0" + accelerator_type = "v2-8" + + scheduling_config { + preemptible = true + } + + data_disks { + source_disk = google_compute_disk.disk.id + mode = "READ_ONLY" + } + + labels = { + foo = "bar" + } + + metadata = { + foo = "bar" + } + + tags = ["foo"] + + lifecycle { + prevent_destroy = true + } +} + +resource "google_compute_disk" "disk" { + provider = google-beta + + name = "tf-test-tpu-disk-%{random_suffix}" + image = "debian-cloud/debian-11" + size = 10 + type = "pd-ssd" + zone = "us-central1-c" +} +`, context) +} + +func testAccTpuV2Vm_update(context map[string]interface{}, preventDestroy bool) string { + context["prevent_destroy"] = strconv.FormatBool(preventDestroy) + + return acctest.Nprintf(` +resource "google_tpu_v2_vm" "tpu" { + provider = google-beta + + name = "tf-test-tpu-%{random_suffix}" + zone = "us-central1-c" + description = "Text description of the TPU updated." + + runtime_version = "tpu-vm-tf-2.13.0" + accelerator_type = "v2-8" + + scheduling_config { + preemptible = true + } + + data_disks { + source_disk = google_compute_disk.disk.id + mode = "READ_WRITE" + } + + data_disks { + source_disk = google_compute_disk.disk2.id + mode = "READ_ONLY" + } + + labels = { + baz = "bar" + } + + metadata = { + baz = "bar" + } + + tags = ["baz"] + + lifecycle { + prevent_destroy = %{prevent_destroy} + } +} + +resource "google_compute_disk" "disk" { + provider = google-beta + + name = "tf-test-tpu-disk-%{random_suffix}" + image = "debian-cloud/debian-11" + size = 10 + type = "pd-ssd" + zone = "us-central1-c" +} + +resource "google_compute_disk" "disk2" { + provider = google-beta + + name = "tf-test-tpu-disk2-%{random_suffix}" + image = "debian-cloud/debian-11" + size = 10 + type = "pd-ssd" + zone = "us-central1-c" +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/vertexai/go/iam_vertex_endpoint_test.go.tmpl b/mmv1/third_party/terraform/services/vertexai/go/iam_vertex_endpoint_test.go.tmpl 
new file mode 100644 index 000000000000..f4c9dfb56400 --- /dev/null +++ b/mmv1/third_party/terraform/services/vertexai/go/iam_vertex_endpoint_test.go.tmpl @@ -0,0 +1,363 @@ +package vertexai_test + +{{ if ne $.TargetVersionName `ga` -}} + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccVertexAIEndpointIamBinding(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIEndpointIamBinding_basic(context), + }, + { + ResourceName: "google_vertex_ai_endpoint_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/endpoints/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-endpoint-name%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccVertexAIEndpointIamBinding_update(context), + }, + { + ResourceName: "google_vertex_ai_endpoint_iam_binding.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/endpoints/%s roles/viewer", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-endpoint-name%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccVertexAIEndpointIamMember(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccVertexAIEndpointIamMember_basic(context), + }, + { + ResourceName: "google_vertex_ai_endpoint_iam_member.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/endpoints/%s roles/viewer user:admin@hashicorptest.com", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-endpoint-name%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccVertexAIEndpointIamPolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/viewer", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIEndpointIamPolicy_basic(context), + Check: resource.TestCheckResourceAttrSet("data.google_vertex_ai_endpoint_iam_policy.foo", "policy_data"), + }, + { + ResourceName: "google_vertex_ai_endpoint_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/endpoints/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-endpoint-name%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccVertexAIEndpointIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_vertex_ai_endpoint_iam_policy.foo", + ImportStateId: fmt.Sprintf("projects/%s/locations/%s/endpoints/%s", envvar.GetTestProjectFromEnv(), envvar.GetTestRegionFromEnv(), fmt.Sprintf("tf-test-endpoint-name%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccVertexAIEndpointIamMember_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource 
"google_vertex_ai_endpoint" "endpoint" { + name = "tf-test-endpoint-name%{random_suffix}" + display_name = "sample-endpoint" + description = "A sample vertex endpoint" + location = "us-central1" + region = "us-central1" + labels = { + label-one = "value-one" + } + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.vertex_network.name}" + depends_on = [ + google_service_networking_connection.vertex_vpc_connection + ] +} + +resource "google_service_networking_connection" "vertex_vpc_connection" { + network = google_compute_network.vertex_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.vertex_range.name] +} + +resource "google_compute_global_address" "vertex_range" { + name = "tf-test-address-name%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = google_compute_network.vertex_network.id +} + +resource "google_compute_network" "vertex_network" { + name = "tf-test-network-name%{random_suffix}" +} + +data "google_project" "project" {} + +resource "google_vertex_ai_endpoint_iam_member" "foo" { +project = google_vertex_ai_endpoint.endpoint.project +location = google_vertex_ai_endpoint.endpoint.location +endpoint = google_vertex_ai_endpoint.endpoint.name + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccVertexAIEndpointIamPolicy_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_vertex_ai_endpoint" "endpoint" { + name = "tf-test-endpoint-name%{random_suffix}" + display_name = "sample-endpoint" + description = "A sample vertex endpoint" + location = "us-central1" + region = "us-central1" + labels = { + label-one = "value-one" + } + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.vertex_network.name}" + depends_on = [ + 
google_service_networking_connection.vertex_vpc_connection + ] +} + +resource "google_service_networking_connection" "vertex_vpc_connection" { + network = google_compute_network.vertex_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.vertex_range.name] +} + +resource "google_compute_global_address" "vertex_range" { + name = "tf-test-address-name%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = google_compute_network.vertex_network.id +} + +resource "google_compute_network" "vertex_network" { + name = "tf-test-network-name%{random_suffix}" +} + +data "google_project" "project" {} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } +} + +resource "google_vertex_ai_endpoint_iam_policy" "foo" { +project = google_vertex_ai_endpoint.endpoint.project +location = google_vertex_ai_endpoint.endpoint.location +endpoint = google_vertex_ai_endpoint.endpoint.name + policy_data = data.google_iam_policy.foo.policy_data +} + +data "google_vertex_ai_endpoint_iam_policy" "foo" { +project = google_vertex_ai_endpoint.endpoint.project +location = google_vertex_ai_endpoint.endpoint.location +endpoint = google_vertex_ai_endpoint.endpoint.name + depends_on = [ + google_vertex_ai_endpoint_iam_policy.foo + ] +} +`, context) +} + +func testAccVertexAIEndpointIamPolicy_emptyBinding(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_vertex_ai_endpoint" "endpoint" { + name = "tf-test-endpoint-name%{random_suffix}" + display_name = "sample-endpoint" + description = "A sample vertex endpoint" + location = "us-central1" + region = "us-central1" + labels = { + label-one = "value-one" + } + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.vertex_network.name}" + depends_on = [ + 
google_service_networking_connection.vertex_vpc_connection + ] +} + +resource "google_service_networking_connection" "vertex_vpc_connection" { + network = google_compute_network.vertex_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.vertex_range.name] +} + +resource "google_compute_global_address" "vertex_range" { + name = "tf-test-address-name%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = google_compute_network.vertex_network.id +} + +resource "google_compute_network" "vertex_network" { + name = "tf-test-network-name%{random_suffix}" +} + +data "google_project" "project" {} + +data "google_iam_policy" "foo" { +} + +resource "google_vertex_ai_endpoint_iam_policy" "foo" { +project = google_vertex_ai_endpoint.endpoint.project +location = google_vertex_ai_endpoint.endpoint.location +endpoint = google_vertex_ai_endpoint.endpoint.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccVertexAIEndpointIamBinding_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_vertex_ai_endpoint" "endpoint" { + name = "tf-test-endpoint-name%{random_suffix}" + display_name = "sample-endpoint" + description = "A sample vertex endpoint" + location = "us-central1" + region = "us-central1" + labels = { + label-one = "value-one" + } + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.vertex_network.name}" + depends_on = [ + google_service_networking_connection.vertex_vpc_connection + ] +} + +resource "google_service_networking_connection" "vertex_vpc_connection" { + network = google_compute_network.vertex_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.vertex_range.name] +} + +resource "google_compute_global_address" "vertex_range" { + name = 
"tf-test-address-name%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = google_compute_network.vertex_network.id +} + +resource "google_compute_network" "vertex_network" { + name = "tf-test-network-name%{random_suffix}" +} + +data "google_project" "project" {} + +resource "google_vertex_ai_endpoint_iam_binding" "foo" { + project = google_vertex_ai_endpoint.endpoint.project + location = google_vertex_ai_endpoint.endpoint.location + endpoint = google_vertex_ai_endpoint.endpoint.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccVertexAIEndpointIamBinding_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_vertex_ai_endpoint" "endpoint" { + name = "tf-test-endpoint-name%{random_suffix}" + display_name = "sample-endpoint" + description = "A sample vertex endpoint" + location = "us-central1" + region = "us-central1" + labels = { + label-one = "value-one" + } + network = "projects/${data.google_project.project.number}/global/networks/${google_compute_network.vertex_network.name}" + depends_on = [ + google_service_networking_connection.vertex_vpc_connection + ] +} + +resource "google_service_networking_connection" "vertex_vpc_connection" { + network = google_compute_network.vertex_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.vertex_range.name] +} + +resource "google_compute_global_address" "vertex_range" { + name = "tf-test-address-name%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = google_compute_network.vertex_network.id +} + +resource "google_compute_network" "vertex_network" { + name = "tf-test-network-name%{random_suffix}" +} + +data "google_project" "project" {} + +resource "google_vertex_ai_endpoint_iam_binding" "foo" { +project = google_vertex_ai_endpoint.endpoint.project +location = 
google_vertex_ai_endpoint.endpoint.location +endpoint = google_vertex_ai_endpoint.endpoint.name + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} + +{{ end }} diff --git a/mmv1/third_party/terraform/services/vertexai/go/resource_vertex_ai_metadata_store_test.go.tmpl b/mmv1/third_party/terraform/services/vertexai/go/resource_vertex_ai_metadata_store_test.go.tmpl new file mode 100644 index 000000000000..a1109bbbe47c --- /dev/null +++ b/mmv1/third_party/terraform/services/vertexai/go/resource_vertex_ai_metadata_store_test.go.tmpl @@ -0,0 +1,93 @@ +package vertexai_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "fmt" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "testing" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccVertexAIMetadataStore_vertexAiMetadataStoreExample(t *testing.T) { + t.Parallel() + + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + name := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckVertexAIMetadataStoreDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIMetadataStore_vertexAiMetadataStoreExample(name, kms.CryptoKey.Name), + }, + { + ResourceName: "google_vertex_ai_metadata_store.store", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} + +func testAccVertexAIMetadataStore_vertexAiMetadataStoreExample(name, kmsKey string) string { + return fmt.Sprintf(` +resource "google_vertex_ai_metadata_store" "store" 
{ + name = "%s" + description = "Magic" + region = "us-central1" + encryption_spec { + kms_key_name = "%s" + } +} +`, name, kmsKey) +} + +func testAccCheckVertexAIMetadataStoreDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_vertex_ai_metadata_store" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}VertexAIBasePath{{"}}"}}{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("VertexAIMetadataStore still exists at %s", url) + } + } + + return nil + } +} + + +{{ end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/vertexai/go/vertex_ai_operation.go.tmpl b/mmv1/third_party/terraform/services/vertexai/go/vertex_ai_operation.go.tmpl new file mode 100644 index 000000000000..30f0f3f30d73 --- /dev/null +++ b/mmv1/third_party/terraform/services/vertexai/go/vertex_ai_operation.go.tmpl @@ -0,0 +1,77 @@ +package vertexai + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +type VertexAIOperationWaiter struct { + Config *transport_tpg.Config + UserAgent string + Project string + tpgresource.CommonOperationWaiter +} + +func (w *VertexAIOperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + + region := 
tpgresource.GetRegionFromRegionalSelfLink(w.CommonOperationWaiter.Op.Name) + + // Returns the proper get. +{{- if eq $.TargetVersionName "ga" }} + url := fmt.Sprintf("https://%s-aiplatform.googleapis.com/v1/%s", region, w.CommonOperationWaiter.Op.Name) +{{- else }} + url := fmt.Sprintf("https://%s-aiplatform.googleapis.com/v1beta1/%s", region, w.CommonOperationWaiter.Op.Name) +{{- end }} + + return transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: w.Config, + Method: "GET", + Project: w.Project, + RawURL: url, + UserAgent: w.UserAgent, + }) +} + +func createVertexAIWaiter(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string) (*VertexAIOperationWaiter, error) { + w := &VertexAIOperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func VertexAIOperationWaitTimeWithResponse(config *transport_tpg.Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createVertexAIWaiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := tpgresource.OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func VertexAIOperationWaitTime(config *transport_tpg.Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. + return nil + } + w, err := createVertexAIWaiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. 
+ return err + } + return tpgresource.OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/mmv1/third_party/terraform/services/workbench/go/resource_workbench_instance_shielded_config_test.go b/mmv1/third_party/terraform/services/workbench/go/resource_workbench_instance_shielded_config_test.go new file mode 100644 index 000000000000..c3e3eb7a30c9 --- /dev/null +++ b/mmv1/third_party/terraform/services/workbench/go/resource_workbench_instance_shielded_config_test.go @@ -0,0 +1,228 @@ +package workbench_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccWorkbenchInstance_shielded_config_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_shielded_config_false(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_true(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func 
TestAccWorkbenchInstance_shielded_config_remove(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_shielded_config_true(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_none(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccWorkbenchInstance_shielded_config_double_apply(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_shielded_config_none(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_none(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_false(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_false(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_shielded_config_true(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: 
testAccWorkbenchInstance_shielded_config_true(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_shielded_config_true(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + gce_setup { + shielded_instance_config { + enable_secure_boot = true + enable_vtpm = true + enable_integrity_monitoring = true + } + } +} +`, context) +} + +func testAccWorkbenchInstance_shielded_config_false(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + gce_setup { + shielded_instance_config { + enable_secure_boot = false + enable_vtpm = false + enable_integrity_monitoring = false + } + } + +} +`, context) +} + +func testAccWorkbenchInstance_shielded_config_none(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/workbench/go/resource_workbench_instance_test.go b/mmv1/third_party/terraform/services/workbench/go/resource_workbench_instance_test.go new file mode 100644 index 000000000000..a931960089f2 --- /dev/null +++ b/mmv1/third_party/terraform/services/workbench/go/resource_workbench_instance_test.go @@ -0,0 +1,626 @@ +package workbench_test + +import ( + "testing" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccWorkbenchInstance_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_update(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" +} +`, context) +} + +func testAccWorkbenchInstance_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + gce_setup { + machine_type = "n1-standard-16" + + accelerator_configs{ + type = "NVIDIA_TESLA_T4" + 
core_count = 1 + } + + shielded_instance_config { + enable_secure_boot = false + enable_vtpm = true + enable_integrity_monitoring = false + } + + boot_disk { + disk_size_gb = 310 + } + + data_disks { + disk_size_gb = 330 + } + + metadata = { + terraform = "true" + } + + } + + labels = { + k = "val" + } + +} +`, context) +} + +func TestAccWorkbenchInstance_updateGpu(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basicGpu(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_updateGpu(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_basicGpu(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + gce_setup { + machine_type = "n1-standard-1" // cant be e2 because of accelerator + accelerator_configs { + type = "NVIDIA_TESLA_T4" + core_count = 1 + } + + } +} +`, 
context) +} + +func testAccWorkbenchInstance_updateGpu(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + gce_setup { + machine_type = "n1-standard-16" + + accelerator_configs{ + type = "NVIDIA_TESLA_P4" + core_count = 1 + } + + shielded_instance_config { + enable_secure_boot = false + enable_vtpm = true + enable_integrity_monitoring = false + } + + } + + labels = { + k = "val" + } + +} +`, context) +} + +func TestAccWorkbenchInstance_removeGpu(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_Gpu(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_removeGpu(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_Gpu(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = 
"tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + gce_setup { + machine_type = "n1-standard-1" // cant be e2 because of accelerator + accelerator_configs { + type = "NVIDIA_TESLA_T4" + core_count = 1 + } + + } +} +`, context) +} + +func testAccWorkbenchInstance_removeGpu(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + gce_setup { + machine_type = "n1-standard-16" + + } + +} +`, context) +} + +func TestAccWorkbenchInstance_updateMetadata(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_updateMetadata(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_updateMetadata(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" 
"instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + gce_setup { + metadata = { + terraform = "true" + } + } + + labels = { + k = "val" + } + +} +`, context) +} + +func TestAccWorkbenchInstance_updateState(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_updateState(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "STOPPED"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + }, + }) +} + +func 
testAccWorkbenchInstance_updateState(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + desired_state = "STOPPED" + +} +`, context) +} + +func TestAccWorkbenchInstance_empty_accelerator(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_empty_accelerator(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkbenchInstance_empty_accelerator(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", 
"terraform_labels"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_empty_accelerator(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + + gce_setup { + accelerator_configs{ + } + } +} +`, context) +} + +func TestAccWorkbenchInstance_updateBootDisk(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_updateBootDisk(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + }, + }) +} + +func TestAccWorkbenchInstance_updateDataDisk(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: 
[]resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_updateDataDisk(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + }, + }) +} + +func TestAccWorkbenchInstance_updateBothDisks(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkbenchInstance_basic(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + { + Config: testAccWorkbenchInstance_updateBothDisks(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "google_workbench_instance.instance", "state", "ACTIVE"), + ), + }, + { + 
ResourceName: "google_workbench_instance.instance", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "instance_owners", "location", "instance_id", "request_id", "labels", "terraform_labels","desired_state"}, + }, + }, + }) +} + +func testAccWorkbenchInstance_updateBootDisk(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + gce_setup { + boot_disk { + disk_size_gb = 310 + } + } +} +`, context) +} + +func testAccWorkbenchInstance_updateDataDisk(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + gce_setup { + data_disks { + disk_size_gb = 330 + } + } +} +`, context) +} + +func testAccWorkbenchInstance_updateBothDisks(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_workbench_instance" "instance" { + name = "tf-test-workbench-instance%{random_suffix}" + location = "us-central1-a" + gce_setup { + boot_disk { + disk_size_gb = 310 + } + + data_disks { + disk_size_gb = 330 + } + } +} +`, context) +} diff --git a/mmv1/third_party/terraform/services/workflows/go/resource_workflows_workflow_test.go b/mmv1/third_party/terraform/services/workflows/go/resource_workflows_workflow_test.go new file mode 100644 index 000000000000..521c81231a90 --- /dev/null +++ b/mmv1/third_party/terraform/services/workflows/go/resource_workflows_workflow_test.go @@ -0,0 +1,222 @@ +package workflows_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/workflows" + transport_tpg 
"github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccWorkflowsWorkflow_Update(t *testing.T) { + // Custom test written to test diffs + t.Parallel() + + workflowName := fmt.Sprintf("tf-test-acc-workflow-%d", acctest.RandInt(t)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckWorkflowsWorkflowDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkflowsWorkflow_Update(workflowName), + }, + { + Config: testAccWorkflowsWorkflow_Updated(workflowName), + }, + }, + }) +} + +func testAccWorkflowsWorkflow_Update(name string) string { + return fmt.Sprintf(` +resource "google_workflows_workflow" "example" { + name = "%s" + region = "us-central1" + description = "Magic" + call_log_level = "LOG_ERRORS_ONLY" + user_env_vars = { + url = "https://timeapi.io/api/Time/current/zone?timeZone=Europe/Amsterdam" + } + source_contents = <<-EOF + # This is a sample workflow, feel free to replace it with your source code + # + # This workflow does the following: + # - reads current time and date information from an external API and stores + # the response in CurrentDateTime variable + # - retrieves a list of Wikipedia articles related to the day of the week + # from CurrentDateTime + # - returns the list of articles as an output of the workflow + # FYI, In terraform you need to escape the $$ or it will cause errors. 
+ + - getCurrentTime: + call: http.get + args: + url: $${sys.get_env("url")} + result: CurrentDateTime + - readWikipedia: + call: http.get + args: + url: https://en.wikipedia.org/w/api.php + query: + action: opensearch + search: $${CurrentDateTime.body.dayOfTheWeek} + result: WikiResult + - returnOutput: + return: $${WikiResult.body[1]} +EOF +} +`, name) +} + +func testAccWorkflowsWorkflow_Updated(name string) string { + return fmt.Sprintf(` +resource "google_workflows_workflow" "example" { + name = "%s" + region = "us-central1" + description = "Magic" + call_log_level = "LOG_ERRORS_ONLY" + user_env_vars = { + url = "https://timeapi.io/api/Time/current/zone?timeZone=Europe/Amsterdam" + } + source_contents = <<-EOF + # This is a sample workflow, feel free to replace it with your source code + # + # This workflow does the following: + # - reads current time and date information from an external API and stores + # the response in CurrentDateTime variable + # - retrieves a list of Wikipedia articles related to the day of the week + # from CurrentDateTime + # - returns the list of articles as an output of the workflow + # FYI, In terraform you need to escape the $$ or it will cause errors. 
+ + - getCurrentTime: + call: http.get + args: + url: $${sys.get_env("url")} + result: CurrentDateTime + - readWikipedia: + call: http.get + args: + url: https:/fi.wikipedia.org/w/api.php + query: + action: opensearch + search: $${CurrentDateTime.body.dayOfTheWeek} + result: WikiResult + - returnOutput: + return: $${WikiResult.body[1]} +EOF +} +`, name) +} + +func TestWorkflowsWorkflowStateUpgradeV0(t *testing.T) { + t.Parallel() + + cases := map[string]struct { + Attributes map[string]interface{} + Expected map[string]string + Meta interface{} + }{ + "shorten long name": { + Attributes: map[string]interface{}{ + "name": "projects/my-project/locations/us-central1/workflows/my-workflow", + }, + Expected: map[string]string{ + "name": "my-workflow", + }, + Meta: &transport_tpg.Config{}, + }, + "short name stays": { + Attributes: map[string]interface{}{ + "name": "my-workflow", + }, + Expected: map[string]string{ + "name": "my-workflow", + }, + Meta: &transport_tpg.Config{}, + }, + } + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + actual, err := workflows.ResourceWorkflowsWorkflowUpgradeV0(context.Background(), tc.Attributes, tc.Meta) + + if err != nil { + t.Error(err) + } + + for _, expectedName := range tc.Expected { + if actual["name"] != expectedName { + t.Errorf("expected: name -> %#v\n got: name -> %#v\n in: %#v", + expectedName, actual["name"], actual) + } + } + }) + } +} + +func TestAccWorkflowsWorkflow_CMEK(t *testing.T) { + // Custom test written to test diffs + t.Parallel() + + workflowName := fmt.Sprintf("tf-test-acc-workflow-%d", acctest.RandInt(t)) + kms := acctest.BootstrapKMSKeyInLocation(t, "us-central1") + if acctest.BootstrapPSARole(t, "service-", "gcp-sa-workflows", "roles/cloudkms.cryptoKeyEncrypterDecrypter") { + t.Fatal("Stopping the test because a role was added to the policy.") + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckWorkflowsWorkflowDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkflowsWorkflow_CMEK(workflowName, kms.CryptoKey.Name), + }, + }, + }) +} + +func testAccWorkflowsWorkflow_CMEK(workflowName, kmsKeyName string) string { + return fmt.Sprintf(` +resource "google_workflows_workflow" "example" { + name = "%s" + region = "us-central1" + description = "Magic" + crypto_key_name = "%s" + source_contents = <<-EOF + # This is a sample workflow, feel free to replace it with your source code + # + # This workflow does the following: + # - reads current time and date information from an external API and stores + # the response in CurrentDateTime variable + # - retrieves a list of Wikipedia articles related to the day of the week + # from CurrentDateTime + # - returns the list of articles as an output of the workflow + # FYI, In terraform you need to escape the $$ or it will cause errors. + + - getCurrentTime: + call: http.get + args: + url: https://us-central1-workflowsample.cloudfunctions.net/datetime + result: CurrentDateTime + - readWikipedia: + call: http.get + args: + url: https:/fi.wikipedia.org/w/api.php + query: + action: opensearch + search: $${CurrentDateTime.body.dayOfTheWeek} + result: WikiResult + - returnOutput: + return: $${WikiResult.body[1]} +EOF +} +`, workflowName, kmsKeyName) +} + diff --git a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_cluster_test.go.tmpl new file mode 100644 index 000000000000..66ed1107491b --- /dev/null +++ b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_cluster_test.go.tmpl @@ -0,0 +1,151 @@ +package workstations_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccWorkstationsWorkstationCluster_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationCluster_workstationClusterBasicExample(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "annotations", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkstationsWorkstationCluster_update(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "annotations", "labels", "terraform_labels"}, + }, + }, + }) +} + +func TestAccWorkstationsWorkstationCluster_Private_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationClusterDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationCluster_workstationClusterPrivateExample(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "annotations", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkstationsWorkstationCluster_private_update(context), + }, + { + ResourceName: 
"google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "annotations", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationCluster_update(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + provider = google-beta +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} +`, context) +} + +func testAccWorkstationsWorkstationCluster_private_update(context map[string]interface{}) string { + return acctest.Nprintf(` +data "google_project" "project" { + provider = google-beta +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + private_cluster_config { + allowed_projects = ["${data.google_project.project.project_id}"] + enable_private_endpoint = true + } + + labels = { + foo = "bar" + } +} + +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" 
"default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl new file mode 100644 index 000000000000..576a21ca1262 --- /dev/null +++ b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl @@ -0,0 +1,1295 @@ +package workstations_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccWorkstationsWorkstationConfig_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_basic(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = 
"tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + labels = { + foo = "bar" + } +} +`, context) +} + +func TestAccWorkstationsWorkstationConfig_displayName(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "display_name": "Display Name N", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_displayName(context, ""), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkstationsWorkstationConfig_displayName(context, "2"), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_displayName(context map[string]interface{}, update string) string { + 
context["display_name"] = context["display_name"].(string) + update + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + display_name = "%{display_name} %{random_suffix}" + + labels = { + foo = "bar" + } +} +`, context) +} + +func TestAccWorkstationsWorkstationConfig_persistentDirectories(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_persistentDirectories(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "labels", "terraform_labels"}, + }, + }, + }) +} + +func 
testAccWorkstationsWorkstationConfig_persistentDirectories(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + persistent_directories { + mount_path = "/home" + } + + labels = { + foo = "bar" + } +} +`, context) +} + +func TestAccWorkstationsWorkstationConfig_ephemeralDirectories(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_ephemeralDirectories(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "labels", "terraform_labels"}, + }, + }, + 
}) +} + +func testAccWorkstationsWorkstationConfig_ephemeralDirectories(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_compute_disk" "test_source_disk" { + provider = google-beta + name = "tf-test-workstation-source-disk%{random_suffix}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "test_source_snapshot" { + provider = google-beta + name = "tf-test-workstation-source-snapshot%{random_suffix}" + source_disk = google_compute_disk.test_source_disk.name + zone = "us-central1-a" +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + ephemeral_directories { + mount_path = "/cache" + gce_pd { + source_snapshot = google_compute_snapshot.test_source_snapshot.id + read_only = true + } + } + + labels = { + foo = "bar" + } +} +`, context) +} + +func TestAccWorkstationsWorkstationConfig_ephemeralDirectories_withSourceImage(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + 
} + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_ephemeralDirectories_withSourceImage(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_ephemeralDirectories_withSourceImage(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_compute_disk" "test_source_disk" { + provider = google-beta + name = "tf-test-workstation-source-disk%{random_suffix}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_image" "test_source_image" { + provider = google-beta + name = "tf-test-workstation-source-image%{random_suffix}" + source_disk = google_compute_disk.test_source_disk.name + storage_locations = ["us-central1"] +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + 
workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + ephemeral_directories { + mount_path = "/cache" + gce_pd { + disk_type = "pd-standard" + source_image = google_compute_image.test_source_image.id + read_only = true + } + } + + labels = { + foo = "bar" + } +} +`, context) +} + +func TestAccWorkstationsWorkstationConfig_serviceAccount(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_serviceAccount(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_serviceAccount(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name + } + + resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + } + + resource 
"google_service_account" "default" { + provider = google-beta + + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Service Account" + } + + resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + enable_audit_agent = true + + host { + gce_instance { + service_account = google_service_account.default.email + service_account_scopes = ["https://www.googleapis.com/auth/cloud-platform"] + } + } + } +`, context) +} + +func TestAccWorkstationsWorkstationConfig_boost(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_boost(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_boost(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name + } + + resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + 
workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + } + + resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + host { + gce_instance { + boost_configs { + id = "boost-1" + machine_type = "n1-standard-2" + accelerators { + type = "nvidia-tesla-t4" + count = 1 + } + } + boost_configs { + id = "boost-2" + machine_type = "n1-standard-2" + pool_size = 2 + boot_disk_size_gb = 30 + enable_nested_virtualization = true + } + } + } + } +`, context) +} + +func TestAccWorkstationsWorkstationConfig_disableTcpConnections(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_disableTcpConnections(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_disableTcpConnections(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range 
= "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name + } + + resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + } + + resource "google_service_account" "default" { + provider = google-beta + + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Service Account" + } + + resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + disable_tcp_connections = true + + host { + gce_instance { + service_account = google_service_account.default.email + service_account_scopes = ["https://www.googleapis.com/auth/cloud-platform"] + } + } + } +`, context) +} + + +func TestAccWorkstationsWorkstationConfig_readinessChecks(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_readinessChecks(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_readinessChecks(context map[string]interface{}) string { + return acctest.Nprintf(` + resource "google_compute_network" "default" { + provider = google-beta + name = 
"tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false + } + + resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name + } + + resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + } + + resource "google_service_account" "default" { + provider = google-beta + + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Service Account" + } + + resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + readiness_checks { + path = "/" + port = 80 + } + + host { + gce_instance { + service_account = google_service_account.default.email + service_account_scopes = ["https://www.googleapis.com/auth/cloud-platform"] + } + } + } +`, context) +} + +func TestAccWorkstationsWorkstationConfig_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_workstationConfigBasicExample(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + 
ImportStateVerifyIgnore: []string{"etag", "annotations", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkstationsWorkstationConfig_update(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "annotations", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkstationsWorkstationConfig_workstationConfigBasicExample(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "annotations", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + host { + gce_instance { + machine_type = "n1-standard-4" + boot_disk_size_gb = 35 + disable_public_ip_addresses = true + enable_nested_virtualization = true 
+ } + } + + labels = { + foo = "bar" + } + + lifecycle { + prevent_destroy = true + } +} +`, context) +} + +func TestAccWorkstationsWorkstationConfig_updateHostDetails(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_updateHostDetailsDefault(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + { + Config: testAccWorkstationsWorkstationConfig_updateHostDetailsUpdated(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + { + Config: testAccWorkstationsWorkstationConfig_updateHostDetailsUnsetInstanceConfigs(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_updateHostDetailsDefault(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + 
workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" +} + +resource "google_service_account" "default" { + provider = google-beta + + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Service Account" +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + host { + gce_instance { + machine_type = "e2-standard-2" + boot_disk_size_gb = 35 + pool_size = 0 + + service_account = google_service_account.default.email + disable_public_ip_addresses = false + + shielded_instance_config { + enable_secure_boot = false + enable_vtpm = false + enable_integrity_monitoring = false + } + + confidential_instance_config { + enable_confidential_compute = false + } + } + } +} +`, context) +} + +func testAccWorkstationsWorkstationConfig_updateHostDetailsUpdated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" +} + +# No longer explicitly used in google_workstations_workstation_config resource block 
below, but the +# service account needs to keep existing to allow the field to default from the API without error +resource "google_service_account" "default" { + provider = google-beta + + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Service Account" +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + host { + gce_instance { + machine_type = "n2d-standard-2" + boot_disk_size_gb = 35 + pool_size = 1 + + disable_public_ip_addresses = true + tags = ["foo", "bar"] + + shielded_instance_config { + enable_secure_boot = true + enable_vtpm = true + enable_integrity_monitoring = true + } + + confidential_instance_config { + enable_confidential_compute = true + } + + boost_configs { + id = "boost-1" + machine_type = "n2d-standard-2" + } + } + } +} +`, context) +} + +func testAccWorkstationsWorkstationConfig_updateHostDetailsUnsetInstanceConfigs(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" +} + +# No longer explicitly used in google_workstations_workstation_config resource block below, but the +# service account 
needs to keep existing to allow the field to default from the API without error +resource "google_service_account" "default" { + provider = google-beta + + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Service Account" +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + host { + gce_instance { + machine_type = "n2d-standard-2" + boot_disk_size_gb = 35 + pool_size = 1 + + disable_public_ip_addresses = true + tags = ["foo", "bar"] + + shielded_instance_config {} + confidential_instance_config {} + } + } +} +`, context) +} + +func TestAccWorkstationsWorkstationConfig_updateWorkingDir(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_withCustomWorkingDir(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + { + Config: testAccWorkstationsWorkstationConfig_unsetWorkingDir(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_withCustomWorkingDir(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = 
"tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + container { + image = "us-central1-docker.pkg.dev/cloud-workstations-images/predefined/code-oss:latest" + working_dir = "/test" + } +} +`, context) +} + +func testAccWorkstationsWorkstationConfig_unsetWorkingDir(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = 
"tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + container { + image = "us-central1-docker.pkg.dev/cloud-workstations-images/predefined/code-oss:latest" + } +} +`, context) +} + +func TestAccWorkstationsWorkstationConfig_updatePersistentDirectorySourceSnapshot(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_withSourceDiskSnapshot(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + { + Config: testAccWorkstationsWorkstationConfig_withUpdatedSourceDiskSnapshot(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_withSourceDiskSnapshot(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_compute_disk" "test_source_disk" { + provider = google-beta + name = "tf-test-workstation-source-disk%{random_suffix}" + size = 
10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "test_source_snapshot" { + provider = google-beta + name = "tf-test-workstation-source-snapshot%{random_suffix}" + source_disk = google_compute_disk.test_source_disk.name + zone = "us-central1-a" +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + persistent_directories { + mount_path = "/home" + + gce_pd { + source_snapshot = google_compute_snapshot.test_source_snapshot.id + reclaim_policy = "DELETE" + } + } +} +`, context) +} + +func testAccWorkstationsWorkstationConfig_withUpdatedSourceDiskSnapshot(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_compute_disk" "test_source_disk" { + provider = google-beta + name = "tf-test-workstation-source-disk%{random_suffix}" + size = 10 + type = "pd-ssd" + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "test_source_snapshot" { + provider = google-beta + name = "tf-test-workstation-source-snapshot%{random_suffix}" + source_disk = 
google_compute_disk.test_source_disk.name + zone = "us-central1-a" +} + +resource "google_compute_snapshot" "test_source_snapshot2" { + provider = google-beta + name = "tf-test-workstation-source-snapshot2%{random_suffix}" + source_disk = google_compute_disk.test_source_disk.name + zone = "us-central1-a" +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + persistent_directories { + mount_path = "/home" + + gce_pd { + source_snapshot = google_compute_snapshot.test_source_snapshot2.id + reclaim_policy = "RETAIN" + } + } +} +`, context) +} +{{- end }} diff --git a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_test.go.tmpl b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_test.go.tmpl new file mode 100644 index 000000000000..d92dcc634aad --- /dev/null +++ b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_test.go.tmpl @@ -0,0 +1,150 @@ +package workstations_test +{{- if ne $.TargetVersionName "ga" }} + +import ( + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccWorkstationsWorkstation_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstation_basic(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "labels", "terraform_labels"}, + }, + { + Config: testAccWorkstationsWorkstation_modified(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "labels", "terraform_labels"}, + }, + }, + }) +} + + +func testAccWorkstationsWorkstation_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation_config" "default" { + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation" "default" { + workstation_id = "tf-test-workstation%{random_suffix}" + workstation_config_id = google_workstations_workstation_config.default.workstation_config_id + 
workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + labels = { + foo = "bar" + } + + env = { + name = "bar" + } +} +`, context) +} + +func testAccWorkstationsWorkstation_modified(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_network" "default" { + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation_config" "default" { + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + labels = { + foo = "bar" + } +} + +resource "google_workstations_workstation" "default" { + workstation_id = "tf-test-workstation%{random_suffix}" + workstation_config_id = google_workstations_workstation_config.default.workstation_config_id + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + display_name = "workstation%{random_suffix}" + + labels = { + foo = "bar" + } + + env = { + name = "test" + } +} +`, context) +} +{{- end }} From 18baeb9b331e9f7a65dd0c04abc03880ed7a4081 Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Thu, 20 Jun 2024 12:52:54 -0700 Subject: [PATCH 179/356] make index_configs O+C in 
google_logging_organization_bucket_config (#11000) --- .../terraform/services/logging/resource_logging_bucket_config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/mmv1/third_party/terraform/services/logging/resource_logging_bucket_config.go b/mmv1/third_party/terraform/services/logging/resource_logging_bucket_config.go index 78258975e396..6aa57eb95346 100644 --- a/mmv1/third_party/terraform/services/logging/resource_logging_bucket_config.go +++ b/mmv1/third_party/terraform/services/logging/resource_logging_bucket_config.go @@ -93,6 +93,7 @@ See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/ro Type: schema.TypeSet, MaxItems: 20, Optional: true, + Computed: true, Description: `A list of indexed fields and related configuration data.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ From 340f756f9cc26e0151fed90ad4b17b624e490322 Mon Sep 17 00:00:00 2001 From: hao-nan-li <100219545+hao-nan-li@users.noreply.github.com> Date: Thu, 20 Jun 2024 14:21:45 -0700 Subject: [PATCH 180/356] Update dcl dependencies to v1.68.0 (#11009) --- mmv1/third_party/terraform/go.mod.erb | 2 +- mmv1/third_party/terraform/go.sum | 4 ++-- tpgtools/go.mod | 2 +- tpgtools/go.sum | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/mmv1/third_party/terraform/go.mod.erb b/mmv1/third_party/terraform/go.mod.erb index 1e6bde117210..eca77fa58285 100644 --- a/mmv1/third_party/terraform/go.mod.erb +++ b/mmv1/third_party/terraform/go.mod.erb @@ -5,7 +5,7 @@ go 1.21 require ( cloud.google.com/go/bigtable v1.24.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 diff --git a/mmv1/third_party/terraform/go.sum b/mmv1/third_party/terraform/go.sum index cd2fce52ae52..30534d3e3b1c 100644 --- a/mmv1/third_party/terraform/go.sum +++ 
b/mmv1/third_party/terraform/go.sum @@ -18,8 +18,8 @@ cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodE dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 h1:FBKsgWIOEdtpx2YuF+aBH33K0Ih25D3xuKyp9peH4jc= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 h1:LIPIYi4hy7ttUSrziY/TYwMDuEvvV593n80kRmz6nZ4= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= diff --git a/tpgtools/go.mod b/tpgtools/go.mod index 67f7b6571bd9..5e9a41311a90 100644 --- a/tpgtools/go.mod +++ b/tpgtools/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( bitbucket.org/creachadair/stringset v0.0.11 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 github.com/golang/glog v1.1.2 github.com/hashicorp/hcl v1.0.0 github.com/kylelemons/godebug v1.1.0 diff --git a/tpgtools/go.sum b/tpgtools/go.sum index 0e90e5744d3a..9a48751b2af4 100644 --- a/tpgtools/go.sum +++ b/tpgtools/go.sum @@ -6,8 +6,8 @@ cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdi cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod 
h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 h1:FBKsgWIOEdtpx2YuF+aBH33K0Ih25D3xuKyp9peH4jc= -github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 h1:LIPIYi4hy7ttUSrziY/TYwMDuEvvV593n80kRmz6nZ4= +github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0/go.mod h1:pL2Qt5HT+x6xrTd806oMiM3awW6kNIXB/iiuClz6m6k= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= From 1a794995ea685e6bce2a0d06b0bd660c29c07846 Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Thu, 20 Jun 2024 14:43:40 -0700 Subject: [PATCH 181/356] added set_hash_func to copy script in go rewrite (#11011) --- mmv1/description-copy.go | 15 +++++++++++---- mmv1/main.go | 1 + .../templates/terraform/yaml_conversion_field.erb | 2 +- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/mmv1/description-copy.go b/mmv1/description-copy.go index 5e16c0758e4d..6e5a97657eba 100644 --- a/mmv1/description-copy.go +++ b/mmv1/description-copy.go @@ -92,12 +92,19 @@ func CopyText(identifier string) { for scanner.Scan() { line := scanner.Text() if firstLine { - if line != "NOT CONVERTED - RUN YAML MODE" { - // log.Printf("skipping %s", goPath) - break - } else { + if line == "NOT CONVERTED - RUN YAML MODE" { + firstLine = false + w.WriteString(fmt.Sprintf("%s\n", "NOT CONVERTED #2 - RUN YAML MODE")) + continue + } else if line == "NOT CONVERTED #2 - RUN YAML MODE" { firstLine = false + w.WriteString(fmt.Sprintf("%s\n", "NOT 
CONVERTED #3 - RUN YAML MODE")) continue + } else if line == "NOT CONVERTED #3 - RUN YAML MODE" { + firstLine = false + continue + } else { + break } } if strings.Contains(line, identifier) { diff --git a/mmv1/main.go b/mmv1/main.go index 284ab3383b0c..82ed82ec160f 100644 --- a/mmv1/main.go +++ b/mmv1/main.go @@ -43,6 +43,7 @@ func main() { if *yamlMode { CopyText("description:") CopyText("note:") + CopyText("set_hash_func:") } if *templateMode { diff --git a/mmv1/templates/terraform/yaml_conversion_field.erb b/mmv1/templates/terraform/yaml_conversion_field.erb index e2b69f12eeb0..3d9b5d39cf1a 100644 --- a/mmv1/templates/terraform/yaml_conversion_field.erb +++ b/mmv1/templates/terraform/yaml_conversion_field.erb @@ -130,7 +130,7 @@ state_func: '<%= property.state_func %>' <% end -%> <% unless property.set_hash_func.nil? -%> - set_hash_func: '<%= property.set_hash_func %>' + set_hash_func: <% end -%> <% unless property.custom_flatten.nil? -%> custom_flatten: '<%= object.convert_go_file( property.custom_flatten )%>' From 57e00f48d84b9b88945fb51f1ff1aba4c9981927 Mon Sep 17 00:00:00 2001 From: dfdossantos Date: Fri, 21 Jun 2024 15:24:50 +0000 Subject: [PATCH 182/356] Add support for ```logging.googleapis.com/LogBucket``` to TGC (#11008) --- mmv1/provider/terraform_tgc.rb | 6 +- mmv1/templates/tgc/resource_converters.go.erb | 2 + .../logging_billing_account_bucket_config.go | 175 ++++++++++++++++++ .../tgc/logging_project_bucket_config.go | 175 ++++++++++++++++++ ...logging_billing_account_bucket_config.json | 20 ++ ...e_logging_billing_account_bucket_config.tf | 19 ++ ..._google_logging_project_bucket_config.json | 20 ++ ...le_google_logging_project_bucket_config.tf | 19 ++ 8 files changed, 435 insertions(+), 1 deletion(-) create mode 100644 mmv1/third_party/tgc/logging_billing_account_bucket_config.go create mode 100644 mmv1/third_party/tgc/logging_project_bucket_config.go create mode 100644 
mmv1/third_party/tgc/tests/data/example_google_logging_billing_account_bucket_config.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_logging_billing_account_bucket_config.tf create mode 100644 mmv1/third_party/tgc/tests/data/example_google_logging_project_bucket_config.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_logging_project_bucket_config.tf diff --git a/mmv1/provider/terraform_tgc.rb b/mmv1/provider/terraform_tgc.rb index 150b1c80a1ce..075888bfb87d 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -332,7 +332,11 @@ def copy_common_files(output_folder, generate_code, _generate_docs) ['converters/google/resources/logging_folder_bucket_config.go', 'third_party/tgc/logging_folder_bucket_config.go'], ['converters/google/resources/logging_organization_bucket_config.go', - 'third_party/tgc/logging_organization_bucket_config.go'] + 'third_party/tgc/logging_organization_bucket_config.go'], + ['converters/google/resources/logging_project_bucket_config.go', + 'third_party/tgc/logging_project_bucket_config.go'], + ['converters/google/resources/logging_billing_account_bucket_config.go', + 'third_party/tgc/logging_billing_account_bucket_config.go'] ]) end diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index fd999cc3a5ad..7b27f8edc4ff 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -116,6 +116,8 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_compute_node_group": {compute.ResourceConverterComputeNodeGroup()}, "google_logging_folder_bucket_config": {resourceConverterLogFolderBucket()}, "google_logging_organization_bucket_config": {resourceConverterLogOrganizationBucket()}, + "google_logging_project_bucket_config": {resourceConverterLogProjectBucket()}, + "google_logging_billing_account_bucket_config": 
{resourceConverterLogBillingAccountBucket()}, "google_cloud_tasks_queue": {cloudtasks.ResourceConverterCloudTasksQueue()}, "google_pubsub_topic": {pubsub.ResourceConverterPubsubTopic()}, "google_kms_crypto_key": {kms.ResourceConverterKMSCryptoKey()}, diff --git a/mmv1/third_party/tgc/logging_billing_account_bucket_config.go b/mmv1/third_party/tgc/logging_billing_account_bucket_config.go new file mode 100644 index 000000000000..741e11a59bb5 --- /dev/null +++ b/mmv1/third_party/tgc/logging_billing_account_bucket_config.go @@ -0,0 +1,175 @@ +package google + +import ( + "reflect" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const logBillingAccountBucketAssetType string = "logging.googleapis.com/LogBucket" + +func resourceConverterLogBillingAccountBucket() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: logBillingAccountBucketAssetType, + Convert: GetLogBillingAccountBucketCaiObject, + } +} + +func GetLogBillingAccountBucketCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//logging.googleapis.com/projects/{{project}}/locations/{{location}}/buckets/{{bucket_id}}") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetLogBillingAccountBucketApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: logBillingAccountBucketAssetType, + Resource: &cai.AssetResource{ + Version: "v2", + DiscoveryDocumentURI: "https://logging.googleapis.com/$discovery/rest?version=v2", + DiscoveryName: "LogBucket", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetLogBillingAccountBucketApiObject(d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := make(map[string]interface{}) + + billingAccountProp, err := expandLogBillingAccountBucketBillingAccountId(d.Get("billing_account"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("billing_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(billingAccountProp)) && (ok || !reflect.DeepEqual(v, billingAccountProp)) { + obj["id"] = billingAccountProp + } + + nameProp, err := expandLogBillingAccountBucketName(d.Get("name"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + bucketIdProp, err := expandLogBillingAccountBucketBucketId(d.Get("bucket_id"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("bucket_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(bucketIdProp)) && (ok || !reflect.DeepEqual(v, bucketIdProp)) { + obj["bucketId"] = bucketIdProp + } + + locationProp, err := expandLogBillingAccountBucketLocation(d.Get("location"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { + obj["location"] = locationProp + } + + descriptionProp, err := expandLogBillingAccountBucketDescription(d.Get("description"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + retentionDaysProp, err := expandLogBillingAccountBucketRetentionDays(d.Get("retention_days"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("retention_days"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(retentionDaysProp)) && (ok || !reflect.DeepEqual(v, retentionDaysProp)) { + obj["retentionDays"] = retentionDaysProp + } + + indexConfigsProp, err := expandLogBillingAccountBucketIndexConfigs(d.Get("index_configs"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("index_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(indexConfigsProp)) && (ok || !reflect.DeepEqual(v, indexConfigsProp)) { + obj["indexConfigs"] = indexConfigsProp + } + + lifecycleStateProp, err := expandLogBillingAccountBucketLifecycleState(d.Get("lifecycle_state"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("lifecycle_state"); !tpgresource.IsEmptyValue(reflect.ValueOf(lifecycleStateProp)) && (ok || !reflect.DeepEqual(v, lifecycleStateProp)) { + obj["lifecycleState"] = lifecycleStateProp + } + + return obj, nil +} + +func expandLogBillingAccountBucketBillingAccountId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/buckets/{{bucket_id}}") + if err != nil { + return nil, err + } + + return v, nil +} + +func expandLogBillingAccountBucketName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogBillingAccountBucketLifecycleState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogBillingAccountBucketIndexConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
transformedFieldPath, err := expandLogBillingAccountBucketFieldPath(original["field_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFieldPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fieldPath"] = transformedFieldPath + } + + transformedType, err := expandLogBillingAccountBucketType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandLogBillingAccountBucketType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogBillingAccountBucketFieldPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogBillingAccountBucketRetentionDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogBillingAccountBucketDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogBillingAccountBucketLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogBillingAccountBucketBucketId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/tgc/logging_project_bucket_config.go b/mmv1/third_party/tgc/logging_project_bucket_config.go new file mode 100644 index 000000000000..77e96884a679 --- /dev/null +++ b/mmv1/third_party/tgc/logging_project_bucket_config.go @@ -0,0 +1,175 @@ +package google + +import ( + "reflect" + + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const logProjectBucketAssetType string = "logging.googleapis.com/LogBucket" + +func resourceConverterLogProjectBucket() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: logProjectBucketAssetType, + Convert: GetLogProjectBucketCaiObject, + } +} + +func GetLogProjectBucketCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//logging.googleapis.com/projects/{{project}}/locations/{{location}}/buckets/{{bucket_id}}") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetLogProjectBucketApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: logProjectBucketAssetType, + Resource: &cai.AssetResource{ + Version: "v2", + DiscoveryDocumentURI: "https://logging.googleapis.com/$discovery/rest?version=v2", + DiscoveryName: "LogBucket", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetLogProjectBucketApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := make(map[string]interface{}) + + organizationProp, err := expandLogProjectBucketProjectId(d.Get("project"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("project"); !tpgresource.IsEmptyValue(reflect.ValueOf(organizationProp)) && (ok || !reflect.DeepEqual(v, organizationProp)) { + obj["id"] = organizationProp + } + + nameProp, err := expandLogProjectBucketName(d.Get("name"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("name"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + bucketIdProp, err := expandLogProjectBucketBucketId(d.Get("bucket_id"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("bucket_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(bucketIdProp)) && (ok || !reflect.DeepEqual(v, bucketIdProp)) { + obj["bucketId"] = bucketIdProp + } + + locationProp, err := expandLogProjectBucketLocation(d.Get("location"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("location"); !tpgresource.IsEmptyValue(reflect.ValueOf(locationProp)) && (ok || !reflect.DeepEqual(v, locationProp)) { + obj["location"] = locationProp + } + + descriptionProp, err := expandLogProjectBucketDescription(d.Get("description"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + + retentionDaysProp, err := expandLogProjectBucketRetentionDays(d.Get("retention_days"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("retention_days"); !tpgresource.IsEmptyValue(reflect.ValueOf(retentionDaysProp)) && (ok || !reflect.DeepEqual(v, retentionDaysProp)) { + obj["retentionDays"] = retentionDaysProp + } + + indexConfigsProp, err := expandLogProjectBucketIndexConfigs(d.Get("index_configs"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("index_configs"); !tpgresource.IsEmptyValue(reflect.ValueOf(indexConfigsProp)) && (ok || !reflect.DeepEqual(v, indexConfigsProp)) { + obj["indexConfigs"] = indexConfigsProp + } + + lifecycleStateProp, err := expandLogProjectBucketLifecycleState(d.Get("lifecycle_state"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("lifecycle_state"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(lifecycleStateProp)) && (ok || !reflect.DeepEqual(v, lifecycleStateProp)) { + obj["lifecycleState"] = lifecycleStateProp + } + + return obj, nil +} + +func expandLogProjectBucketProjectId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/buckets/{{bucket_id}}") + if err != nil { + return nil, err + } + + return v, nil +} + +func expandLogProjectBucketName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogProjectBucketLifecycleState(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogProjectBucketIndexConfigs(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFieldPath, err := expandLogProjectBucketFieldPath(original["field_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFieldPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["fieldPath"] = transformedFieldPath + } + + transformedType, err := expandLogProjectBucketType(original["type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["type"] = transformedType + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandLogProjectBucketType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + 
return v, nil +} + +func expandLogProjectBucketFieldPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogProjectBucketRetentionDays(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogProjectBucketDescription(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogProjectBucketLocation(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandLogProjectBucketBucketId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/tgc/tests/data/example_google_logging_billing_account_bucket_config.json b/mmv1/third_party/tgc/tests/data/example_google_logging_billing_account_bucket_config.json new file mode 100644 index 000000000000..cd993e2be994 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_logging_billing_account_bucket_config.json @@ -0,0 +1,20 @@ +[ + { + "name": "//logging.googleapis.com/projects/{{.Provider.project}}/locations/global/buckets/_Default", + "asset_type": "logging.googleapis.com/LogBucket", + "resource": { + "version": "v2", + "discovery_document_uri": "https://logging.googleapis.com/$discovery/rest?version=v2", + "discovery_name": "LogBucket", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "bucketId": "_Default", + "id": "projects/{{.Provider.project}}/locations/global/buckets/_Default", + "location": "global", + "retentionDays": 30 + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + } +] \ No newline at end of file diff --git 
a/mmv1/third_party/tgc/tests/data/example_google_logging_billing_account_bucket_config.tf b/mmv1/third_party/tgc/tests/data/example_google_logging_billing_account_bucket_config.tf new file mode 100644 index 000000000000..24e45fcd3ec5 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_logging_billing_account_bucket_config.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_logging_billing_account_bucket_config" "basic" { + billing_account = "{{.Project.BillingAccountName}}" + location = "global" + retention_days = 30 + bucket_id = "_Default" +} \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_logging_project_bucket_config.json b/mmv1/third_party/tgc/tests/data/example_google_logging_project_bucket_config.json new file mode 100644 index 000000000000..cd993e2be994 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_logging_project_bucket_config.json @@ -0,0 +1,20 @@ +[ + { + "name": "//logging.googleapis.com/projects/{{.Provider.project}}/locations/global/buckets/_Default", + "asset_type": "logging.googleapis.com/LogBucket", + "resource": { + "version": "v2", + "discovery_document_uri": "https://logging.googleapis.com/$discovery/rest?version=v2", + "discovery_name": "LogBucket", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "bucketId": "_Default", + "id": "projects/{{.Provider.project}}/locations/global/buckets/_Default", + "location": "global", + "retentionDays": 30 + } + }, + "ancestors": ["organizations/{{.OrgID}}"], + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}" + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_logging_project_bucket_config.tf 
b/mmv1/third_party/tgc/tests/data/example_google_logging_project_bucket_config.tf new file mode 100644 index 000000000000..06f3667fcba1 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_logging_project_bucket_config.tf @@ -0,0 +1,19 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_logging_project_bucket_config" "basic" { + project = "{{.Provider.project}}" + location = "global" + retention_days = 30 + bucket_id = "_Default" +} \ No newline at end of file From 49765a0b8cb33902f5e87a6c76cdcff9b58c7d5d Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 21 Jun 2024 16:42:13 +0100 Subject: [PATCH 183/356] Unskip provider-defined functions acc tests in VCR (#10843) --- mmv1/third_party/terraform/functions/location_from_id_test.go | 2 -- mmv1/third_party/terraform/functions/name_from_id_test.go | 2 -- mmv1/third_party/terraform/functions/project_from_id_test.go | 2 -- mmv1/third_party/terraform/functions/region_from_id_test.go | 2 -- mmv1/third_party/terraform/functions/region_from_zone_test.go | 3 +-- mmv1/third_party/terraform/functions/zone_from_id_test.go | 2 -- 6 files changed, 1 insertion(+), 12 deletions(-) diff --git a/mmv1/third_party/terraform/functions/location_from_id_test.go b/mmv1/third_party/terraform/functions/location_from_id_test.go index 26b73d2bc26b..209fc794ae9a 100644 --- a/mmv1/third_party/terraform/functions/location_from_id_test.go +++ b/mmv1/third_party/terraform/functions/location_from_id_test.go @@ -11,8 +11,6 @@ import ( func TestAccProviderFunction_location_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) location := "us-central1" 
locationRegex := regexp.MustCompile(fmt.Sprintf("^%s$", location)) diff --git a/mmv1/third_party/terraform/functions/name_from_id_test.go b/mmv1/third_party/terraform/functions/name_from_id_test.go index 8eaf139918c3..f5084cb1d230 100644 --- a/mmv1/third_party/terraform/functions/name_from_id_test.go +++ b/mmv1/third_party/terraform/functions/name_from_id_test.go @@ -11,8 +11,6 @@ import ( func TestAccProviderFunction_name_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) context := map[string]interface{}{ "function_name": "name_from_id", diff --git a/mmv1/third_party/terraform/functions/project_from_id_test.go b/mmv1/third_party/terraform/functions/project_from_id_test.go index 34f0f624af59..0677aa783747 100644 --- a/mmv1/third_party/terraform/functions/project_from_id_test.go +++ b/mmv1/third_party/terraform/functions/project_from_id_test.go @@ -12,8 +12,6 @@ import ( func TestAccProviderFunction_project_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) projectId := envvar.GetTestProjectFromEnv() projectIdRegex := regexp.MustCompile(fmt.Sprintf("^%s$", projectId)) diff --git a/mmv1/third_party/terraform/functions/region_from_id_test.go b/mmv1/third_party/terraform/functions/region_from_id_test.go index 79ef135d9a27..96f6661c38fb 100644 --- a/mmv1/third_party/terraform/functions/region_from_id_test.go +++ b/mmv1/third_party/terraform/functions/region_from_id_test.go @@ -12,8 +12,6 @@ import ( func TestAccProviderFunction_region_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) region := envvar.GetTestRegionFromEnv() regionRegex := 
regexp.MustCompile(fmt.Sprintf("^%s$", region)) diff --git a/mmv1/third_party/terraform/functions/region_from_zone_test.go b/mmv1/third_party/terraform/functions/region_from_zone_test.go index 68c001ada13a..5ce3679d104f 100644 --- a/mmv1/third_party/terraform/functions/region_from_zone_test.go +++ b/mmv1/third_party/terraform/functions/region_from_zone_test.go @@ -11,8 +11,7 @@ import ( func TestAccProviderFunction_region_from_zone(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) + projectZone := "us-central1-a" projectRegion := "us-central1" projectRegionRegex := regexp.MustCompile(fmt.Sprintf("^%s$", projectRegion)) diff --git a/mmv1/third_party/terraform/functions/zone_from_id_test.go b/mmv1/third_party/terraform/functions/zone_from_id_test.go index 7058825a05bc..45247ef1aee0 100644 --- a/mmv1/third_party/terraform/functions/zone_from_id_test.go +++ b/mmv1/third_party/terraform/functions/zone_from_id_test.go @@ -12,8 +12,6 @@ import ( func TestAccProviderFunction_zone_from_id(t *testing.T) { t.Parallel() - // Skipping due to requiring TF 1.8.0 in VCR systems : https://github.com/hashicorp/terraform-provider-google/issues/17451 - acctest.SkipIfVcr(t) zone := envvar.GetTestZoneFromEnv() zoneRegex := regexp.MustCompile(fmt.Sprintf("^%s$", zone)) From c96e8a0c51a81713f1244e24a4369e21f53b5e61 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 21 Jun 2024 18:19:54 +0100 Subject: [PATCH 184/356] Add handwritten sweeper for google_vmwareengine_private_cloud (#11002) --- ...urce_vmwareengine_private_cloud_sweeper.go | 137 ++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go 
b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go new file mode 100644 index 000000000000..6df3f302205a --- /dev/null +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_sweeper.go @@ -0,0 +1,137 @@ +package vmwareengine + +import ( + "context" + "fmt" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("VmwareenginePrivateCloud", testSweepVmwareenginePrivateCloud) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepVmwareenginePrivateCloud(region string) error { + resourceName := "VmwareenginePrivateCloud" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // List of location values includes: + // * zones used for this resource type's acc tests in the past + // * the 'region' passed to the sweeper + locations := []string{region, "southamerica-west1-a", "me-west1-a"} + log.Printf("[INFO][SWEEPER_LOG] Sweeping will include these locations: %v.", locations) + for _, location := range locations { + log.Printf("[INFO][SWEEPER_LOG] Beginning the process of sweeping location '%s'.", location) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ 
+ "project": config.Project, + "region": location, + "location": location, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateClouds", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + continue + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue + } + + resourceList, ok := res["privateClouds"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateClouds/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // We force delete the Private Cloud and ensure there's no delay in deletion + force := true + delayHours := 0 + deleteUrl = deleteUrl + fmt.Sprintf("?force=%t&delayHours=%d", force, delayHours) + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + } + + return nil +} From 03a2343cc98715b9d15509709ebdbcd50fef447c Mon Sep 17 00:00:00 2001 From: Daan Heikens Date: Fri, 21 Jun 2024 20:09:05 +0200 Subject: [PATCH 185/356] Add beta support for custom error response policies for URL maps (#10787) Co-authored-by: Riley Karson --- mmv1/products/compute/UrlMap.yaml | 190 +++++++++++++++ ...rl_map_custom_error_response_policy.tf.erb | 86 +++++++ .../resource_compute_url_map_test.go.erb | 227 +++++++++++++++++- 3 
files changed, 502 insertions(+), 1 deletion(-) create mode 100644 mmv1/templates/terraform/examples/url_map_custom_error_response_policy.tf.erb diff --git a/mmv1/products/compute/UrlMap.yaml b/mmv1/products/compute/UrlMap.yaml index d3dad5414195..5b0cda5141d1 100644 --- a/mmv1/products/compute/UrlMap.yaml +++ b/mmv1/products/compute/UrlMap.yaml @@ -124,6 +124,16 @@ examples: http_health_check_name: "health-check" backend_bucket_name: "static-asset-backend-bucket" storage_bucket_name: "static-asset-bucket" + - !ruby/object:Provider::Terraform::Examples + name: "url_map_custom_error_response_policy" + primary_resource_id: "urlmap" + min_version: beta + vars: + url_map_name: "urlmap" + backend_service_name: "login" + http_health_check_name: "health-check" + storage_bucket_name: "static-asset-bucket" + error_backend_bucket_name: "error-backend-bucket" properties: - !ruby/object:Api::Type::Time name: 'creationTimestamp' @@ -304,6 +314,66 @@ properties: description: | An optional description of this resource. Provide this property when you create the resource. + - !ruby/object:Api::Type::NestedObject + name: 'defaultCustomErrorResponsePolicy' + min_version: beta + description: | + defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + + This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. + + For example, consider a UrlMap with the following configuration: + + UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors + A RouteRule for /coming_soon/ is configured for the error code 404. 
+ If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in RouteRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. + + When used in conjunction with pathMatcher.defaultRouteAction.retryPolicy, retries take precedence. Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the defaultCustomErrorResponsePolicy is ignored and the response from the service is returned to the client. + + defaultCustomErrorResponsePolicy is supported only for global external Application Load Balancers. + properties: + - !ruby/object:Api::Type::Array + name: 'errorResponseRule' + api_name: errorResponseRules + description: | + Specifies rules for returning error responses. + In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. + For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). + If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'matchResponseCodes' + description: | + Valid values include: + - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. 
+ - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. + - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. + Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'path' + description: | + The full path to a file within backendBucket . For example: /errors/defaultError.html + path must start with a leading slash. path cannot have trailing slashes. + If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. + The value must be from 1 to 1024 characters + - !ruby/object:Api::Type::Integer + name: 'overrideResponseCode' + description: | + The HTTP status code returned with the response containing the custom error content. + If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. + - !ruby/object:Api::Type::ResourceRef + name: 'errorService' + resource: 'BackendBucket' + imports: 'selfLink' + description: | + The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + compute/v1/projects/project/global/backendBuckets/myBackendBucket + global/backendBuckets/myBackendBucket + If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. 
+ If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). - !ruby/object:Api::Type::NestedObject name: 'headerAction' description: | @@ -403,6 +473,64 @@ properties: \* is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here. + - !ruby/object:Api::Type::NestedObject + name: 'customErrorResponsePolicy' + min_version: beta + description: | + customErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + If a policy for an error code is not configured for the PathRule, a policy for the error code configured in pathMatcher.defaultCustomErrorResponsePolicy is applied. If one is not specified in pathMatcher.defaultCustomErrorResponsePolicy, the policy configured in UrlMap.defaultCustomErrorResponsePolicy takes effect. + For example, consider a UrlMap with the following configuration: + UrlMap.defaultCustomErrorResponsePolicy are configured with policies for 5xx and 4xx errors + A PathRule for /coming_soon/ is configured for the error code 404. + If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in PathRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. + customErrorResponsePolicy is supported only for global external Application Load Balancers. 
+ properties: + - !ruby/object:Api::Type::Array + name: 'errorResponseRule' + api_name: errorResponseRules + description: | + Specifies rules for returning error responses. + In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. + For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). + If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'matchResponseCodes' + description: | + Valid values include: + + - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. + - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. + - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. + + Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'path' + description: | + The full path to a file within backendBucket . For example: /errors/defaultError.html + path must start with a leading slash. path cannot have trailing slashes. + If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. + The value must be from 1 to 1024 characters + - !ruby/object:Api::Type::Integer + name: 'overrideResponseCode' + description: | + The HTTP status code returned with the response containing the custom error content. 
+ If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. + - !ruby/object:Api::Type::ResourceRef + name: "errorService" + resource: "BackendBucket" + imports: 'selfLink' + description: | + The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + + https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + compute/v1/projects/project/global/backendBuckets/myBackendBucket + global/backendBuckets/myBackendBucket + + If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. + If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). - !ruby/object:Api::Type::NestedObject name: 'routeAction' description: | @@ -1915,6 +2043,68 @@ properties: The value must be between 0.0 and 100.0 inclusive. validation: !ruby/object:Provider::Terraform::Validation function: 'validation.FloatBetween(0, 100)' + - !ruby/object:Api::Type::NestedObject + name: 'defaultCustomErrorResponsePolicy' + min_version: beta + description: | + defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + + This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. 
+ + For example, consider a UrlMap with the following configuration: + + UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors + A RouteRule for /coming_soon/ is configured for the error code 404. + If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in RouteRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. + + When used in conjunction with pathMatcher.defaultRouteAction.retryPolicy, retries take precedence. Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the defaultCustomErrorResponsePolicy is ignored and the response from the service is returned to the client. + + defaultCustomErrorResponsePolicy is supported only for global external Application Load Balancers. + properties: + - !ruby/object:Api::Type::Array + name: 'errorResponseRule' + api_name: errorResponseRules + description: | + Specifies rules for returning error responses. + In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. + For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). + If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. 
+ item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::Array + name: 'matchResponseCodes' + description: | + Valid values include: + - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. + - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. + - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. + Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'path' + description: | + The full path to a file within backendBucket. For example: /errors/defaultError.html + path must start with a leading slash. path cannot have trailing slashes. + If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. + The value must be from 1 to 1024 characters. + - !ruby/object:Api::Type::Integer + name: 'overrideResponseCode' + description: | + The HTTP status code returned with the response containing the custom error content. + If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. + - !ruby/object:Api::Type::ResourceRef + name: "errorService" + resource: "BackendBucket" + imports: 'selfLink' + description: | + The full or partial URL to the BackendBucket resource that contains the custom error content. 
Examples are: + + https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + compute/v1/projects/project/global/backendBuckets/myBackendBucket + global/backendBuckets/myBackendBucket + + If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. + If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). - !ruby/object:Api::Type::Array name: "test" api_name: tests diff --git a/mmv1/templates/terraform/examples/url_map_custom_error_response_policy.tf.erb b/mmv1/templates/terraform/examples/url_map_custom_error_response_policy.tf.erb new file mode 100644 index 000000000000..32c94d52e68d --- /dev/null +++ b/mmv1/templates/terraform/examples/url_map_custom_error_response_policy.tf.erb @@ -0,0 +1,86 @@ +resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + name = "<%= ctx[:vars]['url_map_name'] %>" + description = "a description" + + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx"] # All 5xx responses will be catched + path = "/*" + override_response_code = 502 + } + error_service = google_compute_backend_bucket.error.id + } + + host_rule { + hosts = ["mysite.com"] + path_matcher = "mysite" + } + + path_matcher { + name = "mysite" + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx", "5xx"] # All 4xx and 5xx responses will be catched on path login + path = "/login" + override_response_code = 404 + } + error_response_rule { + match_response_codes = ["503"] 
# Only a 503 response will be catched on path example + path = "/example" + override_response_code = 502 + } + error_service = google_compute_backend_bucket.error.id + } + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.example.id + + custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx"] + path = "/register" + override_response_code = 401 + } + error_service = google_compute_backend_bucket.error.id + } + } + } +} + +resource "google_compute_backend_service" "example" { + provider = google-beta + name = "<%= ctx[:vars]['backend_service_name'] %>" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "EXTERNAL_MANAGED" + + health_checks = [google_compute_http_health_check.default.id] +} + +resource "google_compute_http_health_check" "default" { + provider = google-beta + name = "<%= ctx[:vars]['http_health_check_name'] %>" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_backend_bucket" "error" { + provider = google-beta + name = "<%= ctx[:vars]['error_backend_bucket_name'] %>" + bucket_name = google_storage_bucket.error.name + enable_cdn = true +} + +resource "google_storage_bucket" "error" { + provider = google-beta + name = "<%= ctx[:vars]['storage_bucket_name'] %>" + location = "US" +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_url_map_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_url_map_test.go.erb index b5fd8df9f2aa..dfc72c901701 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_url_map_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_url_map_test.go.erb @@ -349,7 +349,6 @@ func TestAccComputeUrlMap_defaultUrlRedirect(t *testing.T) { acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - CheckDestroy: 
testAccCheckComputeUrlMapDestroyProducer(t), Steps: []resource.TestStep{ { Config: testAccComputeUrlMap_defaultUrlRedirectConfig(randomSuffix), @@ -363,6 +362,42 @@ func TestAccComputeUrlMap_defaultUrlRedirect(t *testing.T) { }) } +<% unless version == 'ga' -%> +func TestAccComputeUrlMap_urlMapCustomErrorResponsePolicyUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_urlMapCustomErrorResponsePolicy(context), + }, + { + ResourceName: "google_compute_url_map.urlmap", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"default_service"}, + }, + { + Config: testAccComputeUrlMap_urlMapCustomErrorResponsePolicyUpdate(context), + }, + { + ResourceName: "google_compute_url_map.urlmap", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"default_service"}, + }, + }, + }) +} +<% end -%> + func testAccComputeUrlMap_basic1(bsName, hcName, umName string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { @@ -1686,3 +1721,193 @@ resource "google_compute_url_map" "foobar" { } `, randomSuffix) } + +<% unless version == 'ga' -%> +func testAccComputeUrlMap_urlMapCustomErrorResponsePolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_url_map" "urlmap" { + provider = google-beta + name = "urlmap%{random_suffix}" + description = "a description" + + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx"] # All 5xx responses will be catched + path = "/*" + override_response_code = 502 
+ } + error_service = google_compute_backend_bucket.error.id + } + + host_rule { + hosts = ["mysite.com"] + path_matcher = "mysite" + } + + path_matcher { + name = "mysite" + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx", "5xx"] # All 4xx and 5xx responses will be catched on path login + path = "/login" + override_response_code = 404 + } + error_response_rule { + match_response_codes = ["503"] # Only a 503 response will be catched on path example + path = "/example" + override_response_code = 502 + } + error_service = google_compute_backend_bucket.error.id + } + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.example.id + + custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx"] + path = "/register" + override_response_code = 401 + } + error_service = google_compute_backend_bucket.error.id + } + } + } +} + +resource "google_compute_backend_service" "example" { + provider = google-beta + name = "login%{random_suffix}" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "EXTERNAL_MANAGED" + + health_checks = [google_compute_http_health_check.default.id] +} + +resource "google_compute_http_health_check" "default" { + provider = google-beta + name = "tf-test-health-check%{random_suffix}" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_backend_bucket" "error" { + provider = google-beta + name = "tf-test-error-backend-bucket%{random_suffix}" + bucket_name = google_storage_bucket.error.name + enable_cdn = true +} + +resource "google_storage_bucket" "error" { + provider = google-beta + name = "tf-test-static-asset-bucket%{random_suffix}" + location = "US" +} +`, context) +} +<% end -%> + +<% unless version == 'ga' -%> +func testAccComputeUrlMap_urlMapCustomErrorResponsePolicyUpdate(context map[string]interface{}) string { + 
return acctest.Nprintf(` +resource "google_compute_url_map" "urlmap" { + provider = google-beta + name = "urlmap%{random_suffix}" + description = "a description" + + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx", "4xx"] # All 5xx responses will be catched + path = "/test/*" + override_response_code = 503 + } + error_service = google_compute_backend_bucket.error.id + } + + host_rule { + hosts = ["mysite.com"] + path_matcher = "mysite" + } + + path_matcher { + name = "mysite" + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx"] # All 4xx and 5xx responses will be catched on path login + path = "/*" + override_response_code = 502 + } + error_response_rule { + match_response_codes = ["4xx"] # Only a 503 response will be catched on path example + path = "/example/test" + override_response_code = 400 + } + error_service = google_compute_backend_bucket.error.id + } + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.example.id + + custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx"] + path = "/register/example/*" + override_response_code = 403 + } + error_service = google_compute_backend_bucket.error.id + } + } + } +} + +resource "google_compute_backend_service" "example" { + provider = google-beta + name = "login%{random_suffix}" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "EXTERNAL_MANAGED" + + health_checks = [google_compute_http_health_check.default.id] +} + +resource "google_compute_http_health_check" "default" { + provider = google-beta + name = "tf-test-health-check%{random_suffix}" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_backend_bucket" "error" { + provider = google-beta + name = 
"tf-test-error-backend-bucket-2%{random_suffix}" + bucket_name = google_storage_bucket.error.name + enable_cdn = true + + lifecycle { + create_before_destroy = true + } +} + +resource "google_storage_bucket" "error" { + provider = google-beta + name = "tf-test-static-asset-bucket-2%{random_suffix}" + location = "US" +} +`, context) +} +<% end -%> \ No newline at end of file From 91be96d4e6fb248d8cee506c026b10095d080721 Mon Sep 17 00:00:00 2001 From: Bin Wu <46450037+wu-bin@users.noreply.github.com> Date: Fri, 21 Jun 2024 14:14:59 -0400 Subject: [PATCH 186/356] Add tlsEarlyData support to TargetHttpsProxy. (#10954) --- mmv1/products/compute/TargetHttpsProxy.yaml | 12 ++++++++++++ .../resource_compute_target_https_proxy_test.go.erb | 1 + 2 files changed, 13 insertions(+) diff --git a/mmv1/products/compute/TargetHttpsProxy.yaml b/mmv1/products/compute/TargetHttpsProxy.yaml index 50d1983c027b..a22e5c8cc870 100644 --- a/mmv1/products/compute/TargetHttpsProxy.yaml +++ b/mmv1/products/compute/TargetHttpsProxy.yaml @@ -130,6 +130,18 @@ properties: update_url: 'projects/{{project}}/global/targetHttpsProxies/{{name}}/setQuicOverride' default_value: :NONE custom_flatten: 'templates/terraform/custom_flatten/default_if_empty.erb' + - !ruby/object:Api::Type::Enum + name: 'tlsEarlyData' + description: | + Specifies whether TLS 1.3 0-RTT Data (“Early Data”) should be accepted for this service. + Early Data allows a TLS resumption handshake to include the initial application payload + (a HTTP request) alongside the handshake, reducing the effective round trips to “zero”. + This applies to TLS 1.3 connections over TCP (HTTP/2) as well as over UDP (QUIC/h3). 
+ values: + - :STRICT + - :PERMISSIVE + - :DISABLED + default_from_api: true - !ruby/object:Api::Type::Array name: 'certificateManagerCertificates' description: | diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_target_https_proxy_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_target_https_proxy_test.go.erb index bb973d853080..6ff35b6b607c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_target_https_proxy_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_target_https_proxy_test.go.erb @@ -226,6 +226,7 @@ resource "google_compute_target_https_proxy" "foobar" { google_compute_ssl_certificate.foobar2.self_link, ] quic_override = "ENABLE" + tls_early_data = "STRICT" } resource "google_compute_backend_service" "foobar" { From f0402098b9759305e87f3f257d0f22a68927695a Mon Sep 17 00:00:00 2001 From: kkram01 Date: Fri, 21 Jun 2024 23:45:43 +0530 Subject: [PATCH 187/356] promote dedicated_serving_endpoint to GA on featureOnlineStore (#11013) --- mmv1/products/vertexai/FeatureOnlineStore.yaml | 6 ++---- ...b => vertex_ai_featureonlinestore_with_optimized.tf.erb} | 4 ++-- 2 files changed, 4 insertions(+), 6 deletions(-) rename mmv1/templates/terraform/examples/{vertex_ai_featureonlinestore_with_beta_fields_optimized.tf.erb => vertex_ai_featureonlinestore_with_optimized.tf.erb} (89%) diff --git a/mmv1/products/vertexai/FeatureOnlineStore.yaml b/mmv1/products/vertexai/FeatureOnlineStore.yaml index 702626b7a1ad..663299718ce9 100644 --- a/mmv1/products/vertexai/FeatureOnlineStore.yaml +++ b/mmv1/products/vertexai/FeatureOnlineStore.yaml @@ -50,13 +50,12 @@ examples: ignore_read_extra: - force_destroy - !ruby/object:Provider::Terraform::Examples - name: vertex_ai_featureonlinestore_with_beta_fields_optimized + name: vertex_ai_featureonlinestore_with_optimized primary_resource_id: featureonlinestore vars: name: example_feature_online_store_optimized ignore_read_extra: - 
force_destroy - min_version: beta - !ruby/object:Provider::Terraform::Examples name: vertex_ai_featureonlinestore_with_beta_fields_bigtable primary_resource_id: featureonlinestore @@ -154,9 +153,8 @@ properties: - !ruby/object:Api::Type::NestedObject name: dedicatedServingEndpoint description: | - The dedicated serving endpoint for this FeatureOnlineStore, which is different from common vertex service endpoint. Only need to set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default. + The dedicated serving endpoint for this FeatureOnlineStore, which is different from common vertex service endpoint. Only need to be set when you choose Optimized storage type or enable EmbeddingManagement. Will use public endpoint by default. default_from_api: true - min_version: beta properties: - !ruby/object:Api::Type::String name: publicEndpointDomainName diff --git a/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_with_beta_fields_optimized.tf.erb b/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_with_optimized.tf.erb similarity index 89% rename from mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_with_beta_fields_optimized.tf.erb rename to mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_with_optimized.tf.erb index 40be04127914..d3e5e3c85edc 100644 --- a/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_with_beta_fields_optimized.tf.erb +++ b/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_with_optimized.tf.erb @@ -1,5 +1,5 @@ resource "google_vertex_ai_feature_online_store" "<%= ctx[:primary_resource_id] %>" { - provider = google-beta + provider = google name = "<%= ctx[:vars]['name'] %>" labels = { foo = "bar" @@ -15,7 +15,7 @@ resource "google_vertex_ai_feature_online_store" "<%= ctx[:primary_resource_id] } data "google_project" "project" { - provider = google-beta + provider = google } From 8b20d4a4e609e26d90b4d0692ff3ebf3201cac3c Mon Sep 
17 00:00:00 2001 From: Zhenhua Li Date: Fri, 21 Jun 2024 12:21:44 -0700 Subject: [PATCH 188/356] Convert handwritten utility erb files (#11012) --- GNUmakefile | 3 +- mmv1/google/template_utils.go | 7 + mmv1/main.go | 32 +- mmv1/provider/terraform.go | 133 +- mmv1/provider/terraform/common~copy.yaml | 20 +- mmv1/template-converter.go | 209 +- .../acctest/go/framework_test_utils.go | 75 + .../terraform/acctest/go/test_utils.go.tmpl | 268 +++ .../go/framework_provider_test.go.tmpl | 286 +++ .../fwtransport/go/framework_config_test.go | 1743 +++++++++++++++++ .../go/framework_provider_clients.go.tmpl | 55 + .../terraform/{ => go}/.copywrite.hcl.tmpl | 8 +- .../terraform/{ => go}/.goreleaser.yml.tmpl | 24 +- mmv1/third_party/terraform/{ => go}/go.mod | 0 .../terraform/{ => go}/main.go.tmpl | 2 +- .../{ => go}/release-metadata.hcl.tmpl | 0 .../{ => go}/terraform-registry-manifest.json | 0 .../provider/go/provider_test.go.tmpl | 500 +++++ .../provider/go/provider_validators.go | 47 + .../provider/provider_mmv1_resources.go.erb | 24 +- .../terraform/release-metadata.hcl.erb | 2 +- .../terraform/scripts/go/diff.go.tmpl | 173 ++ .../terraform/scripts/go/run_diff.sh.tmpl | 30 + .../go/resource_gke_hub_feature_test.go.tmpl | 43 + .../terraform/tpgiamresource/go/iam.go.tmpl | 567 ++++++ .../tpgiamresource/go/iam_test.go.tmpl | 1333 +++++++++++++ .../terraform/tpgiamresource/iam_test.go.erb | 38 +- .../go/common_diff_suppress.go.tmpl | 315 +++ .../go/tpgtools_custom_flattens.go.tmpl | 39 + .../go/provider_handwritten_endpoint.go.tmpl | 131 ++ 30 files changed, 5972 insertions(+), 135 deletions(-) create mode 100644 mmv1/third_party/terraform/acctest/go/framework_test_utils.go create mode 100644 mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl create mode 100644 mmv1/third_party/terraform/fwprovider/go/framework_provider_test.go.tmpl create mode 100644 mmv1/third_party/terraform/fwtransport/go/framework_config_test.go create mode 100644 
mmv1/third_party/terraform/fwtransport/go/framework_provider_clients.go.tmpl rename mmv1/third_party/terraform/{ => go}/.copywrite.hcl.tmpl (92%) rename mmv1/third_party/terraform/{ => go}/.goreleaser.yml.tmpl (59%) rename mmv1/third_party/terraform/{ => go}/go.mod (100%) rename mmv1/third_party/terraform/{ => go}/main.go.tmpl (95%) rename mmv1/third_party/terraform/{ => go}/release-metadata.hcl.tmpl (100%) rename mmv1/third_party/terraform/{ => go}/terraform-registry-manifest.json (100%) create mode 100644 mmv1/third_party/terraform/provider/go/provider_test.go.tmpl create mode 100644 mmv1/third_party/terraform/provider/go/provider_validators.go create mode 100644 mmv1/third_party/terraform/scripts/go/diff.go.tmpl create mode 100644 mmv1/third_party/terraform/scripts/go/run_diff.sh.tmpl create mode 100644 mmv1/third_party/terraform/tpgiamresource/go/iam.go.tmpl create mode 100644 mmv1/third_party/terraform/tpgiamresource/go/iam_test.go.tmpl create mode 100644 mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go.tmpl create mode 100644 mmv1/third_party/terraform/tpgresource/go/tpgtools_custom_flattens.go.tmpl create mode 100644 mmv1/third_party/terraform/transport/go/provider_handwritten_endpoint.go.tmpl diff --git a/GNUmakefile b/GNUmakefile index 68ee86de465d..e4e63fe494a8 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -137,4 +137,5 @@ doctor: refresh-go: cd mmv1;\ bundle exec compiler.rb -e terraform -o $(OUTPUT_PATH) -v $(VERSION) $(mmv1_compile) --go-yaml; \ - go run . --yaml --template \ No newline at end of file + go run . --yaml --template + go run . 
--yaml --handwritten \ No newline at end of file diff --git a/mmv1/google/template_utils.go b/mmv1/google/template_utils.go index ac47f6bf3374..1053894233ba 100644 --- a/mmv1/google/template_utils.go +++ b/mmv1/google/template_utils.go @@ -43,6 +43,12 @@ func subtract(a, b int) int { return a - b } +// plus returns the sum of a and b +// and used in Go templates +func plus(a, b int) int { + return a + b +} + var TemplateFunctions = template.FuncMap{ "title": SpaceSeparatedTitle, "replace": strings.Replace, @@ -58,5 +64,6 @@ var TemplateFunctions = template.FuncMap{ "format2regex": Format2Regex, "hasPrefix": strings.HasPrefix, "sub": subtract, + "plus": plus, "firstSentence": FirstSentence, } diff --git a/mmv1/main.go b/mmv1/main.go index 82ed82ec160f..7235b1324b9c 100644 --- a/mmv1/main.go +++ b/mmv1/main.go @@ -108,7 +108,11 @@ func main() { return false }) - var productsForVersion []map[string]interface{} + // In order to only copy/compile files once per provider this must be called outside + // of the products loop. This will get called with the provider from the final iteration + // of the loop + var providerToGenerate *provider.Terraform + var productsForVersion []*api.Product for _, productName := range allProductFiles { productYamlPath := path.Join(productName, "go_product.yaml") @@ -166,7 +170,9 @@ func main() { productApi.Validate() // TODO Q2: set other providers via flag - providerToGenerate := provider.NewTerraform(productApi, *version, startTime) + providerToGenerate = provider.NewTerraform(productApi, *version, startTime) + + productsForVersion = append(productsForVersion, productApi) if !slices.Contains(productsToGenerate, productName) { log.Printf("%s not specified, skipping generation", productName) @@ -175,32 +181,18 @@ func main() { log.Printf("%s: Generating files", productName) providerToGenerate.Generate(*outputPath, productName, generateCode, generateDocs) - - // we need to preserve a single provider instance to use outside of this loop. 
- productsForVersion = append(productsForVersion, map[string]interface{}{ - "Definitions": productApi, - "Provider": providerToGenerate, - }) } - - // TODO Q2: copy common files } - slices.SortFunc(productsForVersion, func(p1, p2 map[string]interface{}) int { - return strings.Compare(strings.ToLower(p1["Definitions"].(*api.Product).Name), strings.ToLower(p2["Definitions"].(*api.Product).Name)) + slices.SortFunc(productsForVersion, func(p1, p2 *api.Product) int { + return strings.Compare(strings.ToLower(p1.Name), strings.ToLower(p2.Name)) }) - // In order to only copy/compile files once per provider this must be called outside - // of the products loop. This will get called with the provider from the final iteration - // of the loop - finalProduct := productsForVersion[len(productsForVersion)-1] - provider := finalProduct["Provider"].(*provider.Terraform) - - provider.CopyCommonFiles(*outputPath, generateCode, generateDocs) + providerToGenerate.CopyCommonFiles(*outputPath, generateCode, generateDocs) log.Printf("Compiling common files for terraform") if generateCode { - provider.CompileCommonFiles(*outputPath, productsForVersion, "") + providerToGenerate.CompileCommonFiles(*outputPath, productsForVersion, "") // TODO Q2: product overrides } diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index febaa113a050..ff16e3614ab8 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -276,8 +276,8 @@ func (t Terraform) CopyCommonFiles(outputFolder string, generateCode, generateDo t.CopyFileList(outputFolder, files) } -// To compile a new folder, add the folder to foldersCopiedToRootDir or foldersCopiedToGoogleDir. -// To compile a file, add the file to singleFiles +// To copy a new folder, add the folder to foldersCopiedToRootDir or foldersCopiedToGoogleDir. 
+// To copy a file, add the file to singleFiles func (t Terraform) getCommonCopyFiles(versionName string, generateCode, generateDocs bool) map[string]string { // key is the target file and value is the source file commonCopyFiles := make(map[string]string, 0) @@ -319,9 +319,9 @@ func (t Terraform) getCommonCopyFiles(versionName string, generateCode, generate // Case 3: When copy a single file, save the target as key and source as value to the map singleFiles singleFiles := map[string]string{ "go.sum": "third_party/terraform/go.sum", - "go.mod": "third_party/terraform/go.mod", + "go.mod": "third_party/terraform/go/go.mod", ".go-version": "third_party/terraform/.go-version", - "terraform-registry-manifest.json": "third_party/terraform/terraform-registry-manifest.json", + "terraform-registry-manifest.json": "third_party/terraform/go/terraform-registry-manifest.json", } maps.Copy(commonCopyFiles, singleFiles) @@ -389,11 +389,11 @@ func (t Terraform) CopyFileList(outputFolder string, files map[string]string) { // common_compile_file, // override_path = nil // ) -func (t Terraform) CompileCommonFiles(outputFolder string, products []map[string]interface{}, overridePath string) { +func (t Terraform) CompileCommonFiles(outputFolder string, products []*api.Product, overridePath string) { t.generateResourcesForVersion(products) files := t.getCommonCompileFiles(t.TargetVersionName) templateData := NewTemplateData(outputFolder, t.Version) - t.CompileFileList(outputFolder, files, *templateData) + t.CompileFileList(outputFolder, files, *templateData, products) } // To compile a new folder, add the folder to foldersCompiledToRootDir or foldersCompiledToGoogleDir. 
@@ -424,10 +424,10 @@ func (t Terraform) getCommonCompileFiles(versionName string) map[string]string { // Case 3: When compile a single file, save the target as key and source as value to the map singleFiles singleFiles := map[string]string{ - "main.go": "third_party/terraform/main.go.tmpl", - ".goreleaser.yml": "third_party/terraform/.goreleaser.yml.tmpl", - ".release/release-metadata.hcl": "third_party/terraform/release-metadata.hcl.tmpl", - ".copywrite.hcl": "third_party/terraform/.copywrite.hcl.tmpl", + "main.go": "third_party/terraform/go/main.go.tmpl", + ".goreleaser.yml": "third_party/terraform/go/.goreleaser.yml.tmpl", + ".release/release-metadata.hcl": "third_party/terraform/go/release-metadata.hcl.tmpl", + ".copywrite.hcl": "third_party/terraform/go/.copywrite.hcl.tmpl", } maps.Copy(commonCompileFiles, singleFiles) @@ -441,7 +441,7 @@ func (t Terraform) getCompileFilesInFolder(folderPath, targetDir string) map[str fname := strings.TrimPrefix(strings.Replace(path, "/go/", "/", 1), "third_party/terraform/") fname = strings.TrimSuffix(fname, ".tmpl") target := fname - if targetDir != "" { + if targetDir != "." { target = fmt.Sprintf("%s/%s", targetDir, fname) } m[target] = path @@ -453,17 +453,16 @@ func (t Terraform) getCompileFilesInFolder(folderPath, targetDir string) map[str } // def compile_file_list(output_folder, files, file_template, pwd = Dir.pwd) -func (t Terraform) CompileFileList(outputFolder string, files map[string]string, fileTemplate TemplateData) { +func (t Terraform) CompileFileList(outputFolder string, files map[string]string, fileTemplate TemplateData, products []*api.Product) { + providerWithProducts := ProviderWithProducts{ + Terraform: t, + Products: products, + } + if err := os.MkdirAll(outputFolder, os.ModePerm); err != nil { log.Println(fmt.Errorf("error creating output directory %v: %v", outputFolder, err)) } - // TODO: is this needed? 
- // err := os.Chdir(outputFolder) - // if err != nil { - // log.Fatalf("Could not move into the directory %s", outputFolder) - // } - for target, source := range files { targetFile := filepath.Join(outputFolder, target) targetDir := filepath.Dir(targetFile) @@ -477,12 +476,10 @@ func (t Terraform) CompileFileList(outputFolder string, files map[string]string, formatFile := filepath.Ext(targetFile) == ".go" - fileTemplate.GenerateFile(targetFile, source, t, formatFile, templates...) + fileTemplate.GenerateFile(targetFile, source, providerWithProducts, formatFile, templates...) t.replaceImportPath(outputFolder, target) t.addHashicorpCopyRightHeader(outputFolder, target) } - // TODO: is this needed? - // Dir.chdir pwd } // def add_hashicorp_copyright_header(output_folder, target) @@ -635,34 +632,48 @@ func (t Terraform) ImportPathFromVersion(v string) string { return fmt.Sprintf("%s/%s", tpg, dir) } -// # Gets the list of services dependent on the version ga, beta, and private -// # If there are some resources of a servcie is in GA, -// # then this service is in GA. Otherwise, the service is in BETA +func (t Terraform) ProviderFromVersion() string { + var dir string + switch t.TargetVersionName { + case "ga": + dir = RESOURCE_DIRECTORY_GA + case "beta": + dir = RESOURCE_DIRECTORY_BETA + default: + dir = RESOURCE_DIRECTORY_PRIVATE + } + return dir +} + +// Gets the list of services dependent on the version ga, beta, and private +// If there are some resources of a servcie is in GA, +// then this service is in GA. 
Otherwise, the service is in BETA // def get_mmv1_services_in_version(products, version) -// -// services = [] -// products.map do |product| -// product_definition = product[:definitions] -// if version == 'ga' -// some_resource_in_ga = false -// product_definition.objects.each do |object| -// break if some_resource_in_ga -// -// if !object.exclude && -// !object.not_in_version?(product_definition.version_obj_or_closest(version)) -// some_resource_in_ga = true -// end -// end -// -// services << product[:definitions].name.downcase if some_resource_in_ga -// else -// services << product[:definitions].name.downcase -// end -// end -// services -// -// end -// +func (t Terraform) GetMmv1ServicesInVersion(products []*api.Product) []string { + var services []string + for _, product := range products { + if t.TargetVersionName == "ga" { + someResourceInGA := false + for _, object := range product.Objects { + if someResourceInGA { + break + } + + if !object.Exclude && !object.NotInVersion(product.VersionObjOrClosest(t.TargetVersionName)) { + someResourceInGA = true + } + } + + if someResourceInGA { + services = append(services, strings.ToLower(product.Name)) + } + } else { + services = append(services, strings.ToLower(product.Name)) + } + } + return services +} + // def generate_newyaml(pwd, data) // // # @api.api_name is the service folder name @@ -899,10 +910,8 @@ func (t Terraform) ImportPathFromVersion(v string) string { // # The variable resources_for_version is used to generate resources in file // # mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb // def generate_resources_for_version(products, version) -func (t *Terraform) generateResourcesForVersion(products []map[string]interface{}) { - // products.each do |product| - for _, product := range products { - productDefinition := product["Definitions"].(*api.Product) +func (t *Terraform) generateResourcesForVersion(products []*api.Product) { + for _, productDefinition := range products { service := 
strings.ToLower(productDefinition.Name) for _, object := range productDefinition.Objects { if object.Exclude || object.NotInVersion(productDefinition.VersionObjOrClosest(t.TargetVersionName)) { @@ -1009,3 +1018,23 @@ func (t Terraform) DCLVersion() string { return "" } } + +// Gets the provider versions supported by a version +func (t Terraform) SupportedProviderVersions() []string { + var supported []string + for i, v := range product.ORDER { + if i == 0 { + continue + } + supported = append(supported, v) + if v == t.TargetVersionName { + break + } + } + return supported +} + +type ProviderWithProducts struct { + Terraform + Products []*api.Product +} diff --git a/mmv1/provider/terraform/common~copy.yaml b/mmv1/provider/terraform/common~copy.yaml index 1565f5b67fea..b9ad0c850979 100644 --- a/mmv1/provider/terraform/common~copy.yaml +++ b/mmv1/provider/terraform/common~copy.yaml @@ -46,7 +46,14 @@ <% end -%> <% - Dir["third_party/terraform/provider/**/*.go"].each do |file_path| + Dir["third_party/terraform/provider/*.go"].each do |file_path| + fname = file_path.delete_prefix("third_party/terraform/provider") +-%> +'<%= dir -%>/provider/<%= fname -%>': 'third_party/terraform/provider/<%= fname -%>' +<% end -%> + +<% + Dir["third_party/terraform/provider/universe/*.go"].each do |file_path| fname = file_path.delete_prefix("third_party/terraform/provider") -%> '<%= dir -%>/provider/<%= fname -%>': 'third_party/terraform/provider/<%= fname -%>' @@ -130,12 +137,21 @@ <% end -%> <% - Dir["third_party/terraform/scripts/**/*.*"].each do |file_path| + Dir["third_party/terraform/scripts/*.*"].each do |file_path| + next if file_path.end_with?('.erb') + fname = file_path.delete_prefix('third_party/terraform/') +-%> +'<%= fname -%>': '<%= file_path -%>' +<% end -%> + +<% + Dir["third_party/terraform/scripts/affectedtests/*.*"].each do |file_path| next if file_path.end_with?('.erb') fname = file_path.delete_prefix('third_party/terraform/') -%> '<%= fname -%>': '<%= file_path -%>' 
<% end -%> + '<%= dir -%>/test-fixtures/': 'third_party/terraform/test-fixtures' <% end -%> <% if generate_docs -%> diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index fd09ad261b0a..16d06276cc19 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/golang/glog" + "golang.org/x/exp/slices" ) func find(root, ext string) []string { @@ -73,7 +74,8 @@ func convertTemplate(folder string) int { } func convertAllHandwrittenFiles() int { - folders := []string{} + // Add third_party/terraform to convert files in this folder + folders := []string{"third_party/terraform"} // Get all of the service folders servicesRoot := "third_party/terraform/services" @@ -86,11 +88,26 @@ func convertAllHandwrittenFiles() int { folders = append(folders, rubyDir) } + // Get all of the utility folders + utilsExceptionFolders := []string{".teamcity", "website", "META.d", "go", "services", "test-fixtures", "versionq"} + utilsRoot := "third_party/terraform" + utilsFolders, err := ioutil.ReadDir(utilsRoot) + if err != nil { + log.Fatal(err) + } + for _, utilsFolder := range utilsFolders { + if !utilsFolder.IsDir() || slices.Contains(utilsExceptionFolders, utilsFolder.Name()) { + continue + } + rubyDir := fmt.Sprintf("%s/%s", "third_party/terraform", utilsFolder.Name()) + folders = append(folders, rubyDir) + } + counts := 0 for _, folder := range folders { counts += convertHandwrittenFiles(folder) } - log.Printf("%d service handwritten files in total", counts) + log.Printf("%d handwritten files in total", counts) return counts } @@ -107,7 +124,9 @@ func convertHandwrittenFiles(folder string) int { for _, file := range files { filePath := path.Join(folder, file) - + if checkExceptionList(filePath) { + continue + } data, err := os.ReadFile(filePath) if err != nil { log.Fatalf("Cannot open the file: %v", file) @@ -193,6 +212,13 @@ func replace(data []byte) []byte { } data = r.ReplaceAll(data, []byte(`{{- if eq 
$.TargetVersionName "ga" }}`)) + // Replace <%= "-" + version unless version == 'ga' -%> + r, err = regexp.Compile(`<%= "-" \+ version unless version == 'ga'[\s-]*%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if ne $.TargetVersionName "ga" -}}-{{$.TargetVersionName}}{{- end }}`)) + // Replace \n\n<% unless version.nil? || version == ['|"]ga['|"] -%> r, err = regexp.Compile(`\n\n(\s*)<% unless version\.nil\? \|\| version == ['|"]ga['|"] -%>`) if err != nil { @@ -207,6 +233,167 @@ func replace(data []byte) []byte { } data = r.ReplaceAll(data, []byte(`{{- if or (ne $.TargetVersionName "") (eq $.TargetVersionName "ga") }}`)) + // Replace <% if version.nil? || version == ['|"]ga['|"] -%> + r, err = regexp.Compile(`<% if version\.nil\? \|\| version == ['|"]ga['|"] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if or (eq $.TargetVersionName "") (eq $.TargetVersionName "ga") }}`)) + + // Replace <% Api::Product::Version::ORDER[1..Api::Product::Version::ORDER.index(version)].each do |aliased_version| -%> + r, err = regexp.Compile(`<% Api::Product::Version::ORDER\[1\.\.Api::Product::Version::ORDER\.index\(version\)\]\.each do \|aliased_version\| -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ range $$aliasedVersion := $.SupportedProviderVersions -}}`)) + + // Replace <%= provider_name -?%> + r, err = regexp.Compile(`<%= provider_name -?%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $.ProviderFromVersion }}`)) + + // Replace <% products.each do |product| -%> + r, err = regexp.Compile(`<% products\.each do \|product\| -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- 
range $$product := $.Products }}`)) + + // Replace <% products.map.each do |product| -%> + r, err = regexp.Compile(`<% products\.map\.each do \|product\| -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- range $$product := $.Products }}`)) + + // Replace <% resources_for_version.each do |object| -%> + r, err = regexp.Compile(`<% resources_for_version\.each do \|object\| -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- range $$object := $.ResourcesForVersion }}`)) + + // Replace <% unless object[:resource_name].nil? -%> + r, err = regexp.Compile(`<% unless object\[\:resource_name\]\.nil\? -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $$object.ResourceName }}`)) + + // Replace <% unless object[:iam_class_name].nil? -%> + r, err = regexp.Compile(`<% unless object\[\:iam_class_name\]\.nil\? 
-%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- if $$object.IamClassName }}`)) + + // Replace <%= object[:terraform_name] -%> + r, err = regexp.Compile(`<%= object\[\:terraform_name\] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $$object.TerraformName }}`)) + + // Replace <%= object[:resource_name] -%> + r, err = regexp.Compile(`<%= object\[\:resource_name\] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $$object.ResourceName }}`)) + + // Replace <%= object[:iam_class_name] -%> + r, err = regexp.Compile(`<%= object\[\:iam_class_name\] -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $$object.IamClassName }}`)) + + // Replace <%= product[:definitions].name -%> + r, err = regexp.Compile(`<%= product\[\:definitions\]\.name -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $$product.Name }}`)) + + // Replace <%= product[:definitions].name.underscore -%> + r, err = regexp.Compile(`<%= product\[\:definitions\]\.name\.underscore -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ underscore $$product.Name }}`)) + + // Replace <%= product[:definitions].name.underscore.upcase -%> + r, err = regexp.Compile(`<%= product\[\:definitions\]\.name\.underscore\.upcase -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ upper (underscore $$product.Name) }}`)) + + // Replace <%= product[:definitions].name.base_url -%> + r, err = regexp.Compile(`<%= product\[\:definitions\]\.base_url -%>`) + if err != nil { + log.Fatalf("Cannot 
compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $$product.BaseUrl }}`)) + + // Replace <%= product[:definitions].name.underscore.downcase -%> + r, err = regexp.Compile(`<%= product\[\:definitions\]\.name\.underscore\.downcase -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ lower (underscore $$product.Name) }}`)) + + // Replace <%= product[:definitions].name.downcase -%> + r, err = regexp.Compile(`<%= product\[\:definitions\]\.name\.downcase -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ lower $$product.Name }}`)) + + // Replace <% get_mmv1_services_in_version(products, version).each do |service| -%> + r, err = regexp.Compile(`<% get_mmv1_services_in_version\(products, version\)\.each do \|service\| -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{- range $$service := $.GetMmv1ServicesInVersion $.Products }}`)) + + // Replace <%= resource_count %> + r, err = regexp.Compile(`<%= resource_count %>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $.ResourceCount }}`)) + + // Replace <%= iam_resource_count %> + r, err = regexp.Compile(`<%= iam_resource_count %>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $.IAMResourceCount }}`)) + + // Replace <%= resource_count + iam_resource_count %> + r, err = regexp.Compile(`<%= resource_count \+ iam_resource_count %>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ plus $.ResourceCount $.IAMResourceCount }}`)) + + // Replace <%= service -%> + r, err = regexp.Compile(`<%= service -%>`) + if err != nil { + log.Fatalf("Cannot 
compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $$service }}`)) + + // Replace <%= aliased_version -%> + r, err = regexp.Compile(`<%= aliased_version -%>`) + if err != nil { + log.Fatalf("Cannot compile the regular expression: %v", err) + } + data = r.ReplaceAll(data, []byte(`{{ $$aliasedVersion }}`)) + // Replace <%= dcl_version(version) -%> r, err = regexp.Compile(`<%= dcl_version\(version\) -%>`) if err != nil { @@ -478,12 +665,12 @@ func replace(data []byte) []byte { } data = r.ReplaceAll(data, []byte(``)) - // Replace <%= "-" + version unless version == 'ga' -%> - r, err = regexp.Compile(`<%= "-" \+ version unless version == 'ga'[\s-]*%>`) + // Replace <% provider_name = version.nil? || version == 'ga' ? 'google' : 'google-' + version -%> + r, err = regexp.Compile(`<% provider_name = version.nil\? \|\| version == 'ga' \? 'google' : 'google-' \+ version -%>\n`) if err != nil { log.Fatalf("Cannot compile the regular expression: %v", err) } - data = r.ReplaceAll(data, []byte(`{{- if ne $.TargetVersionName "ga" -}}-{{$.TargetVersionName}}{{- end }}`)) + data = r.ReplaceAll(data, []byte(``)) // Replace .erb r, err = regexp.Compile(`\.erb`) @@ -504,6 +691,16 @@ func checkExceptionList(filePath string) bool { "custom_flatten/bigquery_table_ref_query_destinationtable.go", "unordered_list_customize_diff", "default_if_empty", + + // TODO: remove the following files from the exception list after all of the services are migrated to Go + // It will generate diffs when partial services are migrated. 
+ "provider/provider_mmv1_resources.go.erb", + "provider/provider.go.erb", + "fwmodels/provider_model.go.erb", + "fwprovider/framework_provider.go.erb", + "fwtransport/framework_config.go.erb", + "sweeper/gcp_sweeper_test.go.erb", + "transport/config.go.erb", } for _, t := range exceptionPaths { diff --git a/mmv1/third_party/terraform/acctest/go/framework_test_utils.go b/mmv1/third_party/terraform/acctest/go/framework_test_utils.go new file mode 100644 index 000000000000..54cf5d88b2bf --- /dev/null +++ b/mmv1/third_party/terraform/acctest/go/framework_test_utils.go @@ -0,0 +1,75 @@ +package acctest + +import ( + "context" + "fmt" + "log" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func GetFwTestProvider(t *testing.T) *frameworkTestProvider { + configsLock.RLock() + fwProvider, ok := fwProviders[t.Name()] + configsLock.RUnlock() + if ok { + return fwProvider + } + + var diags diag.Diagnostics + p := NewFrameworkTestProvider(t.Name()) + configureApiClient(context.Background(), &p.FrameworkProvider, &diags) + if diags.HasError() { + log.Fatalf("%d errors when configuring test provider client: first is %s", diags.ErrorsCount(), diags.Errors()[0].Detail()) + } + + return p +} + +// General test utils + +// TestExtractResourceAttr navigates a test's state to find the specified resource (or data source) attribute and makes the value +// accessible via the attributeValue string pointer. 
+func TestExtractResourceAttr(resourceName string, attributeName string, attributeValue *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] // To find a datasource, include `data.` at the start of the resourceName value + + if !ok { + return fmt.Errorf("resource name %s not found in state", resourceName) + } + + attrValue, ok := rs.Primary.Attributes[attributeName] + + if !ok { + return fmt.Errorf("attribute %s not found in resource %s state", attributeName, resourceName) + } + + *attributeValue = attrValue + + return nil + } +} + +// TestCheckAttributeValuesEqual compares two string pointers, which have been used to retrieve attribute values from the test's state. +func TestCheckAttributeValuesEqual(i *string, j *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if testStringValue(i) != testStringValue(j) { + return fmt.Errorf("attribute values are different, got %s and %s", testStringValue(i), testStringValue(j)) + } + + return nil + } +} + +// testStringValue returns string values from string pointers, handling nil pointers. 
+func testStringValue(sPtr *string) string { + if sPtr == nil { + return "" + } + + return *sPtr +} diff --git a/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl b/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl new file mode 100644 index 000000000000..bb43591b891d --- /dev/null +++ b/mmv1/third_party/terraform/acctest/go/test_utils.go.tmpl @@ -0,0 +1,268 @@ + +package acctest + +import ( + "archive/zip" + "bytes" + "context" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "os" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-mux/tf5muxserver" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func CheckDataSourceStateMatchesResourceState(dataSourceName, resourceName string) func(*terraform.State) error { + return CheckDataSourceStateMatchesResourceStateWithIgnores(dataSourceName, resourceName, map[string]struct{}{}) +} + +func CheckDataSourceStateMatchesResourceStateWithIgnores(dataSourceName, resourceName string, ignoreFields map[string]struct{}) func(*terraform.State) error { + return func(s *terraform.State) error { + ds, ok := s.RootModule().Resources[dataSourceName] + if !ok { + return fmt.Errorf("can't find %s in state", dataSourceName) + } + + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("can't find %s in state", resourceName) + } + + dsAttr := ds.Primary.Attributes + rsAttr := rs.Primary.Attributes + + errMsg := "" + // Data sources are often derived from resources, so iterate over the resource fields to + // make sure all fields are accounted for in the data source. + // If a field exists in the data source but not in the resource, its expected value should + // be checked separately. 
+ for k := range rsAttr { + if _, ok := ignoreFields[k]; ok { + continue + } + if _, ok := ignoreFields["labels.%"]; ok && strings.HasPrefix(k, "labels.") { + continue + } + if _, ok := ignoreFields["terraform_labels.%"]; ok && strings.HasPrefix(k, "terraform_labels.") { + continue + } + if k == "%" { + continue + } + if dsAttr[k] != rsAttr[k] { + // ignore data sources where an empty list is being compared against a null list. + if k[len(k)-1:] == "#" && (dsAttr[k] == "" || dsAttr[k] == "0") && (rsAttr[k] == "" || rsAttr[k] == "0") { + continue + } + errMsg += fmt.Sprintf("%s is %s; want %s\n", k, dsAttr[k], rsAttr[k]) + } + } + + if errMsg != "" { + return errors.New(errMsg) + } + + return nil + } +} + +// General test utils + +// MuxedProviders returns the correct test provider (between the sdk version or the framework version) +func MuxedProviders(testName string) (func() tfprotov5.ProviderServer, error) { + ctx := context.Background() + + providers := []func() tfprotov5.ProviderServer{ + providerserver.NewProtocol5(NewFrameworkTestProvider(testName)), // framework provider + GetSDKProvider(testName).GRPCProvider, // sdk provider + } + + muxServer, err := tf5muxserver.NewMuxServer(ctx, providers...) 
+ + if err != nil { + return nil, err + } + + return muxServer.ProviderServer, nil +} + +func RandString(t *testing.T, length int) string { + if !IsVcrEnabled() { + return acctest.RandString(length) + } + envPath := os.Getenv("VCR_PATH") + vcrMode := os.Getenv("VCR_MODE") + s, err := vcrSource(t, envPath, vcrMode) + if err != nil { + // At this point we haven't created any resources, so fail fast + t.Fatal(err) + } + + r := rand.New(s.source) + result := make([]byte, length) + set := "abcdefghijklmnopqrstuvwxyz012346789" + for i := 0; i < length; i++ { + result[i] = set[r.Intn(len(set))] + } + return string(result) +} + +func RandInt(t *testing.T) int { + if !IsVcrEnabled() { + return acctest.RandInt() + } + envPath := os.Getenv("VCR_PATH") + vcrMode := os.Getenv("VCR_MODE") + s, err := vcrSource(t, envPath, vcrMode) + if err != nil { + // At this point we haven't created any resources, so fail fast + t.Fatal(err) + } + + return rand.New(s.source).Int() +} + +// ProtoV5ProviderFactories returns a muxed ProviderServer that uses the provider code from this repo (SDK and plugin-framework). +// Used to set ProtoV5ProviderFactories in a resource.TestStep within an acceptance test. 
+func ProtoV5ProviderFactories(t *testing.T) map[string]func() (tfprotov5.ProviderServer, error) { + return map[string]func() (tfprotov5.ProviderServer, error){ + "google": func() (tfprotov5.ProviderServer, error) { + provider, err := MuxedProviders(t.Name()) + return provider(), err + }, + } +} + +// ProtoV5ProviderBetaFactories returns the same as ProtoV5ProviderFactories only the provider is mapped with +// "google-beta" to ensure that registry examples use `google-beta` if the example is versioned as beta; +// normal beta tests should continue to use ProtoV5ProviderFactories +func ProtoV5ProviderBetaFactories(t *testing.T) map[string]func() (tfprotov5.ProviderServer, error) { + return map[string]func() (tfprotov5.ProviderServer, error){ +{{/* Add a google-#{version} provider for each version that is supported by this version. This allows us to run google-beta tests within a google-alpha provider. */ -}} +{{ range $aliasedVersion := $.SupportedProviderVersions -}} + "google-{{ $aliasedVersion }}": func() (tfprotov5.ProviderServer, error) { + provider, err := MuxedProviders(t.Name()) + return provider(), err + }, +{{- end }} + } +} + +// This is a Printf sibling (Nprintf; Named Printf), which handles strings like +// Nprintf("Hello %{target}!", map[string]interface{}{"target":"world"}) == "Hello world!". +// This is particularly useful for generated tests, where we don't want to use Printf, +// since that would require us to generate a very particular ordering of arguments. 
+func Nprintf(format string, params map[string]interface{}) string { + for key, val := range params { + format = strings.Replace(format, "%{"+key+"}", fmt.Sprintf("%v", val), -1) + } + return format +} + +func TestBucketName(t *testing.T) string { + return fmt.Sprintf("%s-%d", "tf-test-bucket", RandInt(t)) +} + +func CreateZIPArchiveForCloudFunctionSource(t *testing.T, sourcePath string) string { + source, err := ioutil.ReadFile(sourcePath) + if err != nil { + t.Fatal(err.Error()) + } + // Create a buffer to write our archive to. + buf := new(bytes.Buffer) + + // Create a new zip archive. + w := zip.NewWriter(buf) + + f, err := w.Create("index.js") + if err != nil { + t.Fatal(err.Error()) + } + _, err = f.Write(source) + if err != nil { + t.Fatal(err.Error()) + } + + // Make sure to check the error on Close. + err = w.Close() + if err != nil { + t.Fatal(err.Error()) + } + // Create temp file to write zip to + tmpfile, err := ioutil.TempFile("", "sourceArchivePrefix") + if err != nil { + t.Fatal(err.Error()) + } + + if _, err := tmpfile.Write(buf.Bytes()); err != nil { + t.Fatal(err.Error()) + } + if err := tmpfile.Close(); err != nil { + t.Fatal(err.Error()) + } + return tmpfile.Name() +} + +// providerConfigEnvNames returns a list of all the environment variables that could be set by a user to configure the provider +func providerConfigEnvNames() []string { + + envs := []string{} + + // Use existing collections of ENV names + envVarsSets := [][]string{ + envvar.CredsEnvVars, // credentials field + envvar.ProjectEnvVars, // project field + envvar.RegionEnvVars, // region field + envvar.ZoneEnvVars, // zone field + } + for _, set := range envVarsSets { + envs = append(envs, set...) 
+ } + + // Add remaining ENVs + envs = append(envs, "GOOGLE_OAUTH_ACCESS_TOKEN") // access_token field + envs = append(envs, "GOOGLE_BILLING_PROJECT") // billing_project field + envs = append(envs, "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT") // impersonate_service_account field + envs = append(envs, "USER_PROJECT_OVERRIDE") // user_project_override field + envs = append(envs, "CLOUDSDK_CORE_REQUEST_REASON") // request_reason field + + envs = append(envs, "GOOGLE_APPLICATION_CREDENTIALS") // ADC used to configure clients when provider lacks credentials and access_token + + return envs +} + +// UnsetProviderConfigEnvs unsets any ENVs in the test environment that +// configure the provider. +// The testing package will restore the original values after the test +func UnsetTestProviderConfigEnvs(t *testing.T) { + envs := providerConfigEnvNames() + if len(envs) > 0 { + for _, k := range envs { + t.Setenv(k, "") + } + } +} + +func SetupTestEnvs(t *testing.T, envValues map[string]string) { + // Set ENVs + if len(envValues) > 0 { + for k, v := range envValues { + t.Setenv(k, v) + } + } +} + +// Returns a fake credentials JSON string with the client_email set to a test-specific value +func GenerateFakeCredentialsJson(testId string) string { + json := fmt.Sprintf(`{"private_key_id": "foo","private_key": "bar","client_email": "%s@example.com","client_id": "id@foo.com","type": "service_account"}`, testId) + return json +} diff --git a/mmv1/third_party/terraform/fwprovider/go/framework_provider_test.go.tmpl b/mmv1/third_party/terraform/fwprovider/go/framework_provider_test.go.tmpl new file mode 100644 index 000000000000..c5a53d10d2ec --- /dev/null +++ b/mmv1/third_party/terraform/fwprovider/go/framework_provider_test.go.tmpl @@ -0,0 +1,286 @@ +package fwprovider_test + +import ( + "fmt" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/fwresource" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccFrameworkProviderMeta_setModuleName(t *testing.T) { + // TODO: https://github.com/hashicorp/terraform-provider-google/issues/14158 + acctest.SkipIfVcr(t) + t.Parallel() + + moduleName := "my-module" + managedZoneName := fmt.Sprintf("tf-test-zone-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducerFramework(t), + Steps: []resource.TestStep{ + { + Config: testAccFrameworkProviderMeta_setModuleName(moduleName, managedZoneName, acctest.RandString(t, 10)), + }, + }, + }) +} + +func TestAccFrameworkProviderBasePath_setInvalidBasePath(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + CheckDestroy: testAccCheckComputeAddressDestroyProducer(t), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.58.0", + Source: "hashicorp/google{{- if ne $.TargetVersionName "ga" -}}-{{$.TargetVersionName}}{{- end }}", + }, + }, + Config: testAccProviderBasePath_setBasePath("https://www.example.com/compute/beta/", acctest.RandString(t, 10)), + ExpectError: regexp.MustCompile("got HTTP response code 404 with body"), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Config: testAccProviderBasePath_setBasePath("https://www.example.com/compute/beta/", acctest.RandString(t, 10)), + ExpectError: regexp.MustCompile("got HTTP response code 404 with body"), + }, + 
}, + }) +} + +func TestAccFrameworkProviderBasePath_setBasePath(t *testing.T) { + // TODO: https://github.com/hashicorp/terraform-provider-google/issues/14158 + acctest.SkipIfVcr(t) + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + CheckDestroy: testAccCheckDNSManagedZoneDestroyProducerFramework(t), + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.58.0", + Source: "hashicorp/google{{- if ne $.TargetVersionName "ga" -}}-{{$.TargetVersionName}}{{- end }}", + }, + }, + Config: testAccFrameworkProviderBasePath_setBasePath("https://www.googleapis.com/dns/v1beta2/", acctest.RandString(t, 10)), + }, + { + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.58.0", + Source: "hashicorp/google{{- if ne $.TargetVersionName "ga" -}}-{{$.TargetVersionName}}{{- end }}", + }, + }, + ResourceName: "google_dns_managed_zone.foo", + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Config: testAccFrameworkProviderBasePath_setBasePath("https://www.googleapis.com/dns/v1beta2/", acctest.RandString(t, 10)), + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ResourceName: "google_dns_managed_zone.foo", + ImportState: true, + ImportStateVerify: true, + }, + { + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Config: testAccFrameworkProviderBasePath_setBasePathstep3("https://www.googleapis.com/dns/v1beta2/", acctest.RandString(t, 10)), + }, + }, + }) +} + +func testAccFrameworkProviderMeta_setModuleName(key, managedZoneName, recordSetName string) string { + return fmt.Sprintf(` +terraform { + provider_meta "google" { + module_name = "%s" + } +} + +provider "google" {} + +resource "google_dns_managed_zone" "zone" { + name = "%s-hashicorptest-com" + dns_name = "%s.hashicorptest.com." 
+} + +resource "google_dns_record_set" "rs" { + managed_zone = google_dns_managed_zone.zone.name + name = "%s.${google_dns_managed_zone.zone.dns_name}" + type = "A" + ttl = 300 + rrdatas = [ + "192.168.1.0", + ] +} + +data "google_dns_record_set" "rs" { + managed_zone = google_dns_record_set.rs.managed_zone + name = google_dns_record_set.rs.name + type = google_dns_record_set.rs.type +}`, key, managedZoneName, managedZoneName, recordSetName) +} + +func testAccFrameworkProviderBasePath_setBasePath(endpoint, name string) string { + return fmt.Sprintf(` +provider "google" { + alias = "dns_custom_endpoint" + dns_custom_endpoint = "%s" +} + +resource "google_dns_managed_zone" "foo" { + provider = google.dns_custom_endpoint + name = "tf-test-zone-%s" + dns_name = "tf-test-zone-%s.hashicorptest.com." + description = "QA DNS zone" +} + +data "google_dns_managed_zone" "qa" { + provider = google.dns_custom_endpoint + name = google_dns_managed_zone.foo.name +}`, endpoint, name, name) +} + +func testAccFrameworkProviderBasePath_setBasePathstep3(endpoint, name string) string { + return fmt.Sprintf(` +provider "google" { + alias = "dns_custom_endpoint" + dns_custom_endpoint = "%s" +} + +resource "google_dns_managed_zone" "foo" { + provider = google.dns_custom_endpoint + name = "tf-test-zone-%s" + dns_name = "tf-test-zone-%s.hashicorptest.com." 
+ description = "QA DNS zone" +} +`, endpoint, name, name) +} + + +// Copy the function from the provider_test package to here +// as that function is in the _test.go file and not importable +func testAccProviderBasePath_setBasePath(endpoint, name string) string { + return fmt.Sprintf(` +provider "google" { + alias = "compute_custom_endpoint" + compute_custom_endpoint = "%s" +} + +resource "google_compute_address" "default" { + provider = google.compute_custom_endpoint + name = "tf-test-address-%s" +}`, endpoint, name) +} + +func testAccProviderMeta_setModuleName(key, name string) string { + return fmt.Sprintf(` +terraform { + provider_meta "google" { + module_name = "%s" + } +} + +resource "google_compute_address" "default" { + name = "tf-test-address-%s" +}`, key, name) +} + +// Copy the function testAccCheckComputeAddressDestroyProducer from the dns_test package to here, +// as that function is in the _test.go file and not importable. +// +// testAccCheckDNSManagedZoneDestroyProducerFramework is the framework version of the generated testAccCheckDNSManagedZoneDestroyProducer +// when we automate this, we'll use the automated version and can get rid of this +func testAccCheckDNSManagedZoneDestroyProducerFramework(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_dns_managed_zone" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + p := acctest.GetFwTestProvider(t) + + url, err := fwresource.ReplaceVarsForFrameworkTest(&p.FrameworkProvider.FrameworkProviderConfig, rs, "{{"{{"}}DNSBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/managedZones/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if !p.BillingProject.IsNull() && p.BillingProject.String() != "" { + billingProject = p.BillingProject.String() + } + + _, diags := 
fwtransport.SendFrameworkRequest(&p.FrameworkProvider.FrameworkProviderConfig, "GET", billingProject, url, p.UserAgent, nil) + if !diags.HasError() { + return fmt.Errorf("DNSManagedZone still exists at %s", url) + } + } + + return nil + } +} + +// Copy the Mmv1 generated function testAccCheckComputeAddressDestroyProducer from the compute_test package to here, +// as that function is in the _test.go file and not importable. +func testAccCheckComputeAddressDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_address" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/addresses/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("ComputeAddress still exists at %s", url) + } + } + + return nil + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/fwtransport/go/framework_config_test.go b/mmv1/third_party/terraform/fwtransport/go/framework_config_test.go new file mode 100644 index 000000000000..a5f7a186dd4d --- /dev/null +++ b/mmv1/third_party/terraform/fwtransport/go/framework_config_test.go @@ -0,0 +1,1743 @@ +package fwtransport_test + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + 
"github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/fwmodels" + "github.com/hashicorp/terraform-provider-google/google/fwtransport" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestFrameworkProvider_LoadAndValidateFramework_project(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under test experiencing errors, and could be addressed in future refactoring. + // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.StringValue // Sometimes the value is mutated, and no longer matches the original value we supply + ExpectedConfigStructValue basetypes.StringValue // Sometimes the value in config struct differs from what is in the data model + ExpectError bool + }{ + "project value set in the provider schema is not overridden by environment variables": { + ConfigValues: fwmodels.ProviderModel{ + Project: types.StringValue("project-from-config"), + }, + EnvVariables: map[string]string{ + "GOOGLE_PROJECT": "project-from-GOOGLE_PROJECT", + "GOOGLE_CLOUD_PROJECT": "project-from-GOOGLE_CLOUD_PROJECT", + "GCLOUD_PROJECT": "project-from-GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT": "project-from-CLOUDSDK_CORE_PROJECT", + }, + ExpectedDataModelValue: types.StringValue("project-from-config"), + ExpectedConfigStructValue: types.StringValue("project-from-config"), + }, + "project value can be set by environment variable: GOOGLE_PROJECT is used first": { + 
ConfigValues: fwmodels.ProviderModel{ + Project: types.StringNull(), // unset + }, + EnvVariables: map[string]string{ + "GOOGLE_PROJECT": "project-from-GOOGLE_PROJECT", + "GOOGLE_CLOUD_PROJECT": "project-from-GOOGLE_CLOUD_PROJECT", + "GCLOUD_PROJECT": "project-from-GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT": "project-from-CLOUDSDK_CORE_PROJECT", + }, + ExpectedDataModelValue: types.StringValue("project-from-GOOGLE_PROJECT"), + ExpectedConfigStructValue: types.StringValue("project-from-GOOGLE_PROJECT"), + }, + "project value can be set by environment variable: GOOGLE_CLOUD_PROJECT is used second": { + ConfigValues: fwmodels.ProviderModel{ + Project: types.StringNull(), // unset + }, + EnvVariables: map[string]string{ + // GOOGLE_PROJECT unset + "GOOGLE_CLOUD_PROJECT": "project-from-GOOGLE_CLOUD_PROJECT", + "GCLOUD_PROJECT": "project-from-GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT": "project-from-CLOUDSDK_CORE_PROJECT", + }, + ExpectedDataModelValue: types.StringValue("project-from-GOOGLE_CLOUD_PROJECT"), + ExpectedConfigStructValue: types.StringValue("project-from-GOOGLE_CLOUD_PROJECT"), + }, + "project value can be set by environment variable: GCLOUD_PROJECT is used third": { + ConfigValues: fwmodels.ProviderModel{ + Project: types.StringNull(), // unset + }, + EnvVariables: map[string]string{ + // GOOGLE_PROJECT unset + // GOOGLE_CLOUD_PROJECT unset + "GCLOUD_PROJECT": "project-from-GCLOUD_PROJECT", + "CLOUDSDK_CORE_PROJECT": "project-from-CLOUDSDK_CORE_PROJECT", + }, + ExpectedDataModelValue: types.StringValue("project-from-GCLOUD_PROJECT"), + ExpectedConfigStructValue: types.StringValue("project-from-GCLOUD_PROJECT"), + }, + "project value can be set by environment variable: CLOUDSDK_CORE_PROJECT is used fourth": { + ConfigValues: fwmodels.ProviderModel{ + Project: types.StringNull(), // unset + }, + EnvVariables: map[string]string{ + // GOOGLE_PROJECT unset + // GOOGLE_CLOUD_PROJECT unset + // GCLOUD_PROJECT unset + "CLOUDSDK_CORE_PROJECT": 
"project-from-CLOUDSDK_CORE_PROJECT", + }, + ExpectedDataModelValue: types.StringValue("project-from-CLOUDSDK_CORE_PROJECT"), + ExpectedConfigStructValue: types.StringValue("project-from-CLOUDSDK_CORE_PROJECT"), + }, + "when no project values are provided via config or environment variables, the field remains unset without error": { + ConfigValues: fwmodels.ProviderModel{ + Project: types.StringNull(), // unset + }, + ExpectedDataModelValue: types.StringNull(), + ExpectedConfigStructValue: types.StringNull(), + }, + // Handling empty strings in config + "when project is set as an empty string the empty string is used and not ignored": { + ConfigValues: fwmodels.ProviderModel{ + Project: types.StringValue(""), + }, + ExpectedDataModelValue: types.StringValue(""), + ExpectedConfigStructValue: types.StringValue(""), + }, + "when project is set as an empty string, the empty string is not ignored in favor of an environment variable": { + ConfigValues: fwmodels.ProviderModel{ + Project: types.StringValue(""), + }, + EnvVariables: map[string]string{ + "GOOGLE_PROJECT": "project-from-GOOGLE_PROJECT", + }, + ExpectedDataModelValue: types.StringValue(""), + ExpectedConfigStructValue: types.StringValue(""), + }, + // Handling unknown values + "when project is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + Project: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_PROJECT": "project-from-GOOGLE_PROJECT", + }, + ExpectedDataModelValue: types.StringValue("project-from-GOOGLE_PROJECT"), + ExpectedConfigStructValue: types.StringValue("project-from-GOOGLE_PROJECT"), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := 
tc.ConfigValues + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if !data.Project.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want project in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.Project.String()) + } + // Checking the value passed to the config structs + if !p.Project.Equal(tc.ExpectedConfigStructValue) { + t.Fatalf("want project in the `FrameworkProviderConfig` struct to be `%s`, but got the value `%s`", tc.ExpectedConfigStructValue, p.Project.String()) + } + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_credentials(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under test experiencing errors, and could be addressed in future refactoring. 
+ // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + const pathToMissingFile string = "./this/path/doesnt/exist.json" // Doesn't exist + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.StringValue + // ExpectedConfigStructValue not used here, as credentials info isn't stored in the config struct + ExpectError bool + }{ + "credentials can be configured as a path to a credentials JSON file": { + ConfigValues: fwmodels.ProviderModel{ + Credentials: types.StringValue(transport_tpg.TestFakeCredentialsPath), + }, + ExpectedDataModelValue: types.StringValue(transport_tpg.TestFakeCredentialsPath), + }, + "configuring credentials as a path to a non-existent file results in an error": { + ConfigValues: fwmodels.ProviderModel{ + Credentials: types.StringValue(pathToMissingFile), + }, + ExpectError: true, + }, + "credentials set in the config are not overridden by environment variables": { + ConfigValues: fwmodels.ProviderModel{ + Credentials: types.StringValue(acctest.GenerateFakeCredentialsJson("test")), + }, + EnvVariables: map[string]string{ + "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), + "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), + "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), + "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), + }, + ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("test")), + }, + "when credentials is unset in the config, environment variables are used: GOOGLE_CREDENTIALS used first": { + ConfigValues: fwmodels.ProviderModel{ + Credentials: types.StringNull(), // unset + }, + EnvVariables: map[string]string{ + "GOOGLE_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS"), + 
"GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), + "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), + "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), + }, + ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("GOOGLE_CREDENTIALS")), + }, + "when credentials is unset in the config, environment variables are used: GOOGLE_CLOUD_KEYFILE_JSON used second": { + ConfigValues: fwmodels.ProviderModel{ + Credentials: types.StringNull(), // unset + }, + EnvVariables: map[string]string{ + // GOOGLE_CREDENTIALS not set + "GOOGLE_CLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON"), + "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), + "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), + }, + ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("GOOGLE_CLOUD_KEYFILE_JSON")), + }, + "when credentials is unset in the config, environment variables are used: GCLOUD_KEYFILE_JSON used third": { + ConfigValues: fwmodels.ProviderModel{ + Credentials: types.StringNull(), // unset + }, + EnvVariables: map[string]string{ + // GOOGLE_CREDENTIALS not set + // GOOGLE_CLOUD_KEYFILE_JSON not set + "GCLOUD_KEYFILE_JSON": acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON"), + "GOOGLE_APPLICATION_CREDENTIALS": acctest.GenerateFakeCredentialsJson("GOOGLE_APPLICATION_CREDENTIALS"), + }, + ExpectedDataModelValue: types.StringValue(acctest.GenerateFakeCredentialsJson("GCLOUD_KEYFILE_JSON")), + }, + "when credentials is unset in the config (and access_token unset), GOOGLE_APPLICATION_CREDENTIALS is used for auth but not to set values in the config": { + ConfigValues: fwmodels.ProviderModel{ + Credentials: types.StringNull(), // unset + }, + EnvVariables: map[string]string{ + // 
GOOGLE_CREDENTIALS not set + // GOOGLE_CLOUD_KEYFILE_JSON not set + // GCLOUD_KEYFILE_JSON not set + "GOOGLE_APPLICATION_CREDENTIALS": transport_tpg.TestFakeCredentialsPath, // needs to be a path to a file when used by code + }, + ExpectedDataModelValue: types.StringNull(), + }, + // Error states + "when credentials is set to an empty string in the config the value isn't ignored and results in an error": { + ConfigValues: fwmodels.ProviderModel{ + Credentials: types.StringValue(""), + }, + EnvVariables: map[string]string{ + "GOOGLE_APPLICATION_CREDENTIALS": transport_tpg.TestFakeCredentialsPath, // needs to be a path to a file when used by code + }, + ExpectError: true, + }, + // NOTE: these tests can't run in Cloud Build due to ADC locating credentials despite `GOOGLE_APPLICATION_CREDENTIALS` being unset + // See https://cloud.google.com/docs/authentication/application-default-credentials#search_order + // Also, when running these tests locally you need to run `gcloud auth application-default revoke` to ensure your machine isn't supplying ADCs + // "error returned if credentials is set as an empty string and GOOGLE_APPLICATION_CREDENTIALS is unset": { + // ConfigValues: fwmodels.ProviderModel{ + // Credentials: types.StringValue(""), + // }, + // EnvVariables: map[string]string{ + // "GOOGLE_APPLICATION_CREDENTIALS": "", + // }, + // ExpectError: true, + // }, + // "error returned if neither credentials nor access_token set in the provider config, and GOOGLE_APPLICATION_CREDENTIALS is unset": { + // EnvVariables: map[string]string{ + // "GOOGLE_APPLICATION_CREDENTIALS": "", + // }, + // ExpectError: true, + // }, + // Handling unknown values - see separate `TestFrameworkProvider_LoadAndValidateFramework_credentials_unknown` test + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + 
providerversion := "999" + diags := diag.Diagnostics{} + + data := tc.ConfigValues + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + if !data.Credentials.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want credentials to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.Credentials.String()) + } + // fwtransport.FrameworkProviderConfig does not store the credentials info, so test does not make assertions on config struct + }) + } +} + +// NOTE: these tests can't run in Cloud Build due to ADC locating credentials despite `GOOGLE_APPLICATION_CREDENTIALS` being unset +// See https://cloud.google.com/docs/authentication/application-default-credentials#search_order +// Also, when running these tests locally you need to run `gcloud auth application-default revoke` to ensure your machine isn't supplying ADCs +// func TestFrameworkProvider_LoadAndValidateFramework_credentials_unknown(t *testing.T) { +// // This test case is kept separate from other credentials tests, as it requires comparing +// // error messages returned by two different error states: +// // - When credentials = Null +// // - When credentials = Unknown + +// t.Run("the same error is returned whether credentials is set as a null or unknown value (and access_token isn't set)", func(t *testing.T) { +// // Arrange +// acctest.UnsetTestProviderConfigEnvs(t) + +// ctx := context.Background() +// 
tfVersion := "foobar" +// providerversion := "999" + +// impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + +// // Null data and error collection +// diagsNull := diag.Diagnostics{} +// dataNull := fwmodels.ProviderModel{ +// Credentials: types.StringNull(), +// } +// dataNull.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + +// // Unknown data and error collection +// diagsUnknown := diag.Diagnostics{} +// dataUnknown := fwmodels.ProviderModel{ +// Credentials: types.StringUnknown(), +// } +// dataUnknown.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + +// pNull := fwtransport.FrameworkProviderConfig{} +// pUnknown := fwtransport.FrameworkProviderConfig{} + +// // Act +// pNull.LoadAndValidateFramework(ctx, &dataNull, tfVersion, &diagsNull, providerversion) +// pUnknown.LoadAndValidateFramework(ctx, &dataUnknown, tfVersion, &diagsUnknown, providerversion) + +// // Assert +// if !diagsNull.HasError() { +// t.Fatalf("expect errors when credentials is null, but [%d] errors occurred", diagsNull.ErrorsCount()) +// } +// if !diagsUnknown.HasError() { +// t.Fatalf("expect errors when credentials is unknown, but [%d] errors occurred", diagsUnknown.ErrorsCount()) +// } + +// errNull := diagsNull.Errors() +// errUnknown := diagsUnknown.Errors() +// for i := 0; i < len(errNull); i++ { +// if errNull[i] != errUnknown[i] { +// t.Fatalf("expect errors to be the same for null and unknown credentials values, instead got \nnull=`%s` \nunknown=%s", errNull[i], errUnknown[i]) +// } +// } +// }) +// } + +func TestFrameworkProvider_LoadAndValidateFramework_billingProject(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under test experiencing errors, and could be addressed in future refactoring. 
+ // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.StringValue + ExpectedConfigStructValue basetypes.StringValue + ExpectError bool + }{ + "billing_project value set in the provider schema is not overridden by environment variables": { + ConfigValues: fwmodels.ProviderModel{ + BillingProject: types.StringValue("billing-project-from-config"), + }, + EnvVariables: map[string]string{ + "GOOGLE_BILLING_PROJECT": "billing-project-from-env", + }, + ExpectedDataModelValue: types.StringValue("billing-project-from-config"), + ExpectedConfigStructValue: types.StringValue("billing-project-from-config"), + }, + "billing_project can be set by environment variable, when no value supplied via the config": { + ConfigValues: fwmodels.ProviderModel{ + BillingProject: types.StringNull(), + }, + EnvVariables: map[string]string{ + "GOOGLE_BILLING_PROJECT": "billing-project-from-env", + }, + ExpectedDataModelValue: types.StringValue("billing-project-from-env"), + ExpectedConfigStructValue: types.StringValue("billing-project-from-env"), + }, + "when no billing_project values are provided via config or environment variables, the field remains unset without error": { + ConfigValues: fwmodels.ProviderModel{ + BillingProject: types.StringNull(), + }, + ExpectedDataModelValue: types.StringNull(), + ExpectedConfigStructValue: types.StringNull(), + }, + // Handling empty strings in config + "when billing_project is set as an empty string the empty string is used and not ignored": { + ConfigValues: fwmodels.ProviderModel{ + BillingProject: types.StringValue(""), + }, + ExpectedDataModelValue: types.StringValue(""), + ExpectedConfigStructValue: 
types.StringValue(""), + }, + "when billing_project is set as an empty string, the empty string is not ignored in favor of an environment variable": { + ConfigValues: fwmodels.ProviderModel{ + BillingProject: types.StringValue(""), + }, + EnvVariables: map[string]string{ + "GOOGLE_BILLING_PROJECT": "billing-project-from-env", + }, + ExpectedDataModelValue: types.StringValue(""), + ExpectedConfigStructValue: types.StringValue(""), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + data := tc.ConfigValues + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if !data.BillingProject.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want billing_project in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.BillingProject.String()) + } + // Checking the value passed to the config structs + if !p.BillingProject.Equal(tc.ExpectedConfigStructValue) { + t.Fatalf("want billing_project in the `FrameworkProviderConfig` struct to be `%s`, but got the value `%s`", 
tc.ExpectedConfigStructValue, p.BillingProject.String()) + } + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_region(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under test experiencing errors, and could be addressed in future refactoring. + // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.StringValue + ExpectedConfigStructValue basetypes.StringValue + ExpectError bool + }{ + "region value set in the provider config is not overridden by ENVs": { + ConfigValues: fwmodels.ProviderModel{ + Region: types.StringValue("region-from-config"), + }, + EnvVariables: map[string]string{ + "GOOGLE_REGION": "region-from-env", + }, + ExpectedDataModelValue: types.StringValue("region-from-config"), + ExpectedConfigStructValue: types.StringValue("region-from-config"), + }, + "region values can be supplied as a self link": { + ConfigValues: fwmodels.ProviderModel{ + Region: types.StringValue("https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1"), + }, + ExpectedDataModelValue: types.StringValue("https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1"), + ExpectedConfigStructValue: types.StringValue("us-central1"), + }, + "region value can be set by environment variable: GOOGLE_REGION is used": { + ConfigValues: fwmodels.ProviderModel{ + Region: types.StringNull(), + }, + EnvVariables: map[string]string{ + "GOOGLE_REGION": "region-from-env", + }, + ExpectedDataModelValue: types.StringValue("region-from-env"), + ExpectedConfigStructValue: 
types.StringValue("region-from-env"), + }, + "when no region values are provided via config or environment variables, the field remains unset without error": { + ConfigValues: fwmodels.ProviderModel{ + Region: types.StringNull(), + }, + ExpectedDataModelValue: types.StringNull(), + ExpectedConfigStructValue: types.StringNull(), + }, + // Handling empty strings in config + "when region is set as an empty string the empty string is used and not ignored": { + ConfigValues: fwmodels.ProviderModel{ + Region: types.StringValue(""), + }, + ExpectedDataModelValue: types.StringValue(""), + ExpectedConfigStructValue: types.StringValue(""), + }, + "when region is set as an empty string, the empty string is not ignored in favor of an environment variable": { + ConfigValues: fwmodels.ProviderModel{ + Region: types.StringValue(""), + }, + EnvVariables: map[string]string{ + "GOOGLE_REGION": "region-from-env", + }, + ExpectedDataModelValue: types.StringValue(""), + ExpectedConfigStructValue: types.StringValue(""), + }, + // Handling unknown values + "when region is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + Region: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_REGION": "region-from-env", + }, + ExpectedDataModelValue: types.StringValue("region-from-env"), + ExpectedConfigStructValue: types.StringValue("region-from-env"), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := tc.ConfigValues + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = 
impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if !data.Region.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want region in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.Region.String()) + } + // Checking the value passed to the config structs + if !p.Region.Equal(tc.ExpectedConfigStructValue) { + t.Fatalf("want region in the `FrameworkProviderConfig` struct to be `%s`, but got the value `%s`", tc.ExpectedConfigStructValue, p.Region.String()) + } + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_zone(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under test experiencing errors, and could be addressed in future refactoring. 
+ // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.StringValue + ExpectedConfigStructValue basetypes.StringValue + ExpectError bool + }{ + "zone value set in the provider config is not overridden by ENVs": { + ConfigValues: fwmodels.ProviderModel{ + Zone: types.StringValue("zone-from-config"), + }, + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "zone-from-env", + }, + ExpectedDataModelValue: types.StringValue("zone-from-config"), + ExpectedConfigStructValue: types.StringValue("zone-from-config"), + }, + "does not shorten zone values when provided as a self link": { + ConfigValues: fwmodels.ProviderModel{ + Zone: types.StringValue("https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1"), + }, + ExpectedDataModelValue: types.StringValue("https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1"), + ExpectedConfigStructValue: types.StringValue("https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1"), // Value is not shortened from URI to name + }, + "when multiple zone environment variables are provided, `GOOGLE_ZONE` is used first": { + ConfigValues: fwmodels.ProviderModel{ + Zone: types.StringNull(), + }, + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "zone-from-GOOGLE_ZONE", + "GCLOUD_ZONE": "zone-from-GCLOUD_ZONE", + "CLOUDSDK_COMPUTE_ZONE": "zone-from-CLOUDSDK_COMPUTE_ZONE", + }, + ExpectedDataModelValue: types.StringValue("zone-from-GOOGLE_ZONE"), + ExpectedConfigStructValue: types.StringValue("zone-from-GOOGLE_ZONE"), + }, + "when multiple zone environment variables are provided, `GCLOUD_ZONE` is used second": { + ConfigValues: 
fwmodels.ProviderModel{ + Zone: types.StringNull(), + }, + EnvVariables: map[string]string{ + // GOOGLE_ZONE unset + "GCLOUD_ZONE": "zone-from-GCLOUD_ZONE", + "CLOUDSDK_COMPUTE_ZONE": "zone-from-CLOUDSDK_COMPUTE_ZONE", + }, + ExpectedDataModelValue: types.StringValue("zone-from-GCLOUD_ZONE"), + ExpectedConfigStructValue: types.StringValue("zone-from-GCLOUD_ZONE"), + }, + "when multiple zone environment variables are provided, `CLOUDSDK_COMPUTE_ZONE` is used third": { + ConfigValues: fwmodels.ProviderModel{ + Zone: types.StringNull(), + }, + EnvVariables: map[string]string{ + // GOOGLE_ZONE unset + // GCLOUD_ZONE unset + "CLOUDSDK_COMPUTE_ZONE": "zone-from-CLOUDSDK_COMPUTE_ZONE", + }, + ExpectedDataModelValue: types.StringValue("zone-from-CLOUDSDK_COMPUTE_ZONE"), + ExpectedConfigStructValue: types.StringValue("zone-from-CLOUDSDK_COMPUTE_ZONE"), + }, + "when no zone values are provided via config or environment variables, the field remains unset without error": { + ConfigValues: fwmodels.ProviderModel{ + Zone: types.StringNull(), + }, + ExpectedDataModelValue: types.StringNull(), + ExpectedConfigStructValue: types.StringNull(), + }, + // Handling empty strings in config + "when zone is set as an empty string the empty string is used and not ignored": { + ConfigValues: fwmodels.ProviderModel{ + Zone: types.StringValue(""), + }, + ExpectedDataModelValue: types.StringValue(""), + ExpectedConfigStructValue: types.StringValue(""), + }, + "when zone is set as an empty string, the empty string is not ignored in favor of an environment variable": { + ConfigValues: fwmodels.ProviderModel{ + Zone: types.StringValue(""), + }, + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "zone-from-env", + }, + ExpectedDataModelValue: types.StringValue(""), + ExpectedConfigStructValue: types.StringValue(""), + }, + // Handling unknown values + "when zone is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: 
fwmodels.ProviderModel{ + Zone: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_ZONE": "zone-from-env", + }, + ExpectedDataModelValue: types.StringValue("zone-from-env"), + ExpectedConfigStructValue: types.StringValue("zone-from-env"), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := tc.ConfigValues + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if !data.Zone.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want zone in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.Zone.String()) + } + // Checking the value passed to the config structs + if !p.Zone.Equal(tc.ExpectedConfigStructValue) { + t.Fatalf("want zone in the `FrameworkProviderConfig` struct to be `%s`, but got the value `%s`", tc.ExpectedConfigStructValue, p.Zone.String()) + } + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_accessToken(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's 
fwmodels.ProviderModel value + // this is to stop the code under tests experiencing errors, and could be addressed in future refactoring. + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.StringValue // Sometimes the value is mutated, and no longer matches the original value we supply + // ExpectedConfigStructValue not used here, as credentials info isn't stored in the config struct + ExpectError bool + }{ + "access_token configured in the provider can be invalid without resulting in errors": { + ConfigValues: fwmodels.ProviderModel{ + AccessToken: types.StringValue("This is not a valid token string"), + }, + ExpectedDataModelValue: types.StringValue("This is not a valid token string"), + }, + "access_token set in the provider config is not overridden by environment variables": { + ConfigValues: fwmodels.ProviderModel{ + AccessToken: types.StringValue("value-from-config"), + }, + EnvVariables: map[string]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN": "value-from-env", + }, + ExpectedDataModelValue: types.StringValue("value-from-config"), + }, + "when access_token is unset in the config, the GOOGLE_OAUTH_ACCESS_TOKEN environment variable is used": { + EnvVariables: map[string]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN": "value-from-GOOGLE_OAUTH_ACCESS_TOKEN", + }, + ExpectedDataModelValue: types.StringValue("value-from-GOOGLE_OAUTH_ACCESS_TOKEN"), + }, + "when no access_token values are provided via config or environment variables there's no error (as long as credentials supplied in its absence)": { + ConfigValues: fwmodels.ProviderModel{ + AccessToken: types.StringNull(), + Credentials: types.StringValue(transport_tpg.TestFakeCredentialsPath), + }, + ExpectedDataModelValue: types.StringNull(), + }, + // Handling empty strings in config + "when access_token is set as an empty string the 
empty string is used and not ignored": { + ConfigValues: fwmodels.ProviderModel{ + AccessToken: types.StringValue(""), + }, + ExpectedDataModelValue: types.StringValue(""), + }, + "when access_token is set as an empty string, the empty string is not ignored in favor of an environment variable": { + ConfigValues: fwmodels.ProviderModel{ + AccessToken: types.StringValue(""), + }, + EnvVariables: map[string]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN": "value-from-GOOGLE_OAUTH_ACCESS_TOKEN", + }, + ExpectedDataModelValue: types.StringValue(""), + }, + // Handling unknown values + "when access_token is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + AccessToken: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_OAUTH_ACCESS_TOKEN": "value-from-GOOGLE_OAUTH_ACCESS_TOKEN", + }, + ExpectedDataModelValue: types.StringValue("value-from-GOOGLE_OAUTH_ACCESS_TOKEN"), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := tc.ConfigValues + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if 
!data.AccessToken.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want access_token in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.AccessToken.String()) + } + // fwtransport.FrameworkProviderConfig does not store the credentials info, so test does not make assertions on config struct + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_userProjectOverride(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under tests experiencing errors, and could be addressed in future refactoring. + // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.BoolValue + ExpectedConfigStructValue basetypes.BoolValue + ExpectError bool + }{ + "user_project_override value set in the provider schema is not overridden by ENVs": { + ConfigValues: fwmodels.ProviderModel{ + UserProjectOverride: types.BoolValue(false), + }, + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "true", + }, + ExpectedDataModelValue: types.BoolValue(false), + ExpectedConfigStructValue: types.BoolValue(false), + }, + "user_project_override can be set by environment variable: value = true": { + ConfigValues: fwmodels.ProviderModel{ + UserProjectOverride: types.BoolNull(), // not set + }, + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "true", + }, + ExpectedDataModelValue: types.BoolValue(true), + ExpectedConfigStructValue: types.BoolValue(true), + }, + "user_project_override can be set by environment variable: value = false": { + ConfigValues: fwmodels.ProviderModel{ + 
UserProjectOverride: types.BoolNull(), // not set + }, + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "false", + }, + ExpectedDataModelValue: types.BoolValue(false), + ExpectedConfigStructValue: types.BoolValue(false), + }, + "user_project_override can be set by environment variable: value = 1": { + ConfigValues: fwmodels.ProviderModel{ + UserProjectOverride: types.BoolNull(), // not set + }, + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "1", + }, + ExpectedDataModelValue: types.BoolValue(true), + ExpectedConfigStructValue: types.BoolValue(true), + }, + "user_project_override can be set by environment variable: value = 0": { + ConfigValues: fwmodels.ProviderModel{ + UserProjectOverride: types.BoolNull(), // not set + }, + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "0", + }, + ExpectedDataModelValue: types.BoolValue(false), + ExpectedConfigStructValue: types.BoolValue(false), + }, + "setting user_project_override using a non-boolean environment variables results in an error": { + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "I'm not a boolean", + }, + ExpectError: true, + }, + "when no user_project_override values are provided via config or environment variables, the field remains unset without error": { + ConfigValues: fwmodels.ProviderModel{ + UserProjectOverride: types.BoolNull(), // not set + }, + ExpectedDataModelValue: types.BoolNull(), + ExpectedConfigStructValue: types.BoolNull(), + }, + // Handling unknown values + "when user_project_override is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + UserProjectOverride: types.BoolUnknown(), + }, + EnvVariables: map[string]string{ + "USER_PROJECT_OVERRIDE": "true", + }, + ExpectedDataModelValue: types.BoolValue(true), + ExpectedConfigStructValue: types.BoolValue(true), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + 
acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := tc.ConfigValues + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if !data.UserProjectOverride.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want user_project_override in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.UserProjectOverride.String()) + } + // Checking the value passed to the config structs + if !p.UserProjectOverride.Equal(tc.ExpectedConfigStructValue) { + t.Fatalf("want user_project_override in the `FrameworkProviderConfig` struct to be `%s`, but got the value `%s`", tc.ExpectedConfigStructValue, p.UserProjectOverride.String()) + } + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_impersonateServiceAccount(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under tests experiencing errors, and could be addressed in future refactoring. 
+ // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.StringValue + ExpectedConfigStructValue basetypes.StringValue + ExpectError bool + }{ + "impersonate_service_account value set in the provider schema is not overridden by environment variables": { + ConfigValues: fwmodels.ProviderModel{ + ImpersonateServiceAccount: types.StringValue("value-from-config@example.com"), + }, + EnvVariables: map[string]string{ + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT": "value-from-env@example.com", + }, + ExpectedDataModelValue: types.StringValue("value-from-config@example.com"), + }, + "impersonate_service_account value can be set by environment variable": { + ConfigValues: fwmodels.ProviderModel{ + ImpersonateServiceAccount: types.StringNull(), // not set + }, + EnvVariables: map[string]string{ + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT": "value-from-env@example.com", + }, + ExpectedDataModelValue: types.StringValue("value-from-env@example.com"), + }, + "when no values are provided via config or environment variables, the field remains unset without error": { + ConfigValues: fwmodels.ProviderModel{ + ImpersonateServiceAccount: types.StringNull(), // not set + }, + ExpectedDataModelValue: types.StringNull(), + }, + // Handling empty strings in config + "when impersonate_service_account is set as an empty string the empty string is used and not ignored": { + ConfigValues: fwmodels.ProviderModel{ + ImpersonateServiceAccount: types.StringValue(""), + }, + ExpectedDataModelValue: types.StringValue(""), + }, + "when impersonate_service_account is set as an empty string, the empty string is not ignored in favor of an environment variable": { + ConfigValues: 
fwmodels.ProviderModel{ + ImpersonateServiceAccount: types.StringValue(""), + }, + EnvVariables: map[string]string{ + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT": "value-from-env@example.com", + }, + ExpectedDataModelValue: types.StringValue(""), + }, + // Handling unknown values + "when impersonate_service_account is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + ImpersonateServiceAccount: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "GOOGLE_IMPERSONATE_SERVICE_ACCOUNT": "value-from-env@example.com", + }, + ExpectedDataModelValue: types.StringValue("value-from-env@example.com"), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := tc.ConfigValues + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if !data.ImpersonateServiceAccount.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want impersonate_service_account in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", 
tc.ExpectedDataModelValue, data.ImpersonateServiceAccount.String()) + } + // fwtransport.FrameworkProviderConfig does not store impersonate_service_account info, so test does not make assertions on config struct + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_impersonateServiceAccountDelegates(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under tests experiencing errors, and could be addressed in future refactoring. + // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + + cases := map[string]struct { + // It's not easy to define basetypes.ListValue values directly in test case, so instead + // pass values into test function to control construction of basetypes.ListValue there. + SetAsNull bool + SetAsUnknown bool + ImpersonateServiceAccountDelegatesValue []string + EnvVariables map[string]string + + ExpectedNull bool + ExpectedUnknown bool + ExpectedDataModelValue []string + ExpectError bool + }{ + "impersonate_service_account_delegates value can be set in the provider schema": { + ImpersonateServiceAccountDelegatesValue: []string{ + "projects/-/serviceAccounts/my-service-account-1@example.iam.gserviceaccount.com", + "projects/-/serviceAccounts/my-service-account-2@example.iam.gserviceaccount.com", + }, + ExpectedDataModelValue: []string{ + "projects/-/serviceAccounts/my-service-account-1@example.iam.gserviceaccount.com", + "projects/-/serviceAccounts/my-service-account-2@example.iam.gserviceaccount.com", + }, + }, + // Note: no environment variables can be used for impersonate_service_account_delegates + "when no impersonate_service_account_delegates value is provided via config, the field remains unset without error": { + SetAsNull: true, // not setting impersonate_service_account_delegates + ExpectedNull: true, + }, + // Handling empty 
values in config + "when impersonate_service_account_delegates is set as an empty array, that value isn't ignored": { + ImpersonateServiceAccountDelegatesValue: []string{}, + ExpectedDataModelValue: []string{}, + }, + // Handling unknown values + "when impersonate_service_account_delegates is an unknown value, the provider treats it as if it's unset, without error": { + SetAsUnknown: true, + ExpectedUnknown: true, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := fwmodels.ProviderModel{} + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + // Set ImpersonateServiceAccountDelegates depending on test case + if !tc.SetAsNull && !tc.SetAsUnknown { + isad, _ := types.ListValueFrom(ctx, types.StringType, tc.ImpersonateServiceAccountDelegatesValue) + data.ImpersonateServiceAccountDelegates = isad + } + if tc.SetAsNull { + data.ImpersonateServiceAccountDelegates = types.ListNull(types.StringType) + } + if tc.SetAsUnknown { + data.ImpersonateServiceAccountDelegates = types.ListUnknown(types.StringType) + } + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + var expected attr.Value + if !tc.ExpectedNull && !tc.ExpectedUnknown { + expected, _ = types.ListValueFrom(ctx, types.StringType, tc.ExpectedDataModelValue) + } + if tc.ExpectedNull { + expected = 
types.ListNull(types.StringType) + } + if tc.ExpectedUnknown { + expected = types.ListUnknown(types.StringType) + } + if !data.ImpersonateServiceAccountDelegates.Equal(expected) { + t.Fatalf("want impersonate_service_account in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", expected, data.ImpersonateServiceAccountDelegates.String()) + } + // fwtransport.FrameworkProviderConfig does not store impersonate_service_account info, so test does not make assertions on config struct + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_scopes(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under tests experiencing errors, and could be addressed in future refactoring. + // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ScopesValue []string + EnvVariables map[string]string + ExpectedDataModelValue []string + ExpectedConfigStructValue []string + SetAsNull bool + SetAsUnknown bool + ExpectError bool + }{ + "scopes are set in the provider config as a list": { + ScopesValue: []string{"fizz", "buzz", "baz"}, + ExpectedDataModelValue: []string{"fizz", "buzz", "baz"}, + ExpectedConfigStructValue: []string{"fizz", "buzz", "baz"}, + }, + "scopes can be left unset in the provider config without any issues, and a default value is used": { + SetAsNull: true, + ExpectedDataModelValue: transport_tpg.DefaultClientScopes, + ExpectedConfigStructValue: transport_tpg.DefaultClientScopes, + }, + // Handling empty values in config + "scopes set as an empty list the field is treated as if it's unset and a default value is used without errors": { + ScopesValue: []string{}, + ExpectedDataModelValue: 
transport_tpg.DefaultClientScopes, + ExpectedConfigStructValue: transport_tpg.DefaultClientScopes, + }, + // Handling unknown values + "when scopes is an unknown value, the provider treats it as if it's unset and a default value is used without errors": { + SetAsUnknown: true, + ExpectedDataModelValue: transport_tpg.DefaultClientScopes, + ExpectedConfigStructValue: transport_tpg.DefaultClientScopes, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := fwmodels.ProviderModel{} + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + // Set ImpersonateServiceAccountDelegates depending on test case + if !tc.SetAsNull && !tc.SetAsUnknown { + s, _ := types.ListValueFrom(ctx, types.StringType, tc.ScopesValue) + data.Scopes = s + } + if tc.SetAsNull { + data.Scopes = types.ListNull(types.StringType) + } + if tc.SetAsUnknown { + data.Scopes = types.ListUnknown(types.StringType) + } + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + expectedDm, _ := types.ListValueFrom(ctx, types.StringType, tc.ExpectedDataModelValue) + if !data.Scopes.Equal(expectedDm) { + t.Fatalf("want 
scopes in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.Scopes.String()) + } + // Checking the value passed to the config structs + expectedFpc, _ := types.ListValueFrom(ctx, types.StringType, tc.ExpectedConfigStructValue) + if !p.Scopes.Equal(expectedFpc) { + t.Fatalf("want scopes in the `FrameworkProviderConfig` struct to be `%s`, but got the value `%s`", tc.ExpectedConfigStructValue, p.Scopes.String()) + } + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_requestReason(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under tests experiencing errors, and could be addressed in future refactoring. + // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.StringValue + // ExpectedConfigStructValue not used here, as credentials info isn't stored in the config struct + ExpectError bool + }{ + "when request_reason is unset in the config, environment variable CLOUDSDK_CORE_REQUEST_REASON is used": { + ConfigValues: fwmodels.ProviderModel{ + RequestReason: types.StringNull(), + }, + EnvVariables: map[string]string{ + "CLOUDSDK_CORE_REQUEST_REASON": "foo", + }, + ExpectedDataModelValue: types.StringValue("foo"), + }, + "request_reason set in the config is not overridden by environment variables": { + ConfigValues: fwmodels.ProviderModel{ + RequestReason: types.StringValue("value-from-config"), + }, + EnvVariables: map[string]string{ + "CLOUDSDK_CORE_REQUEST_REASON": "value-from-env", + }, + ExpectedDataModelValue: types.StringValue("value-from-config"), + },
+ "when no request_reason is provided via config or environment variables, the field remains unset without error": { + ConfigValues: fwmodels.ProviderModel{ + RequestReason: types.StringNull(), + }, + ExpectedDataModelValue: types.StringNull(), + }, + // Handling empty strings in config + "when request_reason is set as an empty string, the empty string is not ignored in favor of an environment variable": { + ConfigValues: fwmodels.ProviderModel{ + RequestReason: types.StringValue(""), + }, + EnvVariables: map[string]string{ + "CLOUDSDK_CORE_REQUEST_REASON": "foo", + }, + ExpectedDataModelValue: types.StringValue(""), + }, + "when request_reason is set as an empty string the empty string is used and not ignored": { + ConfigValues: fwmodels.ProviderModel{ + RequestReason: types.StringValue(""), + }, + ExpectedDataModelValue: types.StringValue(""), + }, + // Handling unknown values + "when request_reason is an unknown value, the provider treats it as if it's unset and uses an environment variable instead": { + ConfigValues: fwmodels.ProviderModel{ + RequestReason: types.StringUnknown(), + }, + EnvVariables: map[string]string{ + "CLOUDSDK_CORE_REQUEST_REASON": "foo", + }, + ExpectedDataModelValue: types.StringValue("foo"), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := tc.ConfigValues + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && 
tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if !data.RequestReason.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want request_reason in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.RequestReason.String()) + } + // fwtransport.FrameworkProviderConfig does not store the request reason info, so test does not make assertions on config struct + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_requestTimeout(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under tests experiencing errors, and could be addressed in future refactoring. 
+ // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + ConfigValues fwmodels.ProviderModel + EnvVariables map[string]string + ExpectedDataModelValue basetypes.StringValue + // ExpectedConfigStructValue not used here, as credentials info isn't stored in the config struct + ExpectError bool + }{ + "if a valid request_timeout is configured in the provider, no error will occur": { + ConfigValues: fwmodels.ProviderModel{ + RequestTimeout: types.StringValue("10s"), + }, + ExpectedDataModelValue: types.StringValue("10s"), + }, + "if an invalid request_timeout is configured in the provider, an error will occur": { + ConfigValues: fwmodels.ProviderModel{ + RequestTimeout: types.StringValue("timeout"), + }, + ExpectError: true, + }, + "when request_timeout is set as an empty string, the empty string isn't ignored and an error will occur": { + ConfigValues: fwmodels.ProviderModel{ + RequestTimeout: types.StringValue(""), + }, + ExpectError: true, + }, + // In the SDK version of the provider config code, this scenario results in a value of "0s" + // instead of "120s", but the final 'effective' value is also "120s" + // See : https://github.com/hashicorp/terraform-provider-google/blob/09cb850ee64bcd78e4457df70905530c1ed75f19/google/transport/config.go#L1228-L1233 + "when request_timeout is unset in the config, the default value is 120s.": { + ConfigValues: fwmodels.ProviderModel{ + RequestTimeout: types.StringNull(), + }, + ExpectedDataModelValue: types.StringValue("120s"), + }, + // Handling unknown values + "when request_timeout is an unknown value, the provider treats it as if it's unset and uses the default value 120s": { + ConfigValues: fwmodels.ProviderModel{ + RequestTimeout: types.StringUnknown(), + }, + ExpectedDataModelValue: 
types.StringValue("120s"), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags := diag.Diagnostics{} + + data := tc.ConfigValues + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s : %s", num, err.Summary(), err.Detail()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if !data.RequestTimeout.Equal(tc.ExpectedDataModelValue) { + t.Fatalf("want request_timeout in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectedDataModelValue, data.RequestTimeout.String()) + } + // fwtransport.FrameworkProviderConfig does not store the request timeout info, so test does not make assertions on config struct + }) + } +} + +func TestFrameworkProvider_LoadAndValidateFramework_batching(t *testing.T) { + + // Note: In the test function we need to set the below fields in test case's fwmodels.ProviderModel value + // this is to stop the code under tests experiencing errors, and could be addressed in future refactoring. 
+ // - Credentials: If we don't set this then the test looks for application default credentials and can fail depending on the machine running the test + // - ImpersonateServiceAccountDelegates: If we don't set this, we get a nil pointer exception ¯\_(ツ)_/¯ + + cases := map[string]struct { + // It's not easy to create the value of Batching in the test case, so these inputs are used in the test function + SetBatchingAsNull bool + SetBatchingAsUnknown bool + EnableBatchingValue basetypes.BoolValue + SendAfterValue basetypes.StringValue + + EnvVariables map[string]string + + ExpectBatchingNull bool + ExpectBatchingUnknown bool + ExpectEnableBatchingValue basetypes.BoolValue + ExpectSendAfterValue basetypes.StringValue + ExpectError bool + }{ + "batching can be configured with values for enable_batching and send_after": { + EnableBatchingValue: types.BoolValue(true), + SendAfterValue: types.StringValue("45s"), + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("45s"), + }, + "if batching is an empty block, it will set the default values for enable_batching and send_after": { + // In this test, we try to create a list containing only null values + EnableBatchingValue: types.BoolNull(), + SendAfterValue: types.StringNull(), + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("10s"), + }, + "when batching is configured with only enable_batching, send_after will be set to a default value": { + EnableBatchingValue: types.BoolValue(true), + SendAfterValue: types.StringNull(), + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("10s"), + }, + "when batching is configured with only send_after, enable_batching will be set to a default value": { + EnableBatchingValue: types.BoolNull(), + SendAfterValue: types.StringValue("45s"), + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("45s"), + }, + "when the whole 
batching block is a null value, the provider provides default values for send_after and enable_batching": { + SetBatchingAsNull: true, + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("3s"), + }, + // Handling unknown values + "when batching is an unknown value, the provider treats it as if it's unset (align to SDK behaviour)": { + SetBatchingAsUnknown: true, + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("3s"), + }, + "when batching is configured with send_after as an unknown value, send_after will be set to a default value": { + EnableBatchingValue: types.BoolValue(true), + SendAfterValue: types.StringUnknown(), + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("10s"), + }, + "when batching is configured with enable_batching as an unknown value, enable_batching will be set to a default value": { + EnableBatchingValue: types.BoolUnknown(), + SendAfterValue: types.StringValue("45s"), + ExpectEnableBatchingValue: types.BoolValue(true), + ExpectSendAfterValue: types.StringValue("45s"), + }, + // Error states + "when batching is configured with send_after as an empty string, the empty string is not ignored and results in an error": { + EnableBatchingValue: types.BoolValue(true), + SendAfterValue: types.StringValue(""), + ExpectError: true, + }, + "if batching is configured with send_after as an invalid value, there's an error": { + SendAfterValue: types.StringValue("invalid value"), + ExpectError: true, + }, + "if batching is configured with send_after as number value without seconds (s), there's an error": { + SendAfterValue: types.StringValue("123"), + ExpectError: true, + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + // Arrange + acctest.UnsetTestProviderConfigEnvs(t) + acctest.SetupTestEnvs(t, tc.EnvVariables) + + ctx := context.Background() + tfVersion := "foobar" + providerversion := "999" + diags 
:= diag.Diagnostics{} + + data := fwmodels.ProviderModel{} + data.Credentials = types.StringValue(transport_tpg.TestFakeCredentialsPath) + impersonateServiceAccountDelegates, _ := types.ListValue(types.StringType, []attr.Value{}) // empty list + data.ImpersonateServiceAccountDelegates = impersonateServiceAccountDelegates + + // TODO(SarahFrench) - this code will change when batching is reworked + // See https://github.com/GoogleCloudPlatform/magic-modules/pull/7668 + if !tc.SetBatchingAsNull && !tc.SetBatchingAsUnknown { + b, _ := types.ObjectValue( + map[string]attr.Type{ + "enable_batching": types.BoolType, + "send_after": types.StringType, + }, + map[string]attr.Value{ + "enable_batching": tc.EnableBatchingValue, + "send_after": tc.SendAfterValue, + }, + ) + batching, _ := types.ListValue(types.ObjectType{}.WithAttributeTypes(fwmodels.ProviderBatchingAttributes), []attr.Value{b}) + data.Batching = batching + } + if tc.SetBatchingAsNull { + data.Batching = types.ListNull(types.ObjectType{}.WithAttributeTypes(fwmodels.ProviderBatchingAttributes)) + } + if tc.SetBatchingAsUnknown { + data.Batching = types.ListUnknown(types.ObjectType{}.WithAttributeTypes(fwmodels.ProviderBatchingAttributes)) + } + + p := fwtransport.FrameworkProviderConfig{} + + // Act + p.LoadAndValidateFramework(ctx, &data, tfVersion, &diags, providerversion) + + // Assert + if diags.HasError() && tc.ExpectError { + return + } + if diags.HasError() && !tc.ExpectError { + for i, err := range diags.Errors() { + num := i + 1 + t.Logf("unexpected error #%d : %s", num, err.Summary()) + } + t.Fatalf("did not expect error, but [%d] error(s) occurred", diags.ErrorsCount()) + } + // Checking mutation of the data model + if !data.Batching.IsNull() && tc.ExpectBatchingNull { + t.Fatalf("want batching in the `fwmodels.ProviderModel` struct to be null, but got the value `%s`", data.Batching.String()) + } + if !data.Batching.IsUnknown() && tc.ExpectBatchingUnknown { + t.Fatalf("want batching in the 
`fwmodels.ProviderModel` struct to be unknown, but got the value `%s`", data.Batching.String()) + } + + // The code doesn't mutate values in the fwmodels.ProviderModel struct if the whole batching block is null/unknown, + // so run these checks below only if we're not setting the whole batching block is null/unknown + if !tc.SetBatchingAsNull && !tc.SetBatchingAsUnknown { + var pbConfigs []fwmodels.ProviderBatching + _ = data.Batching.ElementsAs(ctx, &pbConfigs, true) + if !pbConfigs[0].EnableBatching.Equal(tc.ExpectEnableBatchingValue) { + t.Fatalf("want batching.enable_batching in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectEnableBatchingValue.String(), pbConfigs[0].EnableBatching.String()) + } + if !pbConfigs[0].SendAfter.Equal(tc.ExpectSendAfterValue) { + t.Fatalf("want batching.send_after in the `fwmodels.ProviderModel` struct to be `%s`, but got the value `%s`", tc.ExpectSendAfterValue.String(), pbConfigs[0].SendAfter.String()) + } + } + + // Check how the batching block's values are used to configure other parts of the `FrameworkProviderConfig` struct + // - RequestBatcherServiceUsage + // - RequestBatcherIam + if p.RequestBatcherServiceUsage.BatchingConfig.EnableBatching != tc.ExpectEnableBatchingValue.ValueBool() { + t.Fatalf("want batching.enable_batching to be `%s`, but got the value `%v`", tc.ExpectEnableBatchingValue.String(), p.RequestBatcherServiceUsage.BatchingConfig.EnableBatching) + } + if !types.StringValue(p.RequestBatcherServiceUsage.BatchingConfig.SendAfter.String()).Equal(tc.ExpectSendAfterValue) { + t.Fatalf("want batching.send_after to be `%s`, but got the value `%s`", tc.ExpectSendAfterValue.String(), p.RequestBatcherServiceUsage.BatchingConfig.SendAfter.String()) + } + if p.RequestBatcherIam.BatchingConfig.EnableBatching != tc.ExpectEnableBatchingValue.ValueBool() { + t.Fatalf("want batching.enable_batching to be `%s`, but got the value `%v`", tc.ExpectEnableBatchingValue.String(), 
p.RequestBatcherIam.BatchingConfig.EnableBatching) + } + if !types.StringValue(p.RequestBatcherIam.BatchingConfig.SendAfter.String()).Equal(tc.ExpectSendAfterValue) { + t.Fatalf("want batching.send_after to be `%s`, but got the value `%s`", tc.ExpectSendAfterValue.String(), p.RequestBatcherIam.BatchingConfig.SendAfter.String()) + } + }) + } +} + +func TestGetRegionFromRegionSelfLink(t *testing.T) { + cases := map[string]struct { + Input basetypes.StringValue + ExpectedOutput basetypes.StringValue + }{ + "A short region name is returned unchanged": { + Input: types.StringValue("us-central1"), + ExpectedOutput: types.StringValue("us-central1"), + }, + "A selflink is shortened to a region name": { + Input: types.StringValue("https://www.googleapis.com/compute/v1/projects/my-project/regions/us-central1"), + ExpectedOutput: types.StringValue("us-central1"), + }, + "Logic is specific to region selflinks; zone selflinks are not shortened": { + Input: types.StringValue("https://www.googleapis.com/compute/v1/projects/my-project/zones/asia-east1-a"), + ExpectedOutput: types.StringValue("https://www.googleapis.com/compute/v1/projects/my-project/zones/asia-east1-a"), + }, + } + + for tn, tc := range cases { + t.Run(tn, func(t *testing.T) { + + region := fwtransport.GetRegionFromRegionSelfLink(tc.Input) + + if region != tc.ExpectedOutput { + t.Fatalf("want %s, got %s", region, tc.ExpectedOutput) + } + }) + } +} diff --git a/mmv1/third_party/terraform/fwtransport/go/framework_provider_clients.go.tmpl b/mmv1/third_party/terraform/fwtransport/go/framework_provider_clients.go.tmpl new file mode 100644 index 000000000000..1001fa557202 --- /dev/null +++ b/mmv1/third_party/terraform/fwtransport/go/framework_provider_clients.go.tmpl @@ -0,0 +1,55 @@ +package fwtransport + +import ( + "fmt" + "strings" + + "google.golang.org/api/dns/v1" +{{- if ne $.TargetVersionName "ga" }} + firebase "google.golang.org/api/firebase/v1beta1" +{{- end }} + "google.golang.org/api/option" + + 
"github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +// Methods to create new services from config +// Some base paths below need the version and possibly more of the path +// set on them. The client libraries are inconsistent about which values they need; +// while most only want the host URL, some older ones also want the version and some +// of those "projects" as well. You can find out if this is required by looking at +// the basePath value in the client library file. + +func (p *FrameworkProviderConfig) NewDnsClient(userAgent string, diags *diag.Diagnostics) *dns.Service { + dnsClientBasePath := transport_tpg.RemoveBasePathVersion(p.DNSBasePath) + dnsClientBasePath = strings.ReplaceAll(dnsClientBasePath, "/dns/", "") + tflog.Info(p.Context, fmt.Sprintf("Instantiating Google Cloud DNS client for path %s", dnsClientBasePath)) + clientDns, err := dns.NewService(p.Context, option.WithHTTPClient(p.Client)) + if err != nil { + diags.AddWarning("error creating client dns", err.Error()) + return nil + } + clientDns.UserAgent = userAgent + clientDns.BasePath = dnsClientBasePath + + return clientDns +} + +{{ if ne $.TargetVersionName `ga` -}} +func (p *FrameworkProviderConfig) NewFirebaseClient(userAgent string, diags *diag.Diagnostics) *firebase.Service { + firebaseClientBasePath := transport_tpg.RemoveBasePathVersion(p.FirebaseBasePath) + firebaseClientBasePath = strings.ReplaceAll(firebaseClientBasePath, "/firebase/", "") + tflog.Info(p.Context, fmt.Sprintf("Instantiating Google Cloud firebase client for path %s", firebaseClientBasePath)) + clientFirebase, err := firebase.NewService(p.Context, option.WithHTTPClient(p.Client)) + if err != nil { + diags.AddWarning("error creating client firebase", err.Error()) + return nil + } + clientFirebase.UserAgent = userAgent + clientFirebase.BasePath = firebaseClientBasePath + + return 
clientFirebase +} +{{- end }} diff --git a/mmv1/third_party/terraform/.copywrite.hcl.tmpl b/mmv1/third_party/terraform/go/.copywrite.hcl.tmpl similarity index 92% rename from mmv1/third_party/terraform/.copywrite.hcl.tmpl rename to mmv1/third_party/terraform/go/.copywrite.hcl.tmpl index 693bde6062ee..601fe64bb238 100644 --- a/mmv1/third_party/terraform/.copywrite.hcl.tmpl +++ b/mmv1/third_party/terraform/go/.copywrite.hcl.tmpl @@ -16,11 +16,11 @@ project { ".changelog/**", "examples/**", "scripts/**", -{{- if or (eq $.TargetVersionName "") (eq $.TargetVersionName "ga")}} +{{- if or (eq $.TargetVersionName "") (eq $.TargetVersionName "ga") }} "google/**/test-fixtures/**", -{{- else}} - "google-{{$.TargetVersionName}}/**/test-fixtures/**", -{{- end}} +{{- else }} + "google-{{ $.TargetVersionName }}/**/test-fixtures/**", +{{- end }} "META.d/*.yml", "META.d/*.yaml", ".golangci.yml", diff --git a/mmv1/third_party/terraform/.goreleaser.yml.tmpl b/mmv1/third_party/terraform/go/.goreleaser.yml.tmpl similarity index 59% rename from mmv1/third_party/terraform/.goreleaser.yml.tmpl rename to mmv1/third_party/terraform/go/.goreleaser.yml.tmpl index 9850819521ea..99665377d314 100644 --- a/mmv1/third_party/terraform/.goreleaser.yml.tmpl +++ b/mmv1/third_party/terraform/go/.goreleaser.yml.tmpl @@ -4,10 +4,10 @@ archives: - src: 'LICENSE' dst: 'LICENSE.txt' format: zip - name_template: '{{"{{ .ProjectName }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"}}' + name_template: '{{"{{"}} .ProjectName {{"}}"}}_{{"{{"}} .Version {{"}}"}}_{{"{{"}} .Os {{"}}"}}_{{"{{"}} .Arch {{"}}"}}' builds: - # Special binary naming is only necessary for Terraform CLI 0.12 - binary: '{{"{{ .ProjectName }}_v{{ .Version }}"}}_x5' + binary: '{{"{{"}} .ProjectName {{"}}"}}_v{{"{{"}} .Version {{"}}"}}_x5' env: - CGO_ENABLED=0 flags: @@ -30,29 +30,29 @@ builds: - goarch: arm64 goos: windows ldflags: - - -s -w -X github.com/hashicorp/terraform-provider-google{{- if ne $.TargetVersionName "ga" 
-}}-{{$.TargetVersionName}}{{- end }}/version.ProviderVersion={{"{{.Version}}"}} - mod_timestamp: '{{"{{ .CommitTimestamp }}"}}' + - -s -w -X github.com/hashicorp/terraform-provider-google{{- if ne $.TargetVersionName "ga" -}}-{{$.TargetVersionName}}{{- end }}/version.ProviderVersion={{"{{"}}.Version{{"}}"}} + mod_timestamp: '{{"{{"}} .CommitTimestamp {{"}}"}}' checksum: extra_files: - glob: 'terraform-registry-manifest.json' - name_template: '{{"{{ .ProjectName }}_{{ .Version }}"}}_manifest.json' - name_template: '{{"{{ .ProjectName }}_{{ .Version }}"}}_SHA256SUMS' + name_template: '{{"{{"}} .ProjectName {{"}}"}}_{{"{{"}} .Version {{"}}"}}_manifest.json' + name_template: '{{"{{"}} .ProjectName {{"}}"}}_{{"{{"}} .Version {{"}}"}}_SHA256SUMS' algorithm: sha256 publishers: - name: upload checksum: true extra_files: - glob: 'terraform-registry-manifest.json' - name_template: '{{"{{ .ProjectName }}_{{ .Version }}"}}_manifest.json' + name_template: '{{"{{"}} .ProjectName {{"}}"}}_{{"{{"}} .Version {{"}}"}}_manifest.json' signature: true - cmd: hc-releases upload -product {{"{{ .ProjectName }} -version {{ .Version }} -file={{ .ArtifactPath }}={{ .ArtifactName }}"}} -header="x-terraform-protocol-version=5.0" -header="x-terraform-protocol-versions=5.0" + cmd: hc-releases upload -product {{"{{"}} .ProjectName {{"}}"}} -version {{"{{"}} .Version {{"}}"}} -file={{"{{"}} .ArtifactPath {{"}}"}}={{"{{"}} .ArtifactName {{"}}"}} -header="x-terraform-protocol-version=5.0" -header="x-terraform-protocol-versions=5.0" env: - - HC_RELEASES_HOST={{"{{ .Env.HC_RELEASES_HOST }}"}} - - HC_RELEASES_KEY={{"{{ .Env.HC_RELEASES_KEY }}"}} + - HC_RELEASES_HOST={{"{{"}} .Env.HC_RELEASES_HOST {{"}}"}} + - HC_RELEASES_KEY={{"{{"}} .Env.HC_RELEASES_KEY {{"}}"}} release: extra_files: - glob: 'terraform-registry-manifest.json' - name_template: '{{"{{ .ProjectName }}_{{ .Version }}"}}_manifest.json' + name_template: '{{"{{"}} .ProjectName {{"}}"}}_{{"{{"}} .Version {{"}}"}}_manifest.json' ids: - none 
signs: @@ -81,4 +81,4 @@ signs: --out ${signature} artifacts: checksum snapshot: - name_template: "{{"{{ .Tag }}"}}-next" + name_template: "{{"{{"}} .Tag {{"}}"}}-next" diff --git a/mmv1/third_party/terraform/go.mod b/mmv1/third_party/terraform/go/go.mod similarity index 100% rename from mmv1/third_party/terraform/go.mod rename to mmv1/third_party/terraform/go/go.mod diff --git a/mmv1/third_party/terraform/main.go.tmpl b/mmv1/third_party/terraform/go/main.go.tmpl similarity index 95% rename from mmv1/third_party/terraform/main.go.tmpl rename to mmv1/third_party/terraform/go/main.go.tmpl index 70e1fa734bc3..fed8e948f027 100644 --- a/mmv1/third_party/terraform/main.go.tmpl +++ b/mmv1/third_party/terraform/go/main.go.tmpl @@ -33,7 +33,7 @@ func main() { // concat with sdkv2 provider providers := []func() tfprotov5.ProviderServer{ providerserver.NewProtocol5(fwprovider.New(version)), // framework provider - provider.Provider().GRPCProvider, // sdk provider + provider.Provider().GRPCProvider, // sdk provider } // use the muxer diff --git a/mmv1/third_party/terraform/release-metadata.hcl.tmpl b/mmv1/third_party/terraform/go/release-metadata.hcl.tmpl similarity index 100% rename from mmv1/third_party/terraform/release-metadata.hcl.tmpl rename to mmv1/third_party/terraform/go/release-metadata.hcl.tmpl diff --git a/mmv1/third_party/terraform/terraform-registry-manifest.json b/mmv1/third_party/terraform/go/terraform-registry-manifest.json similarity index 100% rename from mmv1/third_party/terraform/terraform-registry-manifest.json rename to mmv1/third_party/terraform/go/terraform-registry-manifest.json diff --git a/mmv1/third_party/terraform/provider/go/provider_test.go.tmpl b/mmv1/third_party/terraform/provider/go/provider_test.go.tmpl new file mode 100644 index 000000000000..13fc3693271d --- /dev/null +++ b/mmv1/third_party/terraform/provider/go/provider_test.go.tmpl @@ -0,0 +1,500 @@ +package provider_test + +import ( + "fmt" + "regexp" + "strings" + "testing" + + 
"github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/provider" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestProvider(t *testing.T) { + if err := provider.Provider().InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ *schema.Provider = provider.Provider() +} + +func TestProvider_noDuplicatesInResourceMap(t *testing.T) { + _, err := provider.ResourceMapWithErrors() + if err != nil { + t.Error(err) + } +} + +func TestProvider_noDuplicatesInDatasourceMap(t *testing.T) { + _, err := provider.DatasourceMapWithErrors() + if err != nil { + t.Error(err) + } +} + +func TestAccProviderBasePath_setBasePath(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeAddressDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccProviderBasePath_setBasePath("https://www.googleapis.com/compute/beta/", acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_address.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccProviderBasePath_setInvalidBasePath(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeAddressDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccProviderBasePath_setBasePath("https://www.example.com/compute/beta/", acctest.RandString(t, 10)), + ExpectError: regexp.MustCompile("got HTTP response code 404 with body"), + }, + }, + }) +} + +func TestAccProviderMeta_setModuleName(t *testing.T) { + t.Parallel() + + moduleName := "my-module" + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeAddressDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccProviderMeta_setModuleName(moduleName, acctest.RandString(t, 10)), + }, + { + ResourceName: "google_compute_address.default", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccProviderUserProjectOverride(t *testing.T) { + // Parallel fine-grained resource creation + acctest.SkipIfVcr(t) + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + billing := envvar.GetTestBillingAccountFromEnv(t) + pid := "tf-test-" + acctest.RandString(t, 10) + topicName := "tf-test-topic-" + acctest.RandString(t, 10) + + config := acctest.BootstrapConfig(t) + accessToken, err := acctest.SetupProjectsAndGetAccessToken(org, billing, pid, "pubsub", config) + if err != nil { + t.Error(err) + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + // No TestDestroy since that's not really the point of this test + Steps: []resource.TestStep{ + { + Config: testAccProviderUserProjectOverride_step2(accessToken, pid, false, topicName), + ExpectError: regexp.MustCompile("Cloud Pub/Sub API has not been used"), + }, + { + Config: testAccProviderUserProjectOverride_step2(accessToken, pid, true, topicName), + }, + { + ResourceName: "google_pubsub_topic.project-2-topic", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: 
testAccProviderUserProjectOverride_step3(accessToken, true), + }, + }, + }) +} + +// Do the same thing as TestAccProviderUserProjectOverride, but using a resource that gets its project via +// a reference to a different resource instead of a project field. +func TestAccProviderIndirectUserProjectOverride(t *testing.T) { + // Parallel fine-grained resource creation + acctest.SkipIfVcr(t) + t.Parallel() + + org := envvar.GetTestOrgFromEnv(t) + billing := envvar.GetTestBillingAccountFromEnv(t) + pid := "tf-test-" + acctest.RandString(t, 10) + + config := acctest.BootstrapConfig(t) + accessToken, err := acctest.SetupProjectsAndGetAccessToken(org, billing, pid, "cloudkms", config) + if err != nil { + t.Error(err) + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + // No TestDestroy since that's not really the point of this test + Steps: []resource.TestStep{ + { + Config: testAccProviderIndirectUserProjectOverride_step2(pid, accessToken, false), + ExpectError: regexp.MustCompile(`Cloud Key Management Service \(KMS\) API has not been used`), + }, + { + Config: testAccProviderIndirectUserProjectOverride_step2(pid, accessToken, true), + }, + { + ResourceName: "google_kms_crypto_key.project-2-key", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccProviderIndirectUserProjectOverride_step3(accessToken, true), + }, + }, + }) +} + +func TestAccProviderCredentialsEmptyString(t *testing.T) { + // Test is not parallel because ENVs are set. 
+ // Need to skip VCR as this test downloads providers from the Terraform Registry + acctest.SkipIfVcr(t) + + creds := envvar.GetTestCredsFromEnv() + project := envvar.GetTestProjectFromEnv() + t.Setenv("GOOGLE_CREDENTIALS", creds) + t.Setenv("GOOGLE_PROJECT", project) + + pid := "tf-test-" + acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + // No TestDestroy since that's not really the point of this test + Steps: []resource.TestStep{ + { + // This is a control for the other test steps; the provider block doesn't contain `credentials = ""` + Config: testAccProviderCredentials_actWithCredsFromEnv(pid), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + PlanOnly: true, + ExpectNonEmptyPlan: true, + }, + { + // Assert that errors are expected with credentials when + // - GOOGLE_CREDENTIALS is set + // - provider block has credentials = "" + // - TPG v4.60.2 is used + // Context: this was an addidental breaking change introduced with muxing + Config: testAccProviderCredentials_actWithCredsFromEnv_emptyString(pid), + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.60.2", + Source: "hashicorp/google", + }, + }, + PlanOnly: true, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`unexpected end of JSON input`), + }, + { + // Assert that errors are NOT expected with credentials when + // - GOOGLE_CREDENTIALS is set + // - provider block has credentials = "" + // - TPG v4.84.0 is used + // Context: this was the fix for the unintended breaking change in 4.60.2 + Config: testAccProviderCredentials_actWithCredsFromEnv_emptyString(pid), + ExternalProviders: map[string]resource.ExternalProvider{ + "google": { + VersionConstraint: "4.84.0", + Source: "hashicorp/google", + }, + }, + PlanOnly: true, + ExpectNonEmptyPlan: true, + }, + { + // Validation errors are expected in 5.0.0+ + // Context: we intentionally introduced the breaking 
change again in 5.0.0+ + Config: testAccProviderCredentials_actWithCredsFromEnv_emptyString(pid), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + PlanOnly: true, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`expected a non-empty string`), + }, + }, + }) +} + +func TestAccProviderEmptyStrings(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + // No TestDestroy since that's not really the point of this test + Steps: []resource.TestStep{ + // When no values are set in the provider block there are no errors + // This test case is a control to show validation doesn't accidentally flag unset fields + // The "" argument is a lack of key = value being passed into the provider block + { + Config: testAccProvider_checkPlanTimeErrors("", acctest.RandString(t, 10)), + PlanOnly: true, + ExpectNonEmptyPlan: true, + }, + // credentials as an empty string causes a validation error + { + Config: testAccProvider_checkPlanTimeErrors(`credentials = ""`, acctest.RandString(t, 10)), + PlanOnly: true, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`expected a non-empty string`), + }, + // access_token as an empty string causes a validation error + { + Config: testAccProvider_checkPlanTimeErrors(`access_token = ""`, acctest.RandString(t, 10)), + PlanOnly: true, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`expected a non-empty string`), + }, + // impersonate_service_account as an empty string causes a validation error + { + Config: testAccProvider_checkPlanTimeErrors(`impersonate_service_account = ""`, acctest.RandString(t, 10)), + PlanOnly: true, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`expected a non-empty string`), + }, + // project as an empty string causes a validation error + { + Config: testAccProvider_checkPlanTimeErrors(`project = ""`, 
acctest.RandString(t, 10)), + PlanOnly: true, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`expected a non-empty string`), + }, + // billing_project as an empty string causes a validation error + { + Config: testAccProvider_checkPlanTimeErrors(`billing_project = ""`, acctest.RandString(t, 10)), + PlanOnly: true, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`expected a non-empty string`), + }, + // region as an empty string causes a validation error + { + Config: testAccProvider_checkPlanTimeErrors(`region = ""`, acctest.RandString(t, 10)), + PlanOnly: true, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`expected a non-empty string`), + }, + // zone as an empty string causes a validation error + { + Config: testAccProvider_checkPlanTimeErrors(`zone = ""`, acctest.RandString(t, 10)), + PlanOnly: true, + ExpectNonEmptyPlan: true, + ExpectError: regexp.MustCompile(`expected a non-empty string`), + }, + }, + }) +} + +func testAccProviderBasePath_setBasePath(endpoint, name string) string { + return fmt.Sprintf(` +provider "google" { + alias = "compute_custom_endpoint" + compute_custom_endpoint = "%s" +} + +resource "google_compute_address" "default" { + provider = google.compute_custom_endpoint + name = "tf-test-address-%s" +}`, endpoint, name) +} + +func testAccProviderMeta_setModuleName(key, name string) string { + return fmt.Sprintf(` +terraform { + provider_meta "google" { + module_name = "%s" + } +} + +resource "google_compute_address" "default" { + name = "tf-test-address-%s" +}`, key, name) +} + +// Set up two projects. Project 1 has a service account that is used to create a +// pubsub topic in project 2. The pubsub API is only enabled in project 2, +// which causes the create to fail unless user_project_override is set to true. 
+ +func testAccProviderUserProjectOverride_step2(accessToken, pid string, override bool, topicName string) string { + return fmt.Sprintf(` +// See step 3 below, which is really step 2 minus the pubsub topic. +// Step 3 exists because provider configurations can't be removed while objects +// created by that provider still exist in state. Step 3 will remove the +// pubsub topic so the whole config can be deleted. +%s + +resource "google_pubsub_topic" "project-2-topic" { + provider = google.project-1-token + project = "%s-2" + + name = "%s" + labels = { + foo = "bar" + } +} +`, testAccProviderUserProjectOverride_step3(accessToken, override), pid, topicName) +} + +func testAccProviderUserProjectOverride_step3(accessToken string, override bool) string { + return fmt.Sprintf(` +provider "google" { + alias = "project-1-token" + access_token = "%s" + user_project_override = %v +} +`, accessToken, override) +} + +func testAccProviderIndirectUserProjectOverride_step2(pid, accessToken string, override bool) string { + return fmt.Sprintf(` +// See step 3 below, which is really step 2 minus the kms resources. +// Step 3 exists because provider configurations can't be removed while objects +// created by that provider still exist in state. Step 3 will remove the +// kms resources so the whole config can be deleted. 
+%s + +resource "google_kms_key_ring" "project-2-keyring" { + provider = google.project-1-token + project = "%s-2" + + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "project-2-key" { + provider = google.project-1-token + name = "%s" + key_ring = google_kms_key_ring.project-2-keyring.id +} + +data "google_kms_secret_ciphertext" "project-2-ciphertext" { + provider = google.project-1-token + crypto_key = google_kms_crypto_key.project-2-key.id + plaintext = "my-secret" +} +`, testAccProviderIndirectUserProjectOverride_step3(accessToken, override), pid, pid, pid) +} + +func testAccProviderIndirectUserProjectOverride_step3(accessToken string, override bool) string { + return fmt.Sprintf(` +provider "google" { + alias = "project-1-token" + + access_token = "%s" + user_project_override = %v +} +`, accessToken, override) +} + +// Copy the Mmv1 generated function testAccCheckComputeAddressDestroyProducer from the compute_test package to here, +// as that function is in the _test.go file and not importable. 
+func testAccCheckComputeAddressDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_compute_address" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{"{{"}}ComputeBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/regions/{{"{{"}}region{{"}}"}}/addresses/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + }) + if err == nil { + return fmt.Errorf("ComputeAddress still exists at %s", url) + } + } + + return nil + } +} + +func testAccProviderCredentials_actWithCredsFromEnv(name string) string { + return fmt.Sprintf(` +provider "google" { + alias = "testing_credentials" + +} + +resource "google_compute_address" "default" { + provider = google.testing_credentials + name = "%s" +}`, name) +} + +func testAccProviderCredentials_actWithCredsFromEnv_emptyString(name string) string { + return fmt.Sprintf(` +provider "google" { + alias = "testing_credentials" + credentials = "" +} + +resource "google_compute_address" "default" { + provider = google.testing_credentials + name = "%s" +}`, name) +} + +func testAccProvider_checkPlanTimeErrors(providerArgument, randString string) string { + return fmt.Sprintf(` +provider "google" { + %s +} + +# A random resource so that the test can generate a plan (can't check validation errors when plan is empty) +resource "google_pubsub_topic" "example" { + name = "tf-test-planned-resource-%s" +} +`, providerArgument, randString) +} diff --git 
a/mmv1/third_party/terraform/provider/go/provider_validators.go b/mmv1/third_party/terraform/provider/go/provider_validators.go new file mode 100644 index 000000000000..5c30801f9467 --- /dev/null +++ b/mmv1/third_party/terraform/provider/go/provider_validators.go @@ -0,0 +1,47 @@ +package provider + +import ( + "context" + "fmt" + "os" + + googleoauth "golang.org/x/oauth2/google" +) + +func ValidateCredentials(v interface{}, k string) (warnings []string, errors []error) { + if v == nil { + return + } + creds := v.(string) + + // reject empty strings + if v.(string) == "" { + errors = append(errors, + fmt.Errorf("expected a non-empty string")) + return + } + + // if this is a path and we can stat it, assume it's ok + if _, err := os.Stat(creds); err == nil { + return + } + if _, err := googleoauth.CredentialsFromJSON(context.Background(), []byte(creds)); err != nil { + errors = append(errors, + fmt.Errorf("JSON credentials are not valid: %s", err)) + } + + return +} + +func ValidateEmptyStrings(v interface{}, k string) (warnings []string, errors []error) { + if v == nil { + return + } + + if v.(string) == "" { + errors = append(errors, + fmt.Errorf("expected a non-empty string")) + } + + return +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index b8231d948fd0..9e26302c84c0 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -227,15 +227,11 @@ var handwrittenDatasources = map[string]*schema.Resource{ var generatedIAMDatasources = map[string]*schema.Resource{ // ####### START generated IAM datasources ########### - <% - resources_for_version.each do |object| - unless object[:iam_class_name].nil? - -%> + <% resources_for_version.each do |object| -%> + <% unless object[:iam_class_name].nil? 
-%> "<%= object[:terraform_name] -%>_iam_policy": tpgiamresource.DataSourceIamPolicy(<%= object[:iam_class_name] -%>IamSchema, <%= object[:iam_class_name] -%>IamUpdaterProducer), - <% - end - end - -%> + <% end -%> + <% end -%> // ####### END generated IAM datasources ########### } @@ -272,18 +268,12 @@ var generatedResources = map[string]*schema.Resource{ <% unless object[:resource_name].nil? -%> "<%= object[:terraform_name] -%>": <%= object[:resource_name] -%>(), <% end -%> - <% - unless object[:iam_class_name].nil? - -%> + <% unless object[:iam_class_name].nil? -%> "<%= object[:terraform_name] -%>_iam_binding": tpgiamresource.ResourceIamBinding(<%= object[:iam_class_name] -%>IamSchema, <%= object[:iam_class_name] -%>IamUpdaterProducer, <%= object[:iam_class_name] -%>IdParseFunc), "<%= object[:terraform_name] -%>_iam_member": tpgiamresource.ResourceIamMember(<%= object[:iam_class_name] -%>IamSchema, <%= object[:iam_class_name] -%>IamUpdaterProducer, <%= object[:iam_class_name] -%>IdParseFunc), "<%= object[:terraform_name] -%>_iam_policy": tpgiamresource.ResourceIamPolicy(<%= object[:iam_class_name] -%>IamSchema, <%= object[:iam_class_name] -%>IamUpdaterProducer, <%= object[:iam_class_name] -%>IdParseFunc), - <% - end # unless object[:iam_class_name].nil? 
- -%> - <% - end # resources_for_version.each do - -%> + <% end -%> + <% end -%> } var handwrittenResources = map[string]*schema.Resource{ diff --git a/mmv1/third_party/terraform/release-metadata.hcl.erb b/mmv1/third_party/terraform/release-metadata.hcl.erb index ceaa477e2afa..49613b8da720 100644 --- a/mmv1/third_party/terraform/release-metadata.hcl.erb +++ b/mmv1/third_party/terraform/release-metadata.hcl.erb @@ -1,3 +1,3 @@ <% autogen_exception -%> url_source_repository = "https://github.com/hashicorp/terraform-provider-google<%= "-" + version unless version == 'ga' -%>" -url_license = "https://github.com/hashicorp/terraform-provider-google<%= "-" + version unless version == 'ga' -%>/blob/main/LICENSE" \ No newline at end of file +url_license = "https://github.com/hashicorp/terraform-provider-google<%= "-" + version unless version == 'ga' -%>/blob/main/LICENSE" diff --git a/mmv1/third_party/terraform/scripts/go/diff.go.tmpl b/mmv1/third_party/terraform/scripts/go/diff.go.tmpl new file mode 100644 index 000000000000..2eb0c4fe077f --- /dev/null +++ b/mmv1/third_party/terraform/scripts/go/diff.go.tmpl @@ -0,0 +1,173 @@ +package main + + +import ( + "flag" + "fmt" + "reflect" + "runtime" + "sort" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + googleOld "github.com/hashicorp/terraform-provider-clean-{{ $.ProviderFromVersion }}/{{ $.ProviderFromVersion }}/provider" + // "github.com/hashicorp/terraform-provider-google/google/provider" will be replaced with corresponding package based on the version when generating the provider package + google "github.com/hashicorp/terraform-provider-google/google/provider" +) + +var verbose bool +var vFlag = flag.Bool("verbose", false, "set to true to produce more verbose diffs") +var resourceFlag = flag.String("resource", "", "the name of the terraform resource to diff") + +func main() { + flag.Parse() + if resourceFlag == nil || *resourceFlag == "" { + fmt.Print("resource flag not specified\n") + 
panic("the resource to diff must be specified") + } + resourceName := *resourceFlag + verbose = *vFlag + m := google.ResourceMap() + res, ok := m[resourceName] + if !ok { + panic(fmt.Sprintf("Unable to find resource in TPGB: %s", resourceName)) + } + m2 := googleOld.ResourceMap() + res2, ok := m2[resourceName] + if !ok { + panic(fmt.Sprintf("Unable to find resource in clean TPGB: %s", resourceName)) + } + fmt.Printf("------------Diffing resource %s------------\n", resourceName) + diffSchema(res2.Schema, res.Schema, []string{}) + fmt.Print("------------Done------------\n") +} + +// Diffs a Terraform resource schema. Calls itself recursively as some fields +// are implemented using schema.Resource as their element type +func diffSchema(old, new map[string]*schema.Schema, path []string) { + var sharedKeys []string + var addedKeys []string + for k := range new { + if _, ok := old[k]; ok { + sharedKeys = append(sharedKeys, k) + } else { + // Key not found in old schema + addedKeys = append(addedKeys, k) + } + } + var missingKeys []string + for k := range old { + if _, ok := new[k]; !ok { + missingKeys = append(missingKeys, k) + } + } + sort.Strings(sharedKeys) + sort.Strings(addedKeys) + sort.Strings(missingKeys) + if len(addedKeys) != 0 { + var qualifiedKeys []string + for _, k := range addedKeys { + qualifiedKeys = append(qualifiedKeys, strings.Join(append(path, k), ".")) + } + fmt.Printf("Fields added in tpgtools: %v\n", qualifiedKeys) + } + if len(missingKeys) != 0 { + var qualifiedKeys []string + for _, k := range missingKeys { + qualifiedKeys = append(qualifiedKeys, strings.Join(append(path, k), ".")) + } + fmt.Printf("Fields missing in tpgtools: %v\n", qualifiedKeys) + } + for _, k := range sharedKeys { + diffSchemaObject(old[k], new[k], append(path, k)) + } +} + +// Diffs a schema.Schema object. Calls itself and diffSchema recursively as +// needed on nested fields. 
+func diffSchemaObject(old, new *schema.Schema, path []string) { + if old.Required != new.Required { + fmt.Printf("Required status different for path %s, was: %t is now %t\n", strings.Join(path, "."), old.Required, new.Required) + } + if old.Computed != new.Computed { + fmt.Printf("Computed status different for path %s, was: %t is now %t\n", strings.Join(path, "."), old.Computed, new.Computed) + } + if old.Optional != new.Optional { + fmt.Printf("Optional status different for path %s, was: %t is now %t\n", strings.Join(path, "."), old.Optional, new.Optional) + } + if old.ForceNew != new.ForceNew { + fmt.Printf("ForceNew status different for path %s, was: %t is now %t\n", strings.Join(path, "."), old.ForceNew, new.ForceNew) + } + if old.Type != new.Type { + fmt.Printf("Type different for path %s, was: %s is now %s\n", strings.Join(path, "."), old.Type, new.Type) + // Types are different, other diffs won't make sense + return + } + if old.Sensitive != new.Sensitive { + fmt.Printf("Sensitive status different for path %s, was: %t is now %t\n", strings.Join(path, "."), old.Sensitive, new.Sensitive) + } + if old.Deprecated != new.Deprecated { + fmt.Printf("Deprecated status different for path %s, was: %s is now %s\n", strings.Join(path, "."), old.Deprecated, new.Deprecated) + } + if old.MaxItems != new.MaxItems { + fmt.Printf("MaxItems different for path %s, was: %d is now %d\n", strings.Join(path, "."), old.MaxItems, new.MaxItems) + } + if old.MinItems != new.MinItems { + fmt.Printf("MinItems different for path %s, was: %d is now %d\n", strings.Join(path, "."), old.MinItems, new.MinItems) + } + if old.Default != new.Default { + fmt.Printf("Default value different for path %s, was: %v is now %v\n", strings.Join(path, "."), old.Default, new.Default) + } + if old.ConfigMode != new.ConfigMode { + // This is only set on very few complicated resources (instance, container cluster) + fmt.Printf("ConfigMode different for path %s, was: %v is now %v\n", strings.Join(path, "."), 
old.ConfigMode, new.ConfigMode) + } + // Verbose diffs. Enabled using --verbose flag + if verbose && !reflect.DeepEqual(old.ConflictsWith, new.ConflictsWith) { + fmt.Printf("ConflictsWith different for path %s, was: %v is now %v\n", strings.Join(path, "."), old.ConflictsWith, new.ConflictsWith) + } + oldDiffSuppressFunc := findFunctionName(old.DiffSuppressFunc) + newDiffSuppressFunc := findFunctionName(new.DiffSuppressFunc) + if verbose && oldDiffSuppressFunc != newDiffSuppressFunc { + fmt.Printf("DiffSuppressFunc for path %s, was: %s is now %s\n", strings.Join(path, "."), oldDiffSuppressFunc, newDiffSuppressFunc) + } + oldStateFunc := findFunctionName(old.StateFunc) + newStateFunc := findFunctionName(new.StateFunc) + if verbose && oldStateFunc != newStateFunc { + fmt.Printf("StateFunc for path %s, was: %s is now %s\n", strings.Join(path, "."), oldStateFunc, newStateFunc) + } + oldValidateFunc := findFunctionName(old.ValidateFunc) + newValidateFunc := findFunctionName(new.ValidateFunc) + if verbose && oldValidateFunc != newValidateFunc { + fmt.Printf("ValidateFunc for path %s, was: %s is now %s\n", strings.Join(path, "."), oldValidateFunc, newValidateFunc) + } + oldSet := findFunctionName(old.Set) + newSet := findFunctionName(new.Set) + if verbose && oldSet != newSet { + fmt.Printf("Set function for path %s, was: %s is now %s\n", strings.Join(path, "."), oldSet, newSet) + } + // Recursive calls for nested objects + if old.Type == schema.TypeList || old.Type == schema.TypeMap || old.Type == schema.TypeSet { + oldElem := old.Elem + newElem := new.Elem + if reflect.TypeOf(oldElem) != reflect.TypeOf(newElem) { + fmt.Printf("Elem type different for path %s, was: %T is now %T\n", strings.Join(path, "."), oldElem, newElem) + } + switch v := oldElem.(type) { + case *schema.Resource: + diffSchema(v.Schema, newElem.(*schema.Resource).Schema, path) + case *schema.Schema: + // Primitive unnamed field as only element + diffSchemaObject(v, newElem.(*schema.Schema), append(path, 
"elem")) + } + } +} +func findFunctionName(f interface{}) string { + ptr := reflect.ValueOf(f).Pointer() + fun := runtime.FuncForPC(ptr) + if fun == nil { + return "" + } + split := strings.Split(fun.Name(), ".") + return split[len(split)-1] +} diff --git a/mmv1/third_party/terraform/scripts/go/run_diff.sh.tmpl b/mmv1/third_party/terraform/scripts/go/run_diff.sh.tmpl new file mode 100644 index 000000000000..79b3987c81b1 --- /dev/null +++ b/mmv1/third_party/terraform/scripts/go/run_diff.sh.tmpl @@ -0,0 +1,30 @@ +#!/bin/bash +set -e +set -x +if [ -z "$1" ]; then + echo "Must provide 1 argument - name of resource to diff, e.g. 'google_compute_forwarding_rule'" + exit 1 +fi + +function cleanup() { + go mod edit -dropreplace=github.com/hashicorp/terraform-provider-clean-{{ $.ProviderFromVersion }} + go mod edit -droprequire=github.com/hashicorp/terraform-provider-clean-{{ $.ProviderFromVersion }} +} + +trap cleanup EXIT +if [[ -d ~/go/src/github.com/hashicorp/terraform-provider-clean-{{ $.ProviderFromVersion }} ]]; then + pushd ~/go/src/github.com/hashicorp/terraform-provider-clean-{{ $.ProviderFromVersion }} + git clean -fdx + git reset --hard + git checkout main + git pull + popd +else + mkdir -p ~/go/src/github.com/hashicorp + git clone https://github.com/hashicorp/terraform-provider-{{ $.ProviderFromVersion }} ~/go/src/github.com/hashicorp/terraform-provider-clean-{{ $.ProviderFromVersion }} +fi + + +go mod edit -require=github.com/hashicorp/terraform-provider-clean-{{ $.ProviderFromVersion }}@v0.0.0 +go mod edit -replace github.com/hashicorp/terraform-provider-clean-{{ $.ProviderFromVersion }}=$(realpath ~/go/src/github.com/hashicorp/terraform-provider-clean-{{ $.ProviderFromVersion }}) +go run scripts/diff.go --resource $1 --verbose diff --git a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl index 971de79c1dd0..6ade6964b38e 100644 --- 
a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl @@ -379,6 +379,22 @@ func TestAccGKEHubFeature_FleetDefaultMemberConfigServiceMesh(t *testing.T) { ImportState: true, ImportStateVerify: true, }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshRemovalUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshReAddUpdate(context), + }, + { + ResourceName: "google_gke_hub_feature.feature", + ImportState: true, + ImportStateVerify: true, + }, }, }) } @@ -415,6 +431,33 @@ resource "google_gke_hub_feature" "feature" { `, context) } +func testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshRemovalUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "servicemesh" + location = "global" + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.mesh] + project = google_project.project.project_id +} +`, context) +} + +func testAccGKEHubFeature_FleetDefaultMemberConfigServiceMeshReAddUpdate(context map[string]interface{}) string { + return gkeHubFeatureProjectSetupForGA(context) + acctest.Nprintf(` +resource "google_gke_hub_feature" "feature" { + name = "servicemesh" + location = "global" + fleet_default_member_config { + mesh { + management = "MANAGEMENT_MANUAL" + } + } + depends_on = [google_project_service.anthos, google_project_service.gkehub, google_project_service.mesh] + project = google_project.project.project_id +} +`, context) +} + func TestAccGKEHubFeature_FleetDefaultMemberConfigConfigManagement(t *testing.T) { // VCR fails to handle batched project services acctest.SkipIfVcr(t) diff --git 
a/mmv1/third_party/terraform/tpgiamresource/go/iam.go.tmpl b/mmv1/third_party/terraform/tpgiamresource/go/iam.go.tmpl new file mode 100644 index 000000000000..3222c6adfaee --- /dev/null +++ b/mmv1/third_party/terraform/tpgiamresource/go/iam.go.tmpl @@ -0,0 +1,567 @@ +// Utils for modifying IAM policies for resources across GCP +package tpgiamresource + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strings" + "time" + + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + + "github.com/davecgh/go-spew/spew" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +const maxBackoffSeconds = 30 +const IamPolicyVersion = 3 + +// These types are implemented per GCP resource type and specify how to do per-resource IAM operations. +// They are used in the generic Terraform IAM resource definitions +// (e.g. _member/_binding/_policy/_audit_config) +type ( + // The ResourceIamUpdater interface is implemented for each GCP resource supporting IAM policy. + // Implementations should be created per resource and should keep track of the resource identifier. + ResourceIamUpdater interface { + // Fetch the existing IAM policy attached to a resource. + GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) + + // Replaces the existing IAM Policy attached to a resource. + SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error + + // A mutex guards against concurrent to call to the SetResourceIamPolicy method. + // The mutex key should be made of the resource type and resource id. + // For example: `iam-project-{id}`. + GetMutexKey() string + + // Returns the unique resource identifier. + GetResourceId() string + + // Textual description of this resource to be used in error message. + // The description should include the unique resource identifier. 
+ DescribeResource() string + } + + // Factory for generating ResourceIamUpdater for given ResourceData resource + NewResourceIamUpdaterFunc func(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (ResourceIamUpdater, error) + + // Describes how to modify a policy for a given Terraform IAM (_policy/_member/_binding/_audit_config) resource + iamPolicyModifyFunc func(p *cloudresourcemanager.Policy) error + + // Parser for Terraform resource identifier (d.Id) for resource whose IAM policy is being changed + ResourceIdParserFunc func(d *schema.ResourceData, config *transport_tpg.Config) error +) + +// Locking wrapper around read-only operation with retries. +func iamPolicyReadWithRetry(updater ResourceIamUpdater) (*cloudresourcemanager.Policy, error) { + mutexKey := updater.GetMutexKey() + transport_tpg.MutexStore.Lock(mutexKey) + defer transport_tpg.MutexStore.Unlock(mutexKey) + + log.Printf("[DEBUG] Retrieving policy for %s\n", updater.DescribeResource()) + var policy *cloudresourcemanager.Policy + err := transport_tpg.Retry(transport_tpg.RetryOptions{ + RetryFunc: func() (perr error) { + policy, perr = updater.GetResourceIamPolicy() + return perr + }, + Timeout: 10 * time.Minute, + }) + if err != nil { + return nil, err + } + log.Print(spew.Sprintf("[DEBUG] Retrieved policy for %s: %#v\n", updater.DescribeResource(), policy)) + return policy, nil +} + +// Locking wrapper around read-modify-write cycle for IAM policy. 
+func iamPolicyReadModifyWrite(updater ResourceIamUpdater, modify iamPolicyModifyFunc) error { + mutexKey := updater.GetMutexKey() + transport_tpg.MutexStore.Lock(mutexKey) + defer transport_tpg.MutexStore.Unlock(mutexKey) + + backoff := time.Second + for { + log.Printf("[DEBUG]: Retrieving policy for %s\n", updater.DescribeResource()) + p, err := updater.GetResourceIamPolicy() + if transport_tpg.IsGoogleApiErrorWithCode(err, 429) { + log.Printf("[DEBUG] 429 while attempting to read policy for %s, waiting %v before attempting again", updater.DescribeResource(), backoff) + time.Sleep(backoff) + continue + } else if err != nil { + return err + } + log.Printf("[DEBUG]: Retrieved policy for %s: %+v\n", updater.DescribeResource(), p) + + err = modify(p) + if err != nil { + return err + } + + log.Printf("[DEBUG]: Setting policy for %s to %+v\n", updater.DescribeResource(), p) + err = updater.SetResourceIamPolicy(p) + if err == nil { + fetchBackoff := 1 * time.Second + for successfulFetches := 0; successfulFetches < 3; { + if fetchBackoff > maxBackoffSeconds*time.Second { + return fmt.Errorf("Error applying IAM policy to %s: Waited too long for propagation.\n", updater.DescribeResource()) + } + time.Sleep(fetchBackoff) + log.Printf("[DEBUG]: Retrieving policy for %s\n", updater.DescribeResource()) + new_p, err := updater.GetResourceIamPolicy() + if err != nil { + // Quota for Read is pretty limited, so watch out for running out of quota. 
+ if transport_tpg.IsGoogleApiErrorWithCode(err, 429) { + fetchBackoff = fetchBackoff * 2 + } else { + return err + } + } + log.Printf("[DEBUG]: Retrieved policy for %s: %+v\n", updater.DescribeResource(), p) + if new_p == nil { + // https://github.com/hashicorp/terraform-provider-google/issues/2625 + fetchBackoff = fetchBackoff * 2 + continue + } + modified_p := new_p + // This relies on the fact that `modify` is idempotent: since other changes might have + // happened between the call to set the policy and now, we just need to make sure that + // our change has been made. 'modify(p) == p' is our check for whether this has been + // correctly applied. + err = modify(modified_p) + if err != nil { + return err + } + if modified_p == new_p { + successfulFetches += 1 + } else { + fetchBackoff = fetchBackoff * 2 + } + } + break + } + if tpgresource.IsConflictError(err) { + log.Printf("[DEBUG]: Concurrent policy changes, restarting read-modify-write after %s\n", backoff) + time.Sleep(backoff) + backoff = backoff * 2 + if backoff > 30*time.Second { + return errwrap.Wrapf(fmt.Sprintf("Error applying IAM policy to %s: Too many conflicts. Latest error: {{"{{"}}err{{"}}"}}", updater.DescribeResource()), err) + } + continue + } + + // retry in the case that a service account is not found. This can happen when a service account is deleted + // out of band. 
+ if isServiceAccountNotFoundError, _ := transport_tpg.IamServiceAccountNotFound(err); isServiceAccountNotFoundError { + // calling a retryable function within a retry loop is not + // strictly the _best_ idea, but this error only happens in + // high-traffic projects anyways + currentPolicy, rerr := iamPolicyReadWithRetry(updater) + if rerr != nil { + if p.Etag != currentPolicy.Etag { + // not matching indicates that there is a new state to attempt to apply + log.Printf("current and old etag did not match for %s, retrying", updater.DescribeResource()) + time.Sleep(backoff) + backoff = backoff * 2 + continue + } + + log.Printf("current and old etag matched for %s, not retrying", updater.DescribeResource()) + } else { + // if the error is non-nil, just fall through and return the base error + log.Printf("[DEBUG]: error checking etag for policy %s. error: %v", updater.DescribeResource(), rerr) + } + } + + log.Printf("[DEBUG]: not retrying IAM policy for %s. error: %v", updater.DescribeResource(), err) + return errwrap.Wrapf(fmt.Sprintf("Error applying IAM policy for %s: {{"{{"}}err{{"}}"}}", updater.DescribeResource()), err) + } + log.Printf("[DEBUG]: Set policy for %s", updater.DescribeResource()) + return nil +} + +// Flattens a list of Bindings so each role+condition has a single Binding with combined members +func MergeBindings(bindings []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding { + bm := createIamBindingsMap(bindings) + return listFromIamBindingMap(bm) +} + +type conditionKey struct { + Description string + Expression string + Title string +} + +func conditionKeyFromCondition(condition *cloudresourcemanager.Expr) conditionKey { + if condition == nil { + return conditionKey{} + } + return conditionKey{condition.Description, condition.Expression, condition.Title} +} + +func (k conditionKey) Empty() bool { + return k == conditionKey{} +} + +func (k conditionKey) String() string { + return fmt.Sprintf("%s/%s/%s", k.Title, k.Description, 
k.Expression) +} + +type iamBindingKey struct { + Role string + Condition conditionKey +} + +// Removes a single role+condition binding from a list of Bindings +func filterBindingsWithRoleAndCondition(b []*cloudresourcemanager.Binding, role string, condition *cloudresourcemanager.Expr) []*cloudresourcemanager.Binding { + bMap := createIamBindingsMap(b) + key := iamBindingKey{role, conditionKeyFromCondition(condition)} + delete(bMap, key) + return listFromIamBindingMap(bMap) +} + +// Removes given role+condition/bound-member pairs from the given Bindings (i.e subtraction). +func subtractFromBindings(bindings []*cloudresourcemanager.Binding, toRemove ...*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding { + currMap := createIamBindingsMap(bindings) + toRemoveMap := createIamBindingsMap(toRemove) + + for key, removeSet := range toRemoveMap { + members, ok := currMap[key] + if !ok { + continue + } + // Remove all removed members + for m := range removeSet { + delete(members, m) + } + // Remove role+condition from bindings + if len(members) == 0 { + delete(currMap, key) + } + } + + return listFromIamBindingMap(currMap) +} + +func iamMemberIsCaseSensitive(member string) bool { + // allAuthenticatedUsers and allUsers are special identifiers that are case sensitive. 
See: + // https://cloud.google.com/iam/docs/overview#all-authenticated-users + return strings.Contains(member, "allAuthenticatedUsers") || strings.Contains(member, "allUsers") || + strings.HasPrefix(member, "principalSet:") || strings.HasPrefix(member, "principal:") || + strings.HasPrefix(member, "principalHierarchy:") +} + +// normalizeIamMemberCasing returns the case adjusted value of an iamMember +// this is important as iam will ignore casing unless it is one of the following +// member types: principalSet, principal, principalHierarchy +// members are in : format +// is case sensitive +// isn't in most cases +// so lowercase the value unless iamMemberIsCaseSensitive and leave the type alone +// since Dec '19 members can be prefixed with "deleted:" to indicate the principal +// has been deleted +func normalizeIamMemberCasing(member string) string { + var pieces []string + if strings.HasPrefix(member, "deleted:") { + pieces = strings.SplitN(member, ":", 3) + if len(pieces) > 2 && !iamMemberIsCaseSensitive(strings.TrimPrefix(member, "deleted:")) { + pieces[2] = strings.ToLower(pieces[2]) + } + } else if strings.HasPrefix(member, "iamMember:") { + pieces = strings.SplitN(member, ":", 3) + if len(pieces) > 2 && !iamMemberIsCaseSensitive(strings.TrimPrefix(member, "iamMember:")) { + pieces[2] = strings.ToLower(pieces[2]) + } + } else if !iamMemberIsCaseSensitive(member) { + pieces = strings.SplitN(member, ":", 2) + if len(pieces) > 1 { + pieces[1] = strings.ToLower(pieces[1]) + } + } + + if len(pieces) > 0 { + member = strings.Join(pieces, ":") + } + return member +} + +// Construct map of role to set of members from list of bindings. 
+func createIamBindingsMap(bindings []*cloudresourcemanager.Binding) map[iamBindingKey]map[string]struct{} { + bm := make(map[iamBindingKey]map[string]struct{}) + // Get each binding + for _, b := range bindings { + members := make(map[string]struct{}) + key := iamBindingKey{b.Role, conditionKeyFromCondition(b.Condition)} + // Initialize members map + if _, ok := bm[key]; ok { + members = bm[key] + } + // Get each member (user/principal) for the binding + for _, m := range b.Members { + m = normalizeIamMemberCasing(m) + // Add the member + members[m] = struct{}{} + } + if len(members) > 0 { + bm[key] = members + } else { + delete(bm, key) + } + } + return bm +} + +// Return list of Bindings for a map of role to member sets +func listFromIamBindingMap(bm map[iamBindingKey]map[string]struct{}) []*cloudresourcemanager.Binding { + rb := make([]*cloudresourcemanager.Binding, 0, len(bm)) + var keys []iamBindingKey + for k := range bm { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { + keyI := keys[i] + keyJ := keys[j] + return fmt.Sprintf("%s%s", keyI.Role, keyI.Condition.String()) < fmt.Sprintf("%s%s", keyJ.Role, keyJ.Condition.String()) + }) + for _, key := range keys { + members := bm[key] + if len(members) == 0 { + continue + } + b := &cloudresourcemanager.Binding{ + Role: key.Role, + Members: tpgresource.StringSliceFromGolangSet(members), + } + if !key.Condition.Empty() { + b.Condition = &cloudresourcemanager.Expr{ + Description: key.Condition.Description, + Expression: key.Condition.Expression, + Title: key.Condition.Title, + } + } + rb = append(rb, b) + } + return rb +} + +// Flattens AuditConfigs so each role has a single Binding with combined members\ +func removeAllAuditConfigsWithService(ac []*cloudresourcemanager.AuditConfig, service string) []*cloudresourcemanager.AuditConfig { + acMap := createIamAuditConfigsMap(ac) + delete(acMap, service) + return listFromIamAuditConfigMap(acMap) +} + +// Build a AuditConfig service to audit log 
config map +func createIamAuditConfigsMap(auditConfigs []*cloudresourcemanager.AuditConfig) map[string]map[string]map[string]struct{} { + acMap := make(map[string]map[string]map[string]struct{}) + + for _, ac := range auditConfigs { + if _, ok := acMap[ac.Service]; !ok { + acMap[ac.Service] = make(map[string]map[string]struct{}) + } + alcMap := acMap[ac.Service] + for _, alc := range ac.AuditLogConfigs { + if _, ok := alcMap[alc.LogType]; !ok { + alcMap[alc.LogType] = make(map[string]struct{}) + } + memberMap := alcMap[alc.LogType] + // Add members to map for log type. + for _, m := range alc.ExemptedMembers { + memberMap[m] = struct{}{} + } + } + } + + return acMap +} + +// Construct list of AuditConfigs from audit config maps. +func listFromIamAuditConfigMap(acMap map[string]map[string]map[string]struct{}) []*cloudresourcemanager.AuditConfig { + ac := make([]*cloudresourcemanager.AuditConfig, 0, len(acMap)) + + for service, logConfigMap := range acMap { + if len(logConfigMap) == 0 { + continue + } + + logConfigs := make([]*cloudresourcemanager.AuditLogConfig, 0, len(logConfigMap)) + for logType, memberSet := range logConfigMap { + alc := &cloudresourcemanager.AuditLogConfig{ + LogType: logType, + ForceSendFields: []string{"exemptedMembers"}, + } + if len(memberSet) > 0 { + alc.ExemptedMembers = tpgresource.StringSliceFromGolangSet(memberSet) + } + logConfigs = append(logConfigs, alc) + } + + ac = append(ac, &cloudresourcemanager.AuditConfig{ + Service: service, + AuditLogConfigs: logConfigs, + }) + } + return ac +} + +func jsonPolicyDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if old == "" && new == "" { + return true + } + + var oldPolicy, newPolicy cloudresourcemanager.Policy + if old != "" && new != "" { + if err := json.Unmarshal([]byte(old), &oldPolicy); err != nil { + log.Printf("[ERROR] Could not unmarshal old policy %s: %v", old, err) + return false + } + if err := json.Unmarshal([]byte(new), &newPolicy); err != nil { + 
log.Printf("[ERROR] Could not unmarshal new policy %s: %v", new, err) + return false + } + + return compareIamPolicies(&newPolicy, &oldPolicy) + } + + return false +} + +func compareIamPolicies(a, b *cloudresourcemanager.Policy) bool { + if a.Etag != b.Etag { + log.Printf("[DEBUG] policies etag differ: %q vs %q", a.Etag, b.Etag) + return false + } + if a.Version != b.Version { + log.Printf("[DEBUG] policies version differ: %q vs %q", a.Version, b.Version) + return false + } + if !CompareBindings(a.Bindings, b.Bindings) { + log.Printf("[DEBUG] policies bindings differ: %#v vs %#v", a.Bindings, b.Bindings) + return false + } + if !CompareAuditConfigs(a.AuditConfigs, b.AuditConfigs) { + log.Printf("[DEBUG] policies audit configs differ: %#v vs %#v", a.AuditConfigs, b.AuditConfigs) + return false + } + return true +} + +func CompareBindings(a, b []*cloudresourcemanager.Binding) bool { + aMap := createIamBindingsMap(a) + bMap := createIamBindingsMap(b) + return reflect.DeepEqual(aMap, bMap) +} + +func CompareAuditConfigs(a, b []*cloudresourcemanager.AuditConfig) bool { + aMap := createIamAuditConfigsMap(a) + bMap := createIamAuditConfigsMap(b) + return reflect.DeepEqual(aMap, bMap) +} + +type IamSettings struct { + DeprecationMessage string + EnableBatching bool +} + +func NewIamSettings(options ...func(*IamSettings)) *IamSettings { + settings := &IamSettings{} + for _, o := range options { + o(settings) + } + return settings +} + +func IamWithDeprecationMessage(message string) func(s *IamSettings) { + return func(s *IamSettings) { + s.DeprecationMessage = message + } +} + +func IamWithGAResourceDeprecation() func (s *IamSettings) { + {{- if eq $.TargetVersionName "ga" }} + return IamWithDeprecationMessage("This resource has been deprecated in the google (GA) provider, and will only be available in the google-beta provider in a future release.") + {{- else }} + return IamWithDeprecationMessage("") + {{- end }} +} + +func IamWithBatching (s *IamSettings) { + 
s.EnableBatching = true +} + +// Util to deref and print auditConfigs +func DebugPrintAuditConfigs(bs []*cloudresourcemanager.AuditConfig) string { + v, _ := json.MarshalIndent(bs, "", "\t") + return string(v) +} + +// Util to deref and print bindings +func DebugPrintBindings(bs []*cloudresourcemanager.Binding) string { + v, _ := json.MarshalIndent(bs, "", "\t") + return string(v) +} + +// Returns a map representing iam bindings that are in the first map but not the second. +func missingBindingsMap(aMap, bMap map[iamBindingKey]map[string]struct{}) map[iamBindingKey]map[string]struct{} { + results := make(map[iamBindingKey]map[string]struct{}) + for key, aMembers := range aMap { + if bMembers, ok := bMap[key]; ok { + // The key is in both maps. + resultMembers := make(map[string]struct{}) + + for aMember := range aMembers { + if _, ok := bMembers[aMember]; !ok { + // The member is in a but not in b. + resultMembers[aMember] = struct{}{} + } + } + for bMember := range bMembers { + if _, ok := aMembers[bMember]; !ok { + // The member is in b but not in a. + resultMembers[bMember] = struct{}{} + } + } + + if len(resultMembers) > 0 { + results[key] = resultMembers + } + } else { + // The key is in map a but not map b. + results[key] = aMembers + } + } + + for key, bMembers := range bMap { + if _, ok := aMap[key]; !ok { + // The key is in map b but not map a. + results[key] = bMembers + } + } + + return results +} + +// Returns the bindings that are in the first set of bindings but not the second. 
+func MissingBindings(a, b []*cloudresourcemanager.Binding) []*cloudresourcemanager.Binding { + aMap := createIamBindingsMap(a) + bMap := createIamBindingsMap(b) + + var results []*cloudresourcemanager.Binding + for key, membersSet := range missingBindingsMap(aMap, bMap) { + members := make([]string, 0, len(membersSet)) + for member := range membersSet { + members = append(members, member) + } + results = append(results, &cloudresourcemanager.Binding{ + Role: key.Role, + Members: members, + }) + } + return results +} diff --git a/mmv1/third_party/terraform/tpgiamresource/go/iam_test.go.tmpl b/mmv1/third_party/terraform/tpgiamresource/go/iam_test.go.tmpl new file mode 100644 index 000000000000..dbf666ea50e4 --- /dev/null +++ b/mmv1/third_party/terraform/tpgiamresource/go/iam_test.go.tmpl @@ -0,0 +1,1333 @@ +package tpgiamresource + +import ( + "reflect" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" +) + +func TestIamMergeBindings(t *testing.T) { + testCases := []struct { + input []*cloudresourcemanager.Binding + expect []*cloudresourcemanager.Binding + }{ + // Nothing to merge - return same list + { + input: []*cloudresourcemanager.Binding{}, + expect: []*cloudresourcemanager.Binding{}, + }, + // No members returns no binding + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + }, + { + Role: "role-2", + Members: []string{"member-2"}, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-2", + Members: []string{"member-2"}, + }, + }, + }, + // Nothing to merge - return same list + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1"}, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1"}, + }, + }, + }, + // Nothing to merge - return same list + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1"}, + }, + { + 
Role: "role-2", + Members: []string{"member-2"}, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1"}, + }, + { + Role: "role-2", + Members: []string{"member-2"}, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1"}, + }, + { + Role: "role-1", + Members: []string{"member-2"}, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + { + Role: "role-1", + Members: []string{"member-3"}, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2", "member-3"}, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-3", "member-4"}, + }, + { + Role: "role-1", + Members: []string{"member-2", "member-1"}, + }, + { + Role: "role-2", + Members: []string{"member-1"}, + }, + { + Role: "role-1", + Members: []string{"member-5"}, + }, + { + Role: "role-3", + Members: []string{"member-1"}, + }, + { + Role: "role-2", + Members: []string{"member-2"}, + }, + {Role: "empty-role", Members: []string{}}, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2", "member-3", "member-4", "member-5"}, + }, + { + Role: "role-2", + Members: []string{"member-1", "member-2"}, + }, + { + Role: "role-3", + Members: []string{"member-1"}, + }, + }, + }, +{{- if ne $.TargetVersionName "ga" }} + // Same role+members, different condition + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + }, + }, + }, + expect: 
[]*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + }, + }, + }, + }, + // Same role, same condition + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + }, + }, + { + Role: "role-1", + Members: []string{"member-3"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + }, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2", "member-3"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + }, + }, + }, + }, + // Different roles, same condition + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + }, + }, + { + Role: "role-2", + Members: []string{"member-3"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + }, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + }, + }, + { + Role: "role-2", + Members: []string{"member-3"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + }, + }, + }, + }, +{{- end }} + } + + for _, tc := range testCases { + got := MergeBindings(tc.input) + if !CompareBindings(got, tc.expect) { + t.Errorf("Unexpected value for MergeBindings(%s).\nActual: %s\nExpected: %s\n", + DebugPrintBindings(tc.input), DebugPrintBindings(got), DebugPrintBindings(tc.expect)) + } + } +} + +func TestIamFilterBindingsWithRoleAndCondition(t *testing.T) { + testCases := []struct { + input []*cloudresourcemanager.Binding + role string + conditionTitle string + expect 
[]*cloudresourcemanager.Binding + }{ + // No-op + { + input: []*cloudresourcemanager.Binding{}, + role: "role-1", + expect: []*cloudresourcemanager.Binding{}, + }, + // Remove one binding + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + }, + role: "role-1", + expect: []*cloudresourcemanager.Binding{}, + }, + // Remove multiple bindings + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + { + Role: "role-1", + Members: []string{"member-3"}, + }, + }, + role: "role-1", + expect: []*cloudresourcemanager.Binding{}, + }, + // Remove multiple bindings and leave some. + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + { + Role: "role-2", + Members: []string{"member-1"}, + }, + { + Role: "role-3", + Members: []string{"member-1", "member-3"}, + }, + { + Role: "role-1", + Members: []string{"member-2"}, + }, + { + Role: "role-2", + Members: []string{"member-1", "member-2"}, + }, + }, + role: "role-1", + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-2", + Members: []string{"member-1"}, + }, + { + Role: "role-3", + Members: []string{"member-1", "member-3"}, + }, + { + Role: "role-2", + Members: []string{"member-1", "member-2"}, + }, + }, + }, +{{- if ne $.TargetVersionName "ga" }} + // Remove one binding with condition + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + { + Role: "role-1", + Members: []string{"member-3", "member-4"}, + Condition: &cloudresourcemanager.Expr{Title: "condition-1"}, + }, + }, + role: "role-1", + conditionTitle: "condition-1", + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + }, + }, +{{- end }} + } + + for _, tc := range testCases { + got := filterBindingsWithRoleAndCondition(tc.input, tc.role, 
&cloudresourcemanager.Expr{Title: tc.conditionTitle}) + if !CompareBindings(got, tc.expect) { + t.Errorf("Got unexpected value for removeAllBindingsWithRole(%s, %s).\nActual: %s\nExpected: %s", + DebugPrintBindings(tc.input), tc.role, DebugPrintBindings(got), DebugPrintBindings(tc.expect)) + } + } +} + +func TestIamSubtractFromBindings(t *testing.T) { + testCases := []struct { + input []*cloudresourcemanager.Binding + remove []*cloudresourcemanager.Binding + expect []*cloudresourcemanager.Binding + }{ + { + input: []*cloudresourcemanager.Binding{}, + remove: []*cloudresourcemanager.Binding{}, + expect: []*cloudresourcemanager.Binding{}, + }, + // Empty input should no-op return empty + { + input: []*cloudresourcemanager.Binding{}, + remove: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + }, + expect: []*cloudresourcemanager.Binding{}, + }, + // Empty removal should return original expect + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + }, + remove: []*cloudresourcemanager.Binding{}, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + }, + }, + // Removal not in input should no-op + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-1+"}, + }, + }, + remove: []*cloudresourcemanager.Binding{ + { + Role: "role-2", + Members: []string{"member-2"}, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-1+"}, + }, + }, + }, + // Same input/remove should return empty + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + }, + remove: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + }, + expect: []*cloudresourcemanager.Binding{}, + }, + // Single 
removal + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-2"}, + }, + }, + remove: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1"}, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-2"}, + }, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-2", "member-3"}, + }, + { + Role: "role-2", + Members: []string{"member-1"}, + }, + { + Role: "role-1", + Members: []string{"member-1"}, + }, + { + Role: "role-3", + Members: []string{"member-1"}, + }, + { + Role: "role-2", + Members: []string{"member-2"}, + }, + }, + remove: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-2", "member-4"}, + }, + { + Role: "role-2", + Members: []string{"member-2"}, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-3"}, + }, + { + Role: "role-2", + Members: []string{"member-1"}, + }, + { + Role: "role-3", + Members: []string{"member-1"}, + }, + }, + }, +{{- if ne $.TargetVersionName "ga" }} + // With conditions + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-2", "member-3"}, + }, + { + Role: "role-2", + Members: []string{"member-1"}, + }, + { + Role: "role-1", + Members: []string{"member-1"}, + }, + { + Role: "role-3", + Members: []string{"member-1"}, + }, + { + Role: "role-2", + Members: []string{"member-1"}, + Condition: &cloudresourcemanager.Expr{Title: "condition-1"}, + }, + }, + remove: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-2", "member-4"}, + }, + { + Role: "role-2", + Members: []string{"member-1"}, + Condition: &cloudresourcemanager.Expr{Title: "condition-1"}, + }, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"member-1", "member-3"}, + }, + { + Role: 
"role-2", + Members: []string{"member-1"}, + }, + { + Role: "role-3", + Members: []string{"member-1"}, + }, + }, + }, +{{- end }} + } + + for _, tc := range testCases { + got := subtractFromBindings(tc.input, tc.remove...) + if !CompareBindings(got, tc.expect) { + t.Errorf("Unexpected value for subtractFromBindings(%s, %s).\nActual: %s\nExpected: %s\n", + DebugPrintBindings(tc.input), DebugPrintBindings(tc.remove), DebugPrintBindings(got), DebugPrintBindings(tc.expect)) + } + } +} + +func TestIamCreateIamBindingsMap(t *testing.T) { + testCases := []struct { + input []*cloudresourcemanager.Binding + expect map[iamBindingKey]map[string]struct{} + }{ + { + input: []*cloudresourcemanager.Binding{}, + expect: map[iamBindingKey]map[string]struct{}{}, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"user-1", "user-2"}, + }, + }, + expect: map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"user-1", "user-2"}, + }, + { + Role: "role-1", + Members: []string{"user-3"}, + }, + }, + expect: map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}, "user-3": {}}, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"user-1", "user-2"}, + }, + { + Role: "role-2", + Members: []string{"user-1"}, + }, + }, + expect: map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + {"role-2", conditionKey{}}: {"user-1": {}}, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"user-1", "user-2"}, + }, + { + Role: "role-2", + Members: []string{"user-1"}, + }, + { + Role: "role-1", + Members: []string{"user-3"}, + }, + { + Role: "role-2", + Members: []string{"user-2"}, + }, + { + Role: "role-3", + Members: []string{"user-3"}, + }, + 
}, + expect: map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}, "user-3": {}}, + {"role-2", conditionKey{}}: {"user-1": {}, "user-2": {}}, + {"role-3", conditionKey{}}: {"user-3": {}}, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"deleted:serviceAccount:useR-1", "user-2"}, + }, + { + Role: "role-2", + Members: []string{"deleted:user:user-1"}, + }, + { + Role: "role-1", + Members: []string{"serviceAccount:user-3"}, + }, + { + Role: "role-2", + Members: []string{"user-2"}, + }, + { + Role: "role-3", + Members: []string{"user-3"}, + }, + { + Role: "role-4", + Members: []string{"deleted:principal:useR-1"}, + }, + }, + expect: map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"deleted:serviceAccount:user-1": {}, "user-2": {}, "serviceAccount:user-3": {}}, + {"role-2", conditionKey{}}: {"deleted:user:user-1": {}, "user-2": {}}, + {"role-3", conditionKey{}}: {"user-3": {}}, + {"role-4", conditionKey{}}: {"deleted:principal:useR-1": {}}, + }, + }, + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"principalSet://iam.googleapis.com/projects/1066737951711/locations/global/workloadIdentityPools/example-pool/attribute.aws_role/arn:aws:sts::999999999999:assumed-role/some-eu-central-1-lambdaRole"}, + }, + { + Role: "role-2", + Members: []string{"principal://iam.googleapis.com/projects/1066737951711/locations/global/workloadIdentityPools/example-pool/attribute.aws_role/arn:aws:sts::999999999999:assumed-role/some-eu-central-1-lambdaRole"}, + }, + { + Role: "role-1", + Members: []string{"serviceAccount:useR-3"}, + }, + { + Role: "role-2", + Members: []string{"user-2"}, + }, + { + Role: "role-3", + Members: []string{"user-3"}, + }, + { + Role: "role-3", + Members: []string{"principalHierarchy://iam.googleapis.com/projects/1066737951711/locations/global/workloadIdentityPools"}, + }, + }, + expect: 
map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"principalSet://iam.googleapis.com/projects/1066737951711/locations/global/workloadIdentityPools/example-pool/attribute.aws_role/arn:aws:sts::999999999999:assumed-role/some-eu-central-1-lambdaRole": {}, "serviceAccount:user-3": {}}, + {"role-2", conditionKey{}}: {"principal://iam.googleapis.com/projects/1066737951711/locations/global/workloadIdentityPools/example-pool/attribute.aws_role/arn:aws:sts::999999999999:assumed-role/some-eu-central-1-lambdaRole": {}, "user-2": {}}, + {"role-3", conditionKey{}}: {"principalHierarchy://iam.googleapis.com/projects/1066737951711/locations/global/workloadIdentityPools": {}, "user-3": {}}, + }, + }, +{{- if ne $.TargetVersionName "ga" }} + { + input: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"user-1", "user-2"}, + }, + { + Role: "role-2", + Members: []string{"user-1"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + Description: "condition-1-desc", + Expression: "condition-1-expr", + }, + }, + { + Role: "role-2", + Members: []string{"user-2"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-1", + Description: "condition-1-desc", + Expression: "condition-1-expr", + }, + }, + { + Role: "role-2", + Members: []string{"user-1"}, + Condition: &cloudresourcemanager.Expr{ + Title: "condition-2", + Description: "condition-2-desc", + Expression: "condition-2-expr", + }, + }, + }, + expect: map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + { + Role: "role-2", + Condition: conditionKey{ + Title: "condition-1", + Description: "condition-1-desc", + Expression: "condition-1-expr", + }, + }: {"user-1": {}, "user-2": {}}, + { + Role: "role-2", + Condition: conditionKey{ + Title: "condition-2", + Description: "condition-2-desc", + Expression: "condition-2-expr", + }, + }: {"user-1": {}}, + }, + }, +{{- end }} + } + + for _, tc := range testCases { + got := 
createIamBindingsMap(tc.input) + if !reflect.DeepEqual(got, tc.expect) { + t.Errorf("Unexpected value for createIamBindingsMap(%s).\nActual: %#v\nExpected: %#v\n", + DebugPrintBindings(tc.input), got, tc.expect) + } + } +} + +func TestIamMember_MemberDiffSuppress(t *testing.T) { + type IamMemberTestcase struct { + name string + old string + new string + equal bool + } + var iamMemberTestcases = []IamMemberTestcase{ + { + name: "control", + old: "somevalue", + new: "somevalue", + equal: true, + }, + { + name: "principal same casing", + old: "principal:someValueHere", + new: "principal:someValueHere", + equal: true, + }, + { + name: "principal not same casing", + old: "principal:somevalueHere", + new: "principal:someValuehere", + equal: false, + }, + { + name: "principalSet same casing", + old: "principalSet:someValueHere", + new: "principalSet:someValueHere", + equal: true, + }, + { + name: "principalSet not same casing", + old: "principalSet:somevalueHere", + new: "principalSet:someValuehere", + equal: false, + }, + { + name: "principalHierarchy same casing", + old: "principalHierarchy:someValueHere", + new: "principalHierarchy:someValueHere", + equal: true, + }, + { + name: "principalHierarchy not same casing", + old: "principalHierarchy:somevalueHere", + new: "principalHierarchy:someValuehere", + equal: false, + }, + { + name: "serviceAccount same casing", + old: "serviceAccount:same@case.com", + new: "serviceAccount:same@case.com", + equal: true, + }, + { + name: "serviceAccount diff casing", + old: "serviceAccount:sAme@casE.com", + new: "serviceAccount:same@case.com", + equal: true, + }, + { + name: "random diff", + old: "serviasfsfljJKLSD", + new: "servicsFDJKLSFJdfjdlkfsf", + equal: false, + }, + } + + for _, testcase := range iamMemberTestcases { + areEqual := iamMemberCaseDiffSuppress("", testcase.old, testcase.new, &schema.ResourceData{}) + if areEqual != testcase.equal { + t.Errorf("Testcase %s failed: expected equality to be %t but got %t", 
testcase.name, testcase.equal, areEqual) + } + } +} + +func TestIamListFromIamBindingMap(t *testing.T) { + testCases := []struct { + input map[iamBindingKey]map[string]struct{} + expect []*cloudresourcemanager.Binding + }{ + { + input: map[iamBindingKey]map[string]struct{}{}, + expect: []*cloudresourcemanager.Binding{}, + }, + { + input: map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"user-1", "user-2"}, + }, + }, + }, + { + input: map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"user-1": {}}, + {"role-2", conditionKey{}}: {"user-1": {}, "user-2": {}}, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"user-1"}, + }, + { + Role: "role-2", + Members: []string{"user-1", "user-2"}, + }, + }, + }, + { + input: map[iamBindingKey]map[string]struct{}{ + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + {"role-2", conditionKey{}}: {}, + }, + expect: []*cloudresourcemanager.Binding{ + { + Role: "role-1", + Members: []string{"user-1", "user-2"}, + }, + }, + }, + } + + for _, tc := range testCases { + got := listFromIamBindingMap(tc.input) + if !CompareBindings(got, tc.expect) { + t.Errorf("Unexpected value for subtractFromBindings(%v).\nActual: %#v\nExpected: %#v\n", + tc.input, DebugPrintBindings(got), DebugPrintBindings(tc.expect)) + } + } +} + +func TestIamRemoveAllAuditConfigsWithService(t *testing.T) { + testCases := []struct { + input []*cloudresourcemanager.AuditConfig + service string + expect []*cloudresourcemanager.AuditConfig + }{ + // No-op + { + service: "foo.googleapis.com", + input: []*cloudresourcemanager.AuditConfig{}, + expect: []*cloudresourcemanager.AuditConfig{}, + }, + // No-op - service not in audit configs + { + service: "bar.googleapis.com", + input: []*cloudresourcemanager.AuditConfig{ + { + Service: "foo.googleapis.com", + 
AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + }, + }, + }, + }, + expect: []*cloudresourcemanager.AuditConfig{ + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + }, + }, + }, + }, + }, + // Single removal + { + service: "foo.googleapis.com", + input: []*cloudresourcemanager.AuditConfig{ + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + }, + }, + }, + }, + expect: []*cloudresourcemanager.AuditConfig{}, + }, + // Multiple removal/merge + { + service: "kms.googleapis.com", + input: []*cloudresourcemanager.AuditConfig{ + { + Service: "kms.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + }, + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-1"}, + }, + }, + }, + { + Service: "iam.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + ExemptedMembers: []string{"user-1"}, + }, + }, + }, + { + Service: "kms.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-2"}, + }, + }, + }, + { + Service: "iam.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + ExemptedMembers: []string{"user-2"}, + }, + }, + }, + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-1"}, + }, + }, + }, + { + Service: "kms.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-3", "user-4"}, + }, + { + LogType: "DATA_READ", + ExemptedMembers: []string{"user-1", "user-2"}, + }, + }, + }, + }, + expect: []*cloudresourcemanager.AuditConfig{ + { + Service: 
"iam.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + ExemptedMembers: []string{"user-1", "user-2"}, + }, + }, + }, + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-1"}, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + got := removeAllAuditConfigsWithService(tc.input, tc.service) + if !CompareAuditConfigs(got, tc.expect) { + t.Errorf("Got unexpected value for removeAllAuditConfigsWithService(%s, %s).\nActual: %s\nExpected: %s", + DebugPrintAuditConfigs(tc.input), tc.service, DebugPrintAuditConfigs(got), DebugPrintAuditConfigs(tc.expect)) + } + } +} + +func TestIamCreateIamAuditConfigsMap(t *testing.T) { + testCases := []struct { + input []*cloudresourcemanager.AuditConfig + expect map[string]map[string]map[string]struct{} + }{ + { + input: []*cloudresourcemanager.AuditConfig{}, + expect: make(map[string]map[string]map[string]struct{}), + }, + { + input: []*cloudresourcemanager.AuditConfig{ + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + }, + }, + }, + }, + expect: map[string]map[string]map[string]struct{}{ + "foo.googleapis.com": { + "ADMIN_READ": map[string]struct{}{}, + }, + }, + }, + { + input: []*cloudresourcemanager.AuditConfig{ + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + ExemptedMembers: []string{"user-1", "user-2"}, + }, + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-1"}, + }, + }, + }, + }, + expect: map[string]map[string]map[string]struct{}{ + "foo.googleapis.com": { + "ADMIN_READ": map[string]struct{}{"user-1": {}, "user-2": {}}, + "DATA_WRITE": map[string]struct{}{"user-1": {}}, + }, + }, + }, + { + input: []*cloudresourcemanager.AuditConfig{ + { + Service: "foo.googleapis.com", + AuditLogConfigs: 
[]*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + ExemptedMembers: []string{"user-1", "user-2"}, + }, + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-1"}, + }, + }, + }, + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "DATA_READ", + ExemptedMembers: []string{"user-2"}, + }, + }, + }, + }, + expect: map[string]map[string]map[string]struct{}{ + "foo.googleapis.com": { + "ADMIN_READ": map[string]struct{}{"user-1": {}, "user-2": {}}, + "DATA_WRITE": map[string]struct{}{"user-1": {}}, + "DATA_READ": map[string]struct{}{"user-2": {}}, + }, + }, + }, + { + input: []*cloudresourcemanager.AuditConfig{ + { + Service: "kms.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + }, + }, + }, + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + ExemptedMembers: []string{"user-1", "user-2"}, + }, + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-1"}, + }, + }, + }, + { + Service: "kms.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + ExemptedMembers: []string{"user-1", "user-2"}, + }, + }, + }, + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "DATA_READ", + ExemptedMembers: []string{"user-2"}, + }, + }, + }, + }, + expect: map[string]map[string]map[string]struct{}{ + "kms.googleapis.com": { + "ADMIN_READ": map[string]struct{}{"user-1": {}, "user-2": {}}, + }, + "foo.googleapis.com": { + "ADMIN_READ": map[string]struct{}{"user-1": {}, "user-2": {}}, + "DATA_WRITE": map[string]struct{}{"user-1": {}}, + "DATA_READ": map[string]struct{}{"user-2": {}}, + }, + }, + }, + } + + for _, tc := range testCases { + got := createIamAuditConfigsMap(tc.input) + if !reflect.DeepEqual(got, tc.expect) { + t.Errorf("Unexpected value for 
createIamAuditConfigsMap(%s).\nActual: %#v\nExpected: %#v\n", + DebugPrintAuditConfigs(tc.input), got, tc.expect) + } + } +} + +func TestIamListFromIamAuditConfigsMap(t *testing.T) { + testCases := []struct { + input map[string]map[string]map[string]struct{} + expect []*cloudresourcemanager.AuditConfig + }{ + { + input: make(map[string]map[string]map[string]struct{}), + expect: []*cloudresourcemanager.AuditConfig{}, + }, + { + input: map[string]map[string]map[string]struct{}{ + "foo.googleapis.com": {"ADMIN_READ": map[string]struct{}{}}, + }, + expect: []*cloudresourcemanager.AuditConfig{ + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + }, + }, + }, + }, + }, + { + input: map[string]map[string]map[string]struct{}{ + "foo.googleapis.com": { + "ADMIN_READ": map[string]struct{}{"user-1": {}, "user-2": {}}, + "DATA_WRITE": map[string]struct{}{"user-1": {}}, + "DATA_READ": map[string]struct{}{}, + }, + }, + expect: []*cloudresourcemanager.AuditConfig{ + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + ExemptedMembers: []string{"user-1", "user-2"}, + }, + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-1"}, + }, + { + LogType: "DATA_READ", + }, + }, + }, + }, + }, + { + input: map[string]map[string]map[string]struct{}{ + "kms.googleapis.com": { + "ADMIN_READ": map[string]struct{}{}, + "DATA_READ": map[string]struct{}{"user-1": {}, "user-2": {}}, + }, + "foo.googleapis.com": { + "ADMIN_READ": map[string]struct{}{"user-1": {}, "user-2": {}}, + "DATA_WRITE": map[string]struct{}{"user-1": {}}, + "DATA_READ": map[string]struct{}{"user-2": {}}, + }, + }, + expect: []*cloudresourcemanager.AuditConfig{ + { + Service: "kms.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + }, + { + LogType: "DATA_READ", + ExemptedMembers: []string{"user-1", "user-2"}, + }, + }, 
+ }, + { + Service: "foo.googleapis.com", + AuditLogConfigs: []*cloudresourcemanager.AuditLogConfig{ + { + LogType: "ADMIN_READ", + ExemptedMembers: []string{"user-1", "user-2"}, + }, + { + LogType: "DATA_WRITE", + ExemptedMembers: []string{"user-1"}, + }, + { + LogType: "DATA_READ", + ExemptedMembers: []string{"user-2"}, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + got := listFromIamAuditConfigMap(tc.input) + if !CompareAuditConfigs(got, tc.expect) { + t.Errorf("Unexpected value for listFromIamAuditConfigMap(%+v).\nActual: %s\nExpected: %s\n", + tc.input, DebugPrintAuditConfigs(got), DebugPrintAuditConfigs(tc.expect)) + } + } +} diff --git a/mmv1/third_party/terraform/tpgiamresource/iam_test.go.erb b/mmv1/third_party/terraform/tpgiamresource/iam_test.go.erb index 0a058746f456..687f64293a54 100644 --- a/mmv1/third_party/terraform/tpgiamresource/iam_test.go.erb +++ b/mmv1/third_party/terraform/tpgiamresource/iam_test.go.erb @@ -598,7 +598,7 @@ func TestIamCreateIamBindingsMap(t *testing.T) { }, }, expect: map[iamBindingKey]map[string]struct{}{ - iamBindingKey{"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, }, }, { @@ -613,7 +613,7 @@ func TestIamCreateIamBindingsMap(t *testing.T) { }, }, expect: map[iamBindingKey]map[string]struct{}{ - iamBindingKey{"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}, "user-3": {}}, + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}, "user-3": {}}, }, }, { @@ -628,8 +628,8 @@ func TestIamCreateIamBindingsMap(t *testing.T) { }, }, expect: map[iamBindingKey]map[string]struct{}{ - iamBindingKey{"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, - iamBindingKey{"role-2", conditionKey{}}: {"user-1": {}}, + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + {"role-2", conditionKey{}}: {"user-1": {}}, }, }, { @@ -656,9 +656,9 @@ func TestIamCreateIamBindingsMap(t *testing.T) { }, }, expect: 
map[iamBindingKey]map[string]struct{}{ - iamBindingKey{"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}, "user-3": {}}, - iamBindingKey{"role-2", conditionKey{}}: {"user-1": {}, "user-2": {}}, - iamBindingKey{"role-3", conditionKey{}}: {"user-3": {}}, + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}, "user-3": {}}, + {"role-2", conditionKey{}}: {"user-1": {}, "user-2": {}}, + {"role-3", conditionKey{}}: {"user-3": {}}, }, }, { @@ -689,10 +689,10 @@ func TestIamCreateIamBindingsMap(t *testing.T) { }, }, expect: map[iamBindingKey]map[string]struct{}{ - iamBindingKey{"role-1", conditionKey{}}: {"deleted:serviceAccount:user-1": {}, "user-2": {}, "serviceAccount:user-3": {}}, - iamBindingKey{"role-2", conditionKey{}}: {"deleted:user:user-1": {}, "user-2": {}}, - iamBindingKey{"role-3", conditionKey{}}: {"user-3": {}}, - iamBindingKey{"role-4", conditionKey{}}: {"deleted:principal:useR-1": {}}, + {"role-1", conditionKey{}}: {"deleted:serviceAccount:user-1": {}, "user-2": {}, "serviceAccount:user-3": {}}, + {"role-2", conditionKey{}}: {"deleted:user:user-1": {}, "user-2": {}}, + {"role-3", conditionKey{}}: {"user-3": {}}, + {"role-4", conditionKey{}}: {"deleted:principal:useR-1": {}}, }, }, { @@ -764,8 +764,8 @@ func TestIamCreateIamBindingsMap(t *testing.T) { }, }, expect: map[iamBindingKey]map[string]struct{}{ - iamBindingKey{"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, - iamBindingKey{ + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + { Role: "role-2", Condition: conditionKey{ Title: "condition-1", @@ -773,7 +773,7 @@ func TestIamCreateIamBindingsMap(t *testing.T) { Expression: "condition-1-expr", }, }: {"user-1": {}, "user-2": {}}, - iamBindingKey{ + { Role: "role-2", Condition: conditionKey{ Title: "condition-2", @@ -884,7 +884,7 @@ func TestIamListFromIamBindingMap(t *testing.T) { }, { input: map[iamBindingKey]map[string]struct{}{ - iamBindingKey{"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + {"role-1", 
conditionKey{}}: {"user-1": {}, "user-2": {}}, }, expect: []*cloudresourcemanager.Binding{ { @@ -895,8 +895,8 @@ func TestIamListFromIamBindingMap(t *testing.T) { }, { input: map[iamBindingKey]map[string]struct{}{ - iamBindingKey{"role-1", conditionKey{}}: {"user-1": {}}, - iamBindingKey{"role-2", conditionKey{}}: {"user-1": {}, "user-2": {}}, + {"role-1", conditionKey{}}: {"user-1": {}}, + {"role-2", conditionKey{}}: {"user-1": {}, "user-2": {}}, }, expect: []*cloudresourcemanager.Binding{ { @@ -911,8 +911,8 @@ func TestIamListFromIamBindingMap(t *testing.T) { }, { input: map[iamBindingKey]map[string]struct{}{ - iamBindingKey{"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, - iamBindingKey{"role-2", conditionKey{}}: {}, + {"role-1", conditionKey{}}: {"user-1": {}, "user-2": {}}, + {"role-2", conditionKey{}}: {}, }, expect: []*cloudresourcemanager.Binding{ { diff --git a/mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go.tmpl b/mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go.tmpl new file mode 100644 index 000000000000..acf087a314f0 --- /dev/null +++ b/mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go.tmpl @@ -0,0 +1,315 @@ +// Contains common diff suppress functions. 
+ +package tpgresource + +import ( + "crypto/sha256" + "log" + "encoding/hex" + "net" + "reflect" + "regexp" + "strconv" + "strings" + "time" + "bytes" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func OptionalPrefixSuppress(prefix string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + return prefix+old == new || prefix+new == old + } +} + +func IgnoreMissingKeyInMap(key string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + log.Printf("[DEBUG] - suppressing diff %q with old %q, new %q", k, old, new) + if strings.HasSuffix(k, ".%") { + oldNum, err := strconv.Atoi(old) + if err != nil { + log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", old) + return false + } + newNum, err := strconv.Atoi(new) + if err != nil { + log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", new) + return false + } + return oldNum+1 == newNum + } else if strings.HasSuffix(k, "." + key) { + return old == "" + } + return false + } +} + +func OptionalSurroundingSpacesSuppress(k, old, new string, d *schema.ResourceData) bool { + return strings.TrimSpace(old) == strings.TrimSpace(new) +} + +func EmptyOrDefaultStringSuppress(defaultVal string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) + } +} + +func EmptyOrFalseSuppressBoolean(k, old, new string, d *schema.ResourceData) bool { + o, n := d.GetChange(k) + return (o == nil && !n.(bool)) +} + +func IpCidrRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // The range may be a: + // A) single IP address (e.g. 10.2.3.4) + // B) CIDR format string (e.g. 10.1.2.0/24) + // C) netmask (e.g. /24) + // + // For A) and B), no diff to suppress, they have to match completely. 
+ // For C), The API picks a network IP address and this creates a diff of the form: + // network_interface.0.alias_ip_range.0.ip_cidr_range: "10.128.1.0/24" => "/24" + // We should only compare the mask portion for this case. + if len(new) > 0 && new[0] == '/' { + oldNetmaskStartPos := strings.LastIndex(old, "/") + + if oldNetmaskStartPos != -1 { + oldNetmask := old[strings.LastIndex(old, "/"):] + if oldNetmask == new { + return true + } + } + } + + return false +} + +// Sha256DiffSuppress +// if old is the hex-encoded sha256 sum of new, treat them as equal +func Sha256DiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return hex.EncodeToString(sha256.New().Sum([]byte(old))) == new +} + +func CaseDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return strings.ToUpper(old) == strings.ToUpper(new) +} + +// Port range '80' and '80-80' is equivalent. +// `old` is read from the server and always has the full range format (e.g. '80-80', '1024-2048'). +// `new` can be either a single port or a port range. +func PortRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return old == new+"-"+new +} + +// Single-digit hour is equivalent to hour with leading zero e.g. suppress diff 1:00 => 01:00. +// Assume either value could be in either format. +func Rfc3339TimeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if (len(old) == 4 && "0"+old == new) || (len(new) == 4 && "0"+new == old) { + return true + } + return false +} + +func EmptyOrUnsetBlockDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + o, n := d.GetChange(strings.TrimSuffix(k, ".#")) + return EmptyOrUnsetBlockDiffSuppressLogic(k, old, new, o, n) +} + +// The core logic for EmptyOrUnsetBlockDiffSuppress, in a format that is more conducive +// to unit testing. 
+func EmptyOrUnsetBlockDiffSuppressLogic(k, old, new string, o, n interface{}) bool { + if !strings.HasSuffix(k, ".#") { + return false + } + var l []interface{} + if old == "0" && new == "1" { + l = n.([]interface{}) + } else if new == "0" && old == "1" { + l = o.([]interface{}) + } else { + // we don't have one set and one unset, so don't suppress the diff + return false + } + + contents, ok := l[0].(map[string]interface{}) + if !ok { + return false + } + for _, v := range contents { + if !IsEmptyValue(reflect.ValueOf(v)) { + return false + } + } + return true +} + +// Suppress diffs for values that are equivalent except for their use of the words "location" +// compared to "region" or "zone" +func LocationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return LocationDiffSuppressHelper(old, new) || LocationDiffSuppressHelper(new, old) +} + +func LocationDiffSuppressHelper(a, b string) bool { + return strings.Replace(a, "/locations/", "/regions/", 1) == b || + strings.Replace(a, "/locations/", "/zones/", 1) == b +} + +// For managed SSL certs, if new is an absolute FQDN (trailing '.') but old isn't, treat them as equals. +func AbsoluteDomainSuppress(k, old, new string, _ *schema.ResourceData) bool { + if strings.HasPrefix(k, "managed.0.domains.") { + return old == strings.TrimRight(new, ".") || new == strings.TrimRight(old, ".") + } + return false +} + +func TimestampDiffSuppress(format string) schema.SchemaDiffSuppressFunc { + return func(_, old, new string, _ *schema.ResourceData) bool { + oldT, err := time.Parse(format, old) + if err != nil { + return false + } + + newT, err := time.Parse(format, new) + if err != nil { + return false + } + + return oldT == newT + } +} + +// Suppresses diff for IPv4 and IPv6 different formats. +// It also suppresses diffs if an IP is changing to a reference. 
+func InternalIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + addr_equality := false + netmask_equality := false + + addr_netmask_old := strings.Split(old, "/") + addr_netmask_new := strings.Split(new, "/") + + // Check if old or new are IPs (with or without netmask) + var addr_old net.IP + if net.ParseIP(addr_netmask_old[0]) == nil { + addr_old = net.ParseIP(old) + } else { + addr_old = net.ParseIP(addr_netmask_old[0]) + } + var addr_new net.IP + if net.ParseIP(addr_netmask_new[0]) == nil { + addr_new = net.ParseIP(new) + } else { + addr_new = net.ParseIP(addr_netmask_new[0]) + } + + if addr_old != nil { + if addr_new == nil { + // old is an IP and new is a reference + addr_equality = true + } else { + // old and new are IP addresses + addr_equality = bytes.Equal(addr_old, addr_new) + } + } + + // If old and new both have a netmask compare them, otherwise suppress + // This is not technically correct but prevents the permadiff described in https://github.com/hashicorp/terraform-provider-google/issues/16400 + if (len(addr_netmask_old)) == 2 && (len(addr_netmask_new) == 2) { + netmask_equality = addr_netmask_old[1] == addr_netmask_new[1] + } else { + netmask_equality = true + } + + return addr_equality && netmask_equality +} + +// Suppress diffs for duration format. 
ex "60.0s" and "60s" same +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration +func DurationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + oDuration, err := time.ParseDuration(old) + if err != nil { + return false + } + nDuration, err := time.ParseDuration(new) + if err != nil { + return false + } + return oDuration == nDuration +} + +// Use this method when the field accepts either an IP address or a +// self_link referencing a resource (such as google_compute_route's +// next_hop_ilb) +func CompareIpAddressOrSelfLinkOrResourceName(_, old, new string, _ *schema.ResourceData) bool { + // if we can parse `new` as an IP address, then compare as strings + if net.ParseIP(new) != nil { + return new == old + } + + // otherwise compare as self links + return CompareSelfLinkOrResourceName("", old, new, nil) +} + +{{ if ne $.TargetVersionName `ga` -}} +// Suppress all diffs, used for Disk.Interface which is a nonfunctional field +func AlwaysDiffSuppress(_, _, _ string, _ *schema.ResourceData) bool { + return true +} +{{- end }} + +// Use this method when subnet is optioanl and auto_create_subnetworks = true +// API sometimes choose a subnet so the diff needs to be ignored +func CompareOptionalSubnet(_, old, new string, _ *schema.ResourceData) bool { + if IsEmptyValue(reflect.ValueOf(new)) { + return true + } + // otherwise compare as self links + return CompareSelfLinkOrResourceName("", old, new, nil) +} + +// Suppress diffs in below cases +// "https://hello-rehvs75zla-uc.a.run.app/" -> "https://hello-rehvs75zla-uc.a.run.app" +// "https://hello-rehvs75zla-uc.a.run.app" -> "https://hello-rehvs75zla-uc.a.run.app/" +func LastSlashDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + if last := len(new) - 1; last >= 0 && new[last] == '/' { + new = new[:last] + } + + if last := len(old) - 1; last >= 0 && old[last] == '/' { + old = old[:last] + } + return new == old +} + +// Suppress diffs when the value read 
from api +// has the project number instead of the project name +func ProjectNumberDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + var a2, b2 string + reN := regexp.MustCompile("projects/\\d+") + re := regexp.MustCompile("projects/[^/]+") + replacement := []byte("projects/equal") + a2 = string(reN.ReplaceAll([]byte(old), replacement)) + b2 = string(re.ReplaceAll([]byte(new), replacement)) + return a2 == b2 +} + +func IsNewResource(diff TerraformResourceDiff) bool { + name := diff.Get("name") + return name.(string) == "" +} + +func CompareCryptoKeyVersions(_, old, new string, _ *schema.ResourceData) bool { + // The API can return cryptoKeyVersions even though it wasn't specified. + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + + kmsKeyWithoutVersions := strings.Split(old, "/cryptoKeyVersions")[0] + if kmsKeyWithoutVersions == new { + return true + } + + return false +} + +func CidrOrSizeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // If the user specified a size and the API returned a full cidr block, suppress. 
+ return strings.HasPrefix(new, "/") && strings.HasSuffix(old, new) +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/tpgresource/go/tpgtools_custom_flattens.go.tmpl b/mmv1/third_party/terraform/tpgresource/go/tpgtools_custom_flattens.go.tmpl new file mode 100644 index 000000000000..8c32d4eca8e3 --- /dev/null +++ b/mmv1/third_party/terraform/tpgresource/go/tpgtools_custom_flattens.go.tmpl @@ -0,0 +1,39 @@ +package tpgresource + +import ( + containeraws "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containeraws{{ $.DCLVersion }}" + containerazure "github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/containerazure{{ $.DCLVersion }}" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func FlattenContainerAwsNodePoolManagement(obj *containeraws.NodePoolManagement, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if obj == nil { + return nil + } + transformed := make(map[string]interface{}) + + if obj.AutoRepair == nil || obj.Empty() { + transformed["auto_repair"] = false + } else { + transformed["auto_repair"] = obj.AutoRepair + } + + return []interface{}{transformed} +} + +func FlattenContainerAzureNodePoolManagement(obj *containerazure.NodePoolManagement, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if obj == nil { + return nil + } + transformed := make(map[string]interface{}) + + if obj.AutoRepair == nil || obj.Empty() { + transformed["auto_repair"] = false + } else { + transformed["auto_repair"] = obj.AutoRepair + } + + return []interface{}{transformed} +} diff --git a/mmv1/third_party/terraform/transport/go/provider_handwritten_endpoint.go.tmpl b/mmv1/third_party/terraform/transport/go/provider_handwritten_endpoint.go.tmpl new file mode 100644 index 000000000000..f42ba038e969 --- /dev/null +++ 
b/mmv1/third_party/terraform/transport/go/provider_handwritten_endpoint.go.tmpl @@ -0,0 +1,131 @@ +package transport + +import ( + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +// For generated resources, endpoint entries live in product-specific provider +// files. Collect handwritten ones here. If any of these are modified, be sure +// to update the provider_reference docs page. + +var CloudBillingCustomEndpointEntryKey = "cloud_billing_custom_endpoint" +var CloudBillingCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ComposerCustomEndpointEntryKey = "composer_custom_endpoint" +var ComposerCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ContainerCustomEndpointEntryKey = "container_custom_endpoint" +var ContainerCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var DataflowCustomEndpointEntryKey = "dataflow_custom_endpoint" +var DataflowCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var IAMCustomEndpointEntryKey = "iam_custom_endpoint" +var IAMCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var IamCredentialsCustomEndpointEntryKey = "iam_credentials_custom_endpoint" +var IamCredentialsCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ResourceManagerV3CustomEndpointEntryKey = "resource_manager_v3_custom_endpoint" +var ResourceManagerV3CustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +{{ if ne $.TargetVersionName `ga` 
-}} +var RuntimeConfigCustomEndpointEntryKey = "runtimeconfig_custom_endpoint" +var RuntimeConfigCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} +{{- end }} + +var ServiceNetworkingCustomEndpointEntryKey = "service_networking_custom_endpoint" +var ServiceNetworkingCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ServiceUsageCustomEndpointEntryKey = "service_usage_custom_endpoint" +var ServiceUsageCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_SERVICE_USAGE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[ServiceUsageBasePathKey]), +} + +var BigtableAdminCustomEndpointEntryKey = "bigtable_custom_endpoint" +var BigtableAdminCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_BIGTABLE_CUSTOM_ENDPOINT", + }, DefaultBasePaths[BigtableAdminBasePathKey]), +} + +var PrivatecaCertificateTemplateEndpointEntryKey = "privateca_custom_endpoint" +var PrivatecaCertificateTemplateCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_PRIVATECA_CUSTOM_ENDPOINT", + }, DefaultBasePaths[PrivatecaBasePathKey]), +} + +var ContainerAwsCustomEndpointEntryKey = "container_aws_custom_endpoint" +var ContainerAwsCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +var ContainerAzureCustomEndpointEntryKey = "container_azure_custom_endpoint" +var ContainerAzureCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: 
ValidateCustomEndpoint, +} + +var TagsLocationCustomEndpointEntryKey = "tags_location_custom_endpoint" +var TagsLocationCustomEndpointEntry = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: ValidateCustomEndpoint, +} + +func ValidateCustomEndpoint(v interface{}, k string) (ws []string, errors []error) { + re := `.*/[^/]+/$` + return verify.ValidateRegexp(re)(v, k) +} From ee8b09af1c16bdd61212123853ee9eb22e3a39f8 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Fri, 21 Jun 2024 14:54:49 -0500 Subject: [PATCH 189/356] go rewrite - minor refresh + compute updates (#11016) --- mmv1/api/product.go | 2 - mmv1/api/resource.go | 6 +-- mmv1/api/type.go | 6 +-- mmv1/products/compute/go_Instance.yaml | 1 - .../compute/go_NetworkAttachment.yaml | 21 --------- .../go_OrganizationSecurityPolicy.yaml | 1 + ...OrganizationSecurityPolicyAssociation.yaml | 1 + .../go_OrganizationSecurityPolicyRule.yaml | 1 + mmv1/products/compute/go_Reservation.yaml | 1 + .../go/network_attachment_basic.tf.tmpl | 5 --- .../network_attachment_instance_usage.tf.tmpl | 4 -- mmv1/templates/terraform/resource.go.tmpl | 44 ++++++++++--------- .../terraform/schema_property.go.tmpl | 14 ++++-- mmv1/templates/terraform/yaml_conversion.erb | 3 ++ 14 files changed, 47 insertions(+), 63 deletions(-) diff --git a/mmv1/api/product.go b/mmv1/api/product.go index c7fa4705057a..d02a5ac424d2 100644 --- a/mmv1/api/product.go +++ b/mmv1/api/product.go @@ -69,8 +69,6 @@ type Product struct { } func (p *Product) UnmarshalYAML(n *yaml.Node) error { - p.Async = NewAsync() - type productAlias Product aliasObj := (*productAlias)(p) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 71d768a65630..b5002699a107 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -887,11 +887,11 @@ func (r Resource) HasZone() bool { // resource functions needed for template that previously existed in terraform.go but due to how files are being inherited here it was easier to put in here 
// taken wholesale from tpgtools func (r Resource) Updatable() bool { - if r.Immutable && !r.RootLabels() { - return false + if !r.Immutable { + return true } for _, p := range r.AllProperties() { - if !p.Immutable && !(p.Required && p.DefaultFromApi) { + if p.UpdateUrl != "" { return true } } diff --git a/mmv1/api/type.go b/mmv1/api/type.go index 572e58c4014e..8059e57beead 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -138,9 +138,9 @@ type Type struct { // ==================== // Array Fields // ==================== - ItemType *Type `yaml:"item_type"` - MinSize int `yaml:"min_size"` - MaxSize int `yaml:"max_size"` + ItemType *Type `yaml:"item_type"` + MinSize string `yaml:"min_size"` + MaxSize string `yaml:"max_size"` // __name ParentName string diff --git a/mmv1/products/compute/go_Instance.yaml b/mmv1/products/compute/go_Instance.yaml index 874134de2d9d..49eb97d03c47 100644 --- a/mmv1/products/compute/go_Instance.yaml +++ b/mmv1/products/compute/go_Instance.yaml @@ -510,7 +510,6 @@ properties: description: | The URL of the network attachment that this interface should connect to in the following format: projects/{projectNumber}/regions/{region_name}/networkAttachments/{network_attachment_name}. - min_version: 'beta' resource: 'networkAttachment' imports: 'selfLink' - name: 'scheduling' diff --git a/mmv1/products/compute/go_NetworkAttachment.yaml b/mmv1/products/compute/go_NetworkAttachment.yaml index 51700c03672c..0750d9be5a76 100644 --- a/mmv1/products/compute/go_NetworkAttachment.yaml +++ b/mmv1/products/compute/go_NetworkAttachment.yaml @@ -17,7 +17,6 @@ name: 'NetworkAttachment' kind: 'compute#networkAttachment' description: | A network attachment is a resource that lets a producer Virtual Private Cloud (VPC) network initiate connections to a consumer VPC network through a Private Service Connect interface. 
-min_version: 'beta' references: guides: 'Official Documentation': 'https://cloud.google.com/vpc/docs/about-network-attachments' @@ -68,13 +67,11 @@ parameters: type: String description: | Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - min_version: 'beta' required: true - name: 'region' type: ResourceRef description: | URL of the region where the network attachment resides. This field applies only to the region resource. You must specify this field as part of the HTTP request URL. It is not settable as a field in the request body. - min_version: 'beta' required: true immutable: true default_from_api: true @@ -85,39 +82,32 @@ properties: - name: 'kind' type: String description: 'Type of the resource.' - min_version: 'beta' output: true - name: 'id' type: String description: 'The unique identifier for the resource type. The server generates this identifier.' - min_version: 'beta' output: true - name: 'creationTimestamp' type: Time description: 'Creation timestamp in RFC3339 text format.' - min_version: 'beta' output: true - name: 'description' type: String description: | An optional description of this resource. Provide this property when you create the resource. - min_version: 'beta' - name: 'selfLink' type: String description: 'Server-defined URL for the resource.' - min_version: 'beta' output: true - name: 'selfLinkWithId' type: String description: | Server-defined URL for this resource's resource id. - min_version: 'beta' output: true - name: 'connectionPreference' type: Enum description: | The connection preference of service attachment. The value can be set to ACCEPT_AUTOMATIC. 
An ACCEPT_AUTOMATIC service attachment is one that always accepts the connection from consumer forwarding rules. - min_version: 'beta' required: true enum_values: - 'ACCEPT_AUTOMATIC' @@ -127,7 +117,6 @@ properties: type: Array description: | An array of connections for all the producers connected to this network attachment. - min_version: 'beta' output: true item_type: type: NestedObject @@ -136,37 +125,31 @@ properties: type: String description: | The status of a connected endpoint to this network attachment. - min_version: 'beta' output: true - name: 'projectIdOrNum' type: String description: | The project id or number of the interface to which the IP was assigned. - min_version: 'beta' output: true - name: 'subnetwork' type: String description: | The subnetwork used to assign the IP to the producer instance network interface. - min_version: 'beta' output: true - name: 'ipAddress' type: String description: | The IPv4 address assigned to the producer instance network interface. This value will be a range in case of Serverless. - min_version: 'beta' output: true - name: 'secondaryIpCidrRanges' type: String description: | Alias IP ranges from the same subnetwork. - min_version: 'beta' output: true - name: 'subnetworks' type: Array description: | An array of URLs where each entry is the URL of a subnet provided by the service consumer to use for endpoints in the producers that connect to this network attachment. - min_version: 'beta' required: true custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' item_type: @@ -180,14 +163,12 @@ properties: type: Array description: | Projects that are not allowed to connect to this network attachment. The project can be specified using its id or number. - min_version: 'beta' item_type: type: String - name: 'producerAcceptLists' type: Array description: | Projects that are allowed to connect to this network attachment. The project can be specified using its id or number. 
- min_version: 'beta' item_type: type: String - name: 'fingerprint' @@ -195,12 +176,10 @@ properties: description: | Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. An up-to-date fingerprint must be provided in order to patch. - min_version: 'beta' output: true - name: 'network' type: String description: | The URL of the network which the Network Attachment belongs to. Practically it is inferred by fetching the network of the first subnetwork associated. Because it is required that all the subnetworks must be from the same network, it is assured that the Network Attachment belongs to the same network as all the subnetworks. - min_version: 'beta' output: true diff --git a/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml b/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml index 7596cff9d73a..644b09c48415 100644 --- a/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml +++ b/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml @@ -35,6 +35,7 @@ timeouts: delete_minutes: 20 custom_code: post_create: 'templates/terraform/post_create/go/org_security_policy.go.tmpl' + post_delete: 'templates/terraform/post_delete/go/org_security_policy.go.tmpl' post_update: 'templates/terraform/post_update/go/org_security_policy.go.tmpl' test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl' examples: diff --git a/mmv1/products/compute/go_OrganizationSecurityPolicyAssociation.yaml b/mmv1/products/compute/go_OrganizationSecurityPolicyAssociation.yaml index 60f6628b6ba4..3101fac1305e 100644 --- a/mmv1/products/compute/go_OrganizationSecurityPolicyAssociation.yaml +++ b/mmv1/products/compute/go_OrganizationSecurityPolicyAssociation.yaml @@ -37,6 +37,7 @@ timeouts: delete_minutes: 20 custom_code: post_create: 'templates/terraform/post_create/go/org_security_policy_association.go.tmpl' + post_delete: 
'templates/terraform/post_create/go/org_security_policy_association.go.tmpl' test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl' read_error_transform: 'transformSecurityPolicyAssociationReadError' examples: diff --git a/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml b/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml index 39a78b1cd8e6..0b02fc55bfc2 100644 --- a/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml +++ b/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml @@ -38,6 +38,7 @@ timeouts: delete_minutes: 20 custom_code: post_create: 'templates/terraform/post_create/go/org_security_policy_rule.go.tmpl' + post_delete: 'templates/terraform/post_create/go/org_security_policy_rule.go.tmpl' post_update: 'templates/terraform/post_create/go/org_security_policy_rule.go.tmpl' test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl' examples: diff --git a/mmv1/products/compute/go_Reservation.yaml b/mmv1/products/compute/go_Reservation.yaml index 397f513e5ade..518522620377 100644 --- a/mmv1/products/compute/go_Reservation.yaml +++ b/mmv1/products/compute/go_Reservation.yaml @@ -132,6 +132,7 @@ properties: type: Enum description: | Type of sharing for this shared-reservation + immutable: true default_from_api: true enum_values: - 'LOCAL' diff --git a/mmv1/templates/terraform/examples/go/network_attachment_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_attachment_basic.tf.tmpl index 121cb0fa7483..948b3a02ba43 100644 --- a/mmv1/templates/terraform/examples/go/network_attachment_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_attachment_basic.tf.tmpl @@ -1,5 +1,4 @@ resource "google_compute_network_attachment" "default" { - provider = google-beta name = "{{index $.Vars "resource_name"}}" region = "us-central1" description = "basic network attachment description" @@ -19,13 +18,11 @@ resource 
"google_compute_network_attachment" "default" { } resource "google_compute_network" "default" { - provider = google-beta name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = false } resource "google_compute_subnetwork" "default" { - provider = google-beta name = "{{index $.Vars "subnetwork_name"}}" region = "us-central1" @@ -34,7 +31,6 @@ resource "google_compute_subnetwork" "default" { } resource "google_project" "rejected_producer_project" { - provider = google-beta project_id = "{{index $.Vars "rejected_producer_project_name"}}" name = "{{index $.Vars "rejected_producer_project_name"}}" org_id = "{{index $.TestEnvVars "org_id"}}" @@ -42,7 +38,6 @@ resource "google_project" "rejected_producer_project" { } resource "google_project" "accepted_producer_project" { - provider = google-beta project_id = "{{index $.Vars "accepted_producer_project_name"}}" name = "{{index $.Vars "accepted_producer_project_name"}}" org_id = "{{index $.TestEnvVars "org_id"}}" diff --git a/mmv1/templates/terraform/examples/go/network_attachment_instance_usage.tf.tmpl b/mmv1/templates/terraform/examples/go/network_attachment_instance_usage.tf.tmpl index 6a44bee01a67..df90307115da 100644 --- a/mmv1/templates/terraform/examples/go/network_attachment_instance_usage.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_attachment_instance_usage.tf.tmpl @@ -1,11 +1,9 @@ resource "google_compute_network" "default" { - provider = google-beta name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = false } resource "google_compute_subnetwork" "default" { - provider = google-beta name = "{{index $.Vars "subnetwork_name"}}" region = "us-central1" @@ -14,7 +12,6 @@ resource "google_compute_subnetwork" "default" { } resource "google_compute_network_attachment" "{{$.PrimaryResourceId}}" { - provider = google-beta name = "{{index $.Vars "resource_name"}}" region = "us-central1" description = "my basic network attachment" @@ -24,7 +21,6 @@ resource 
"google_compute_network_attachment" "{{$.PrimaryResourceId}}" { } resource "google_compute_instance" "default" { - provider = google-beta name = "{{index $.Vars "instance_name"}}" zone = "us-central1-a" machine_type = "e2-micro" diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 97f3863d2962..a4442d3436f0 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -70,7 +70,7 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { return &schema.Resource{ Create: resource{{ $.ResourceName -}}Create, Read: resource{{ $.ResourceName -}}Read, -{{- if $.Updatable -}} {{/* ##TODO Q2 || $.root_labels? -}} */}} +{{- if or $.Updatable $.RootLabels }} Update: resource{{ $.ResourceName -}}Update, {{- end}} Delete: resource{{ $.ResourceName -}}Delete, @@ -84,7 +84,7 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { Timeouts: &schema.ResourceTimeout { Create: schema.DefaultTimeout({{ $.Timeouts.InsertMinutes -}} * time.Minute), -{{- if $.Updatable -}} {{/* ##TODO Q2 || $.root_labels? 
-}} */}} +{{- if or $.Updatable $.RootLabels }} Update: schema.DefaultTimeout({{ $.Timeouts.UpdateMinutes -}} * time.Minute), {{- end}} Delete: schema.DefaultTimeout({{ $.Timeouts.DeleteMinutes -}} * time.Minute), @@ -137,12 +137,12 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { {{- if $.VirtualFields -}} {{- range $field := $.VirtualFields }} "{{ $field.Name -}}": { - Type: schema.{{ $field.Type -}}, + Type: {{ $field.TFType $field.Type -}}, Optional: true, -{{ if $field.Immutable -}} +{{- if $field.Immutable }} ForceNew: true, {{- end}} -{{ if $field.DefaultValue -}} +{{- if not (eq $field.DefaultValue nil) }} Default: {{ $field.GoLiteral $field.DefaultValue -}}, {{- end}} Description: `{{ replace $field.GetDescription "`" "'" -1 -}}`, @@ -182,7 +182,7 @@ func resource{{ $.ResourceName }}{{ camelize $prop.Name "upper" }}SetStyleDiff(_ {{- end}} func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{}) error { -{{- if and ($.GetAsync.IsA "OpAsync") ($.GetAsync.IncludeProject) ($.GetAsync.Allow "Create") -}} +{{- if and ($.GetAsync) (and ($.GetAsync.IsA "OpAsync") ($.GetAsync.IncludeProject) ($.GetAsync.Allow "Create")) -}} var project string {{- end}} config := meta.(*transport_tpg.Config) @@ -273,8 +273,10 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ billingProject = bp } - {{/*TODO Q2 COMPILE PRECREATE */}} headers := make(http.Header) +{{- if $.CustomCode.PreCreate }} + {{ $.CustomTemplate $.CustomCode.PreCreate false -}} +{{- end}} res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "{{ upper $.CreateVerb -}}", @@ -298,7 +300,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error creating {{ $.Name -}}: %s", err) } {{/* # Set resource properties from create API response (unless it returns an Operation) */}} -{{if not ($.GetAsync.IsA "OpAsync") }} +{{if and ($.GetAsync) (not 
($.GetAsync.IsA "OpAsync")) }} {{- range $prop := $.GettableProperties }} {{ if and ($.IsInIdentity $prop) $prop.Output }} if err := d.Set("{{ underscore $prop.Name -}}", flatten{{ if $.NestedQuery -}}Nested{{end}}{{ $.ResourceName -}}{{ camelize $prop.Name "upper" -}}(res["{{ $prop.ApiName -}}"], d, config)); err != nil { @@ -315,7 +317,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ } d.SetId(id) -{{if ($.GetAsync.Allow "Create") -}} +{{if and $.GetAsync ($.GetAsync.Allow "Create") -}} {{if ($.GetAsync.IsA "OpAsync") -}} {{if and $.GetAsync.Result.ResourceInsideResponse $.GetIdentity -}} // Use the resource in the operation response to populate @@ -397,7 +399,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{- $.CustomTemplate $.CustomCode.PostCreate false -}} {{- end}} -{{if $.GetAsync.Allow "Create" -}} +{{if and ($.GetAsync) ($.GetAsync.Allow "Create") -}} {{if $.GetAsync.IsA "PollAsync" -}} err = transport_tpg.PollingWaitTime(resource{{ $.ResourceName -}}PollRead(d, meta), {{ $.GetAsync.CheckResponseFuncExistence -}}, "Creating {{ $.Name -}}", d.Timeout(schema.TimeoutCreate), {{ $.GetAsync.TargetOccurrences -}}) if err != nil { @@ -420,7 +422,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{ end -}} } -{{if ($.GetAsync.IsA "PollAsync")}} +{{if and ($.GetAsync) ($.GetAsync.IsA "PollAsync")}} func resource{{ $.ResourceName -}}PollRead(d *schema.ResourceData, meta interface{}) transport_tpg.PollReadFunc { return func() (map[string]interface{}, error) { {{if $.GetAsync.CustomPollRead -}} @@ -604,7 +606,7 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) {{- if $.VirtualFields -}} // Explicitly set virtual fields to default values if unset {{- range $prop := $.VirtualFields }} -{{ if $prop.DefaultValue -}} +{{ if not (eq $prop.DefaultValue nil) -}} if _, ok := d.GetOkExists("{{ $prop.Name -}}"); !ok { if err := 
d.Set("{{ $prop.Name -}}", {{ $prop.DefaultValue -}}); err != nil { return fmt.Errorf("Error setting {{ $prop.Name -}}: %s", err) @@ -675,7 +677,7 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) {{if $.Updatable -}} func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{}) error { -{{- if and ($.GetAsync.IsA "OpAsync") ($.GetAsync.IncludeProject) ($.GetAsync.Allow "update") -}} +{{- if and ($.GetAsync) (and ($.GetAsync.IsA "OpAsync") ($.GetAsync.IncludeProject) ($.GetAsync.Allow "update")) -}} var project string {{- end}} config := meta.(*transport_tpg.Config) @@ -802,7 +804,7 @@ if len(updateMask) > 0 { log.Printf("[DEBUG] Finished updating {{ $.Name }} %q: %#v", d.Id(), res) } -{{ if $.GetAsync.Allow "update" -}} +{{ if and ($.GetAsync) ($.GetAsync.Allow "update") -}} {{ if $.GetAsync.IsA "OpAsync" -}} err = {{ $.ClientNamePascal -}}OperationWaitTime( config, res, {{if or $.HasProject $.GetAsync.IncludeProject -}} {{if $.LegacyLongFormProject -}}tpgresource.GetResourceNameFromSelfLink(project){{ else }}project{{ end }}, {{ end -}} "Updating {{ $.Name -}}", userAgent, @@ -992,9 +994,10 @@ func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{ // Only the root field "labels" and "terraform_labels" are mutable return resource{{ $.ResourceName -}}Read(d, meta) } -{{- end}} + +{{ end}} func resource{{ $.ResourceName }}Delete(d *schema.ResourceData, meta interface{}) error { -{{- if and (and ($.GetAsync.IsA "OpAsync") $.GetAsync.IncludeProject) ($.GetAsync.Allow "delete")}} +{{- if and ($.GetAsync) (and (and ($.GetAsync.IsA "OpAsync") $.GetAsync.IncludeProject) ($.GetAsync.Allow "delete")) }} var project string {{- end }} {{- if $.SkipDelete }} @@ -1046,7 +1049,7 @@ func resource{{ $.ResourceName }}Delete(d *schema.ResourceData, meta interface{} {{/*Keep this after mutex - patch request data relies on current resource state*/}} obj, err = resource{{ $.ResourceName 
}}PatchDeleteEncoder(d, meta, obj) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, "{{ $.ResourceName }}") + return transport_tpg.HandleNotFoundError(err, d, "{{ $.Name }}") } {{- if $.UpdateMask }} url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "{{- join $.NestedQuery.Keys "," -}}"}) @@ -1091,7 +1094,7 @@ func resource{{ $.ResourceName }}Delete(d *schema.ResourceData, meta interface{} if err != nil { return transport_tpg.HandleNotFoundError(err, d, "{{ $.Name }}") } - {{ if $.GetAsync.Allow "Delete" -}} + {{ if and $.GetAsync ($.GetAsync.Allow "Delete") -}} {{ if $.GetAsync.IsA "PollAsync" }} err = transport_tpg.PollingWaitTime(resource{{ $.ResourceName }}PollRead(d, meta), {{ $.GetAsync.CheckResponseFuncAbsence }}, "Deleting {{ $.Name }}", d.Timeout(schema.TimeoutCreate), {{ $.Async.TargetOccurrences }}) if err != nil { @@ -1141,10 +1144,11 @@ func resource{{ $.ResourceName }}Import(d *schema.ResourceData, meta interface{} return nil, fmt.Errorf("Error constructing id: %s", err) } d.SetId(id) - {{- if $.VirtualFields -}} + {{ if $.VirtualFields }} + // Explicitly set virtual fields to default values on import {{- range $vf := $.VirtualFields }} - {{- if $vf.DefaultValue }} + {{- if not (eq $vf.DefaultValue nil) }} if err := d.Set("{{ $vf.Name }}", {{ $vf.DefaultValue }}); err != nil { return nil, fmt.Errorf("Error setting {{ $vf.Name }}: %s", err) } diff --git a/mmv1/templates/terraform/schema_property.go.tmpl b/mmv1/templates/terraform/schema_property.go.tmpl index 17dad86dc26c..44f21449b2c7 100644 --- a/mmv1/templates/terraform/schema_property.go.tmpl +++ b/mmv1/templates/terraform/schema_property.go.tmpl @@ -66,7 +66,7 @@ {{- if .ItemType.DefaultValue -}} Default value: {{ .ItemType.DefaultValue -}} {{- end -}} -Possible values: [{{- .EnumValuesToString "\"" false -}}] +{{- " "}}Possible values: [{{- .ItemType.EnumValuesToString "\"" false -}}] {{- else if and (eq .Type "Enum") (not .Output) -}} {{- if 
.DefaultValue -}} {{- " "}}Default value: "{{ .DefaultValue -}}" @@ -109,14 +109,20 @@ Possible values: [{{- .EnumValuesToString "\"" false -}}] }, {{ else if eq .ItemType.Type "Enum" -}} Elem: &schema.Schema{ - Type: schema.Type{{ .ItemTypeClass -}}, - ValidateFunc: verify.ValidateEnum([]string{ {{- .ItemType.EnumValuesToString "\"" true -}} }), + Type: schema.TypeString, + {{- if not .Output }} + ValidateFunc: verify.ValidateEnum([]string{ {{- .ItemType.EnumValuesToString "\"" false -}} }), + {{- end }} }, {{ else -}} Elem: &schema.Schema{ - Type: schema.Type{{ .ItemTypeClass -}}, {{ if eq .ItemType.Type "ResourceRef" -}} + Type: schema.TypeString, + {{- if not .Output }} DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + {{- end }} + {{ else -}} + Type: schema.Type{{ .ItemTypeClass -}}, {{ end -}} }, {{ end -}} diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 590a2de8cc22..a04c27fadbbb 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -367,6 +367,9 @@ custom_code: <% unless object.custom_code.post_create.nil? -%> post_create: '<%= object.convert_go_file( object.custom_code.post_create )%>' <% end -%> +<% unless object.custom_code.post_delete.nil? -%> + post_delete: '<%= object.convert_go_file( object.custom_code.post_delete )%>' +<% end -%> <% unless object.custom_code.custom_create.nil? 
-%> custom_create: '<%= object.convert_go_file( object.custom_code.custom_create )%>' <% end -%> From c7555af50997905afbc34b536e5a24d9a27de781 Mon Sep 17 00:00:00 2001 From: Iris Chen <10179943+iyabchen@users.noreply.github.com> Date: Fri, 21 Jun 2024 13:55:06 -0700 Subject: [PATCH 190/356] chore(ci): refactor vcr comment into go template (#10879) --- .ci/magician/cmd/test_terraform_vcr.go | 256 ++++++----- ...est_terraform_vcr_non_exercised_tests.tmpl | 13 + .../cmd/test_terraform_vcr_record_replay.tmpl | 31 ++ .ci/magician/cmd/test_terraform_vcr_test.go | 402 ++++++++++++++++++ .../test_terraform_vcr_test_analytics.tmpl | 20 + ...erraform_vcr_with_replay_failed_tests.tmpl | 12 + ...aform_vcr_without_replay_failed_tests.tmpl | 7 + 7 files changed, 634 insertions(+), 107 deletions(-) create mode 100644 .ci/magician/cmd/test_terraform_vcr_non_exercised_tests.tmpl create mode 100644 .ci/magician/cmd/test_terraform_vcr_record_replay.tmpl create mode 100644 .ci/magician/cmd/test_terraform_vcr_test_analytics.tmpl create mode 100644 .ci/magician/cmd/test_terraform_vcr_with_replay_failed_tests.tmpl create mode 100644 .ci/magician/cmd/test_terraform_vcr_without_replay_failed_tests.tmpl diff --git a/.ci/magician/cmd/test_terraform_vcr.go b/.ci/magician/cmd/test_terraform_vcr.go index 9bc4d850e996..c9d9c8df526d 100644 --- a/.ci/magician/cmd/test_terraform_vcr.go +++ b/.ci/magician/cmd/test_terraform_vcr.go @@ -7,6 +7,7 @@ import ( "regexp" "sort" "strings" + "text/template" "github.com/spf13/cobra" @@ -15,6 +16,21 @@ import ( "magician/provider" "magician/source" "magician/vcr" + + _ "embed" +) + +var ( + //go:embed test_terraform_vcr_test_analytics.tmpl + testsAnalyticsTmplText string + //go:embed test_terraform_vcr_non_exercised_tests.tmpl + nonExercisedTestsTmplText string + //go:embed test_terraform_vcr_with_replay_failed_tests.tmpl + withReplayFailedTestsTmplText string + //go:embed test_terraform_vcr_without_replay_failed_tests.tmpl + withoutReplayFailedTestsTmplText 
string + //go:embed test_terraform_vcr_record_replay.tmpl + recordReplayTmplText string ) var ttvEnvironmentVariables = [...]string{ @@ -40,6 +56,37 @@ var ttvEnvironmentVariables = [...]string{ "USER", } +type analytics struct { + ReplayingResult *vcr.Result + RunFullVCR bool + AffectedServices []string +} + +type nonExercisedTests struct { + NotRunBetaTests []string + NotRunGATests []string +} + +type withReplayFailedTests struct { + ReplayingResult *vcr.Result +} + +type withoutReplayFailedTests struct { + ReplayingErr error + PRNumber string + BuildID string +} + +type recordReplay struct { + RecordingResult *vcr.Result + ReplayingAfterRecordingResult *vcr.Result + HasTerminatedTests bool + RecordingErr error + AllRecordingPassed bool + PRNumber string + BuildID string +} + var testTerraformVCRCmd = &cobra.Command{ Use: "test-terraform-vcr", Short: "Run vcr tests for affected packages", @@ -143,7 +190,7 @@ func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, return fmt.Errorf("error posting pending status: %w", err) } - replayingResult, affectedServicesComment, testDirs, replayingErr := runReplaying(runFullVCR, services, vt) + replayingResult, testDirs, replayingErr := runReplaying(runFullVCR, services, vt) testState := "success" if replayingErr != nil { testState = "failure" @@ -159,55 +206,41 @@ func execTestTerraformVCR(prNumber, mmCommitSha, buildID, projectID, buildStep, return nil } - failedTestsPattern := strings.Join(replayingResult.FailedTests, "|") - - comment := `#### Tests analytics -Total tests: ` + fmt.Sprintf("`%d`", len(replayingResult.PassedTests)+len(replayingResult.SkippedTests)+len(replayingResult.FailedTests)) + ` -Passed tests: ` + fmt.Sprintf("`%d`", len(replayingResult.PassedTests)) + ` -Skipped tests: ` + fmt.Sprintf("`%d`", len(replayingResult.SkippedTests)) + ` -Affected tests: ` + fmt.Sprintf("`%d`", len(replayingResult.FailedTests)) + ` - -
Click here to see the affected service packages
` + affectedServicesComment + `
` + var servicesArr []string + for s := range services { + servicesArr = append(servicesArr, s) + } + analyticsData := analytics{ + ReplayingResult: replayingResult, + RunFullVCR: runFullVCR, + AffectedServices: sort.StringSlice(servicesArr), + } + testsAnalyticsComment, err := formatTestsAnalytics(analyticsData) + if err != nil { + return fmt.Errorf("error formatting test_analytics comment: %w", err) + } notRunBeta, notRunGa := notRunTests(tpgRepo.UnifiedZeroDiff, tpgbRepo.UnifiedZeroDiff, replayingResult) - if len(notRunBeta) > 0 || len(notRunGa) > 0 { - comment += ` - -#### Non-exercised tests` - - if len(notRunBeta) > 0 { - comment += ` - -Tests were added that are skipped in VCR: -` - for _, t := range notRunBeta { - comment += ` -- ` + t - } - } - - if len(notRunGa) > 0 { - comment += ` - -Tests were added that are GA-only additions and require manual runs: -` - for _, t := range notRunGa { - comment += ` -- ` + t - } - } + nonExercisedTestsData := nonExercisedTests{ + NotRunBetaTests: notRunBeta, + NotRunGATests: notRunGa, + } + nonExercisedTestsComment, err := formatNonExercisedTests(nonExercisedTestsData) + if err != nil { + return fmt.Errorf("error formatting non exercised tests comment: %w", err) } if len(replayingResult.FailedTests) > 0 { - comment += fmt.Sprintf(` - - -#### Action taken -
Found %d affected test(s) by replaying old test recordings. Starting RECORDING based on the most recent commit. Click here to see the affected tests
%s
- -[Get to know how VCR tests work](https://googlecloudplatform.github.io/magic-modules/docs/getting-started/contributing/#general-contributing-steps)`, len(replayingResult.FailedTests), failedTestsPattern) + withReplayFailedTestsData := withReplayFailedTests{ + ReplayingResult: replayingResult, + } + withReplayFailedTestsComment, err := formatWithReplayFailedTests(withReplayFailedTestsData) + if err != nil { + return fmt.Errorf("error formatting action taken comment: %w", err) + } + comment := strings.Join([]string{testsAnalyticsComment, nonExercisedTestsComment, withReplayFailedTestsComment}, "\n") if err := gh.PostComment(prNumber, comment); err != nil { return fmt.Errorf("error posting comment: %w", err) } @@ -233,15 +266,10 @@ Tests were added that are GA-only additions and require manual runs: return nil } - comment = "" + var replayingAfterRecordingResult *vcr.Result + var replayingAfterRecordingErr error if len(recordingResult.PassedTests) > 0 { - comment += "$\\textcolor{green}{\\textsf{Tests passed during RECORDING mode:}}$\n" - for _, passedTest := range recordingResult.PassedTests { - comment += fmt.Sprintf("`%s`[[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-%s/artifacts/%s/recording/%s.log)]\n", passedTest, prNumber, buildID, passedTest) - } - comment += "\n\n" - - replayingAfterRecordingResult, replayingAfterRecordingErr := vt.RunParallel(vcr.Replaying, provider.Beta, testDirs, recordingResult.PassedTests) + replayingAfterRecordingResult, replayingAfterRecordingErr = vt.RunParallel(vcr.Replaying, provider.Beta, testDirs, recordingResult.PassedTests) if replayingAfterRecordingErr != nil { testState = "failure" } @@ -249,61 +277,43 @@ Tests were added that are GA-only additions and require manual runs: if err := vt.UploadLogs("ci-vcr-logs", prNumber, buildID, true, true, vcr.Replaying, provider.Beta); err != nil { return fmt.Errorf("error uploading recording logs: %w", err) } - - if 
len(replayingAfterRecordingResult.FailedTests) > 0 { - comment += "$\\textcolor{red}{\\textsf{Tests failed when rerunning REPLAYING mode:}}$\n" - for _, failedTest := range replayingAfterRecordingResult.FailedTests { - comment += fmt.Sprintf("`%s`[[Error message](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-%s/artifacts/%s/build-log/replaying_build_after_recording/%s_replaying_test.log)] [[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-%s/artifacts/%s/replaying_after_recording/%s.log)]\n", failedTest, prNumber, buildID, failedTest, prNumber, buildID, failedTest) - } - comment += "\n\n" - comment += `Tests failed due to non-determinism or randomness when the VCR replayed the response after the HTTP request was made. - -Please fix these to complete your PR. If you believe these test failures to be incorrect or unrelated to your change, or if you have any questions, please raise the concern with your reviewer. -` - } else { - comment += "$\\textcolor{green}{\\textsf{No issues found for passed tests after REPLAYING rerun.}}$\n" - } - comment += "\n---\n" - } - if len(recordingResult.FailedTests) > 0 { - comment += "$\\textcolor{red}{\\textsf{Tests failed during RECORDING mode:}}$\n" - for _, failedTest := range recordingResult.FailedTests { - comment += fmt.Sprintf("`%s`[[Error message](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-%s/artifacts/%s/build-log/recording_build/%s_recording_test.log)] [[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-%s/artifacts/%s/recording/%s.log)]\n", failedTest, prNumber, buildID, failedTest, prNumber, buildID, failedTest) - } - comment += "\n\n" - if len(recordingResult.PassedTests)+len(recordingResult.FailedTests) < len(replayingResult.FailedTests) { - comment += "$\\textcolor{red}{\\textsf{Several tests got terminated during RECORDING mode.}}$\n" - } - comment += "$\\textcolor{red}{\\textsf{Please fix these to complete 
your PR.}}$\n" - } else { - if len(recordingResult.PassedTests)+len(recordingResult.FailedTests) < len(replayingResult.FailedTests) { - comment += "$\\textcolor{red}{\\textsf{Several tests got terminated during RECORDING mode.}}$\n" - } else if recordingErr != nil { - // Check for any uncaught errors in RECORDING mode. - comment += "$\\textcolor{red}{\\textsf{Errors occurred during RECORDING mode. Please fix them to complete your PR.}}$\n" - } else { - comment += "$\\textcolor{green}{\\textsf{All tests passed!}}$\n" - } + hasTerminatedTests := (len(recordingResult.PassedTests) + len(recordingResult.FailedTests)) < len(replayingResult.FailedTests) + allRecordingPassed := len(recordingResult.FailedTests) == 0 && !hasTerminatedTests && recordingErr == nil + + recordReplayData := recordReplay{ + RecordingResult: recordingResult, + ReplayingAfterRecordingResult: replayingAfterRecordingResult, + RecordingErr: recordingErr, + HasTerminatedTests: hasTerminatedTests, + AllRecordingPassed: allRecordingPassed, + PRNumber: prNumber, + BuildID: buildID, + } + recordReplayComment, err := formatRecordReplay(recordReplayData) + if err != nil { + return fmt.Errorf("error formatting record replay comment: %w", err) + } + if err := gh.PostComment(prNumber, recordReplayComment); err != nil { + return fmt.Errorf("error posting comment: %w", err) } - comment += fmt.Sprintf("View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-%s/artifacts/%s/build-log/recording_test.log) or the [debug log](https://console.cloud.google.com/storage/browser/ci-vcr-logs/beta/refs/heads/auto-pr-%s/artifacts/%s/recording) for each test", prNumber, buildID, prNumber, buildID) - } else { - // Add newlines so that the color formatting will work properly. 
- comment += ` + } else { // len(replayingResult.FailedTests) == 0 + withoutReplayFailedTestsData := withoutReplayFailedTests{ + ReplayingErr: replayingErr, + PRNumber: prNumber, + BuildID: buildID, + } + withoutReplayFailedTestsComment, err := formatWithoutReplayFailedTests(withoutReplayFailedTestsData) + if err != nil { + return fmt.Errorf("error formatting action taken comment: %w", err) + } -` - if replayingErr != nil { - // Check for any uncaught errors in REPLAYING mode. - comment += "$\\textcolor{red}{\\textsf{Errors occurred during REPLAYING mode. Please fix them to complete your PR.}}$\n" - } else { - comment += "$\\textcolor{green}{\\textsf{All tests passed!}}$\n" + comment := strings.Join([]string{testsAnalyticsComment, nonExercisedTestsComment, withoutReplayFailedTestsComment}, "\n") + if err := gh.PostComment(prNumber, comment); err != nil { + return fmt.Errorf("error posting comment: %w", err) } - comment += fmt.Sprintf("View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-%s/artifacts/%s/build-log/replaying_test.log)", prNumber, buildID) - } - if err := gh.PostComment(prNumber, comment); err != nil { - return fmt.Errorf("error posting comment: %w", err) } if err := gh.PostBuildStatus(prNumber, "VCR-test", testState, buildStatusTargetURL, mmCommitSha); err != nil { @@ -379,17 +389,14 @@ func modifiedPackages(changedFiles []string) (map[string]struct{}, bool) { return services, runFullVCR } -func runReplaying(runFullVCR bool, services map[string]struct{}, vt *vcr.Tester) (*vcr.Result, string, []string, error) { +func runReplaying(runFullVCR bool, services map[string]struct{}, vt *vcr.Tester) (*vcr.Result, []string, error) { var result *vcr.Result - affectedServicesComment := "None" var testDirs []string var replayingErr error if runFullVCR { fmt.Println("run full VCR tests") - affectedServicesComment = "all service packages are affected" result, replayingErr = vt.Run(vcr.Replaying, provider.Beta, nil) } else if 
len(services) > 0 { - affectedServicesComment = "
    " result = &vcr.Result{} for service := range services { servicePath := "./" + filepath.Join("google-beta", "services", service) @@ -403,12 +410,10 @@ func runReplaying(runFullVCR bool, services map[string]struct{}, vt *vcr.Tester) result.SkippedTests = append(result.SkippedTests, serviceResult.SkippedTests...) result.FailedTests = append(result.FailedTests, serviceResult.FailedTests...) result.Panics = append(result.Panics, serviceResult.Panics...) - affectedServicesComment += fmt.Sprintf("
  • %s
  • ", service) } - affectedServicesComment += "
" } - return result, affectedServicesComment, testDirs, replayingErr + return result, testDirs, replayingErr } func handlePanics(prNumber, buildID, buildStatusTargetURL, mmCommitSha string, result *vcr.Result, mode vcr.Mode, gh GithubClient) (bool, error) { @@ -430,3 +435,40 @@ View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/head func init() { rootCmd.AddCommand(testTerraformVCRCmd) } + +func formatComment(fileName string, tmplText string, data any) (string, error) { + funcs := template.FuncMap{ + "join": strings.Join, + "add": func(i, j int) int { return i + j }, + } + tmpl, err := template.New(fileName).Funcs(funcs).Parse(tmplText) + if err != nil { + panic(fmt.Sprintf("Unable to parse %s: %s", fileName, err)) + } + sb := new(strings.Builder) + err = tmpl.Execute(sb, data) + if err != nil { + return "", err + } + return strings.TrimSpace(sb.String()), nil +} + +func formatTestsAnalytics(data analytics) (string, error) { + return formatComment("test_terraform_vcr_test_analytics.tmpl", testsAnalyticsTmplText, data) +} + +func formatNonExercisedTests(data nonExercisedTests) (string, error) { + return formatComment("test_terraform_vcr_recording_mode_results.tmpl", nonExercisedTestsTmplText, data) +} + +func formatWithReplayFailedTests(data withReplayFailedTests) (string, error) { + return formatComment("test_terraform_vcr_with_replay_failed_tests.tmpl", withReplayFailedTestsTmplText, data) +} + +func formatWithoutReplayFailedTests(data withoutReplayFailedTests) (string, error) { + return formatComment("test_terraform_vcr_without_replay_failed_tests.tmpl", withoutReplayFailedTestsTmplText, data) +} + +func formatRecordReplay(data recordReplay) (string, error) { + return formatComment("test_terraform_vcr_record_replay.tmpl", recordReplayTmplText, data) +} diff --git a/.ci/magician/cmd/test_terraform_vcr_non_exercised_tests.tmpl b/.ci/magician/cmd/test_terraform_vcr_non_exercised_tests.tmpl new file mode 100644 index 
000000000000..b3a6a21c02c5 --- /dev/null +++ b/.ci/magician/cmd/test_terraform_vcr_non_exercised_tests.tmpl @@ -0,0 +1,13 @@ +{{- if or (gt (len .NotRunBetaTests) 0) (gt (len .NotRunGATests) 0) -}} +#### Non-exercised tests + +{{if gt (len .NotRunBetaTests) 0 -}} +Tests were added that are skipped in VCR: +{{range .NotRunBetaTests}}{{. | printf "- %s\n"}}{{end}} +{{end}} + +{{if gt (len .NotRunGATests) 0 -}} +Tests were added that are GA-only additions and require manual runs: +{{range .NotRunGATests}}{{. | printf "- %s\n"}}{{end}} +{{end}} +{{end}} diff --git a/.ci/magician/cmd/test_terraform_vcr_record_replay.tmpl b/.ci/magician/cmd/test_terraform_vcr_record_replay.tmpl new file mode 100644 index 000000000000..d5c7535798f8 --- /dev/null +++ b/.ci/magician/cmd/test_terraform_vcr_record_replay.tmpl @@ -0,0 +1,31 @@ +{{- if gt (len .RecordingResult.PassedTests) 0 -}} +$\textcolor{green}{\textsf{Tests passed during RECORDING mode:}}$ +{{range .RecordingResult.PassedTests}}`{{.}}`[[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-{{$.PRNumber}}/artifacts/{{$.BuildID}}/recording/{{.}}.log)] +{{end}} + +{{- if gt (len .ReplayingAfterRecordingResult.FailedTests ) 0 -}} +$\textcolor{red}{\textsf{Tests failed when rerunning REPLAYING mode:}}$ +{{range .ReplayingAfterRecordingResult.FailedTests}}`{{.}}`[[Error message](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-{{$.PRNumber}}/artifacts/{{$.BuildID}}/build-log/replaying_build_after_recording/{{.}}_replaying_test.log)] [[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-{{$.PRNumber}}/artifacts/{{$.BuildID}}/replaying_after_recording/{{.}}.log)] +{{end}} + +Tests failed due to non-determinism or randomness when the VCR replayed the response after the HTTP request was made. + +Please fix these to complete your PR. 
If you believe these test failures to be incorrect or unrelated to your change, or if you have any questions, please raise the concern with your reviewer. + +{{else}} +$\textcolor{green}{\textsf{No issues found for passed tests after REPLAYING rerun.}}$ +{{end}}{{/* end of if gt (len .ReplayingAfterRecordingResult.FailedTests ) 0 */}} +--- +{{end}}{{/* end of if gt (len .RecordingResult.PassedTests) 0 */}} + +{{if gt (len .RecordingResult.FailedTests) 0 -}} +$\textcolor{red}{\textsf{Tests failed during RECORDING mode:}}$ +{{range .RecordingResult.FailedTests}}`{{.}}`[[Error message](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-{{$.PRNumber}}/artifacts/{{$.BuildID}}/build-log/recording_build/{{.}}_recording_test.log)] [[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-{{$.PRNumber}}/artifacts/{{$.BuildID}}/recording/{{.}}.log)] +{{end}} +{{end}} {{- /* end of if gt (len .RecordingResult.FailedTests) 0 */ -}} + +{{if .HasTerminatedTests}}$\textcolor{red}{\textsf{Several tests got terminated during RECORDING mode.}}${{end}} +{{if .RecordingErr}}$\textcolor{red}{\textsf{Errors occurred during RECORDING mode. 
Please fix them to complete your PR.}}${{end}} +{{if .AllRecordingPassed}}$\textcolor{green}{\textsf{All tests passed!}}${{end}} + +View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-{{.PRNumber}}/artifacts/{{.BuildID}}/build-log/recording_test.log) or the [debug log](https://console.cloud.google.com/storage/browser/ci-vcr-logs/beta/refs/heads/auto-pr-{{.PRNumber}}/artifacts/{{.BuildID}}/recording) for each test diff --git a/.ci/magician/cmd/test_terraform_vcr_test.go b/.ci/magician/cmd/test_terraform_vcr_test.go index feb140fff4b0..773c28c7e2ef 100644 --- a/.ci/magician/cmd/test_terraform_vcr_test.go +++ b/.ci/magician/cmd/test_terraform_vcr_test.go @@ -1,9 +1,12 @@ package cmd import ( + "fmt" "reflect" + "strings" "testing" + "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "magician/vcr" @@ -213,3 +216,402 @@ func TestNotRunTests(t *testing.T) { }) } } + +func TestAnalyticsComment(t *testing.T) { + tests := []struct { + name string + data analytics + want string + }{ + { + name: "run full vcr is false and no affected services", + data: analytics{ + ReplayingResult: &vcr.Result{ + PassedTests: []string{"a", "b", "c"}, + SkippedTests: []string{"d", "e"}, + FailedTests: []string{"f"}, + }, + RunFullVCR: false, + AffectedServices: []string{}, + }, + want: strings.Join( + []string{ + "#### Tests analytics", + "Total tests: 7", + "Passed tests: 3", + "Skipped tests: 2", + "Affected tests: 1", + "", + "
", + "Click here to see the affected service packages", + "
", + "", + "None", + "", + "
", + "
", + }, + "\n", + ), + }, + { + name: "run full vcr is false and has affected services", + data: analytics{ + ReplayingResult: &vcr.Result{ + PassedTests: []string{"a", "b", "c"}, + SkippedTests: []string{"d", "e"}, + FailedTests: []string{"f"}, + }, + RunFullVCR: false, + AffectedServices: []string{"svc-a", "svc-b"}, + }, + want: strings.Join( + []string{ + "#### Tests analytics", + "Total tests: 7", + "Passed tests: 3", + "Skipped tests: 2", + "Affected tests: 1", + "", + "
", + "Click here to see the affected service packages", + "
", + "", + "
    ", + "
  • svc-a
  • ", + "
  • svc-b
  • ", + "", + "
", + "", + "
", + "
", + }, + "\n", + ), + }, + { + name: "run full vcr is true", + data: analytics{ + ReplayingResult: &vcr.Result{ + PassedTests: []string{"a", "b", "c"}, + SkippedTests: []string{"d", "e"}, + FailedTests: []string{"f"}, + }, + RunFullVCR: true, + AffectedServices: []string{}, + }, + want: strings.Join([]string{ + "#### Tests analytics", + "Total tests: 7", + "Passed tests: 3", + "Skipped tests: 2", + "Affected tests: 1", + "", + "
", + "Click here to see the affected service packages", + "
", + "", + "All service packages are affected", + "", + "
", + "
", + }, + "\n", + ), + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got, err := formatTestsAnalytics(tc.data) + if err != nil { + t.Fatalf("Failed to format comment: %v", err) + } + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("formatTestsAnalytics() returned unexpected difference (-want +got):\n%s", diff) + } + }) + } +} + +func TestNonExercisedTestsComment(t *testing.T) { + tests := []struct { + name string + data nonExercisedTests + want string + }{ + { + name: "without non exercised tests", + data: nonExercisedTests{}, + want: strings.Join( + []string{}, + "\n", + ), + }, + { + name: "with not run beta tests", + data: nonExercisedTests{ + NotRunBetaTests: []string{"beta-1", "beta-2"}, + }, + want: strings.Join( + []string{ + "#### Non-exercised tests", + "", + "Tests were added that are skipped in VCR:", + "- beta-1", + "- beta-2", + }, + "\n", + ), + }, + { + name: "with not run ga tests", + data: nonExercisedTests{ + NotRunGATests: []string{"ga-1", "ga-2"}, + }, + want: strings.Join( + []string{ + "#### Non-exercised tests", + "", + "", + "", + "Tests were added that are GA-only additions and require manual runs:", + "- ga-1", + "- ga-2", + }, + "\n", + ), + }, + { + name: "with not run ga tests and not run beta tests", + data: nonExercisedTests{ + NotRunGATests: []string{"ga-1", "ga-2"}, + NotRunBetaTests: []string{"beta-1", "beta-2"}, + }, + want: strings.Join( + []string{ + "#### Non-exercised tests", + "", + "Tests were added that are skipped in VCR:", + "- beta-1", + "- beta-2", + "", + "", + "", + "Tests were added that are GA-only additions and require manual runs:", + "- ga-1", + "- ga-2", + }, + "\n", + ), + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got, err := formatNonExercisedTests(tc.data) + if err != nil { + t.Fatalf("Failed to format comment: %v", err) + } + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("formatNonExercisedTests() returned unexpected 
difference (-want +got):\n%s", diff) + } + }) + } +} + +func TestWithReplayFailedTests(t *testing.T) { + tests := []struct { + name string + data withReplayFailedTests + want string + }{ + { + name: "with failed tests", + data: withReplayFailedTests{ + ReplayingResult: &vcr.Result{ + FailedTests: []string{"a", "b"}, + }, + }, + want: strings.Join( + []string{ + "#### Action taken", + "
", + "Found 2 affected test(s) by replaying old test recordings. Starting RECORDING based on the most recent commit. Click here to see the affected tests", + "", + "
", + "
    ", + "
  • a
  • ", + "
  • b
  • ", + "", // Empty line + "
", + "
", + "
", + "", + "[Get to know how VCR tests work](https://googlecloudplatform.github.io/magic-modules/docs/getting-started/contributing/#general-contributing-steps)", + }, + "\n", + ), + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got, err := formatWithReplayFailedTests(tc.data) + if err != nil { + t.Fatalf("Failed to format comment: %v", err) + } + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("formatWithReplayFailedTests() returned unexpected difference (-want +got):\n%s", diff) + } + }) + } +} + +func TestWithoutReplayFailedTests(t *testing.T) { + tests := []struct { + name string + data withoutReplayFailedTests + want string + }{ + { + name: "with replay error", + data: withoutReplayFailedTests{ + ReplayingErr: fmt.Errorf("some error"), + BuildID: "build-123", + PRNumber: "pr-123", + }, + want: strings.Join( + []string{ + "$\\textcolor{red}{\\textsf{Errors occurred during REPLAYING mode. Please fix them to complete your PR.}}$", + "", + "View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/build-log/replaying_test.log)", + }, + "\n", + ), + }, + { + name: "without replay error", + data: withoutReplayFailedTests{ + BuildID: "build-123", + PRNumber: "pr-123", + }, + want: strings.Join( + []string{ + "$\\textcolor{green}{\\textsf{All tests passed!}}$", + "", + "View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/build-log/replaying_test.log)", + }, + "\n", + ), + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got, err := formatWithoutReplayFailedTests(tc.data) + if err != nil { + t.Fatalf("Failed to format comment: %v", err) + } + if diff := cmp.Diff(tc.want, got); diff != "" { + t.Errorf("formatWithoutReplayFailedTests() returned unexpected difference (-want +got):\n%s", diff) + } + }) + } +} + +func TestRecordReplay(t *testing.T) { + tests := []struct { + name string 
+ data recordReplay + want string + }{ + { + name: "ReplayingAfterRecordingResult has failed tests", + data: recordReplay{ + RecordingResult: &vcr.Result{ + PassedTests: []string{"a", "b", "c"}, + FailedTests: []string{"d", "e"}, + }, + ReplayingAfterRecordingResult: &vcr.Result{ + PassedTests: []string{"a"}, + FailedTests: []string{"b", "c"}, + }, + HasTerminatedTests: true, + RecordingErr: fmt.Errorf("some error"), + BuildID: "build-123", + PRNumber: "pr-123", + }, + want: strings.Join( + []string{ + "$\\textcolor{green}{\\textsf{Tests passed during RECORDING mode:}}$", "`a`[[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording/a.log)]", + "`b`[[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording/b.log)]", + "`c`[[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording/c.log)]", + "$\\textcolor{red}{\\textsf{Tests failed when rerunning REPLAYING mode:}}$", + "`b`[[Error message](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/build-log/replaying_build_after_recording/b_replaying_test.log)] [[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/replaying_after_recording/b.log)]", + "`c`[[Error message](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/build-log/replaying_build_after_recording/c_replaying_test.log)] [[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/replaying_after_recording/c.log)]", + "", + "", + "Tests failed due to non-determinism or randomness when the VCR replayed the response after the HTTP request was made.", + "", + "Please fix these to complete your PR. 
If you believe these test failures to be incorrect or unrelated to your change, or if you have any questions, please raise the concern with your reviewer.", + "", + "", + "---", + "", + "", + "$\\textcolor{red}{\\textsf{Tests failed during RECORDING mode:}}$", + "`d`[[Error message](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/build-log/recording_build/d_recording_test.log)] [[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording/d.log)]", + "`e`[[Error message](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/build-log/recording_build/e_recording_test.log)] [[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording/e.log)]", + "", + "$\\textcolor{red}{\\textsf{Several tests got terminated during RECORDING mode.}}$", + "$\\textcolor{red}{\\textsf{Errors occurred during RECORDING mode. 
Please fix them to complete your PR.}}$", + "", + "", + "View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/build-log/recording_test.log) or the [debug log](https://console.cloud.google.com/storage/browser/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording) for each test", + }, + "\n", + ), + }, + { + name: "ReplayingAfterRecordingResult does not have failed tests", + data: recordReplay{ + RecordingResult: &vcr.Result{ + PassedTests: []string{"a", "b", "c"}, + }, + ReplayingAfterRecordingResult: &vcr.Result{ + PassedTests: []string{"a", "b", "c"}, + }, + AllRecordingPassed: true, + BuildID: "build-123", + PRNumber: "pr-123", + }, + want: strings.Join( + []string{ + "$\\textcolor{green}{\\textsf{Tests passed during RECORDING mode:}}$", "`a`[[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording/a.log)]", + "`b`[[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording/b.log)]", + "`c`[[Debug log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording/c.log)]", + "", + "$\\textcolor{green}{\\textsf{No issues found for passed tests after REPLAYING rerun.}}$", + "", + "---", + "", + "", + "", + "", + "$\\textcolor{green}{\\textsf{All tests passed!}}$", + "", + "View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/build-log/recording_test.log) or the [debug log](https://console.cloud.google.com/storage/browser/ci-vcr-logs/beta/refs/heads/auto-pr-pr-123/artifacts/build-123/recording) for each test", + }, + "\n", + ), + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got, err := formatRecordReplay(tc.data) + if err != nil { + t.Fatalf("Failed to format comment: %v", err) + } + if diff := cmp.Diff(tc.want, got); diff != "" { 
+ t.Errorf("formatRecordReplay() returned unexpected difference (-want +got):\n%s", diff) + } + }) + } +} diff --git a/.ci/magician/cmd/test_terraform_vcr_test_analytics.tmpl b/.ci/magician/cmd/test_terraform_vcr_test_analytics.tmpl new file mode 100644 index 000000000000..391c07d05213 --- /dev/null +++ b/.ci/magician/cmd/test_terraform_vcr_test_analytics.tmpl @@ -0,0 +1,20 @@ +#### Tests analytics +Total tests: {{add (add (len .ReplayingResult.PassedTests) (len .ReplayingResult.PassedTests)) (len .ReplayingResult.FailedTests) }} +Passed tests: {{len .ReplayingResult.PassedTests}} +Skipped tests: {{len .ReplayingResult.SkippedTests}} +Affected tests: {{len .ReplayingResult.FailedTests}} + +
+Click here to see the affected service packages +
+{{if .RunFullVCR}} +All service packages are affected +{{else if gt (len .AffectedServices) 0}} +
    +{{range .AffectedServices}}{{. | printf "
  • %s
  • \n"}}{{end}} +
+{{else}} +None +{{end}} +
+
diff --git a/.ci/magician/cmd/test_terraform_vcr_with_replay_failed_tests.tmpl b/.ci/magician/cmd/test_terraform_vcr_with_replay_failed_tests.tmpl new file mode 100644 index 000000000000..68c804f67584 --- /dev/null +++ b/.ci/magician/cmd/test_terraform_vcr_with_replay_failed_tests.tmpl @@ -0,0 +1,12 @@ +#### Action taken +
+Found {{len .ReplayingResult.FailedTests}} affected test(s) by replaying old test recordings. Starting RECORDING based on the most recent commit. Click here to see the affected tests + +
+
    +{{range .ReplayingResult.FailedTests}}{{. | printf "
  • %s
  • \n"}}{{end}} +
+
+
+ +[Get to know how VCR tests work](https://googlecloudplatform.github.io/magic-modules/docs/getting-started/contributing/#general-contributing-steps) diff --git a/.ci/magician/cmd/test_terraform_vcr_without_replay_failed_tests.tmpl b/.ci/magician/cmd/test_terraform_vcr_without_replay_failed_tests.tmpl new file mode 100644 index 000000000000..9c342f06f610 --- /dev/null +++ b/.ci/magician/cmd/test_terraform_vcr_without_replay_failed_tests.tmpl @@ -0,0 +1,7 @@ +{{- if .ReplayingErr -}} +$\textcolor{red}{\textsf{Errors occurred during REPLAYING mode. Please fix them to complete your PR.}}$ +{{- else -}} +$\textcolor{green}{\textsf{All tests passed!}}$ +{{- end}} + +View the [build log](https://storage.cloud.google.com/ci-vcr-logs/beta/refs/heads/auto-pr-{{.PRNumber}}/artifacts/{{.BuildID}}/build-log/replaying_test.log) From 711ab403e2bac1d4ef6c5982b52b01dcff343b14 Mon Sep 17 00:00:00 2001 From: Lingkai Shen Date: Fri, 21 Jun 2024 19:57:26 -0400 Subject: [PATCH 191/356] Make firebase_hosting_site upsert on creation (#10986) --- mmv1/products/firebasehosting/Site.yaml | 2 + .../pre_create/firebasehosting_site.go.erb | 27 +++++++++++ ...resource_firebase_hosting_site_test.go.erb | 46 +++++++++++++++++++ 3 files changed, 75 insertions(+) create mode 100644 mmv1/templates/terraform/pre_create/firebasehosting_site.go.erb diff --git a/mmv1/products/firebasehosting/Site.yaml b/mmv1/products/firebasehosting/Site.yaml index 1fc419d686b1..f20104f3b333 100644 --- a/mmv1/products/firebasehosting/Site.yaml +++ b/mmv1/products/firebasehosting/Site.yaml @@ -24,6 +24,8 @@ references: !ruby/object:Api::Resource::ReferenceLinks guides: 'Official Documentation': 'https://firebase.google.com/docs/hosting' api: 'https://firebase.google.com/docs/reference/hosting/rest/v1beta1/projects.sites' +custom_code: !ruby/object:Provider::Terraform::CustomCode + pre_create: templates/terraform/pre_create/firebasehosting_site.go.erb import_format: ['projects/{{project}}/sites/{{site_id}}', 
'sites/{{site_id}}', '{{site_id}}'] examples: diff --git a/mmv1/templates/terraform/pre_create/firebasehosting_site.go.erb b/mmv1/templates/terraform/pre_create/firebasehosting_site.go.erb new file mode 100644 index 000000000000..3989189d0049 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/firebasehosting_site.go.erb @@ -0,0 +1,27 @@ + +// Check if the Firebase hostng site already exits. Do an update if so. + +getUrl, err := tpgresource.ReplaceVars(d, config, "{{FirebaseHostingBasePath}}projects/{{project}}/sites/{{site_id}}") +if err != nil { + return err +} +_, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + Headers: headers, +}) + +if err == nil { + // Hosting site already exists + log.Printf("[DEBUG] Firebase hosting site already exists %s", d.Get("site_id")) + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/sites/{{site_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourceFirebaseHostingSiteUpdate(d, meta) +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/firebasehosting/resource_firebase_hosting_site_test.go.erb b/mmv1/third_party/terraform/services/firebasehosting/resource_firebase_hosting_site_test.go.erb index c12e002acaa2..57548470be69 100644 --- a/mmv1/third_party/terraform/services/firebasehosting/resource_firebase_hosting_site_test.go.erb +++ b/mmv1/third_party/terraform/services/firebasehosting/resource_firebase_hosting_site_test.go.erb @@ -46,6 +46,34 @@ func TestAccFirebaseHostingSite_firebasehostingSiteUpdate(t *testing.T) { }) } +func TestAccFirebaseHostingSite_firebasehostingSiteUpsert(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "site_id": 
"tf-test-site-upsert", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaseHostingSiteDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccFirebaseHostingSite_firebasehostingSiteUpsert(context), + }, + { + ResourceName: "google_firebase_hosting_site.create2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"site_id"}, + }, + }, + }) +} + + func testAccFirebaseHostingSite_firebasehostingSiteBeforeUpdate(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_firebase_web_app" "before" { @@ -80,4 +108,22 @@ resource "google_firebase_hosting_site" "update" { `, context) } +func testAccFirebaseHostingSite_firebasehostingSiteUpsert(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_hosting_site" "create" { + provider = google-beta + project = "%{project_id}" + site_id = "%{site_id}%{random_suffix}" +} + +resource "google_firebase_hosting_site" "create2" { + provider = google-beta + project = "%{project_id}" + site_id = "%{site_id}%{random_suffix}" + + depends_on = [google_firebase_hosting_site.create] +} +`, context) +} + <% end -%> \ No newline at end of file From fb2c2dc57b1b378c5b0567498de681b58043dcd5 Mon Sep 17 00:00:00 2001 From: Charles Leon Date: Fri, 21 Jun 2024 17:03:37 -0700 Subject: [PATCH 192/356] fix google_access_context_manager_service_perimeters permadiff (#10965) Co-authored-by: Charlesleonius --- .../ServicePerimeters.yaml | 1 + ...er_serviceperimeters_custom_flatten.go.erb | 811 ++++++++++++++++++ ...ontext_manager_services_perimeters_test.go | 21 +- 3 files changed, 824 insertions(+), 9 deletions(-) create mode 100644 mmv1/templates/terraform/custom_flatten/accesscontextmanager_serviceperimeters_custom_flatten.go.erb diff --git 
a/mmv1/products/accesscontextmanager/ServicePerimeters.yaml b/mmv1/products/accesscontextmanager/ServicePerimeters.yaml index 2b941289d41a..98896714991d 100644 --- a/mmv1/products/accesscontextmanager/ServicePerimeters.yaml +++ b/mmv1/products/accesscontextmanager/ServicePerimeters.yaml @@ -57,6 +57,7 @@ properties: name: 'servicePerimeters' description: | The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy. + custom_flatten: 'templates/terraform/custom_flatten/accesscontextmanager_serviceperimeters_custom_flatten.go.erb' item_type: !ruby/object:Api::Type::NestedObject properties: - !ruby/object:Api::Type::String diff --git a/mmv1/templates/terraform/custom_flatten/accesscontextmanager_serviceperimeters_custom_flatten.go.erb b/mmv1/templates/terraform/custom_flatten/accesscontextmanager_serviceperimeters_custom_flatten.go.erb new file mode 100644 index 000000000000..acd645f8d830 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/accesscontextmanager_serviceperimeters_custom_flatten.go.erb @@ -0,0 +1,811 @@ +func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + apiData := make([]map[string]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + continue + } + apiData = append(apiData, map[string]interface{}{ + "name": flattenAccessContextManagerServicePerimetersServicePerimetersName(original["name"], d, config), + "title": flattenAccessContextManagerServicePerimetersServicePerimetersTitle(original["title"], d, config), + "description": flattenAccessContextManagerServicePerimetersServicePerimetersDescription(original["description"], d, config), + "create_time": flattenAccessContextManagerServicePerimetersServicePerimetersCreateTime(original["createTime"], d, config), + "update_time": 
flattenAccessContextManagerServicePerimetersServicePerimetersUpdateTime(original["updateTime"], d, config), + "perimeter_type": flattenAccessContextManagerServicePerimetersServicePerimetersPerimeterType(original["perimeterType"], d, config), + "status": flattenAccessContextManagerServicePerimetersServicePerimetersStatus(original["status"], d, config), + "spec": flattenAccessContextManagerServicePerimetersServicePerimetersSpec(original["spec"], d, config), + "use_explicit_dry_run_spec": flattenAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(original["useExplicitDryRunSpec"], d, config), + }) + } + configData := []map[string]interface{}{} + for _, item := range d.Get("service_perimeters").([]interface{}) { + configData = append(configData, item.(map[string]interface{})) + } + sorted, err := tpgresource.SortMapsByConfigOrder(configData, apiData, "name") + if err != nil { + log.Printf("[ERROR] Could not sort API response value: %s", err) + return v + } + + return sorted +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersPerimeterType(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "PERIMETER_TYPE_REGULAR" + } + + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusResources(original["resources"], d, config) + transformed["access_levels"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(original["accessLevels"], d, config) + transformed["restricted_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(original["restrictedServices"], d, config) + transformed["vpc_accessible_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServices(original["vpcAccessibleServices"], d, config) + transformed["ingress_policies"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(original["ingressPolicies"], d, config) + transformed["egress_policies"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(original["egressPolicies"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return 
schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enable_restriction"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) + transformed["allowed_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesAllowedServices(original["allowedServices"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(accesscontextmanagerServicePerimetersServicePerimetersServicePerimetersStatusIngressPoliciesSchema()), []interface{}{}) + for _, raw := 
range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "ingress_from": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(original["ingressFrom"], d, config), + "ingress_to": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(original["ingressTo"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(original["identities"], d, config) + transformed["sources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(original["sources"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(original["accessLevel"], d, config), + "resource": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(original["resource"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(original["resources"], d, config) + transformed["operations"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(original["operations"], d, config) + return 
[]interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "egress_from": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(original["egressFrom"], d, config), + "egress_to": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(original["egressTo"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + 
transformed["identity_type"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) + transformed["sources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSources(original["sources"], d, config) + transformed["source_restriction"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSourceRestriction(original["sourceRestriction"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSourcesAccessLevel(original["accessLevel"], d, config), + }) + } + return transformed +} +func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSourceRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(original["resources"], d, config) + transformed["external_resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToExternalResources(original["externalResources"], d, config) + transformed["operations"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), + 
}) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecResources(original["resources"], d, config) + transformed["access_levels"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(original["accessLevels"], d, config) + transformed["restricted_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(original["restrictedServices"], d, config) + transformed["vpc_accessible_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServices(original["vpcAccessibleServices"], d, config) + transformed["ingress_policies"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(original["ingressPolicies"], d, config) + transformed["egress_policies"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(original["egressPolicies"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return 
schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enable_restriction"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) + transformed["allowed_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesAllowedServices(original["allowedServices"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} 
{ + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ingress_from": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(original["ingressFrom"], d, config), + "ingress_to": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(original["ingressTo"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(original["identities"], d, config) + transformed["sources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(original["sources"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return 
schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(original["accessLevel"], d, config), + "resource": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(original["resource"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(original["resources"], d, config) + transformed["operations"] = + 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include 
empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "egress_from": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(original["egressFrom"], d, config), + "egress_to": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(original["egressTo"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := 
v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) + transformed["sources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSources(original["sources"], d, config) + transformed["source_restriction"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSourceRestriction(original["sourceRestriction"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSourcesAccessLevel(original["accessLevel"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSourceRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(original["resources"], d, config) + transformed["external_resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToExternalResources(original["externalResources"], d, config) + transformed["operations"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v 
+ } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} diff --git a/mmv1/third_party/terraform/services/accesscontextmanager/resource_access_context_manager_services_perimeters_test.go b/mmv1/third_party/terraform/services/accesscontextmanager/resource_access_context_manager_services_perimeters_test.go index 01ef093c0d13..2b451a7b3a0e 100644 --- a/mmv1/third_party/terraform/services/accesscontextmanager/resource_access_context_manager_services_perimeters_test.go +++ b/mmv1/third_party/terraform/services/accesscontextmanager/resource_access_context_manager_services_perimeters_test.go @@ -26,25 +26,28 @@ func testAccAccessContextManagerServicePerimeters_basicTest(t *testing.T) { Config: testAccAccessContextManagerServicePerimeters_basic(org, "my policy", "level", "storage_perimeter", "bigtable_perimeter", "bigquery_omni_perimeter"), }, { - ResourceName: "google_access_context_manager_service_perimeters.test-access", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_access_context_manager_service_perimeters.test-access", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_perimeters"}, }, { Config: 
testAccAccessContextManagerServicePerimeters_update(org, "my policy", "level", "storage_perimeter", "bigquery_perimeter", "bigtable_perimeter", "bigquery_omni_perimeter"), }, { - ResourceName: "google_access_context_manager_service_perimeters.test-access", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_access_context_manager_service_perimeters.test-access", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_perimeters"}, }, { Config: testAccAccessContextManagerServicePerimeters_empty(org, "my policy", "level"), }, { - ResourceName: "google_access_context_manager_service_perimeters.test-access", - ImportState: true, - ImportStateVerify: true, + ResourceName: "google_access_context_manager_service_perimeters.test-access", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"service_perimeters"}, }, }, }) From a00db19f538bab3f8f8d42b35d033a7703e19e18 Mon Sep 17 00:00:00 2001 From: rafalmaczewski Date: Sat, 22 Jun 2024 02:08:18 +0200 Subject: [PATCH 193/356] fix: google_compute_instance with user-managed service account and empty scopes results in no service account assignment (#10358) --- .../compute/resource_compute_instance.go.erb | 47 +++++- .../resource_compute_instance_test.go.erb | 143 ++++++++++++++++++ 2 files changed, 189 insertions(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 220f8ddf3099..f292b032a776 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -2475,7 +2475,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("service_account.0.email") || scopesChange { sa := d.Get("service_account").([]interface{}) req := 
&compute.InstancesSetServiceAccountRequest{ForceSendFields: []string{"email"}} - if len(sa) > 0 && sa[0] != nil { + if !isEmptyServiceAccountBlock(d) && len(sa) > 0 && sa[0] != nil { saMap := sa[0].(map[string]interface{}) req.Email = saMap["email"].(string) req.Scopes = tpgresource.CanonicalizeServiceScopes(tpgresource.ConvertStringSet(saMap["scopes"].(*schema.Set))) @@ -3094,6 +3094,11 @@ func serviceAccountDiffSuppress(k, old, new string, d *schema.ResourceData) bool // suppress changes between { } and {scopes:[]} if l[0] != nil { contents := l[0].(map[string]interface{}) + email := contents["email"] + if email != "" { + // if email is non empty, don't suppress the diff + return false + } if scopes, ok := contents["scopes"]; ok { a := scopes.(*schema.Set).List() if a != nil && len(a) > 0 { @@ -3103,3 +3108,43 @@ func serviceAccountDiffSuppress(k, old, new string, d *schema.ResourceData) bool } return true } + +// isEmptyServiceAccountBlock is used to work around an issue when updating +// service accounts. Creating the instance with some scopes but without +// specifying a service account email, assigns default compute service account +// to the instance: +// +// service_account { +// scopes = ["some-scope"] +// } +// +// Then when updating the instance with empty service account: +// +// service_account { +// scopes = [] +// } +// +// the default Terraform behavior is to clear scopes without clearing the +// email. The email was previously computed to be the default service account +// and has not been modified, so the default plan is to leave it unchanged. +// However, when creating a new instance: +// +// service_account { +// scopes = [] +// } +// +// indicates an instance without any service account set. +// isEmptyServiceAccountBlock is used to detect empty service_account block +// and if it is, it is interpreted as no service account and no scopes. 
+// +func isEmptyServiceAccountBlock(d *schema.ResourceData) bool { + serviceAccountsConfig := d.GetRawConfig().GetAttr("service_account") + if serviceAccountsConfig.IsNull() || len(serviceAccountsConfig.AsValueSlice()) == 0 { + return true + } + serviceAccount := serviceAccountsConfig.AsValueSlice()[0] + if serviceAccount.GetAttr("email").IsNull() && len(serviceAccount.GetAttr("scopes").AsValueSlice()) == 0 { + return true + } + return false +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index e2eb240b706a..dde0a9397a76 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -1110,6 +1110,54 @@ func TestAccComputeInstance_serviceAccount(t *testing.T) { }) } +func TestAccComputeInstance_noServiceAccount(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_noServiceAccount(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_serviceAccountEmail_0scopes(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_serviceAccountEmail_0scopes(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { t.Parallel() @@ -1126,6 +1174,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1135,6 +1184,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1144,6 +1194,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1153,6 +1204,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, 
"\\d+-compute@developer.gserviceaccount.com"), testAccCheckComputeInstanceScopes(&instance, 3), ), }, @@ -1177,6 +1229,7 @@ func TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1186,6 +1239,7 @@ func TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), testAccCheckComputeInstanceScopes(&instance, 1), ), }, @@ -1195,6 +1249,7 @@ func TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -4084,6 +4139,30 @@ func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope } } +func testAccCheckComputeInstanceNoServiceAccount(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + if count := len(instance.ServiceAccounts); count != 0 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 0, got %d", count) + } + return nil + } +} + +func testAccCheckComputeInstanceMatchServiceAccount(instance *compute.Instance, serviceAcctRegexp string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if count := len(instance.ServiceAccounts); count != 1 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 1, got %d", count) + } + + email := instance.ServiceAccounts[0].Email + if 
!regexp.MustCompile(serviceAcctRegexp).MatchString(email) { + return fmt.Errorf("ServiceAccount email didn't match:\"%s\", got \"%s\"", serviceAcctRegexp, email) + } + + return nil + } +} + func testAccCheckComputeInstanceScopes(instance *compute.Instance, scopeCount int) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -6116,6 +6195,70 @@ resource "google_compute_instance" "foobar" { `, instance) } +func testAccComputeInstance_noServiceAccount(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + scopes = [] + } +} +`, instance) +} + +func testAccComputeInstance_serviceAccountEmail_0scopes(instance string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + email = data.google_compute_default_service_account.default.email + scopes = [] + } +} + +data "google_compute_default_service_account" "default" { +} +`, instance) +} + func testAccComputeInstance_serviceAccount_update0(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { From b1a4a0722e54c1fdaac36f1f00ca4b1c6e328c0f Mon Sep 17 00:00:00 2001 From: Shingo Furuyama Date: Mon, 24 Jun 2024 23:23:43 +0900 Subject: [PATCH 194/356] datafusion: Add network config fields for Private Service 
Connect (#10969) --- mmv1/products/datafusion/Instance.yaml | 53 ++++++++++++++++++- .../examples/data_fusion_instance_psc.tf.erb | 39 ++++++++++++++ 2 files changed, 90 insertions(+), 2 deletions(-) create mode 100644 mmv1/templates/terraform/examples/data_fusion_instance_psc.tf.erb diff --git a/mmv1/products/datafusion/Instance.yaml b/mmv1/products/datafusion/Instance.yaml index ccd0efd1c762..e8e0d553ab73 100644 --- a/mmv1/products/datafusion/Instance.yaml +++ b/mmv1/products/datafusion/Instance.yaml @@ -59,6 +59,18 @@ examples: test_vars_overrides: # Mark for testing to avoid service networking connection usage that is not cleaned up prober_test_run: '`options = { prober_test_run = "true" }`' + - !ruby/object:Provider::Terraform::Examples + name: 'data_fusion_instance_psc' + primary_resource_id: 'psc_instance' + vars: + instance_name: 'psc-instance' + network_name: 'datafusion-psc-network' + subnet_name: 'datafusion-psc-subnet' + attachment_name: 'datafusion-psc-attachment' + prober_test_run: '' + test_vars_overrides: + # Mark for testing to avoid service networking connection usage that is not cleaned up + prober_test_run: '`options = { prober_test_run = "true" }`' - !ruby/object:Provider::Terraform::Examples name: 'data_fusion_instance_cmek' primary_resource_id: 'cmek' @@ -237,7 +249,6 @@ properties: description: | The IP range in CIDR notation to use for the managed Data Fusion instance nodes. This range must not overlap with any other ranges used in the Data Fusion instance network. - required: true immutable: true - !ruby/object:Api::Type::String name: 'network' @@ -245,8 +256,46 @@ properties: Name of the network in the project with which the tenant project will be peered for executing pipelines. 
In case of shared VPC where the network resides in another host project the network should specified in the form of projects/{host-project-id}/global/networks/{network} - required: true immutable: true + - !ruby/object:Api::Type::Enum + name: 'connectionType' + description: | + Optional. Type of connection for establishing private IP connectivity between the Data Fusion customer project VPC and + the corresponding tenant project from a predefined list of available connection modes. + If this field is unspecified for a private instance, VPC peering is used. + values: + - :VPC_PEERING + - :PRIVATE_SERVICE_CONNECT_INTERFACES + immutable: true + - !ruby/object:Api::Type::NestedObject + name: 'privateServiceConnectConfig' + description: | + Optional. Configuration for Private Service Connect. + This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES. + immutable: true + properties: + - !ruby/object:Api::Type::String + name: 'networkAttachment' + description: | + Optional. The reference to the network attachment used to establish private connectivity. + It will be of the form projects/{project-id}/regions/{region}/networkAttachments/{network-attachment-id}. + This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES. + immutable: true + - !ruby/object:Api::Type::String + name: 'unreachableCidrBlock' + description: | + Optional. Input only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. + The size of this block should be at least /25. This range should not overlap with the primary address range of any subnetwork used by the network attachment. + This range can be used for other purposes in the consumer VPC as long as there is no requirement for CDF to reach destinations using these addresses. + If this value is not provided, the server chooses a non RFC 1918 address range. The format of this field is governed by RFC 4632. 
+ ignore_read: true + immutable: true + - !ruby/object:Api::Type::String + name: 'effectiveUnreachableCidrBlock' + description: | + Output only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. + The size of this block is /25. The format of this field is governed by RFC 4632. + output: true - !ruby/object:Api::Type::String name: 'zone' description: | diff --git a/mmv1/templates/terraform/examples/data_fusion_instance_psc.tf.erb b/mmv1/templates/terraform/examples/data_fusion_instance_psc.tf.erb new file mode 100644 index 000000000000..16e25435dfe3 --- /dev/null +++ b/mmv1/templates/terraform/examples/data_fusion_instance_psc.tf.erb @@ -0,0 +1,39 @@ +resource "google_data_fusion_instance" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]["instance_name"] %>" + region = "us-central1" + type = "BASIC" + private_instance = true + + network_config { + connection_type = "PRIVATE_SERVICE_CONNECT_INTERFACES" + private_service_connect_config { + network_attachment = google_compute_network_attachment.psc.id + unreachable_cidr_block = "192.168.0.0/25" + } + } + + <%= ctx[:vars]['prober_test_run'] %> +} + +resource "google_compute_network" "psc" { + name = "<%= ctx[:vars]["network_name"] %>" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "psc" { + name = "<%= ctx[:vars]["subnet_name"] %>" + region = "us-central1" + + network = google_compute_network.psc.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_compute_network_attachment" "psc" { + name = "<%= ctx[:vars]["attachment_name"] %>" + region = "us-central1" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.psc.self_link + ] +} From 62869102395e9659ae75cbfdd9ee3879d5e761b5 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 24 Jun 2024 10:56:32 -0500 Subject: [PATCH 195/356] Add 6.0.0 feature branch instructions back in (#11022) --- .../make-a-breaking-change.md | 37 
++++++++++++++++--- 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/docs/content/develop/breaking-changes/make-a-breaking-change.md b/docs/content/develop/breaking-changes/make-a-breaking-change.md index 7fe9526760fd..a7ead0be56bf 100644 --- a/docs/content/develop/breaking-changes/make-a-breaking-change.md +++ b/docs/content/develop/breaking-changes/make-a-breaking-change.md @@ -63,7 +63,7 @@ The general process for contributing a breaking change to the 1. Make the `main` branch forwards-compatible with the major release 2. Add deprecations and warnings to the `main` branch of `magic-modules` 3. Add upgrade guide entries to the `main` branch of `magic-modules` -4. Make the breaking change on ~~`FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}`~~ `main` temporarily +4. Make the breaking change on `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` These are covered in more detail in the following sections. The upgrade guide and the actual breaking change will be merged only after both are completed. @@ -184,11 +184,36 @@ The upgrade guide and the actual breaking change will be merged only after both ### Make the breaking change on `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` -> [!CAUTION] -> `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` is not yet ready. If you want to make your -> breaking change ahead of time (possibly for early review), please submit a PR on `main` with the title prefix "6.0.0 - ". -> Ensure that a Github Issue is created as per all PR's, and our team will manually switch your PR over to -> `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} when it is ready. +When working on your breaking change, make sure that your base branch +is `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}`. This +means that you will follow the standard +[contribution process]({{< ref "/get-started/contribution-process" >}}) +with the following changes: + +1. 
Before you start, check out and sync your local `magic-modules` and provider + repositories with the upstream major release branches. + ```bash + cd ~/magic-modules + git checkout FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} + git pull --ff-only origin FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} + cd $GOPATH/src/github.com/hashicorp/terraform-provider-google + git checkout FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} + git pull --ff-only origin FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} + cd $GOPATH/src/github.com/hashicorp/terraform-provider-google-beta + git checkout FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} + git pull --ff-only origin FEATURE-BRANCH-major-release-{{% param "majorVersion" %}} + ``` +1. Make sure that any deprecation notices and warnings that you added in previous sections + are present on the major release branch. Changes to the `main` branch will be + merged into the major release branch every Monday. +1. Make the breaking change. +1. Remove any deprecation notices and warnings (including in documentation) not already removed by the breaking change. +1. When you create your pull request, + [change the base branch](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-base-branch-of-a-pull-request) + to `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` +1. To resolve merge conflicts with `git rebase` or `git merge`, use `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` instead of `main`. + +The upgrade guide and the actual breaking change will be merged only after both are completed. ## What's next? 
From def52fed28327b8f3e14c1894aeb21cba08da598 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 24 Jun 2024 11:14:17 -0500 Subject: [PATCH 196/356] go rewrite - description-copy update and overall refresh (#11020) --- GNUmakefile | 2 +- mmv1/description-copy.go | 31 +- mmv1/main.go | 4 +- mmv1/products/compute/Address.yaml | 1 - mmv1/products/compute/BackendService.yaml | 2 - mmv1/products/compute/ForwardingRule.yaml | 3 - .../compute/GlobalForwardingRule.yaml | 3 - mmv1/products/compute/HttpHealthCheck.yaml | 1 - mmv1/products/compute/HttpsHealthCheck.yaml | 1 - .../compute/ManagedSslCertificate.yaml | 1 - .../compute/RegionBackendService.yaml | 2 - .../compute/RegionSslCertificate.yaml | 1 - mmv1/products/compute/SslCertificate.yaml | 1 - mmv1/products/compute/go_Address.yaml | 1 - mmv1/products/compute/go_BackendService.yaml | 2 - mmv1/products/compute/go_Firewall.yaml | 6 +- mmv1/products/compute/go_ForwardingRule.yaml | 3 - .../compute/go_GlobalForwardingRule.yaml | 3 - mmv1/products/compute/go_HttpHealthCheck.yaml | 1 - .../products/compute/go_HttpsHealthCheck.yaml | 1 - .../compute/go_ManagedSslCertificate.yaml | 25 +- .../compute/go_RegionBackendService.yaml | 2 - .../compute/go_RegionSslCertificate.yaml | 7 +- mmv1/products/compute/go_Route.yaml | 10 +- mmv1/products/compute/go_RouterNat.yaml | 14 +- .../compute/go_ServiceAttachment.yaml | 2 +- mmv1/products/compute/go_SslCertificate.yaml | 7 +- .../products/compute/go_TargetHttpsProxy.yaml | 12 + mmv1/products/compute/go_UrlMap.yaml | 178 ++ mmv1/products/compute/go_VpnGateway.yaml | 6 +- mmv1/products/pubsub/go_Subscription.yaml | 37 +- ...r_serviceperimeters_custom_flatten.go.tmpl | 811 +++++++ ...l_map_custom_error_response_policy.tf.tmpl | 86 + ..._featureonlinestore_with_optimized.tf.tmpl | 21 + .../go/firebasehosting_site.go.tmpl | 27 + mmv1/templates/terraform/yaml_conversion.erb | 8 +- mmv1/third_party/terraform/go/go.mod | 2 +- .../go/resource_compute_instance.go.tmpl | 47 +- 
.../go/resource_compute_instance_test.go.tmpl | 143 ++ ...ce_compute_target_https_proxy_test.go.tmpl | 1 + .../go/resource_compute_url_map_test.go.tmpl | 1912 +++++++++++++++++ ...esource_firebase_hosting_site_test.go.tmpl | 46 + 42 files changed, 3379 insertions(+), 95 deletions(-) create mode 100644 mmv1/templates/terraform/custom_flatten/go/accesscontextmanager_serviceperimeters_custom_flatten.go.tmpl create mode 100644 mmv1/templates/terraform/examples/go/url_map_custom_error_response_policy.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/vertex_ai_featureonlinestore_with_optimized.tf.tmpl create mode 100644 mmv1/templates/terraform/pre_create/go/firebasehosting_site.go.tmpl create mode 100644 mmv1/third_party/terraform/services/compute/go/resource_compute_url_map_test.go.tmpl diff --git a/GNUmakefile b/GNUmakefile index e4e63fe494a8..a462c011415e 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -137,5 +137,5 @@ doctor: refresh-go: cd mmv1;\ bundle exec compiler.rb -e terraform -o $(OUTPUT_PATH) -v $(VERSION) $(mmv1_compile) --go-yaml; \ - go run . --yaml --template + go run . --yaml --template; \ go run . 
--yaml --handwritten \ No newline at end of file diff --git a/mmv1/description-copy.go b/mmv1/description-copy.go index 6e5a97657eba..0294174e95c8 100644 --- a/mmv1/description-copy.go +++ b/mmv1/description-copy.go @@ -10,8 +10,24 @@ import ( "strings" ) +func CopyAllDescriptions() { + identifiers := []string{ + "description:", + "note:", + "set_hash_func:", + "warning:", + "required_properties:", + "optional_properties:", + "attributes:", + } + + for i, id := range identifiers { + CopyText(id, len(identifiers)-1 == i) + } +} + // Used to copy/paste text from Ruby -> Go YAML files -func CopyText(identifier string) { +func CopyText(identifier string, last bool) { var allProductFiles []string = make([]string, 0) files, err := filepath.Glob("products/**/go_product.yaml") if err != nil { @@ -92,16 +108,11 @@ func CopyText(identifier string) { for scanner.Scan() { line := scanner.Text() if firstLine { - if line == "NOT CONVERTED - RUN YAML MODE" { - firstLine = false - w.WriteString(fmt.Sprintf("%s\n", "NOT CONVERTED #2 - RUN YAML MODE")) - continue - } else if line == "NOT CONVERTED #2 - RUN YAML MODE" { - firstLine = false - w.WriteString(fmt.Sprintf("%s\n", "NOT CONVERTED #3 - RUN YAML MODE")) - continue - } else if line == "NOT CONVERTED #3 - RUN YAML MODE" { + if strings.Contains(line, "NOT CONVERTED - RUN YAML MODE") { firstLine = false + if !last { + w.WriteString(fmt.Sprintf("NOT CONVERTED - RUN YAML MODE\n")) + } continue } else { break diff --git a/mmv1/main.go b/mmv1/main.go index 7235b1324b9c..851f05737390 100644 --- a/mmv1/main.go +++ b/mmv1/main.go @@ -41,9 +41,7 @@ func main() { flag.Parse() if *yamlMode { - CopyText("description:") - CopyText("note:") - CopyText("set_hash_func:") + CopyAllDescriptions() } if *templateMode { diff --git a/mmv1/products/compute/Address.yaml b/mmv1/products/compute/Address.yaml index e137f2286548..de0f0398c124 100644 --- a/mmv1/products/compute/Address.yaml +++ b/mmv1/products/compute/Address.yaml @@ -165,7 +165,6 @@ 
properties: configure Private Service Connect. Only global internal addresses can use this purpose. - This should only be set when using an Internal address. default_from_api: true - !ruby/object:Api::Type::Enum diff --git a/mmv1/products/compute/BackendService.yaml b/mmv1/products/compute/BackendService.yaml index 150fa6c33361..5a7539e956ad 100644 --- a/mmv1/products/compute/BackendService.yaml +++ b/mmv1/products/compute/BackendService.yaml @@ -834,7 +834,6 @@ properties: UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. - This field is applicable to either: * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, @@ -844,7 +843,6 @@ properties: Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External Network Load Balancing. The default is MAGLEV. - If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, or RING_HASH, session affinity settings will not take effect. diff --git a/mmv1/products/compute/ForwardingRule.yaml b/mmv1/products/compute/ForwardingRule.yaml index e2cfde7e7d5d..4e6f3b6655ab 100644 --- a/mmv1/products/compute/ForwardingRule.yaml +++ b/mmv1/products/compute/ForwardingRule.yaml @@ -286,7 +286,6 @@ properties: * When the `target` is a Private Service Connect Google APIs bundle, you must specify an `IPAddress`. - Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. @@ -304,7 +303,6 @@ properties: * `global/addresses/address-name` * `address-name` - The forwarding rule's `target` or `backendService`, and in most cases, also the `loadBalancingScheme`, determine the type of IP address that you can use. For detailed information, see @@ -494,7 +492,6 @@ properties: * `vpc-sc` - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). 
* `all-apis` - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. update_verb: :POST update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setTarget' diff --git a/mmv1/products/compute/GlobalForwardingRule.yaml b/mmv1/products/compute/GlobalForwardingRule.yaml index b287c5445545..af9a88d4d512 100644 --- a/mmv1/products/compute/GlobalForwardingRule.yaml +++ b/mmv1/products/compute/GlobalForwardingRule.yaml @@ -238,7 +238,6 @@ properties: * When the `target` is a Private Service Connect Google APIs bundle, you must specify an `IPAddress`. - Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. @@ -256,7 +255,6 @@ properties: * `global/addresses/address-name` * `address-name` - The forwarding rule's `target`, and in most cases, also the `loadBalancingScheme`, determine the type of IP address that you can use. For detailed information, see @@ -479,7 +477,6 @@ properties: * `vpc-sc` - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). * `all-apis` - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. update_verb: :POST update_url: 'projects/{{project}}/global/forwardingRules/{{name}}/setTarget' diff --git a/mmv1/products/compute/HttpHealthCheck.yaml b/mmv1/products/compute/HttpHealthCheck.yaml index a49f63feb4bc..e826da96bf2b 100644 --- a/mmv1/products/compute/HttpHealthCheck.yaml +++ b/mmv1/products/compute/HttpHealthCheck.yaml @@ -21,7 +21,6 @@ description: | An HttpHealthCheck resource. 
This resource defines a template for how individual VMs should be checked for health, via HTTP. - ~> **Note:** google_compute_http_health_check is a legacy health check. The newer [google_compute_health_check](/docs/providers/google/r/compute_health_check.html) should be preferred for all uses except diff --git a/mmv1/products/compute/HttpsHealthCheck.yaml b/mmv1/products/compute/HttpsHealthCheck.yaml index c8ca925f0989..aeb352b1f5f8 100644 --- a/mmv1/products/compute/HttpsHealthCheck.yaml +++ b/mmv1/products/compute/HttpsHealthCheck.yaml @@ -21,7 +21,6 @@ description: | An HttpsHealthCheck resource. This resource defines a template for how individual VMs should be checked for health, via HTTPS. - ~> **Note:** google_compute_https_health_check is a legacy health check. The newer [google_compute_health_check](/docs/providers/google/r/compute_health_check.html) should be preferred for all uses except diff --git a/mmv1/products/compute/ManagedSslCertificate.yaml b/mmv1/products/compute/ManagedSslCertificate.yaml index 106e07944520..1975c56387a5 100644 --- a/mmv1/products/compute/ManagedSslCertificate.yaml +++ b/mmv1/products/compute/ManagedSslCertificate.yaml @@ -117,7 +117,6 @@ properties: characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates. - !ruby/object:Api::Type::NestedObject name: 'managed' diff --git a/mmv1/products/compute/RegionBackendService.yaml b/mmv1/products/compute/RegionBackendService.yaml index 4cb367ee521d..9b62faaad6f8 100644 --- a/mmv1/products/compute/RegionBackendService.yaml +++ b/mmv1/products/compute/RegionBackendService.yaml @@ -842,7 +842,6 @@ properties: UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. - This field is applicable to either: * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, @@ -852,7 +851,6 @@ properties: Load Balancing). 
Only MAGLEV and WEIGHTED_MAGLEV values are possible for External Network Load Balancing. The default is MAGLEV. - If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, or RING_HASH, session affinity settings will not take effect. diff --git a/mmv1/products/compute/RegionSslCertificate.yaml b/mmv1/products/compute/RegionSslCertificate.yaml index fc3e6c825658..ba2941812207 100644 --- a/mmv1/products/compute/RegionSslCertificate.yaml +++ b/mmv1/products/compute/RegionSslCertificate.yaml @@ -128,7 +128,6 @@ properties: characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates. default_from_api: true custom_expand: 'templates/terraform/custom_expand/name_or_name_prefix.go.erb' diff --git a/mmv1/products/compute/SslCertificate.yaml b/mmv1/products/compute/SslCertificate.yaml index 0329f36edfb4..f00bbfe1d91c 100644 --- a/mmv1/products/compute/SslCertificate.yaml +++ b/mmv1/products/compute/SslCertificate.yaml @@ -115,7 +115,6 @@ properties: characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates. default_from_api: true custom_expand: 'templates/terraform/custom_expand/name_or_name_prefix.go.erb' diff --git a/mmv1/products/compute/go_Address.yaml b/mmv1/products/compute/go_Address.yaml index a44d0393d9b2..beca1b1ebe45 100644 --- a/mmv1/products/compute/go_Address.yaml +++ b/mmv1/products/compute/go_Address.yaml @@ -158,7 +158,6 @@ properties: configure Private Service Connect. Only global internal addresses can use this purpose. - This should only be set when using an Internal address. 
default_from_api: true - name: 'networkTier' diff --git a/mmv1/products/compute/go_BackendService.yaml b/mmv1/products/compute/go_BackendService.yaml index 90a748f145cd..c13d11c4ee35 100644 --- a/mmv1/products/compute/go_BackendService.yaml +++ b/mmv1/products/compute/go_BackendService.yaml @@ -820,7 +820,6 @@ properties: UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. - This field is applicable to either: * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, @@ -830,7 +829,6 @@ properties: Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External Network Load Balancing. The default is MAGLEV. - If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, or RING_HASH, session affinity settings will not take effect. diff --git a/mmv1/products/compute/go_Firewall.yaml b/mmv1/products/compute/go_Firewall.yaml index d67f8e1166ab..c28d906a27b9 100644 --- a/mmv1/products/compute/go_Firewall.yaml +++ b/mmv1/products/compute/go_Firewall.yaml @@ -33,9 +33,9 @@ references: 'Official Documentation': 'https://cloud.google.com/vpc/docs/firewalls' api: 'https://cloud.google.com/compute/docs/reference/v1/firewalls' docs: - optional_properties: '* `enable_logging` - (Optional, Deprecated) This field denotes whether to enable logging for a particular firewall rule. -If logging is enabled, logs will be exported to Stackdriver. Deprecated in favor of `log_config` -' + optional_properties: | + * `enable_logging` - (Optional, Deprecated) This field denotes whether to enable logging for a particular firewall rule. + If logging is enabled, logs will be exported to Stackdriver. 
Deprecated in favor of `log_config` base_url: 'projects/{{project}}/global/firewalls' has_self_link: true update_verb: 'PATCH' diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml index 93399233a2fe..2b84b92a45a4 100644 --- a/mmv1/products/compute/go_ForwardingRule.yaml +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -273,7 +273,6 @@ properties: * When the `target` is a Private Service Connect Google APIs bundle, you must specify an `IPAddress`. - Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. When omitted, Google Cloud assigns an ephemeral IP address. @@ -291,7 +290,6 @@ properties: * `global/addresses/address-name` * `address-name` - The forwarding rule's `target` or `backendService`, and in most cases, also the `loadBalancingScheme`, determine the type of IP address that you can use. For detailed information, see @@ -476,7 +474,6 @@ properties: * `vpc-sc` - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). * `all-apis` - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setTarget' update_verb: 'POST' diff --git a/mmv1/products/compute/go_GlobalForwardingRule.yaml b/mmv1/products/compute/go_GlobalForwardingRule.yaml index 77c4f47f2e7a..c85cfd9c7525 100644 --- a/mmv1/products/compute/go_GlobalForwardingRule.yaml +++ b/mmv1/products/compute/go_GlobalForwardingRule.yaml @@ -229,7 +229,6 @@ properties: * When the `target` is a Private Service Connect Google APIs bundle, you must specify an `IPAddress`. - Otherwise, you can optionally specify an IP address that references an existing static (reserved) IP address resource. 
When omitted, Google Cloud assigns an ephemeral IP address. @@ -247,7 +246,6 @@ properties: * `global/addresses/address-name` * `address-name` - The forwarding rule's `target`, and in most cases, also the `loadBalancingScheme`, determine the type of IP address that you can use. For detailed information, see @@ -467,7 +465,6 @@ properties: * `vpc-sc` - [ APIs that support VPC Service Controls](https://cloud.google.com/vpc-service-controls/docs/supported-products). * `all-apis` - [All supported Google APIs](https://cloud.google.com/vpc/docs/private-service-connect#supported-apis). - For Private Service Connect forwarding rules that forward traffic to managed services, the target must be a service attachment. required: true update_url: 'projects/{{project}}/global/forwardingRules/{{name}}/setTarget' diff --git a/mmv1/products/compute/go_HttpHealthCheck.yaml b/mmv1/products/compute/go_HttpHealthCheck.yaml index f24acd5ea12c..dca967b94c9c 100644 --- a/mmv1/products/compute/go_HttpHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpHealthCheck.yaml @@ -19,7 +19,6 @@ description: | An HttpHealthCheck resource. This resource defines a template for how individual VMs should be checked for health, via HTTP. - ~> **Note:** google_compute_http_health_check is a legacy health check. The newer [google_compute_health_check](/docs/providers/google/r/compute_health_check.html) should be preferred for all uses except diff --git a/mmv1/products/compute/go_HttpsHealthCheck.yaml b/mmv1/products/compute/go_HttpsHealthCheck.yaml index f08af7f264dd..a186293003ad 100644 --- a/mmv1/products/compute/go_HttpsHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpsHealthCheck.yaml @@ -19,7 +19,6 @@ description: | An HttpsHealthCheck resource. This resource defines a template for how individual VMs should be checked for health, via HTTPS. - ~> **Note:** google_compute_https_health_check is a legacy health check. 
The newer [google_compute_health_check](/docs/providers/google/r/compute_health_check.html) should be preferred for all uses except diff --git a/mmv1/products/compute/go_ManagedSslCertificate.yaml b/mmv1/products/compute/go_ManagedSslCertificate.yaml index 2dd937bc54e6..5f1f840af51f 100644 --- a/mmv1/products/compute/go_ManagedSslCertificate.yaml +++ b/mmv1/products/compute/go_ManagedSslCertificate.yaml @@ -27,19 +27,19 @@ references: 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/ssl-certificates' api: 'https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates' docs: - warning: 'This resource should be used with extreme caution! Provisioning an SSL -certificate is complex. Ensure that you understand the lifecycle of a -certificate before attempting complex tasks like cert rotation automatically. -This resource will "return" as soon as the certificate object is created, -but post-creation the certificate object will go through a "provisioning" -process. The provisioning process can complete only when the domain name -for which the certificate is created points to a target pool which, itself, -points at the certificate. Depending on your DNS provider, this may take -some time, and migrating from self-managed certificates to Google-managed -certificates may entail some downtime while the certificate provisions. + warning: | + This resource should be used with extreme caution! Provisioning an SSL + certificate is complex. Ensure that you understand the lifecycle of a + certificate before attempting complex tasks like cert rotation automatically. + This resource will "return" as soon as the certificate object is created, + but post-creation the certificate object will go through a "provisioning" + process. The provisioning process can complete only when the domain name + for which the certificate is created points to a target pool which, itself, + points at the certificate. 
Depending on your DNS provider, this may take + some time, and migrating from self-managed certificates to Google-managed + certificates may entail some downtime while the certificate provisions. -In conclusion: Be extremely cautious. -' + In conclusion: Be extremely cautious. base_url: 'projects/{{project}}/global/sslCertificates' has_self_link: true immutable: true @@ -107,7 +107,6 @@ properties: characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates. - name: 'managed' type: NestedObject diff --git a/mmv1/products/compute/go_RegionBackendService.yaml b/mmv1/products/compute/go_RegionBackendService.yaml index 13700c4f64c0..cab122136a1b 100644 --- a/mmv1/products/compute/go_RegionBackendService.yaml +++ b/mmv1/products/compute/go_RegionBackendService.yaml @@ -826,7 +826,6 @@ properties: UNAVAILABLE_WEIGHT. Otherwise, Load Balancing remains equal-weight. - This field is applicable to either: * A regional backend service with the service_protocol set to HTTP, HTTPS, or HTTP2, @@ -836,7 +835,6 @@ properties: Load Balancing). Only MAGLEV and WEIGHTED_MAGLEV values are possible for External Network Load Balancing. The default is MAGLEV. - If session_affinity is not NONE, and this field is not set to MAGLEV, WEIGHTED_MAGLEV, or RING_HASH, session affinity settings will not take effect. 
diff --git a/mmv1/products/compute/go_RegionSslCertificate.yaml b/mmv1/products/compute/go_RegionSslCertificate.yaml index 21f77cac5d19..a365f1cacf13 100644 --- a/mmv1/products/compute/go_RegionSslCertificate.yaml +++ b/mmv1/products/compute/go_RegionSslCertificate.yaml @@ -24,9 +24,9 @@ references: 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/ssl-certificates' api: 'https://cloud.google.com/compute/docs/reference/rest/v1/regionSslCertificates' docs: - optional_properties: '* `name_prefix` - (Optional) Creates a unique name beginning with the - specified prefix. Conflicts with `name`. -' + optional_properties: | + * `name_prefix` - (Optional) Creates a unique name beginning with the + specified prefix. Conflicts with `name`. base_url: 'projects/{{project}}/regions/{{region}}/sslCertificates' has_self_link: true immutable: true @@ -119,7 +119,6 @@ properties: characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates. default_from_api: true custom_expand: 'templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl' diff --git a/mmv1/products/compute/go_Route.yaml b/mmv1/products/compute/go_Route.yaml index f3f3ecfb2435..ef5b1a44c013 100644 --- a/mmv1/products/compute/go_Route.yaml +++ b/mmv1/products/compute/go_Route.yaml @@ -44,11 +44,11 @@ references: 'Using Routes': 'https://cloud.google.com/vpc/docs/using-routes' api: 'https://cloud.google.com/compute/docs/reference/rest/v1/routes' docs: - optional_properties: '* `next_hop_instance_zone` - (Optional when `next_hop_instance` is - specified) The zone of the instance specified in - `next_hop_instance`. Omit if `next_hop_instance` is specified as - a URL. -' + optional_properties: | + * `next_hop_instance_zone` - (Optional when `next_hop_instance` is + specified) The zone of the instance specified in + `next_hop_instance`. 
Omit if `next_hop_instance` is specified as + a URL. base_url: 'projects/{{project}}/global/routes' has_self_link: true immutable: true diff --git a/mmv1/products/compute/go_RouterNat.yaml b/mmv1/products/compute/go_RouterNat.yaml index 4ba216388bec..8e6afb9e5415 100644 --- a/mmv1/products/compute/go_RouterNat.yaml +++ b/mmv1/products/compute/go_RouterNat.yaml @@ -150,7 +150,7 @@ properties: is set to MANUAL_ONLY. is_set: true send_empty_value: true - set_hash_func: 'computeRouterNatIPsHash' + set_hash_func: computeRouterNatIPsHash custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' item_type: name: 'address' @@ -198,7 +198,7 @@ properties: api_name: subnetworks is_set: true send_empty_value: true - set_hash_func: 'computeRouterNatSubnetworkHash' + set_hash_func: computeRouterNatSubnetworkHash item_type: type: NestedObject properties: @@ -323,7 +323,7 @@ properties: description: 'A list of rules associated with this NAT.' is_set: true send_empty_value: true - set_hash_func: 'computeRouterNatRulesHash' + set_hash_func: computeRouterNatRulesHash item_type: type: NestedObject properties: @@ -368,7 +368,7 @@ properties: These IP addresses must be valid static external IP addresses assigned to the project. This field is used for public NAT. is_set: true - set_hash_func: 'computeRouterNatIPsHash' + set_hash_func: computeRouterNatIPsHash custom_flatten: 'templates/terraform/custom_flatten/go/nat_rules_ip_set.tmpl' custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' item_type: @@ -386,7 +386,7 @@ properties: These IPs should be used for updating/patching a NAT rule only. This field is used for public NAT. 
is_set: true - set_hash_func: 'computeRouterNatIPsHash' + set_hash_func: computeRouterNatIPsHash custom_flatten: 'templates/terraform/custom_flatten/go/nat_rules_ip_set.tmpl' custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' item_type: @@ -404,7 +404,7 @@ properties: This field is used for private NAT. is_set: true min_version: 'beta' - set_hash_func: 'computeRouterNatRulesSubnetHash' + set_hash_func: computeRouterNatRulesSubnetHash custom_flatten: 'templates/terraform/custom_flatten/go/nat_rules_subnets_set.tmpl' custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' item_type: @@ -422,7 +422,7 @@ properties: This field is used for private NAT. is_set: true min_version: 'beta' - set_hash_func: 'computeRouterNatRulesSubnetHash' + set_hash_func: computeRouterNatRulesSubnetHash custom_flatten: 'templates/terraform/custom_flatten/go/nat_rules_subnets_set.tmpl' custom_expand: 'templates/terraform/custom_expand/go/array_resourceref_with_validation.go.tmpl' item_type: diff --git a/mmv1/products/compute/go_ServiceAttachment.yaml b/mmv1/products/compute/go_ServiceAttachment.yaml index 39e46277bc81..6c7d3fc345de 100644 --- a/mmv1/products/compute/go_ServiceAttachment.yaml +++ b/mmv1/products/compute/go_ServiceAttachment.yaml @@ -211,7 +211,7 @@ properties: attachment. 
is_set: true send_empty_value: true - set_hash_func: 'computeServiceAttachmentConsumerAcceptListsHash' + set_hash_func: computeServiceAttachmentConsumerAcceptListsHash item_type: type: NestedObject properties: diff --git a/mmv1/products/compute/go_SslCertificate.yaml b/mmv1/products/compute/go_SslCertificate.yaml index 40808e930866..97bc3263f0a8 100644 --- a/mmv1/products/compute/go_SslCertificate.yaml +++ b/mmv1/products/compute/go_SslCertificate.yaml @@ -24,9 +24,9 @@ references: 'Official Documentation': 'https://cloud.google.com/load-balancing/docs/ssl-certificates' api: 'https://cloud.google.com/compute/docs/reference/rest/v1/sslCertificates' docs: - optional_properties: '* `name_prefix` - (Optional) Creates a unique name beginning with the - specified prefix. Conflicts with `name`. -' + optional_properties: | + * `name_prefix` - (Optional) Creates a unique name beginning with the + specified prefix. Conflicts with `name`. base_url: 'projects/{{project}}/global/sslCertificates' has_self_link: true immutable: true @@ -107,7 +107,6 @@ properties: characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. - These are in the same namespace as the managed SSL certificates. default_from_api: true custom_expand: 'templates/terraform/custom_expand/go/name_or_name_prefix.go.tmpl' diff --git a/mmv1/products/compute/go_TargetHttpsProxy.yaml b/mmv1/products/compute/go_TargetHttpsProxy.yaml index d9ec4cdc7d50..0b758b752c8c 100644 --- a/mmv1/products/compute/go_TargetHttpsProxy.yaml +++ b/mmv1/products/compute/go_TargetHttpsProxy.yaml @@ -125,6 +125,18 @@ properties: - 'NONE' - 'ENABLE' - 'DISABLE' + - name: 'tlsEarlyData' + type: Enum + description: | + Specifies whether TLS 1.3 0-RTT Data (“Early Data”) should be accepted for this service. + Early Data allows a TLS resumption handshake to include the initial application payload + (a HTTP request) alongside the handshake, reducing the effective round trips to “zero”. 
+ This applies to TLS 1.3 connections over TCP (HTTP/2) as well as over UDP (QUIC/h3). + default_from_api: true + enum_values: + - 'STRICT' + - 'PERMISSIVE' + - 'DISABLED' - name: 'certificateManagerCertificates' type: Array description: | diff --git a/mmv1/products/compute/go_UrlMap.yaml b/mmv1/products/compute/go_UrlMap.yaml index 38680546b572..dee0cd5963de 100644 --- a/mmv1/products/compute/go_UrlMap.yaml +++ b/mmv1/products/compute/go_UrlMap.yaml @@ -118,6 +118,15 @@ examples: http_health_check_name: 'health-check' backend_bucket_name: 'static-asset-backend-bucket' storage_bucket_name: 'static-asset-bucket' + - name: 'url_map_custom_error_response_policy' + primary_resource_id: 'urlmap' + min_version: 'beta' + vars: + url_map_name: 'urlmap' + backend_service_name: 'login' + http_health_check_name: 'health-check' + storage_bucket_name: 'static-asset-bucket' + error_backend_bucket_name: 'error-backend-bucket' parameters: properties: - name: 'creationTimestamp' @@ -301,6 +310,63 @@ properties: description: | An optional description of this resource. Provide this property when you create the resource. + - name: 'defaultCustomErrorResponsePolicy' + type: NestedObject + description: | + defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + + This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. + + For example, consider a UrlMap with the following configuration: + + UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors + A RouteRule for /coming_soon/ is configured for the error code 404. 
+ If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in RouteRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. + + When used in conjunction with pathMatcher.defaultRouteAction.retryPolicy, retries take precedence. Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the defaultCustomErrorResponsePolicy is ignored and the response from the service is returned to the client. + + defaultCustomErrorResponsePolicy is supported only for global external Application Load Balancers. + min_version: 'beta' + properties: + - name: 'errorResponseRule' + type: Array + description: | + Specifies rules for returning error responses. + In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. + For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). + If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. + api_name: errorResponseRules + item_type: + type: NestedObject + properties: + - name: 'matchResponseCodes' + type: Array + description: | + Valid values include: + - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. 
+ - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. + - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. + Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. + item_type: + type: String + - name: 'path' + type: String + description: | + The full path to a file within backendBucket . For example: /errors/defaultError.html + path must start with a leading slash. path cannot have trailing slashes. + If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. + The value must be from 1 to 1024 characters + - name: 'overrideResponseCode' + type: Integer + description: | + The HTTP status code returned with the response containing the custom error content. + If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. + - name: 'errorService' + type: ResourceRef + description: | + The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + resource: 'BackendBucket' + imports: 'selfLink' - name: 'headerAction' type: NestedObject description: | @@ -406,6 +472,60 @@ properties: required: true item_type: type: String + - name: 'customErrorResponsePolicy' + type: NestedObject + description: | + customErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + If a policy for an error code is not configured for the PathRule, a policy for the error code configured in pathMatcher.defaultCustomErrorResponsePolicy is applied. 
If one is not specified in pathMatcher.defaultCustomErrorResponsePolicy, the policy configured in UrlMap.defaultCustomErrorResponsePolicy takes effect. + For example, consider a UrlMap with the following configuration: + UrlMap.defaultCustomErrorResponsePolicy are configured with policies for 5xx and 4xx errors + A PathRule for /coming_soon/ is configured for the error code 404. + If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in PathRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. + customErrorResponsePolicy is supported only for global external Application Load Balancers. + min_version: 'beta' + properties: + - name: 'errorResponseRule' + type: Array + description: | + Specifies rules for returning error responses. + In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. + For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). + If the backend service returns a 401, then the rule for 401 will be applied. However if the backend service returns a 403, the rule for 4xx takes effect. + api_name: errorResponseRules + item_type: + type: NestedObject + properties: + - name: 'matchResponseCodes' + type: Array + description: | + Valid values include: + + - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. 
+ - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. + - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. + + Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. + item_type: + type: String + - name: 'path' + type: String + description: | + The full path to a file within backendBucket . For example: /errors/defaultError.html + path must start with a leading slash. path cannot have trailing slashes. + If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. + The value must be from 1 to 1024 characters + - name: 'overrideResponseCode' + type: Integer + description: | + The HTTP status code returned with the response containing the custom error content. + If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. + - name: 'errorService' + type: ResourceRef + description: | + The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + + resource: 'BackendBucket' + imports: 'selfLink' - name: 'routeAction' type: NestedObject description: | @@ -1910,6 +2030,64 @@ properties: The value must be between 0.0 and 100.0 inclusive. validation: function: 'validation.FloatBetween(0, 100)' + - name: 'defaultCustomErrorResponsePolicy' + type: NestedObject + description: | + defaultCustomErrorResponsePolicy specifies how the Load Balancer returns error responses when BackendServiceor BackendBucket responds with an error. + + This policy takes effect at the PathMatcher level and applies only when no policy has been defined for the error code at lower levels like RouteRule and PathRule within this PathMatcher. 
If an error code does not have a policy defined in defaultCustomErrorResponsePolicy, then a policy defined for the error code in UrlMap.defaultCustomErrorResponsePolicy takes effect. + + For example, consider a UrlMap with the following configuration: + + UrlMap.defaultCustomErrorResponsePolicy is configured with policies for 5xx and 4xx errors + A RouteRule for /coming_soon/ is configured for the error code 404. + If the request is for www.myotherdomain.com and a 404 is encountered, the policy under UrlMap.defaultCustomErrorResponsePolicy takes effect. If a 404 response is encountered for the request www.example.com/current_events/, the pathMatcher's policy takes effect. If however, the request for www.example.com/coming_soon/ encounters a 404, the policy in RouteRule.customErrorResponsePolicy takes effect. If any of the requests in this example encounter a 500 error code, the policy at UrlMap.defaultCustomErrorResponsePolicy takes effect. + + When used in conjunction with pathMatcher.defaultRouteAction.retryPolicy, retries take precedence. Only once all retries are exhausted, the defaultCustomErrorResponsePolicy is applied. While attempting a retry, if load balancer is successful in reaching the service, the defaultCustomErrorResponsePolicy is ignored and the response from the service is returned to the client. + + defaultCustomErrorResponsePolicy is supported only for global external Application Load Balancers. + min_version: 'beta' + properties: + - name: 'errorResponseRule' + type: Array + description: | + Specifies rules for returning error responses. + In a given policy, if you specify rules for both a range of error codes as well as rules for specific error codes then rules with specific error codes have a higher priority. + For example, assume that you configure a rule for 401 (Un-authorized) code, and another for all 4 series error codes (4XX). + If the backend service returns a 401, then the rule for 401 will be applied. 
However if the backend service returns a 403, the rule for 4xx takes effect. + api_name: errorResponseRules + item_type: + type: NestedObject + properties: + - name: 'matchResponseCodes' + type: Array + description: | + Valid values include: + - A number between 400 and 599: For example 401 or 503, in which case the load balancer applies the policy if the error code exactly matches this value. + - 5xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 500 to 599. + - 4xx: Load Balancer will apply the policy if the backend service responds with any response code in the range of 400 to 499. + Values must be unique within matchResponseCodes and across all errorResponseRules of CustomErrorResponsePolicy. + item_type: + type: String + - name: 'path' + type: String + description: | + The full path to a file within backendBucket. For example: /errors/defaultError.html + path must start with a leading slash. path cannot have trailing slashes. + If the file is not available in backendBucket or the load balancer cannot reach the BackendBucket, a simple Not Found Error is returned to the client. + The value must be from 1 to 1024 characters. + - name: 'overrideResponseCode' + type: Integer + description: | + The HTTP status code returned with the response containing the custom error content. + If overrideResponseCode is not supplied, the same response code returned by the original backend bucket or backend service is returned to the client. + - name: 'errorService' + type: ResourceRef + description: | + The full or partial URL to the BackendBucket resource that contains the custom error content. 
Examples are: + + resource: 'BackendBucket' + imports: 'selfLink' - name: 'test' type: Array description: | diff --git a/mmv1/products/compute/go_VpnGateway.yaml b/mmv1/products/compute/go_VpnGateway.yaml index 87e0c5d6dda6..07b33e5b2f78 100644 --- a/mmv1/products/compute/go_VpnGateway.yaml +++ b/mmv1/products/compute/go_VpnGateway.yaml @@ -22,9 +22,9 @@ references: guides: api: 'https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways' docs: - warning: 'Classic VPN is deprecating certain functionality on October 31, 2021. For more information, -see the [Classic VPN partial deprecation page](https://cloud.google.com/network-connectivity/docs/vpn/deprecations/classic-vpn-deprecation). -' + warning: | + Classic VPN is deprecating certain functionality on October 31, 2021. For more information, + see the [Classic VPN partial deprecation page](https://cloud.google.com/network-connectivity/docs/vpn/deprecations/classic-vpn-deprecation). base_url: 'projects/{{project}}/regions/{{region}}/targetVpnGateways' has_self_link: true immutable: true diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index 3ee6f3eb70fa..02ab2a488f57 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -75,6 +75,14 @@ examples: subscription_name: 'example-subscription' dataset_id: 'example_dataset' table_id: 'example_table' + - name: 'pubsub_subscription_push_bq_service_account' + primary_resource_id: 'example' + vars: + topic_name: 'example-topic' + subscription_name: 'example-subscription' + dataset_id: 'example_dataset' + table_id: 'example_table' + service_account_id: 'example-bqw' - name: 'pubsub_subscription_push_cloudstorage' primary_resource_id: 'example' vars: @@ -87,6 +95,13 @@ examples: topic_name: 'example-topic' subscription_name: 'example-subscription' bucket_name: 'example-bucket' + - name: 'pubsub_subscription_push_cloudstorage_service_account' + 
primary_resource_id: 'example' + vars: + topic_name: 'example-topic' + subscription_name: 'example-subscription' + bucket_name: 'example-bucket' + service_account_id: 'example-stw' parameters: properties: - name: 'name' @@ -133,17 +148,15 @@ properties: description: | When true, use the topic's schema as the columns to write to in BigQuery, if it exists. Only one of use_topic_schema and use_table_schema can be set. - # Not present in Ruby version - # conflicts: - # - use_table_schema + conflicts: + - use_table_schema - name: 'useTableSchema' type: Boolean description: | When true, use the BigQuery table's schema as the columns to write to in BigQuery. Messages must be published in JSON format. Only one of use_topic_schema and use_table_schema can be set. - # Not present in Ruby version - # conflicts: - # - use_topic_schema + conflicts: + - use_topic_schema - name: 'writeMetadata' type: Boolean description: | @@ -155,6 +168,12 @@ properties: When true and use_topic_schema or use_table_schema is true, any fields that are a part of the topic schema or message schema that are not part of the BigQuery table schema are dropped when writing to BigQuery. Otherwise, the schemas must be kept in sync and any messages with extra fields are not written and remain in the subscription's backlog. + - name: 'serviceAccountEmail' + type: String + description: | + The service account to use to write to BigQuery. If not specified, the Pub/Sub + [service agent](https://cloud.google.com/iam/docs/service-agents), + service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. - name: 'cloudStorageConfig' type: NestedObject description: | @@ -212,6 +231,12 @@ properties: type: Boolean description: | When true, write the subscription name, messageId, publishTime, attributes, and orderingKey as additional fields in the output. + - name: 'serviceAccountEmail' + type: String + description: | + The service account to use to write to Cloud Storage. 
If not specified, the Pub/Sub + [service agent](https://cloud.google.com/iam/docs/service-agents), + service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com, is used. - name: 'pushConfig' type: NestedObject description: | diff --git a/mmv1/templates/terraform/custom_flatten/go/accesscontextmanager_serviceperimeters_custom_flatten.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/accesscontextmanager_serviceperimeters_custom_flatten.go.tmpl new file mode 100644 index 000000000000..231fc3f35c04 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/go/accesscontextmanager_serviceperimeters_custom_flatten.go.tmpl @@ -0,0 +1,811 @@ +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + apiData := make([]map[string]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + continue + } + apiData = append(apiData, map[string]interface{}{ + "name": flattenAccessContextManagerServicePerimetersServicePerimetersName(original["name"], d, config), + "title": flattenAccessContextManagerServicePerimetersServicePerimetersTitle(original["title"], d, config), + "description": flattenAccessContextManagerServicePerimetersServicePerimetersDescription(original["description"], d, config), + "create_time": flattenAccessContextManagerServicePerimetersServicePerimetersCreateTime(original["createTime"], d, config), + "update_time": flattenAccessContextManagerServicePerimetersServicePerimetersUpdateTime(original["updateTime"], d, config), + "perimeter_type": flattenAccessContextManagerServicePerimetersServicePerimetersPerimeterType(original["perimeterType"], d, config), + "status": flattenAccessContextManagerServicePerimetersServicePerimetersStatus(original["status"], d, config), + "spec": flattenAccessContextManagerServicePerimetersServicePerimetersSpec(original["spec"], d, 
config), + "use_explicit_dry_run_spec": flattenAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(original["useExplicitDryRunSpec"], d, config), + }) + } + configData := []map[string]interface{}{} + for _, item := range d.Get("service_perimeters").([]interface{}) { + configData = append(configData, item.(map[string]interface{})) + } + sorted, err := tpgresource.SortMapsByConfigOrder(configData, apiData, "name") + if err != nil { + log.Printf("[ERROR] Could not sort API response value: %s", err) + return v + } + + return sorted +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersTitle(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersDescription(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersCreateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersUpdateTime(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersPerimeterType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { + return "PERIMETER_TYPE_REGULAR" + } + + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatus(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + 
transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusResources(original["resources"], d, config) + transformed["access_levels"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(original["accessLevels"], d, config) + transformed["restricted_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(original["restrictedServices"], d, config) + transformed["vpc_accessible_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServices(original["vpcAccessibleServices"], d, config) + transformed["ingress_policies"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(original["ingressPolicies"], d, config) + transformed["egress_policies"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(original["egressPolicies"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusRestrictedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServices(v interface{}, d *schema.ResourceData, config 
*transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enable_restriction"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) + transformed["allowed_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesAllowedServices(original["allowedServices"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusVpcAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := schema.NewSet(schema.HashResource(accesscontextmanagerServicePerimetersServicePerimetersServicePerimetersStatusIngressPoliciesSchema()), []interface{}{}) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed.Add(map[string]interface{}{ + "ingress_from": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(original["ingressFrom"], d, config), + "ingress_to": 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(original["ingressTo"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(original["identities"], d, config) + transformed["sources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(original["sources"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back 
from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(original["accessLevel"], d, config), + "resource": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(original["resource"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(original["resources"], d, config) + transformed["operations"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperations(v 
interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "egress_from": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(original["egressFrom"], d, config), + "egress_to": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(original["egressTo"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(original["identities"], d, config) + transformed["sources"] = + 
flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSources(original["sources"], d, config) + transformed["source_restriction"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSourceRestriction(original["sourceRestriction"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSourcesAccessLevel(original["accessLevel"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressFromSourceRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + 
return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(original["resources"], d, config) + transformed["external_resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToExternalResources(original["externalResources"], d, config) + transformed["operations"] = + flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, 
map[string]interface{}{ + "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersStatusEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenAccessContextManagerServicePerimetersServicePerimetersSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecResources(original["resources"], d, config) + transformed["access_levels"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(original["accessLevels"], d, config) + transformed["restricted_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(original["restrictedServices"], d, config) + transformed["vpc_accessible_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServices(original["vpcAccessibleServices"], d, config) + transformed["ingress_policies"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(original["ingressPolicies"], d, config) + transformed["egress_policies"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(original["egressPolicies"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecAccessLevels(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecRestrictedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { 
+ if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["enable_restriction"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesEnableRestriction(original["enableRestriction"], d, config) + transformed["allowed_services"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesAllowedServices(original["allowedServices"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesEnableRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecVpcAccessibleServicesAllowedServices(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "ingress_from": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(original["ingressFrom"], d, config), + 
"ingress_to": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(original["ingressTo"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(original["identities"], d, config) + transformed["sources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(original["sources"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from 
the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(original["accessLevel"], d, config), + "resource": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(original["resource"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressFromSourcesResource(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(original["resources"], d, config) + transformed["operations"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperations(v interface{}, d 
*schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "service_name": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecIngressPoliciesIngressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPolicies(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "egress_from": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(original["egressFrom"], d, config), + "egress_to": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(original["egressTo"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFrom(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["identity_type"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(original["identityType"], d, config) + transformed["identities"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(original["identities"], d, config) + transformed["sources"] = + 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSources(original["sources"], d, config) + transformed["source_restriction"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSourceRestriction(original["sourceRestriction"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentityType(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromIdentities(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "access_level": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSourcesAccessLevel(original["accessLevel"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSourcesAccessLevel(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressFromSourceRestriction(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressTo(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(original["resources"], d, config) + transformed["external_resources"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToExternalResources(original["externalResources"], d, config) + transformed["operations"] = + flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(original["operations"], d, config) + return []interface{}{transformed} +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToExternalResources(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + return schema.NewSet(schema.HashString, v.([]interface{})) +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperations(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + 
"service_name": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(original["serviceName"], d, config), + "method_selectors": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(original["methodSelectors"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsServiceName(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectors(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "method": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(original["method"], d, config), + "permission": flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(original["permission"], d, config), + }) + } + return transformed +} +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsMethod(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func flattenAccessContextManagerServicePerimetersServicePerimetersSpecEgressPoliciesEgressToOperationsMethodSelectorsPermission(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} + +func 
flattenAccessContextManagerServicePerimetersServicePerimetersUseExplicitDryRunSpec(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return v +} diff --git a/mmv1/templates/terraform/examples/go/url_map_custom_error_response_policy.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_custom_error_response_policy.tf.tmpl new file mode 100644 index 000000000000..6b3a72411f47 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/url_map_custom_error_response_policy.tf.tmpl @@ -0,0 +1,86 @@ +resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { + provider = google-beta + name = "{{index $.Vars "url_map_name"}}" + description = "a description" + + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx"] # All 5xx responses will be caught + path = "/*" + override_response_code = 502 + } + error_service = google_compute_backend_bucket.error.id + } + + host_rule { + hosts = ["mysite.com"] + path_matcher = "mysite" + } + + path_matcher { + name = "mysite" + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx", "5xx"] # All 4xx and 5xx responses will be caught on path login + path = "/login" + override_response_code = 404 + } + error_response_rule { + match_response_codes = ["503"] # Only a 503 response will be caught on path example + path = "/example" + override_response_code = 502 + } + error_service = google_compute_backend_bucket.error.id + } + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.example.id + + custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx"] + path = "/register" + override_response_code = 401 + } + error_service = google_compute_backend_bucket.error.id + } + } + } +} + +resource "google_compute_backend_service" "example" { + provider =
google-beta + name = "{{index $.Vars "backend_service_name"}}" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "EXTERNAL_MANAGED" + + health_checks = [google_compute_http_health_check.default.id] +} + +resource "google_compute_http_health_check" "default" { + provider = google-beta + name = "{{index $.Vars "http_health_check_name"}}" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_backend_bucket" "error" { + provider = google-beta + name = "{{index $.Vars "error_backend_bucket_name"}}" + bucket_name = google_storage_bucket.error.name + enable_cdn = true +} + +resource "google_storage_bucket" "error" { + provider = google-beta + name = "{{index $.Vars "storage_bucket_name"}}" + location = "US" +} diff --git a/mmv1/templates/terraform/examples/go/vertex_ai_featureonlinestore_with_optimized.tf.tmpl b/mmv1/templates/terraform/examples/go/vertex_ai_featureonlinestore_with_optimized.tf.tmpl new file mode 100644 index 000000000000..acbdcb5b461c --- /dev/null +++ b/mmv1/templates/terraform/examples/go/vertex_ai_featureonlinestore_with_optimized.tf.tmpl @@ -0,0 +1,21 @@ +resource "google_vertex_ai_feature_online_store" "{{$.PrimaryResourceId}}" { + provider = google + name = "{{index $.Vars "name"}}" + labels = { + foo = "bar" + } + region = "us-central1" + optimized {} + dedicated_serving_endpoint { + private_service_connect_config { + enable_private_service_connect = true + project_allowlist = [data.google_project.project.number] + } + } +} + +data "google_project" "project" { + provider = google +} + + diff --git a/mmv1/templates/terraform/pre_create/go/firebasehosting_site.go.tmpl b/mmv1/templates/terraform/pre_create/go/firebasehosting_site.go.tmpl new file mode 100644 index 000000000000..f31de67be01f --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/firebasehosting_site.go.tmpl @@ -0,0 +1,27 @@ + +// Check if the Firebase hosting site already exists. Do an update if so.
+ +getUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}FirebaseHostingBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/sites/{{"{{"}}site_id{{"}}"}}") +if err != nil { + return err +} +_, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: getUrl, + UserAgent: userAgent, + Headers: headers, +}) + +if err == nil { + // Hosting site already exists + log.Printf("[DEBUG] Firebase hosting site already exists %s", d.Get("site_id")) + // Replace import id for the resource id + id, err := tpgresource.ReplaceVars(d, config, "projects/{{"{{"}}project{{"}}"}}/sites/{{"{{"}}site_id{{"}}"}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + return resourceFirebaseHostingSiteUpdate(d, meta) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index a04c27fadbbb..e2f06c8688ff 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -60,19 +60,19 @@ references: <% unless object.docs.nil? -%> docs: <% unless object.docs.warning.nil? -%> - warning: '<%= object.docs.warning %>' + warning: <% end -%> <% unless object.docs.note.nil? -%> note: <% end -%> <% unless object.docs.required_properties.nil? -%> - required_properties: '<%= object.docs.required_properties %>' + required_properties: <% end -%> <% unless object.docs.optional_properties.nil? -%> - optional_properties: '<%= object.docs.optional_properties %>' + optional_properties: <% end -%> <% unless object.docs.attributes.nil? 
-%> - attributes: '<%= object.docs.attributes %>' + attributes: <% end -%> <% end -%> <% diff --git a/mmv1/third_party/terraform/go/go.mod b/mmv1/third_party/terraform/go/go.mod index 491855828e38..9aeb1b3967d5 100644 --- a/mmv1/third_party/terraform/go/go.mod +++ b/mmv1/third_party/terraform/go/go.mod @@ -4,7 +4,7 @@ go 1.21 require ( cloud.google.com/go/bigtable v1.24.0 - github.com/GoogleCloudPlatform/declarative-resource-client-library v1.67.0 + github.com/GoogleCloudPlatform/declarative-resource-client-library v1.68.0 github.com/apparentlymart/go-cidr v1.1.0 github.com/davecgh/go-spew v1.1.1 github.com/dnaeon/go-vcr v1.0.1 diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl index 7c177d611d58..045b0be451fa 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl @@ -2474,7 +2474,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if d.HasChange("service_account.0.email") || scopesChange { sa := d.Get("service_account").([]interface{}) req := &compute.InstancesSetServiceAccountRequest{ForceSendFields: []string{"email"}} - if len(sa) > 0 && sa[0] != nil { + if !isEmptyServiceAccountBlock(d) && len(sa) > 0 && sa[0] != nil { saMap := sa[0].(map[string]interface{}) req.Email = saMap["email"].(string) req.Scopes = tpgresource.CanonicalizeServiceScopes(tpgresource.ConvertStringSet(saMap["scopes"].(*schema.Set))) @@ -3093,6 +3093,11 @@ func serviceAccountDiffSuppress(k, old, new string, d *schema.ResourceData) bool // suppress changes between { } and {scopes:[]} if l[0] != nil { contents := l[0].(map[string]interface{}) + email := contents["email"] + if email != "" { + // if email is non empty, don't suppress the diff + return false + } if scopes, ok := contents["scopes"]; ok { a := 
scopes.(*schema.Set).List() if a != nil && len(a) > 0 { @@ -3102,3 +3107,43 @@ func serviceAccountDiffSuppress(k, old, new string, d *schema.ResourceData) bool } return true } + +// isEmptyServiceAccountBlock is used to work around an issue when updating +// service accounts. Creating the instance with some scopes but without +// specifying a service account email, assigns default compute service account +// to the instance: +// +// service_account { +// scopes = ["some-scope"] +// } +// +// Then when updating the instance with empty service account: +// +// service_account { +// scopes = [] +// } +// +// the default Terraform behavior is to clear scopes without clearing the +// email. The email was previously computed to be the default service account +// and has not been modified, so the default plan is to leave it unchanged. +// However, when creating a new instance: +// +// service_account { +// scopes = [] +// } +// +// indicates an instance without any service account set. +// isEmptyServiceAccountBlock is used to detect empty service_account block +// and if it is, it is interpreted as no service account and no scopes. 
+// +func isEmptyServiceAccountBlock(d *schema.ResourceData) bool { + serviceAccountsConfig := d.GetRawConfig().GetAttr("service_account") + if serviceAccountsConfig.IsNull() || len(serviceAccountsConfig.AsValueSlice()) == 0 { + return true + } + serviceAccount := serviceAccountsConfig.AsValueSlice()[0] + if serviceAccount.GetAttr("email").IsNull() && len(serviceAccount.GetAttr("scopes").AsValueSlice()) == 0 { + return true + } + return false +} diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl index 3a9d5d2bc89b..27e65e9281ec 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl @@ -1109,6 +1109,54 @@ func TestAccComputeInstance_serviceAccount(t *testing.T) { }) } +func TestAccComputeInstance_noServiceAccount(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_noServiceAccount(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + +func TestAccComputeInstance_serviceAccountEmail_0scopes(t *testing.T) { + t.Parallel() + + var instance compute.Instance + var instanceName = fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { 
acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeInstanceDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeInstance_serviceAccountEmail_0scopes(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), + ), + }, + computeInstanceImportStep("us-central1-a", instanceName, []string{}), + }, + }) +} + func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { t.Parallel() @@ -1125,6 +1173,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1134,6 +1183,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1143,6 +1193,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1152,6 +1203,7 @@ func TestAccComputeInstance_serviceAccount_updated(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, 
"\\d+-compute@developer.gserviceaccount.com"), testAccCheckComputeInstanceScopes(&instance, 3), ), }, @@ -1176,6 +1228,7 @@ func TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -1185,6 +1238,7 @@ func TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceMatchServiceAccount(&instance, "\\d+-compute@developer.gserviceaccount.com"), testAccCheckComputeInstanceScopes(&instance, 1), ), }, @@ -1194,6 +1248,7 @@ func TestAccComputeInstance_serviceAccount_updated0to1to0scopes(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists( t, "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceNoServiceAccount(&instance), testAccCheckComputeInstanceScopes(&instance, 0), ), }, @@ -4083,6 +4138,30 @@ func testAccCheckComputeInstanceServiceAccount(instance *compute.Instance, scope } } +func testAccCheckComputeInstanceNoServiceAccount(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + if count := len(instance.ServiceAccounts); count != 0 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 0, got %d", count) + } + return nil + } +} + +func testAccCheckComputeInstanceMatchServiceAccount(instance *compute.Instance, serviceAcctRegexp string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if count := len(instance.ServiceAccounts); count != 1 { + return fmt.Errorf("Wrong number of ServiceAccounts: expected 1, got %d", count) + } + + email := instance.ServiceAccounts[0].Email + if 
!regexp.MustCompile(serviceAcctRegexp).MatchString(email) { + return fmt.Errorf("ServiceAccount email didn't match:\"%s\", got \"%s\"", serviceAcctRegexp, email) + } + + return nil + } +} + func testAccCheckComputeInstanceScopes(instance *compute.Instance, scopeCount int) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -6115,6 +6194,70 @@ resource "google_compute_instance" "foobar" { `, instance) } +func testAccComputeInstance_noServiceAccount(instance string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + scopes = [] + } +} +`, instance) +} + +func testAccComputeInstance_serviceAccountEmail_0scopes(instance string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "e2-medium" + zone = "us-central1-a" + + boot_disk { + initialize_params { + image = data.google_compute_image.my_image.self_link + } + } + + network_interface { + network = "default" + } + + service_account { + email = data.google_compute_default_service_account.default.email + scopes = [] + } +} + +data "google_compute_default_service_account" "default" { +} +`, instance) +} + func testAccComputeInstance_serviceAccount_update0(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl index 
b66d344c9d48..7fc12c1b57e9 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_target_https_proxy_test.go.tmpl @@ -225,6 +225,7 @@ resource "google_compute_target_https_proxy" "foobar" { google_compute_ssl_certificate.foobar2.self_link, ] quic_override = "ENABLE" + tls_early_data = "STRICT" } resource "google_compute_backend_service" "foobar" { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_url_map_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_url_map_test.go.tmpl new file mode 100644 index 000000000000..ce955dca24d1 --- /dev/null +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_url_map_test.go.tmpl @@ -0,0 +1,1912 @@ +package compute_test + +import ( + "fmt" + "testing" + "github.com/hashicorp/terraform-provider-google/google/acctest" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccComputeUrlMap_update_path_matcher(t *testing.T) { + t.Parallel() + + bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + + { + Config: testAccComputeUrlMap_basic2(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + 
), + }, + }, + }) +} + +func TestAccComputeUrlMap_advanced(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_advanced1(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + + { + Config: testAccComputeUrlMap_advanced2(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_defaultRouteActionPathUrlRewrite(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultRouteActionPathUrlRewrite(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + { + Config: testAccComputeUrlMap_defaultRouteActionPathUrlRewrite_update(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_defaultRouteActionUrlRewrite(t *testing.T) { + t.Parallel() + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultRouteActionUrlRewrite(acctest.RandString(t, 10)), + Check: 
resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + + { + Config: testAccComputeUrlMap_defaultRouteActionUrlRewrite_update(acctest.RandString(t, 10)), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func TestAccComputeUrlMap_noPathRulesWithUpdate(t *testing.T) { + t.Parallel() + + bsName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + hcName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + umName := fmt.Sprintf("urlmap-test-%s", acctest.RandString(t, 10)) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_noPathRules(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + { + Config: testAccComputeUrlMap_basic1(bsName, hcName, umName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeUrlMapExists( + t, "google_compute_url_map.foobar"), + ), + }, + }, + }) +} + +func testAccCheckComputeUrlMapExists(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := acctest.GoogleProviderConfig(t) + name := rs.Primary.Attributes["name"] + + found, err := config.NewComputeClient(config.UserAgent).UrlMaps.Get( + config.Project, name).Do() + if err != nil { + return err + } + + if found.Name != name { + return fmt.Errorf("Url map not found") + } + return nil + } +} + +func TestAccComputeUrlMap_defaultRouteActionTrafficDirectorPathUpdate(t *testing.T) { + 
t.Parallel() + + randString := acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultRouteActionTrafficDirectorPath(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_defaultRouteActionTrafficDirectorPathUpdate(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_defaultRouteActionTrafficDirectorUpdate(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultRouteActionTrafficDirector(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_defaultRouteActionTrafficDirectorUpdate(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_trafficDirectorUpdate(t *testing.T) { + t.Parallel() + + randString := 
acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_trafficDirector(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_trafficDirectorUpdate(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_trafficDirectorPathUpdate(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_trafficDirectorPath(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_trafficDirectorPathUpdate(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_trafficDirectorRemoveRouteRule(t *testing.T) { + t.Parallel() + + randString := acctest.RandString(t, 10) + + bsName := fmt.Sprintf("urlmap-test-%s", randString) + hcName := 
fmt.Sprintf("urlmap-test-%s", randString) + umName := fmt.Sprintf("urlmap-test-%s", randString) + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_trafficDirector(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeUrlMap_trafficDirectorRemoveRouteRule(bsName, hcName, umName), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeUrlMap_defaultUrlRedirect(t *testing.T) { + t.Parallel() + + randomSuffix := acctest.RandString(t, 10) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_defaultUrlRedirectConfig(randomSuffix), + }, + { + ResourceName: "google_compute_url_map.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccComputeUrlMap_urlMapCustomErrorResponsePolicyUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeUrlMapDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeUrlMap_urlMapCustomErrorResponsePolicy(context), + }, + { + ResourceName: "google_compute_url_map.urlmap", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"default_service"}, + }, + { + Config: 
testAccComputeUrlMap_urlMapCustomErrorResponsePolicyUpdate(context), + }, + { + ResourceName: "google_compute_url_map.urlmap", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"default_service"}, + }, + }, + }) +} +{{- end }} + +func testAccComputeUrlMap_basic1(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "boop" + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.foobar.self_link + } + } + + test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.foobar.self_link + } +} +`, bsName, hcName, umName) +} + +func testAccComputeUrlMap_basic2(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blip" + + path_rule { + paths = 
["/*", "/home"] + service = google_compute_backend_service.foobar.self_link + } + } + + test { + host = "mysite.com" + path = "/test" + service = google_compute_backend_service.foobar.self_link + } +} +`, bsName, hcName, umName) +} + +func testAccComputeUrlMap_advanced1(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blop" + } + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blop" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_backend_service.foobar.self_link + } + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blip" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_backend_service.foobar.self_link + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_advanced2(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blep" + 
} + + host_rule { + hosts = ["myfavoritesite.com"] + path_matcher = "blip" + } + + host_rule { + hosts = ["myleastfavoritesite.com"] + path_matcher = "blub" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blep" + + path_rule { + paths = ["/home"] + service = google_compute_backend_service.foobar.self_link + } + + path_rule { + paths = ["/login"] + service = google_compute_backend_service.foobar.self_link + } + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blub" + + path_rule { + paths = ["/*", "/blub"] + service = google_compute_backend_service.foobar.self_link + } + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blip" + + path_rule { + paths = ["/*", "/home"] + service = google_compute_backend_service.foobar.self_link + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_defaultRouteActionPathUrlRewrite(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blep" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blep" + + path_rule { + paths = ["/home"] + service = google_compute_backend_service.foobar.self_link + } + + path_rule { + paths = ["/login"] + service = google_compute_backend_service.foobar.self_link + } + + default_route_action { + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + } + } +} 
+`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_defaultRouteActionPathUrlRewrite_update(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "blep" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "blep" + + path_rule { + paths = ["/home"] + service = google_compute_backend_service.foobar.self_link + } + + path_rule { + paths = ["/login"] + service = google_compute_backend_service.foobar.self_link + } + + default_route_action { + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_defaultRouteActionUrlRewrite(suffix string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + default_route_action { + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_defaultRouteActionUrlRewrite_update(suffix string) string { + return fmt.Sprintf(` +resource 
"google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + default_route_action { + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + } +} +`, suffix, suffix, suffix) +} + +func testAccComputeUrlMap_noPathRules(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "urlmap-test-%s" + health_checks = [google_compute_http_health_check.zero.self_link] +} + +resource "google_compute_http_health_check" "zero" { + name = "urlmap-test-%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_service = google_compute_backend_service.foobar.self_link + + host_rule { + hosts = ["mysite.com", "myothersite.com"] + path_matcher = "boop" + } + + path_matcher { + default_service = google_compute_backend_service.foobar.self_link + name = "boop" + } + + test { + host = "mysite.com" + path = "/*" + service = google_compute_backend_service.foobar.self_link + } +} +`, bsName, hcName, umName) +} + +func testAccComputeUrlMap_trafficDirector(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = "${google_compute_backend_service.home.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_service = "${google_compute_backend_service.home.self_link}" + + route_rules { + priority = 1 + header_action { 
+ request_headers_to_remove = ["RemoveMe2"] + request_headers_to_add { + header_name = "AddSomethingElse" + header_value = "MyOtherValue" + replace = true + } + response_headers_to_remove = ["RemoveMe3"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + match_rules { + full_path_match = "a full path" + header_matches { + header_name = "someheader" + exact_match = "match this exactly" + invert_match = true + } + ignore_case = true + metadata_filters { + filter_match_criteria = "MATCH_ANY" + filter_labels { + name = "PLANET" + value = "MARS" + } + } + query_parameter_matches { + name = "a query parameter" + present_match = true + } + } + url_redirect { + host_redirect = "A host" + https_redirect = false + path_redirect = "some/path" + redirect_response_code = "TEMPORARY_REDIRECT" + strip_query = true + } + } + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} + +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_trafficDirectorUpdate(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = "${google_compute_backend_service.home2.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = 
"allpaths2" + } + + path_matcher { + name = "allpaths2" + default_service = "${google_compute_backend_service.home2.self_link}" + + route_rules { + priority = 2 + header_action { + request_headers_to_remove = ["RemoveMe2", "AndMe"] + request_headers_to_add { + header_name = "AddSomethingElseUpdated" + header_value = "MyOtherValueUpdated" + replace = false + } + response_headers_to_remove = ["RemoveMe3", "AndMe4"] + } + match_rules { + full_path_match = "a full path to match" + header_matches { + header_name = "someheaderfoo" + exact_match = "match this exactly again" + invert_match = false + } + ignore_case = false + metadata_filters { + filter_match_criteria = "MATCH_ALL" + filter_labels { + name = "PLANET" + value = "EARTH" + } + } + } + url_redirect { + host_redirect = "A host again" + https_redirect = true + path_redirect = "some/path/twice" + redirect_response_code = "TEMPORARY_REDIRECT" + strip_query = false + } + } + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_trafficDirectorRemoveRouteRule(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = 
"${google_compute_backend_service.home2.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths2" + } + + path_matcher { + name = "allpaths2" + default_service = "${google_compute_backend_service.home2.self_link}" + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_trafficDirectorPath(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = "${google_compute_backend_service.home.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + default_service = "${google_compute_backend_service.home.self_link}" + + path_rule { + paths = ["/home"] + route_action { + cors_policy { + allow_credentials = true + allow_headers = ["Allowed content"] + allow_methods = ["GET"] + allow_origin_regexes = ["abc.*"] + allow_origins = ["Allowed origin"] + expose_headers = ["Exposed header"] + max_age = 30 + disabled = true + } + fault_injection_policy { + abort { + http_status = 234 + percentage = 5.6 + } + delay { + fixed_delay { + seconds = 0 + nanos = 50000 + } + percentage = 7.8 + } + } + request_mirror_policy { + backend_service = 
"${google_compute_backend_service.home.self_link}" + } + retry_policy { + num_retries = 4 + per_try_timeout { + seconds = 30 + } + retry_conditions = ["5xx", "deadline-exceeded"] + } + timeout { + seconds = 20 + nanos = 750000000 + } + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + weighted_backend_services { + backend_service = "${google_compute_backend_service.home.self_link}" + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMe"] + request_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = true + } + response_headers_to_remove = ["RemoveMe"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + } + } + } + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} + +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_trafficDirectorPathUpdate(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = "${google_compute_backend_service.home2.self_link}" + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths2" + } + + path_matcher { + name = "allpaths2" + default_service = 
"${google_compute_backend_service.home.self_link}" + + path_rule { + paths = ["/homeupdated"] + route_action { + cors_policy { + allow_credentials = false + allow_headers = ["Allowed content updated"] + allow_methods = ["PUT"] + allow_origin_regexes = ["abcdef.*"] + allow_origins = ["Allowed origin updated"] + expose_headers = ["Exposed header updated"] + max_age = 31 + disabled = false + } + fault_injection_policy { + abort { + http_status = 235 + percentage = 6.7 + } + delay { + fixed_delay { + seconds = 1 + nanos = 40000 + } + percentage = 8.9 + } + } + request_mirror_policy { + backend_service = "${google_compute_backend_service.home.self_link}" + } + retry_policy { + num_retries = 5 + per_try_timeout { + seconds = 31 + } + retry_conditions = ["5xx"] + } + timeout { + seconds = 21 + nanos = 760000000 + } + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + weighted_backend_services { + backend_service = "${google_compute_backend_service.home.self_link}" + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMeUpdated"] + request_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = false + } + response_headers_to_remove = ["RemoveMeUpdated"] + response_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = true + } + } + } + } + } + } + + test { + service = "${google_compute_backend_service.home.self_link}" + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = ["${google_compute_health_check.default.self_link}"] + 
load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_defaultRouteActionTrafficDirectorPath(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = google_compute_backend_service.home.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths" + } + + path_matcher { + name = "allpaths" + + default_route_action { + cors_policy { + allow_credentials = true + allow_headers = ["Allowed content"] + allow_methods = ["GET"] + allow_origin_regexes = ["abc.*"] + allow_origins = ["Allowed origin"] + expose_headers = ["Exposed header"] + max_age = 30 + disabled = true + } + fault_injection_policy { + abort { + http_status = 234 + percentage = 5.6 + } + delay { + fixed_delay { + seconds = 0 + nanos = 50000 + } + percentage = 7.8 + } + } + request_mirror_policy { + backend_service = google_compute_backend_service.home.self_link + } + retry_policy { + num_retries = 4 + per_try_timeout { + seconds = 30 + } + retry_conditions = ["5xx", "deadline-exceeded"] + } + timeout { + seconds = 20 + nanos = 750000000 + } + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + weighted_backend_services { + backend_service = google_compute_backend_service.home.self_link + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMe"] + request_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = true + } + response_headers_to_remove = ["RemoveMe"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + } + } + } + + test { + service = google_compute_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" 
"home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} + +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_defaultRouteActionTrafficDirectorPathUpdate(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + default_service = google_compute_backend_service.home2.self_link + + host_rule { + hosts = ["mysite.com"] + path_matcher = "allpaths2" + } + + path_matcher { + name = "allpaths2" + + default_route_action { + cors_policy { + allow_credentials = false + allow_headers = ["Allowed content updated"] + allow_methods = ["PUT"] + allow_origin_regexes = ["abcdef.*"] + allow_origins = ["Allowed origin updated"] + expose_headers = ["Exposed header updated"] + max_age = 31 + disabled = false + } + fault_injection_policy { + abort { + http_status = 235 + percentage = 6.7 + } + delay { + fixed_delay { + seconds = 1 + nanos = 40000 + } + percentage = 8.9 + } + } + request_mirror_policy { + backend_service = google_compute_backend_service.home.self_link + } + retry_policy { + num_retries = 5 + per_try_timeout { + seconds = 31 + } + retry_conditions = ["5xx"] + } + timeout { + seconds = 21 + nanos = 760000000 + } + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + weighted_backend_services { + backend_service = google_compute_backend_service.home.self_link + weight = 400 + header_action { + 
request_headers_to_remove = ["RemoveMeUpdated"] + request_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = false + } + response_headers_to_remove = ["RemoveMeUpdated"] + response_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = true + } + } + } + } + } + + test { + service = google_compute_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + + +func testAccComputeUrlMap_defaultRouteActionTrafficDirector(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + + default_route_action { + cors_policy { + allow_credentials = true + allow_headers = ["Allowed content"] + allow_methods = ["GET"] + allow_origin_regexes = ["abc.*"] + allow_origins = ["Allowed origin"] + expose_headers = ["Exposed header"] + max_age = 30 + disabled = true + } + fault_injection_policy { + abort { + http_status = 234 + percentage = 5.6 + } + delay { + fixed_delay { + seconds = 0 + nanos = 50000 + } + percentage = 7.8 + } + } + request_mirror_policy { + backend_service = google_compute_backend_service.home.self_link + } + retry_policy { + num_retries = 4 + per_try_timeout { + seconds = 30 + } + retry_conditions = ["5xx", "deadline-exceeded"] + } + 
timeout { + seconds = 20 + nanos = 750000000 + } + url_rewrite { + host_rewrite = "dev.example.com" + path_prefix_rewrite = "/v1/api/" + } + weighted_backend_services { + backend_service = google_compute_backend_service.home.self_link + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMe"] + request_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = true + } + response_headers_to_remove = ["RemoveMe"] + response_headers_to_add { + header_name = "AddMe" + header_value = "MyValue" + replace = false + } + } + } + } + + test { + service = google_compute_backend_service.home.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} + +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_defaultRouteActionTrafficDirectorUpdate(bsName, hcName, umName string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "%s" + description = "a description" + + default_route_action { + cors_policy { + allow_credentials = false + allow_headers = ["Allowed content updated"] + allow_methods = ["PUT"] + allow_origin_regexes = ["abcdef.*"] + allow_origins = ["Allowed origin updated"] + expose_headers = ["Exposed header updated"] + max_age = 31 + disabled = false + } + fault_injection_policy { + abort { + http_status = 235 + percentage = 6.7 + } + delay { + fixed_delay { + seconds = 1 + nanos = 40000 
+ } + percentage = 8.9 + } + } + request_mirror_policy { + backend_service = google_compute_backend_service.home2.self_link + } + retry_policy { + num_retries = 5 + per_try_timeout { + seconds = 31 + } + retry_conditions = ["5xx"] + } + timeout { + seconds = 21 + nanos = 760000000 + } + url_rewrite { + host_rewrite = "stage.example.com" # updated + path_prefix_rewrite = "/v2/api/" # updated + } + weighted_backend_services { + backend_service = google_compute_backend_service.home2.self_link + weight = 400 + header_action { + request_headers_to_remove = ["RemoveMeUpdated"] + request_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = false + } + response_headers_to_remove = ["RemoveMeUpdated"] + response_headers_to_add { + header_name = "AddMeUpdated" + header_value = "MyValueUpdated" + replace = true + } + } + } + } + + test { + service = google_compute_backend_service.home2.self_link + host = "hi.com" + path = "/home" + } +} + +resource "google_compute_backend_service" "home" { + name = "%s" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_backend_service" "home2" { + name = "%s-2" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + + health_checks = [google_compute_health_check.default.self_link] + load_balancing_scheme = "INTERNAL_SELF_MANAGED" +} + +resource "google_compute_health_check" "default" { + name = "%s" + http_health_check { + port = 80 + } +} +`, umName, bsName, bsName, hcName) +} + +func testAccComputeUrlMap_defaultUrlRedirectConfig(randomSuffix string) string { + return fmt.Sprintf(` +resource "google_compute_url_map" "foobar" { + name = "urlmap-test-%s" + default_url_redirect { + https_redirect = true + strip_query = false + } +} +`, randomSuffix) +} + +{{ if ne $.TargetVersionName `ga` -}} +func 
testAccComputeUrlMap_urlMapCustomErrorResponsePolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_url_map" "urlmap" { + provider = google-beta + name = "urlmap%{random_suffix}" + description = "a description" + + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx"] # All 5xx responses will be catched + path = "/*" + override_response_code = 502 + } + error_service = google_compute_backend_bucket.error.id + } + + host_rule { + hosts = ["mysite.com"] + path_matcher = "mysite" + } + + path_matcher { + name = "mysite" + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx", "5xx"] # All 4xx and 5xx responses will be catched on path login + path = "/login" + override_response_code = 404 + } + error_response_rule { + match_response_codes = ["503"] # Only a 503 response will be catched on path example + path = "/example" + override_response_code = 502 + } + error_service = google_compute_backend_bucket.error.id + } + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.example.id + + custom_error_response_policy { + error_response_rule { + match_response_codes = ["4xx"] + path = "/register" + override_response_code = 401 + } + error_service = google_compute_backend_bucket.error.id + } + } + } +} + +resource "google_compute_backend_service" "example" { + provider = google-beta + name = "login%{random_suffix}" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "EXTERNAL_MANAGED" + + health_checks = [google_compute_http_health_check.default.id] +} + +resource "google_compute_http_health_check" "default" { + provider = google-beta + name = "tf-test-health-check%{random_suffix}" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource 
"google_compute_backend_bucket" "error" { + provider = google-beta + name = "tf-test-error-backend-bucket%{random_suffix}" + bucket_name = google_storage_bucket.error.name + enable_cdn = true +} + +resource "google_storage_bucket" "error" { + provider = google-beta + name = "tf-test-static-asset-bucket%{random_suffix}" + location = "US" +} +`, context) +} +{{- end }} + +{{ if ne $.TargetVersionName `ga` -}} +func testAccComputeUrlMap_urlMapCustomErrorResponsePolicyUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_compute_url_map" "urlmap" { + provider = google-beta + name = "urlmap%{random_suffix}" + description = "a description" + + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx", "4xx"] # All 5xx responses will be catched + path = "/test/*" + override_response_code = 503 + } + error_service = google_compute_backend_bucket.error.id + } + + host_rule { + hosts = ["mysite.com"] + path_matcher = "mysite" + } + + path_matcher { + name = "mysite" + default_service = google_compute_backend_service.example.id + + default_custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx"] # All 4xx and 5xx responses will be catched on path login + path = "/*" + override_response_code = 502 + } + error_response_rule { + match_response_codes = ["4xx"] # Only a 503 response will be catched on path example + path = "/example/test" + override_response_code = 400 + } + error_service = google_compute_backend_bucket.error.id + } + + path_rule { + paths = ["/*"] + service = google_compute_backend_service.example.id + + custom_error_response_policy { + error_response_rule { + match_response_codes = ["5xx"] + path = "/register/example/*" + override_response_code = 403 + } + error_service = google_compute_backend_bucket.error.id + } + } + } +} + +resource "google_compute_backend_service" "example" { + provider = 
google-beta + name = "login%{random_suffix}" + port_name = "http" + protocol = "HTTP" + timeout_sec = 10 + load_balancing_scheme = "EXTERNAL_MANAGED" + + health_checks = [google_compute_http_health_check.default.id] +} + +resource "google_compute_http_health_check" "default" { + provider = google-beta + name = "tf-test-health-check%{random_suffix}" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} + +resource "google_compute_backend_bucket" "error" { + provider = google-beta + name = "tf-test-error-backend-bucket-2%{random_suffix}" + bucket_name = google_storage_bucket.error.name + enable_cdn = true + + lifecycle { + create_before_destroy = true + } +} + +resource "google_storage_bucket" "error" { + provider = google-beta + name = "tf-test-static-asset-bucket-2%{random_suffix}" + location = "US" +} +`, context) +} +{{- end }} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_site_test.go.tmpl b/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_site_test.go.tmpl index e5a7d337eafb..cc0e77f6f4ee 100644 --- a/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_site_test.go.tmpl +++ b/mmv1/third_party/terraform/services/firebasehosting/go/resource_firebase_hosting_site_test.go.tmpl @@ -45,6 +45,34 @@ func TestAccFirebaseHostingSite_firebasehostingSiteUpdate(t *testing.T) { }) } +func TestAccFirebaseHostingSite_firebasehostingSiteUpsert(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project_id": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + "site_id": "tf-test-site-upsert", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckFirebaseHostingSiteDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: 
testAccFirebaseHostingSite_firebasehostingSiteUpsert(context), + }, + { + ResourceName: "google_firebase_hosting_site.create2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"site_id"}, + }, + }, + }) +} + + func testAccFirebaseHostingSite_firebasehostingSiteBeforeUpdate(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_firebase_web_app" "before" { @@ -79,4 +107,22 @@ resource "google_firebase_hosting_site" "update" { `, context) } +func testAccFirebaseHostingSite_firebasehostingSiteUpsert(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_firebase_hosting_site" "create" { + provider = google-beta + project = "%{project_id}" + site_id = "%{site_id}%{random_suffix}" +} + +resource "google_firebase_hosting_site" "create2" { + provider = google-beta + project = "%{project_id}" + site_id = "%{site_id}%{random_suffix}" + + depends_on = [google_firebase_hosting_site.create] +} +`, context) +} + {{ end }} \ No newline at end of file From 70941b698762c42f979bcf5ab24c70e85dc1992e Mon Sep 17 00:00:00 2001 From: abhisheksinghigoog Date: Mon, 24 Jun 2024 12:49:35 -0400 Subject: [PATCH 197/356] Add support for CMEK datasets to terraform. 
(#10978) --- mmv1/products/healthcare/Dataset.yaml | 24 +++++++++++++ .../examples/healthcare_dataset_cmek.tf.erb | 36 +++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 mmv1/templates/terraform/examples/healthcare_dataset_cmek.tf.erb diff --git a/mmv1/products/healthcare/Dataset.yaml b/mmv1/products/healthcare/Dataset.yaml index eb5d12034582..7538b95986f0 100644 --- a/mmv1/products/healthcare/Dataset.yaml +++ b/mmv1/products/healthcare/Dataset.yaml @@ -35,6 +35,15 @@ examples: dataset_name: 'example-dataset' location: 'us-central1' time_zone: 'America/New_York' + - !ruby/object:Provider::Terraform::Examples + name: 'healthcare_dataset_cmek' + primary_resource_id: 'default' + vars: + dataset_name: 'example-dataset' + location: 'us-central1' + time_zone: 'America/New_York' + key_name: 'example-key' + keyring_name: 'example-keyring' custom_code: !ruby/object:Provider::Terraform::CustomCode decoder: templates/terraform/decoders/long_name_to_self_link.go.erb parameters: @@ -66,3 +75,18 @@ properties: The fully qualified name of this dataset output: true ignore_read: true + - !ruby/object:Api::Type::NestedObject + name: 'encryptionSpec' + required: false + immutable: true + default_from_api: true + properties: + - !ruby/object:Api::Type::String + name: 'kmsKeyName' + description: | + KMS encryption key that is used to secure this dataset and its sub-resources. The key used for + encryption and the dataset must be in the same location. If empty, the default Google encryption + key will be used to secure this dataset. The format is + projects/{projectId}/locations/{locationId}/keyRings/{keyRingId}/cryptoKeys/{keyId}. 
+ required: false + immutable: true diff --git a/mmv1/templates/terraform/examples/healthcare_dataset_cmek.tf.erb b/mmv1/templates/terraform/examples/healthcare_dataset_cmek.tf.erb new file mode 100644 index 000000000000..f841ed515a0f --- /dev/null +++ b/mmv1/templates/terraform/examples/healthcare_dataset_cmek.tf.erb @@ -0,0 +1,36 @@ +data "google_project" "project" {} + +resource "google_healthcare_dataset" "default" { + name = "<%= ctx[:vars]['dataset_name'] %>" + location = "us-central1" + time_zone = "UTC" + + encryption_spec { + kms_key_name = google_kms_crypto_key.crypto_key.id + } + + depends_on = [ + google_kms_crypto_key_iam_binding.healthcare_cmek_keyuser + ] +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "<%= ctx[:vars]['key_name'] %>" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ENCRYPT_DECRYPT" +} + +resource "google_kms_key_ring" "key_ring" { + name = "<%= ctx[:vars]['keyring_name'] %>" + location = "us-central1" +} + +resource "google_kms_crypto_key_iam_binding" "healthcare_cmek_keyuser" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + members = [ + "serviceAccount:service-${data.google_project.project.number}@gcp-sa-healthcare.iam.gserviceaccount.com", + ] +} + + From 3629cbd21eba2e3b718375a3a922061fab32a518 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 24 Jun 2024 13:34:27 -0500 Subject: [PATCH 198/356] Update version_6_upgrade.html.markdown (#11025) --- .../website/docs/guides/version_6_upgrade.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown index b0530361aeb0..a32f7fa8fb58 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown @@ 
-1,7 +1,7 @@ --- -page_title: "Terraform Google Provider 6.0.0 Upgrade Guide" +page_title: "Terraform provider for Google Cloud 6.0.0 Upgrade Guide" description: |- - Terraform Google Provider 6.0.0 Upgrade Guide + Terraform provider for Google Cloud 6.0.0 Upgrade Guide --- # Terraform Google Provider 6.0.0 Upgrade Guide From 58298c8861fc9095ddfe3d3ce36719987f482dcb Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Mon, 24 Jun 2024 12:25:27 -0700 Subject: [PATCH 199/356] Custom Update ordering diffs (#11026) --- mmv1/api/resource.go | 19 +++++++++++++++++++ mmv1/templates/terraform/resource.go.tmpl | 15 ++++++++------- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index b5002699a107..ea122cd6ef82 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -17,6 +17,7 @@ import ( "maps" "regexp" "strings" + "sort" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/product" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/resource" @@ -1467,6 +1468,24 @@ func (r Resource) PropertiesByCustomUpdate() map[UpdateGroup][]*Type { return groupedCustomUpdateProps } +func (r Resource) PropertiesByCustomUpdateGroups() []UpdateGroup { + customUpdateProps := r.propertiesWithCustomUpdate(r.RootProperties()) + var updateGroups []UpdateGroup + for _, prop := range customUpdateProps { + groupedProperty := UpdateGroup{UpdateUrl: prop.UpdateUrl, + UpdateVerb: prop.UpdateVerb, + UpdateId: prop.UpdateId, + FingerprintName: prop.FingerprintName} + + if slices.Contains(updateGroups, groupedProperty){ + continue + } + updateGroups = append(updateGroups, groupedProperty) + } + sort.Slice(updateGroups, func(i, j int) bool { return updateGroups[i].UpdateId < updateGroups[j].UpdateId }) + return updateGroups +} + func (r Resource) FieldSpecificUpdateMethods() bool { return (len(r.PropertiesByCustomUpdate()) > 0) } diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 
a4442d3436f0..62406ad197e8 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -830,10 +830,11 @@ if len(updateMask) > 0 { {{- end}}{{/*if not immutable*/}} {{ if $.FieldSpecificUpdateMethods }} d.Partial(true) -{{ range $index, $props := $.PropertiesByCustomUpdate }} -if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\""}}") { +{{ $CustomUpdateProps := $.PropertiesByCustomUpdate }} +{{ range $group := $.PropertiesByCustomUpdateGroups }} +if d.HasChange("{{ join ($.PropertyNamesToStrings (index $CustomUpdateProps $group)) "\") || d.HasChange(\""}}") { obj := make(map[string]interface{}) -{{ if $index.FingerprintName }} +{{ if $group.FingerprintName }} getUrl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.SelfLinkUri}}") if err != nil { return err @@ -866,10 +867,10 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("{{ $.ResourceName -}} %q", d.Id())) } - obj["{{ $index.FingerprintName }}"] = getRes["{{ $index.FingerprintName }}"] + obj["{{ $group.FingerprintName }}"] = getRes["{{ $group.FingerprintName }}"] {{ end }}{{/*if FingerprintName*/}} -{{ range $propsByKey := $.CustomUpdatePropertiesByKey $index.UpdateUrl $index.UpdateId $index.FingerprintName $index.UpdateVerb }} +{{ range $propsByKey := $.CustomUpdatePropertiesByKey $group.UpdateUrl $group.UpdateId $group.FingerprintName $group.UpdateVerb }} {{ $propsByKey.ApiName -}}Prop, err := expand{{ if $.NestedQuery -}}Nested{{ end }}{{ $.ResourceName -}}{{ camelize $propsByKey.Name "upper" -}}({{ if $propsByKey.FlattenObject }}nil{{else}}d.Get("{{underscore $propsByKey.Name}}"){{ end }}, d, config) if err != nil { return err @@ -916,7 +917,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" transport_tpg.MutexStore.Lock(lockName) defer 
transport_tpg.MutexStore.Unlock(lockName) {{- end}} - url, err := tpgresource.ReplaceVars{{if $.LegacyLongFormProject -}}ForId{{ end -}}(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{ $index.UpdateUrl }}") + url, err := tpgresource.ReplaceVars{{if $.LegacyLongFormProject -}}ForId{{ end -}}(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{ $group.UpdateUrl }}") if err != nil { return err } @@ -939,7 +940,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings $props) "\") || d.HasChange(\" res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, - Method: "{{ $index.UpdateVerb }}", + Method: "{{ $group.UpdateVerb }}", Project: billingProject, RawURL: url, UserAgent: userAgent, From 4b0dfcccb2a48194da4ef723ced44c99f87d5903 Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Mon, 24 Jun 2024 13:33:20 -0700 Subject: [PATCH 200/356] go rewrite: Fixing custom update whitespace diffs and a build failure case (#11029) --- mmv1/templates/terraform/resource.go.tmpl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 62406ad197e8..f1fce957a967 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -893,9 +893,9 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings (index $CustomUpdateProps $gro -*/}} {{- if $propsByKey.SendEmptyValue -}} } else if v, ok := d.GetOkExists("{{ underscore $propsByKey.Name -}}"); ok || !reflect.DeepEqual(v, {{ $propsByKey.ApiName -}}Prop) { -{{ else if $propsByKey.FlattenObject -}} +{{- else if $propsByKey.FlattenObject -}} } else if !tpgresource.IsEmptyValue(reflect.ValueOf({{ $propsByKey.ApiName -}}Prop)) { -{{ else -}} +{{- else -}} } else if v, ok := d.GetOkExists("{{ underscore $propsByKey.Name -}}"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, {{ $propsByKey.ApiName -}}Prop)) { {{- end}} 
obj["{{ $propsByKey.ApiName -}}"] = {{ $propsByKey.ApiName -}}Prop @@ -960,7 +960,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings (index $CustomUpdateProps $gro log.Printf("[DEBUG] Finished updating {{ $.Name }} %q: %#v", d.Id(), res) } -{{ if $.GetAsync.Allow "update" -}} +{{ if and ($.GetAsync) ($.GetAsync.Allow "update") -}} {{ if $.GetAsync.IsA "OpAsync" -}} err = {{ $.ClientNamePascal -}}OperationWaitTime( config, res, {{if or $.HasProject $.GetAsync.IncludeProject -}} {{if $.LegacyLongFormProject -}}tpgresource.GetResourceNameFromSelfLink(project){{ else }}project{{ end }}, {{ end -}} "Updating {{ $.Name -}}", userAgent, From cf92c508dd4e200ce751d2e652a7414ad2ce2258 Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Mon, 24 Jun 2024 13:40:16 -0700 Subject: [PATCH 201/356] Make region from zone functions support more zone names (#10714) --- .../terraform/functions/region_from_zone.go | 18 ++++++++++-------- .../third_party/terraform/tpgresource/utils.go | 11 +++++------ .../tests/data/example_pubsub_lite_topic.tf | 2 +- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/mmv1/third_party/terraform/functions/region_from_zone.go b/mmv1/third_party/terraform/functions/region_from_zone.go index d1ba3104e88c..1a75b751169b 100644 --- a/mmv1/third_party/terraform/functions/region_from_zone.go +++ b/mmv1/third_party/terraform/functions/region_from_zone.go @@ -3,6 +3,7 @@ package functions import ( "context" "fmt" + "strings" "github.com/hashicorp/terraform-plugin-framework/function" ) @@ -35,23 +36,24 @@ func (f RegionFromZoneFunction) Definition(ctx context.Context, req function.Def func (f RegionFromZoneFunction) Run(ctx context.Context, req function.RunRequest, resp *function.RunResponse) { // Load arguments from function call - var arg0 string - resp.Error = function.ConcatFuncErrors(req.Arguments.GetArgument(ctx, 0, &arg0)) + var zone string + resp.Error = function.ConcatFuncErrors(req.Arguments.GetArgument(ctx, 0, &zone)) if resp.Error != nil 
{ return } - if arg0 == "" { + if zone == "" { err := function.NewArgumentFuncError(0, "The input string cannot be empty.") resp.Error = function.ConcatFuncErrors(err) return } - if arg0[len(arg0)-2] != '-' { - err := function.NewArgumentFuncError(0, fmt.Sprintf("The input string \"%s\" is not a valid zone name.", arg0)) + zoneParts := strings.Split(zone, "-") + + if len(zoneParts) < 3 { + err := function.NewArgumentFuncError(0, fmt.Sprintf("The input string \"%s\" is not a valid zone name.", zone)) resp.Error = function.ConcatFuncErrors(err) - return + } else { + resp.Error = function.ConcatFuncErrors(resp.Result.Set(ctx, strings.Join(zoneParts[:len(zoneParts)-1], "-"))) } - - resp.Error = function.ConcatFuncErrors(resp.Result.Set(ctx, arg0[:len(arg0)-2])) } diff --git a/mmv1/third_party/terraform/tpgresource/utils.go b/mmv1/third_party/terraform/tpgresource/utils.go index 4d16bc9a15eb..a86a88d98a0e 100644 --- a/mmv1/third_party/terraform/tpgresource/utils.go +++ b/mmv1/third_party/terraform/tpgresource/utils.go @@ -58,15 +58,14 @@ type TerraformResourceDiff interface { // Contains functions that don't really belong anywhere else. // GetRegionFromZone returns the region from a zone for Google cloud. -// This is by removing the last two chars from the zone name to leave the region -// If there aren't enough characters in the input string, an empty string is returned +// This is by removing the characters after the last '-'. // e.g. 
southamerica-west1-a => southamerica-west1 func GetRegionFromZone(zone string) string { - if zone != "" && len(zone) > 2 { - region := zone[:len(zone)-2] - return region + zoneParts := strings.Split(zone, "-") + if len(zoneParts) < 3 { + return "" } - return "" + return strings.Join(zoneParts[:len(zoneParts)-1], "-") } // Infers the region based on the following (in order of priority): diff --git a/mmv1/third_party/tgc/tests/data/example_pubsub_lite_topic.tf b/mmv1/third_party/tgc/tests/data/example_pubsub_lite_topic.tf index 6697b15713e8..47607a535ce8 100644 --- a/mmv1/third_party/tgc/tests/data/example_pubsub_lite_topic.tf +++ b/mmv1/third_party/tgc/tests/data/example_pubsub_lite_topic.tf @@ -29,7 +29,7 @@ provider "google" { resource "google_pubsub_lite_topic" "example" { name = "example-topic" - zone = "us-central1a" + zone = "us-central1-a" partition_config { count = 1 From 4abf95bc8d1b3451af255c59267abd98618838b5 Mon Sep 17 00:00:00 2001 From: Lingkai Shen Date: Mon, 24 Jun 2024 18:35:16 -0400 Subject: [PATCH 202/356] Fix https://github.com/hashicorp/terraform-provider-google/issues/18085 (#11031) --- mmv1/products/identityplatform/Config.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/mmv1/products/identityplatform/Config.yaml b/mmv1/products/identityplatform/Config.yaml index 97bfb4f98354..1a8ed31bceac 100644 --- a/mmv1/products/identityplatform/Config.yaml +++ b/mmv1/products/identityplatform/Config.yaml @@ -233,6 +233,7 @@ properties: name: 'smsRegionConfig' description: | Configures the regions where users are allowed to send verification SMS for the project or tenant. This is based on the calling code of the destination phone number. 
+ default_from_api: true properties: - !ruby/object:Api::Type::NestedObject name: 'allowByDefault' From 7da2a24447ba38f3b52194b3f56ba02d1346890c Mon Sep 17 00:00:00 2001 From: Maeve <167252720+maemayve@users.noreply.github.com> Date: Tue, 25 Jun 2024 06:30:54 -0700 Subject: [PATCH 203/356] Gcf gen2 base image update (#10627) --- mmv1/products/cloudfunctions2/Function.yaml | 71 ++++++++ ...oudfunctions2_runtime_update_policy.go.erb | 15 ++ .../examples/cloudfunctions2_abiu.tf.erb | 72 ++++++++ .../cloudfunctions2_abiu_on_deploy.tf.erb | 72 ++++++++ .../resource_cloudfunctions2_function_test.go | 169 ++++++++++++++++++ 5 files changed, 399 insertions(+) create mode 100644 mmv1/templates/terraform/encoders/cloudfunctions2_runtime_update_policy.go.erb create mode 100644 mmv1/templates/terraform/examples/cloudfunctions2_abiu.tf.erb create mode 100644 mmv1/templates/terraform/examples/cloudfunctions2_abiu_on_deploy.tf.erb diff --git a/mmv1/products/cloudfunctions2/Function.yaml b/mmv1/products/cloudfunctions2/Function.yaml index 1027898e7e16..1f2fe20fb289 100644 --- a/mmv1/products/cloudfunctions2/Function.yaml +++ b/mmv1/products/cloudfunctions2/Function.yaml @@ -51,6 +51,8 @@ import_format: ['projects/{{project}}/locations/{{location}}/functions/{{name}}'] taint_resource_on_failed_create: true autogen_async: true +custom_code: !ruby/object:Provider::Terraform::CustomCode + encoder: 'templates/terraform/encoders/cloudfunctions2_runtime_update_policy.go.erb' examples: - !ruby/object:Provider::Terraform::Examples name: 'cloudfunctions2_basic' @@ -277,6 +279,48 @@ examples: unencoded-ar-repo: 'ar-repo' kms_key_name: 'cmek-key' project: 'my-project-name' + - !ruby/object:Provider::Terraform::Examples + name: 'cloudfunctions2_abiu' + primary_resource_id: 'function' + min_version: beta + vars: + bucket_name: 'gcf-source' + service_account: 'gcf-sa' + topic: 'functions2-topic' + function: 'gcf-function' + zip_path: 'function-source.zip' + test_env_vars: + project: 
:PROJECT_NAME + test_vars_overrides: + zip_path: '"./test-fixtures/function-source-pubsub.zip"' + primary_resource_id: '"terraform-test"' + location: + '"europe-west6"' + # ignore these fields during import step + ignore_read_extra: + - 'build_config.0.source.0.storage_source.0.object' + - 'build_config.0.source.0.storage_source.0.bucket' + - !ruby/object:Provider::Terraform::Examples + name: 'cloudfunctions2_abiu_on_deploy' + primary_resource_id: 'function' + min_version: beta + vars: + bucket_name: 'gcf-source' + service_account: 'gcf-sa' + topic: 'functions2-topic' + function: 'gcf-function' + zip_path: 'function-source.zip' + test_env_vars: + project: :PROJECT_NAME + test_vars_overrides: + zip_path: '"./test-fixtures/function-source-pubsub.zip"' + primary_resource_id: '"terraform-test"' + location: + '"europe-west6"' + # ignore these fields during import step + ignore_read_extra: + - 'build_config.0.source.0.storage_source.0.object' + - 'build_config.0.source.0.storage_source.0.bucket' iam_policy: !ruby/object:Api::Resource::IamPolicy parent_resource_attribute: 'cloud_function' method_name_separator: ':' @@ -448,6 +492,33 @@ properties: name: 'serviceAccount' description: 'The fully-qualified name of the service account to be used for building the container.' default_from_api: true + - !ruby/object:Api::Type::NestedObject + name: 'automaticUpdatePolicy' + description: | + Security patches are applied automatically to the runtime without requiring + the function to be redeployed. + exactly_one_of: + - automatic_update_policy + - on_deploy_update_policy + send_empty_value: true + allow_empty_object: true + default_from_api: true + properties: [] + - !ruby/object:Api::Type::NestedObject + name: 'onDeployUpdatePolicy' + description: | + Security patches are only applied when a function is redeployed. 
+ exactly_one_of: + - automatic_update_policy + - on_deploy_update_policy + send_empty_value: true + allow_empty_object: true + properties: + - !ruby/object:Api::Type::String + name: 'runtimeVersion' + output: true + description: | + The runtime version which was used during latest function deployment. - !ruby/object:Api::Type::NestedObject name: 'serviceConfig' description: 'Describes the Service being deployed.' diff --git a/mmv1/templates/terraform/encoders/cloudfunctions2_runtime_update_policy.go.erb b/mmv1/templates/terraform/encoders/cloudfunctions2_runtime_update_policy.go.erb new file mode 100644 index 000000000000..db4ef3e273ee --- /dev/null +++ b/mmv1/templates/terraform/encoders/cloudfunctions2_runtime_update_policy.go.erb @@ -0,0 +1,15 @@ +if obj == nil || obj["buildConfig"] == nil { + return obj, nil +} + +build_config := obj["buildConfig"].(map[string]interface{}) + +// Automatic Update policy is the default from API, unset it if the data +// contains the on-deploy policy. 
+if build_config["onDeployUpdatePolicy"] != nil { + delete(build_config, "automaticUpdatePolicy") +} + +obj["buildConfig"] = build_config + +return obj, nil diff --git a/mmv1/templates/terraform/examples/cloudfunctions2_abiu.tf.erb b/mmv1/templates/terraform/examples/cloudfunctions2_abiu.tf.erb new file mode 100644 index 000000000000..be7a2a08ac52 --- /dev/null +++ b/mmv1/templates/terraform/examples/cloudfunctions2_abiu.tf.erb @@ -0,0 +1,72 @@ +locals { + project = "<%= ctx[:test_env_vars]['project'] %>" # Google Cloud Platform Project ID +} + +resource "google_service_account" "account" { + provider = google-beta + account_id = "<%= ctx[:vars]['service_account'] %>" + display_name = "Test Service Account" +} + +resource "google_pubsub_topic" "topic" { + provider = google-beta + name = "<%= ctx[:vars]['topic'] %>" +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + name = "${local.project}-<%= ctx[:vars]['bucket_name'] %>" # Every bucket name must be globally unique + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + provider = google-beta + name = "function-source.zip" + bucket = google_storage_bucket.bucket.name + source = "<%= ctx[:vars]['zip_path'] %>" # Add path to the zipped function source code +} + +resource "google_cloudfunctions2_function" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + name = "<%= ctx[:vars]['function'] %>" + location = "europe-west6" + description = "a new function" + + build_config { + runtime = "nodejs16" + entry_point = "helloPubSub" # Set the entry point + environment_variables = { + BUILD_CONFIG_TEST = "build_test" + } + source { + storage_source { + bucket = google_storage_bucket.bucket.name + object = google_storage_bucket_object.object.name + } + } + automatic_update_policy {} + } + + service_config { + max_instance_count = 3 + min_instance_count = 1 + available_memory = "4Gi" + timeout_seconds = 60 + 
max_instance_request_concurrency = 80 + available_cpu = "4" + environment_variables = { + SERVICE_CONFIG_TEST = "config_test" + } + ingress_settings = "ALLOW_INTERNAL_ONLY" + all_traffic_on_latest_revision = true + service_account_email = google_service_account.account.email + } + + event_trigger { + trigger_region = "us-central1" + event_type = "google.cloud.pubsub.topic.v1.messagePublished" + pubsub_topic = google_pubsub_topic.topic.id + retry_policy = "RETRY_POLICY_RETRY" + } +} diff --git a/mmv1/templates/terraform/examples/cloudfunctions2_abiu_on_deploy.tf.erb b/mmv1/templates/terraform/examples/cloudfunctions2_abiu_on_deploy.tf.erb new file mode 100644 index 000000000000..61ca7d460ce7 --- /dev/null +++ b/mmv1/templates/terraform/examples/cloudfunctions2_abiu_on_deploy.tf.erb @@ -0,0 +1,72 @@ +locals { + project = "<%= ctx[:test_env_vars]['project'] %>" # Google Cloud Platform Project ID +} + +resource "google_service_account" "account" { + provider = google-beta + account_id = "<%= ctx[:vars]['service_account'] %>" + display_name = "Test Service Account" +} + +resource "google_pubsub_topic" "topic" { + provider = google-beta + name = "<%= ctx[:vars]['topic'] %>" +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + name = "${local.project}-<%= ctx[:vars]['bucket_name'] %>" # Every bucket name must be globally unique + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + provider = google-beta + name = "function-source.zip" + bucket = google_storage_bucket.bucket.name + source = "<%= ctx[:vars]['zip_path'] %>" # Add path to the zipped function source code +} + +resource "google_cloudfunctions2_function" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + name = "<%= ctx[:vars]['function'] %>" + location = "europe-west6" + description = "a new function" + + build_config { + runtime = "nodejs16" + entry_point = "helloPubSub" # Set the entry point + environment_variables 
= { + BUILD_CONFIG_TEST = "build_test" + } + source { + storage_source { + bucket = google_storage_bucket.bucket.name + object = google_storage_bucket_object.object.name + } + } + on_deploy_update_policy {} + } + + service_config { + max_instance_count = 3 + min_instance_count = 1 + available_memory = "4Gi" + timeout_seconds = 60 + max_instance_request_concurrency = 80 + available_cpu = "4" + environment_variables = { + SERVICE_CONFIG_TEST = "config_test" + } + ingress_settings = "ALLOW_INTERNAL_ONLY" + all_traffic_on_latest_revision = true + service_account_email = google_service_account.account.email + } + + event_trigger { + trigger_region = "us-central1" + event_type = "google.cloud.pubsub.topic.v1.messagePublished" + pubsub_topic = google_pubsub_topic.topic.id + retry_policy = "RETRY_POLICY_RETRY" + } +} diff --git a/mmv1/third_party/terraform/services/cloudfunctions2/resource_cloudfunctions2_function_test.go b/mmv1/third_party/terraform/services/cloudfunctions2/resource_cloudfunctions2_function_test.go index ab82a584fbca..65e41ed25ee7 100644 --- a/mmv1/third_party/terraform/services/cloudfunctions2/resource_cloudfunctions2_function_test.go +++ b/mmv1/third_party/terraform/services/cloudfunctions2/resource_cloudfunctions2_function_test.go @@ -125,6 +125,7 @@ resource "google_cloudfunctions2_function" "terraform-test2" { object = google_storage_bucket_object.object.name } } + on_deploy_update_policy {} } service_config { @@ -319,3 +320,171 @@ resource "google_cloudfunctions2_function" "function" { } }`, context) } + +func TestAccCloudFunctions2Function_updateAbiuFull(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "zip_path": "./test-fixtures/function-source.zip", + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckCloudfunctions2functionDestroyProducer(t), 
+ Steps: []resource.TestStep{ + { + Config: testAccCloudfunctions2function_abiuBasic(context), + }, + { + ResourceName: "google_cloudfunctions2_function.terraform-test2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "build_config.0.source.0.storage_source.0.object", "build_config.0.source.0.storage_source.0.bucket", "labels", "terraform_labels"}, + }, + { + Config: testAccCloudFunctions2Function_test_abiuUpdate(context), + }, + { + ResourceName: "google_cloudfunctions2_function.terraform-test2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "build_config.0.source.0.storage_source.0.object", "build_config.0.source.0.storage_source.0.bucket", "labels", "terraform_labels"}, + }, + { + Config: testAccCloudFunctions2Function_test_abiuUpdate2(context), + }, + { + ResourceName: "google_cloudfunctions2_function.terraform-test2", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "build_config.0.source.0.storage_source.0.object", "build_config.0.source.0.storage_source.0.bucket", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccCloudfunctions2function_abiuBasic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + name = "function-source.zip" + bucket = google_storage_bucket.bucket.name + source = "%{zip_path}" +} + +resource "google_cloudfunctions2_function" "terraform-test2" { + name = "tf-test-test-function%{random_suffix}" + location = "us-central1" + description = "a new function" + labels = { + env = "test" + } + + build_config { + runtime = "nodejs12" + entry_point = "helloHttp" + source { + storage_source { + bucket = google_storage_bucket.bucket.name + object = 
google_storage_bucket_object.object.name + } + } + automatic_update_policy {} + } + + service_config { + max_instance_count = 1 + available_memory = "1536Mi" + timeout_seconds = 30 + } +} +`, context) +} + +func testAccCloudFunctions2Function_test_abiuUpdate(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + name = "function-source.zip" + bucket = google_storage_bucket.bucket.name + source = "%{zip_path}" +} + +resource "google_cloudfunctions2_function" "terraform-test2" { + name = "tf-test-test-function%{random_suffix}" + location = "us-central1" + description = "an updated function with automatic runtime update specified" + labels = { + env = "test-update" + } + + build_config { + runtime = "nodejs12" + entry_point = "helloHttp" + source { + storage_source { + bucket = google_storage_bucket.bucket.name + object = google_storage_bucket_object.object.name + } + } + on_deploy_update_policy {} + } + + service_config { + min_instance_count = 1 + } +} +`, context) +} + +func testAccCloudFunctions2Function_test_abiuUpdate2(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-cloudfunctions2-function-bucket%{random_suffix}" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + name = "function-source.zip" + bucket = google_storage_bucket.bucket.name + source = "%{zip_path}" +} + +resource "google_cloudfunctions2_function" "terraform-test2" { + name = "tf-test-test-function%{random_suffix}" + location = "us-central1" + description = "an updated function with no runtime update specified" + labels = { + env = "test-update" + } + + build_config { + runtime = "nodejs12" + entry_point = "helloHttp" + 
source { + storage_source { + bucket = google_storage_bucket.bucket.name + object = google_storage_bucket_object.object.name + } + } + } + + service_config { + min_instance_count = 1 + } +} +`, context) +} From 52f46e43735ede0ecd2ac5edccc0ab8ad5becb28 Mon Sep 17 00:00:00 2001 From: Douglas Bunker Date: Tue, 25 Jun 2024 08:22:19 -0700 Subject: [PATCH 204/356] Fix typos in docker image data source docs (#11033) --- .../docs/d/artifact_registry_docker_image.html.markdown | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mmv1/third_party/terraform/website/docs/d/artifact_registry_docker_image.html.markdown b/mmv1/third_party/terraform/website/docs/d/artifact_registry_docker_image.html.markdown index 4065bd033515..62bed15ebcd0 100644 --- a/mmv1/third_party/terraform/website/docs/d/artifact_registry_docker_image.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/artifact_registry_docker_image.html.markdown @@ -21,9 +21,9 @@ resource "google_artifact_registry_repository" "my_repo" { } data "google_artifact_registry_docker_image" "my_image" { - repository = google_artifact_registry_repository.my_repo.id - image = "my-image" - tag = "my-tag" + location = google_artifact_registry_repository.my_repo.location + repository_id = google_artifact_registry_repository.my_repo.repository_id + image = "my-image:my-tag" } resource "google_cloud_run_v2_service" "default" { @@ -43,7 +43,7 @@ The following arguments are supported: * `location` - (Required) The location of the artifact registry. -* `repository_id` - (Required) The last part of the repository name. to fetch from. +* `repository_id` - (Required) The last part of the repository name to fetch from. * `image_name` - (Required) The image name to fetch. If no digest or tag is provided, then the latest modified image will be used. 
From b67395c3604f75aab6946fcb30ec952a408ef122 Mon Sep 17 00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Tue, 25 Jun 2024 08:26:32 -0700 Subject: [PATCH 205/356] Corrected us-central1a usage in TGC tests (#11037) --- .../tgc/tests/data/example_pubsub_lite_subscription.json | 4 ++-- .../tgc/tests/data/example_pubsub_lite_subscription.tf | 2 +- .../third_party/tgc/tests/data/example_pubsub_lite_topic.json | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mmv1/third_party/tgc/tests/data/example_pubsub_lite_subscription.json b/mmv1/third_party/tgc/tests/data/example_pubsub_lite_subscription.json index f3525a545514..38d12cb5204f 100644 --- a/mmv1/third_party/tgc/tests/data/example_pubsub_lite_subscription.json +++ b/mmv1/third_party/tgc/tests/data/example_pubsub_lite_subscription.json @@ -1,6 +1,6 @@ [ { - "name": "//pubsublite.googleapis.com/projects/{{.Provider.project}}/locations/us-central1a/subscriptions/example-subscription", + "name": "//pubsublite.googleapis.com/projects/{{.Provider.project}}/locations/us-central1-a/subscriptions/example-subscription", "asset_type": "pubsublite.googleapis.com/Subscription", "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", "resource": { @@ -12,7 +12,7 @@ "deliveryConfig": { "deliveryRequirement": "DELIVER_AFTER_STORED" }, - "topic": "projects/{{.Provider.project}}/locations/us-central1a/topics/my-topic" + "topic": "projects/{{.Provider.project}}/locations/us-central1-a/topics/my-topic" } } } diff --git a/mmv1/third_party/tgc/tests/data/example_pubsub_lite_subscription.tf b/mmv1/third_party/tgc/tests/data/example_pubsub_lite_subscription.tf index 8a99f4e498be..0936d1eaab69 100644 --- a/mmv1/third_party/tgc/tests/data/example_pubsub_lite_subscription.tf +++ b/mmv1/third_party/tgc/tests/data/example_pubsub_lite_subscription.tf @@ -30,7 +30,7 @@ provider "google" { resource "google_pubsub_lite_subscription" "example" { name = "example-subscription" topic = "my-topic" - zone = "us-central1a" + 
zone = "us-central1-a" delivery_config { delivery_requirement = "DELIVER_AFTER_STORED" } diff --git a/mmv1/third_party/tgc/tests/data/example_pubsub_lite_topic.json b/mmv1/third_party/tgc/tests/data/example_pubsub_lite_topic.json index 0edf91c77a40..89dd6976827a 100644 --- a/mmv1/third_party/tgc/tests/data/example_pubsub_lite_topic.json +++ b/mmv1/third_party/tgc/tests/data/example_pubsub_lite_topic.json @@ -1,6 +1,6 @@ [ { - "name": "//pubsublite.googleapis.com/projects/{{.Provider.project}}/locations/us-central1a/topics/example-topic", + "name": "//pubsublite.googleapis.com/projects/{{.Provider.project}}/locations/us-central1-a/topics/example-topic", "asset_type": "pubsublite.googleapis.com/Topic", "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", "resource": { @@ -17,7 +17,7 @@ "count": 1 }, "reservationConfig": { - "throughputReservation": "projects/{{.Provider.project}}/locations/us-central/reservations/example-reservation" + "throughputReservation": "projects/{{.Provider.project}}/locations/us-central1/reservations/example-reservation" }, "retentionConfig": { "perPartitionBytes": "32212254720" From 05168ff944d3b06a68a1e6a862c97b235338bbbb Mon Sep 17 00:00:00 2001 From: Iris Chen <10179943+iyabchen@users.noreply.github.com> Date: Tue, 25 Jun 2024 09:02:57 -0700 Subject: [PATCH 206/356] chore(ci): fix test_terraform_vcr test analytics template (#11034) --- .ci/magician/cmd/test_terraform_vcr_test.go | 6 +++--- .ci/magician/cmd/test_terraform_vcr_test_analytics.tmpl | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.ci/magician/cmd/test_terraform_vcr_test.go b/.ci/magician/cmd/test_terraform_vcr_test.go index 773c28c7e2ef..604ec2feb06c 100644 --- a/.ci/magician/cmd/test_terraform_vcr_test.go +++ b/.ci/magician/cmd/test_terraform_vcr_test.go @@ -237,7 +237,7 @@ func TestAnalyticsComment(t *testing.T) { want: strings.Join( []string{ "#### Tests analytics", - "Total tests: 7", + "Total tests: 6", "Passed tests: 3", "Skipped tests: 
2", "Affected tests: 1", @@ -268,7 +268,7 @@ func TestAnalyticsComment(t *testing.T) { want: strings.Join( []string{ "#### Tests analytics", - "Total tests: 7", + "Total tests: 6", "Passed tests: 3", "Skipped tests: 2", "Affected tests: 1", @@ -302,7 +302,7 @@ func TestAnalyticsComment(t *testing.T) { }, want: strings.Join([]string{ "#### Tests analytics", - "Total tests: 7", + "Total tests: 6", "Passed tests: 3", "Skipped tests: 2", "Affected tests: 1", diff --git a/.ci/magician/cmd/test_terraform_vcr_test_analytics.tmpl b/.ci/magician/cmd/test_terraform_vcr_test_analytics.tmpl index 391c07d05213..9c4006541946 100644 --- a/.ci/magician/cmd/test_terraform_vcr_test_analytics.tmpl +++ b/.ci/magician/cmd/test_terraform_vcr_test_analytics.tmpl @@ -1,5 +1,5 @@ #### Tests analytics -Total tests: {{add (add (len .ReplayingResult.PassedTests) (len .ReplayingResult.PassedTests)) (len .ReplayingResult.FailedTests) }} +Total tests: {{add (add (len .ReplayingResult.PassedTests) (len .ReplayingResult.SkippedTests)) (len .ReplayingResult.FailedTests) }} Passed tests: {{len .ReplayingResult.PassedTests}} Skipped tests: {{len .ReplayingResult.SkippedTests}} Affected tests: {{len .ReplayingResult.FailedTests}} From 1c89cc941c82ee1bfad64e657206f91a3bd25338 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Tue, 25 Jun 2024 11:17:49 -0500 Subject: [PATCH 207/356] Update upgrade guide instructions (#11028) --- .../make-a-breaking-change.md | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/docs/content/develop/breaking-changes/make-a-breaking-change.md b/docs/content/develop/breaking-changes/make-a-breaking-change.md index a7ead0be56bf..3282c5ed83e9 100644 --- a/docs/content/develop/breaking-changes/make-a-breaking-change.md +++ b/docs/content/develop/breaking-changes/make-a-breaking-change.md @@ -62,7 +62,7 @@ The general process for contributing a breaking change to the 1. Make the `main` branch forwards-compatible with the major release 2. 
Add deprecations and warnings to the `main` branch of `magic-modules` -3. Add upgrade guide entries to the `main` branch of `magic-modules` +3. Add upgrade guide entries to the `FEATURE-BRANCH-major-release-6.0.0` branch of `magic-modules` 4. Make the breaking change on `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` These are covered in more detail in the following sections. The upgrade guide @@ -169,20 +169,7 @@ The deprecation message will automatically show up in the resource documentation Other breaking changes should be called out in the docs for the impacted field or resource. It is also great to log warnings at runtime if possible. -### Add upgrade guide entries to the `main` branch of `magic-modules` - -Upgrade guide entries should be added to -[{{< param upgradeGuide >}}](https://github.com/GoogleCloudPlatform/magic-modules/blob/main/mmv1/third_party/terraform/website/docs/guides/{{< param upgradeGuide >}}). -Entries should focus on the changes that users need to make when upgrading -to `{{% param "majorVersion" %}}`, rather than how to write configurations -after upgrading. - -See [Terraform provider for Google Cloud 5.0.0 Upgrade Guide](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/version_5_upgrade) -and other upgrade guides for examples. - -The upgrade guide and the actual breaking change will be merged only after both are completed. - -### Make the breaking change on `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` +### Make the change on `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` When working on your breaking change, make sure that your base branch is `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}`. This @@ -207,14 +194,17 @@ with the following changes: are present on the major release branch. Changes to the `main` branch will be merged into the major release branch every Monday. 1. Make the breaking change. +1. 
Add the upgrade guide entries to +[{{< param upgradeGuide >}}](https://github.com/GoogleCloudPlatform/magic-modules/blob/FEATURE-BRANCH-major-release-6.0.0/mmv1/third_party/terraform/website/docs/guides/{{< param upgradeGuide >}}). Entries should focus on the changes that users need to make when upgrading +to `{{% param "majorVersion" %}}`, rather than how to write configurations +after upgrading. See [Terraform provider for Google Cloud 5.0.0 Upgrade Guide](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/version_5_upgrade) +and other upgrade guides for examples. 1. Remove any deprecation notices and warnings (including in documentation) not already removed by the breaking change. 1. When you create your pull request, [change the base branch](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-base-branch-of-a-pull-request) to `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` 1. To resolve merge conflicts with `git rebase` or `git merge`, use `FEATURE-BRANCH-major-release-{{% param "majorVersion" %}}` instead of `main`. -The upgrade guide and the actual breaking change will be merged only after both are completed. - ## What's next? 
- [Run tests]({{< ref "/develop/test/run-tests.md" >}}) From 4a08ebfe774137cf419f506ed429650bce212aae Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Tue, 25 Jun 2024 09:29:26 -0700 Subject: [PATCH 208/356] Remove diffs in iam documentations (#11027) --- mmv1/api/resource.go | 28 +++++++++++++------ .../terraform/resource_iam.html.markdown.tmpl | 17 ++++++----- 2 files changed, 28 insertions(+), 17 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index ea122cd6ef82..319b62956127 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -985,15 +985,9 @@ func ImportIdFormats(importFormat, identity []string, baseUrl string) []string { // `{{project}}/{{%name}}` as there is no way to differentiate between // project-name/resource-name and resource-name/with-slash if !strings.Contains(idFormats[0], "%") { - idFormats = append(idFormats, shortIdFormat, shortIdDefaultProjectFormat) - if shortIdDefaultProjectFormat != shortIdDefaultFormat { - idFormats = append(idFormats, shortIdDefaultFormat) - } + idFormats = append(idFormats, shortIdFormat, shortIdDefaultProjectFormat, shortIdDefaultFormat) } - idFormats = google.Reject(slices.Compact(idFormats), func(i string) bool { - return i == "" - }) slices.SortFunc(idFormats, func(a, b string) int { i := strings.Count(a, "/") j := strings.Count(b, "/") @@ -1003,7 +997,25 @@ func ImportIdFormats(importFormat, identity []string, baseUrl string) []string { return i - j }) slices.Reverse(idFormats) - return idFormats + + // Remove duplicates from idFormats + uniq := make([]string, len(idFormats)) + uniq[0] = idFormats[0] + i := 1 + j := 1 + for j < len(idFormats) { + format := idFormats[j] + if format != uniq[i-1] { + uniq[i] = format + i++ + } + j++ + } + + uniq = google.Reject(slices.Compact(uniq), func(i string) bool { + return i == "" + }) + return uniq } func (r Resource) IgnoreReadPropertiesToString(e resource.Examples) string { diff --git a/mmv1/templates/terraform/resource_iam.html.markdown.tmpl 
b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl index 88bd230b9359..5ff2c6cc9dd0 100644 --- a/mmv1/templates/terraform/resource_iam.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl @@ -103,7 +103,7 @@ With IAM Conditions: ```hcl data "google_iam_policy" "admin" { -{{ if eq $.MinVersionObj.Name "beta" }} +{{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta {{- end }} binding { @@ -135,7 +135,7 @@ resource "{{ $.IamTerraformName }}_policy" "policy" { resource "{{ $.IamTerraformName }}_binding" "binding" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta -{{- end -}} +{{- end }} {{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} role = "{{if $.IamPolicy.AdminIamRole}}{{$.IamPolicy.AdminIamRole }}{{else}}{{$.IamPolicy.AllowedIamRole}}{{end}}" members = [ @@ -169,22 +169,22 @@ resource "{{ $.IamTerraformName }}_binding" "binding" { ```hcl resource "{{ $.IamTerraformName }}_member" "member" { -{{- if eq $.MinVersionObj.Name "beta" -}} +{{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta -{{- end -}} +{{- end }} {{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} role = "{{if $.IamPolicy.AdminIamRole}}{{$.IamPolicy.AdminIamRole }}{{else}}{{$.IamPolicy.AllowedIamRole}}{{end}}" member = "user:jane@example.com" } ``` -{{ if $.IamPolicy.IamConditionsRequestType -}} +{{ if $.IamPolicy.IamConditionsRequestType }} With IAM Conditions: ```hcl resource "{{ $.IamTerraformName }}_member" "member" { {{- if eq $.MinVersionObj.Name "beta" }} provider = google-beta -{{- end -}} +{{- end }} {{- $.CustomTemplate $.IamPolicy.ExampleConfigBody false }} role = "{{if $.IamPolicy.AdminIamRole}}{{$.IamPolicy.AdminIamRole}}{{else}}{{$.IamPolicy.AllowedIamRole}}{{end}}" member = "user:jane@example.com" @@ -245,8 +245,7 @@ The following arguments are supported: * `policy_data` - (Required only by `{{ $.IamTerraformName }}_policy`) The policy data generated by a `google_iam_policy` data source. 
- -{{ if $.IamPolicy.IamConditionsRequestType -}} +{{ if $.IamPolicy.IamConditionsRequestType }} * `condition` - (Optional) An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding. Structure is documented below. @@ -263,7 +262,7 @@ The `condition` block supports: ~> **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will consider it to be an entirely different resource and will treat it as such. -{{- end -}} +{{- end }} ## Attributes Reference In addition to the arguments listed above, the following computed attributes are From f66e4d6ba8fa7a72e2672bfe24b425d1b4afe90e Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Tue, 25 Jun 2024 12:43:38 -0500 Subject: [PATCH 209/356] go rewrite - compute documentation diffs (#11021) --- mmv1/api/resource.go | 5 +- mmv1/google/template_utils.go | 59 +++++++++++++++++++ ..._property_documentation.html.markdown.tmpl | 44 +++++++++----- .../property_documentation.html.markdown.tmpl | 10 ++-- .../terraform/resource.html.markdown.tmpl | 40 +++++++------ 5 files changed, 121 insertions(+), 37 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 319b62956127..865a4877728b 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -1391,6 +1391,9 @@ func (r Resource) GetPropertyUpdateMasksGroups(properties []*Type, maskPrefix st // Formats whitespace in the style of the old Ruby generator's descriptions in documentation func (r Resource) FormatDocDescription(desc string, indent bool) string { + if desc == "" { + return "" + } returnString := desc if indent { returnString = strings.ReplaceAll(returnString, "\n\n", "\n") @@ -1399,7 +1402,7 @@ func (r Resource) FormatDocDescription(desc string, indent bool) string { // fix removing for ruby -> go transition diffs returnString = 
strings.ReplaceAll(returnString, "\n \n **Note**: This field is non-authoritative,", "\n\n **Note**: This field is non-authoritative,") - return strings.TrimSuffix(returnString, "\n ") + return fmt.Sprintf("\n %s", strings.TrimSuffix(returnString, "\n ")) } return strings.TrimSuffix(returnString, "\n") } diff --git a/mmv1/google/template_utils.go b/mmv1/google/template_utils.go index 1053894233ba..78eb2dea26ac 100644 --- a/mmv1/google/template_utils.go +++ b/mmv1/google/template_utils.go @@ -14,10 +14,15 @@ package google import ( + "bytes" "errors" + "fmt" + "path/filepath" "strings" "text/template" + + "github.com/golang/glog" ) // Build a map(map[string]interface{}) from a list of paramerter @@ -66,4 +71,58 @@ var TemplateFunctions = template.FuncMap{ "sub": subtract, "plus": plus, "firstSentence": FirstSentence, + "trimTemplate": TrimTemplate, +} + +// Temporary function to simulate how Ruby MMv1's lines() function works +// for nested documentation. Can replace with normal "template" after switchover +func TrimTemplate(templatePath string, e any) string { + templates := []string{ + fmt.Sprintf("templates/terraform/%s", templatePath), + "templates/terraform/expand_resource_ref.tmpl", + } + templateFileName := filepath.Base(templatePath) + + // Need to remake TemplateFunctions, referencing it directly here + // causes a declaration loop + var templateFunctions = template.FuncMap{ + "title": SpaceSeparatedTitle, + "replace": strings.Replace, + "replaceAll": strings.ReplaceAll, + "camelize": Camelize, + "underscore": Underscore, + "plural": Plural, + "contains": strings.Contains, + "join": strings.Join, + "lower": strings.ToLower, + "upper": strings.ToUpper, + "dict": wrapMultipleParams, + "format2regex": Format2Regex, + "hasPrefix": strings.HasPrefix, + "sub": subtract, + "plus": plus, + "firstSentence": FirstSentence, + "trimTemplate": TrimTemplate, + } + + tmpl, err := template.New(templateFileName).Funcs(templateFunctions).ParseFiles(templates...) 
+ if err != nil { + glog.Exit(err) + } + + contents := bytes.Buffer{} + if err = tmpl.ExecuteTemplate(&contents, templateFileName, e); err != nil { + glog.Exit(err) + } + + rs := contents.String() + + if rs == "" { + return rs + } + + for strings.HasSuffix(rs, "\n") { + rs = strings.TrimSuffix(rs, "\n") + } + return fmt.Sprintf("%s\n", rs) } diff --git a/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl index f451327fe851..4b4979bc6193 100644 --- a/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/nested_property_documentation.html.markdown.tmpl @@ -1,18 +1,34 @@ -{{- define "nestedPropertyDocumentation" }} - {{- if $.FlattenObject }} - {{- range $np := $.NestedProperties }} -{{- template "nestedPropertyDocumentation" $np -}} - {{- end}} - {{- else if $.NestedProperties }} +{{ "" }} +{{- if $.FlattenObject }} + {{- range $np := $.NestedProperties }} +{{- trimTemplate "nested_property_documentation.html.markdown.tmpl" $np -}} + {{- end -}} +{{- else if $.NestedProperties }} The `{{ underscore $.Name }}` block {{ if $.Output }}contains{{ else }}supports{{ end }}: -{{ if $.IsA "Map" }} - * `{{ underscore $.KeyName }}` - (Required) The identifier for this object. Format specified above. - {{- end}} +{{ "" }} + {{- if $.IsA "Map" }} +* `{{ underscore $.KeyName }}` - (Required) The identifier for this object. Format specified above. 
+{{ "" }} + {{- end -}} + {{- if $.NestedProperties }} {{- range $np := $.NestedProperties }} -{{- template "propertyDocumentation" $np }} +{{- trimTemplate "property_documentation.html.markdown.tmpl" $np -}} + {{- end -}} +{{ "" }} + {{- $innerNested := false }} + {{- range $np := $.NestedProperties }} + {{- if $np.NestedProperties }} + {{- $innerNested = true }} + {{- end }} + {{- end }} + {{- if $innerNested}} +{{ "" }} {{- end }} {{- range $np := $.NestedProperties }} -{{- template "nestedPropertyDocumentation" $np -}} - {{- end}} - {{- end}} -{{- end}} \ No newline at end of file + {{- if $np.NestedProperties }} +{{- trimTemplate "nested_property_documentation.html.markdown.tmpl" $np -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{ "" }} \ No newline at end of file diff --git a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl index 6b08df4ada86..92d35d242d26 100644 --- a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl @@ -1,4 +1,4 @@ -{{- define "propertyDocumentation" }} +{{ "" }} * `{{ underscore $.Name }}` - {{- if and (eq $.MinVersion "beta") (not (eq $.ResourceMetadata.MinVersion "beta")) }} {{- if $.Required }} @@ -21,12 +21,12 @@ (Deprecated) {{- end}} {{- end }} - {{ $.ResourceMetadata.FormatDocDescription $.Description true -}} + {{- $.ResourceMetadata.FormatDocDescription $.Description true -}} {{- if and (and ($.IsA "Array") ($.ItemType.IsA "Enum")) (and (not $.Output) (not $.ItemType.SkipDocsValues))}} {{- if $.ItemType.DefaultValue }} Default value is `{{ $.ItemType.DefaultValue }}`. {{- end }} - Each value may be one of: {{ $.ItemType.EnumValuesToString "`" false }}. + Each value may be one of: {{ $.ItemType.EnumValuesToString "`" false }}. 
{{- else if and ($.IsA "Enum") (and (not $.Output) (not (and $.ItemType $.ItemType.SkipDocsValues)))}} {{- if $.DefaultValue }} Default value is `{{ $.DefaultValue }}`. @@ -42,5 +42,5 @@ {{- if $.DeprecationMessage }} ~> **Warning:** {{ $.DeprecationMessage }} - {{- end }} -{{ end }} \ No newline at end of file + {{- end -}} +{{ "" }} \ No newline at end of file diff --git a/mmv1/templates/terraform/resource.html.markdown.tmpl b/mmv1/templates/terraform/resource.html.markdown.tmpl index cf13c07f9e3e..5ebe9a3680d0 100644 --- a/mmv1/templates/terraform/resource.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource.html.markdown.tmpl @@ -27,7 +27,7 @@ # ---------------------------------------------------------------------------- subcategory: "{{$.ProductMetadata.DisplayName}}" description: |- - {{ $.FormatDocDescription (firstSentence $.Description) true }} + {{- $.FormatDocDescription (firstSentence $.Description) true }} --- # {{$.TerraformName}} @@ -40,7 +40,7 @@ description: |- ~> **Warning:** This resource is in beta, and should be used with the terraform-provider-google-beta provider. See [Provider Versions](https://terraform.io/docs/providers/google/guides/provider_versions.html) for more details on beta resources. 
{{- end }} -{{ if $.References}} +{{ if or $.References.Api $.References.Guides }} To get more information about {{$.Name}}, see: {{- if $.References.Api}} @@ -53,7 +53,10 @@ To get more information about {{$.Name}}, see: * [{{$title}}]({{$link}}) {{- end }} {{- end }} -{{ end }} +{{ "" }} +{{- else }} +{{ "" }} +{{- end }} {{- if $.Docs.Warning}} ~> **Warning:** {{$.Docs.Warning}} @@ -76,37 +79,37 @@ values will be stored in the raw state as plain text: {{ $.SensitivePropsToStrin {{- end }} - {{- end }} ## Example Usage - {{ title (camelize $e.Name "upper" )}} ```hcl {{ $e.DocumentationHCLText -}} ``` + {{- end }} {{- end }} {{- end }} ## Argument Reference The following arguments are supported: - -{{ range $p := $.RootProperties }} +{{ "" }} +{{ "" }} +{{- range $p := $.RootProperties }} {{- if $p.Required }} -{{- template "propertyDocumentation" $p }} +{{- trimTemplate "property_documentation.html.markdown.tmpl" $p -}} {{- end }} {{- end }} - +{{ "" }} {{- range $p := $.AllUserProperties }} {{- if $p.Required }} -{{- template "nestedPropertyDocumentation" $p}} +{{- trimTemplate "nested_property_documentation.html.markdown.tmpl" $p -}} {{- end}} {{- end }} - - - - {{ range $p := $.RootProperties }} {{- if and (not $p.Required) (not $p.Output) }} -{{- template "propertyDocumentation" $p -}} +{{- trimTemplate "property_documentation.html.markdown.tmpl" $p -}} {{- end }} {{- end }} {{- if or (contains $.BaseUrl "{{project}}") (contains $.CreateUrl "{{project}}")}} @@ -121,8 +124,8 @@ The following arguments are supported: {{- end }} {{- range $p := $.AllUserProperties }} {{- if and (not $p.Required) (not $p.Output) }} -{{- template "nestedPropertyDocumentation" $p -}} -{{ end}} +{{- trimTemplate "nested_property_documentation.html.markdown.tmpl" $p -}} +{{- end}} {{- end }} ## Attributes Reference @@ -131,15 +134,16 @@ In addition to the arguments listed above, the following computed attributes are * `id` - an identifier for the resource with format `{{$.IdFormat}}` 
{{ range $p := $.RootProperties }} {{- if $p.Output }} -{{- template "propertyDocumentation" $p -}} +{{- trimTemplate "property_documentation.html.markdown.tmpl" $p }} {{- end}} {{- end }} -{{- if $.HasSelfLink }} +{{- if $.HasSelfLink -}} * `self_link` - The URI of the created resource. +{{ "" }} {{- end }} {{ range $p := $.AllUserProperties }} {{- if $p.Output }} -{{- template "nestedPropertyDocumentation" $p -}} +{{- trimTemplate "nested_property_documentation.html.markdown.tmpl" $p }} {{- end }} {{- end }} ## Timeouts @@ -148,7 +152,9 @@ This resource provides the following [Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: - `create` - Default is {{$.Timeouts.InsertMinutes}} minutes. -- `update` - Default is {{$.Timeouts.UpdateMinutes}} minutes.{{/*TODO Q2: <% if updatable?(object, properties) || object.root_labels? -%> */}} +{{- if or $.Updatable $.RootLabels }} +- `update` - Default is {{$.Timeouts.UpdateMinutes}} minutes. +{{- end }} - `delete` - Default is {{$.Timeouts.DeleteMinutes}} minutes. 
## Import From 5cfcabfe526a777872dc88cdf2f40a2b4c2070a9 Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Tue, 25 Jun 2024 10:50:41 -0700 Subject: [PATCH 210/356] 6.0.0 cassettes (#11040) --- .ci/magician/vcr/tester.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.ci/magician/vcr/tester.go b/.ci/magician/vcr/tester.go index 46e4a36473c1..c7b1343b4ef3 100644 --- a/.ci/magician/vcr/tester.go +++ b/.ci/magician/vcr/tester.go @@ -97,7 +97,8 @@ func (vt *Tester) FetchCassettes(version provider.Version, baseBranch, prNumber } cassettePath = filepath.Join(vt.baseDir, "cassettes", version.String()) vt.rnr.Mkdir(cassettePath) - if baseBranch != "FEATURE-BRANCH-major-release-5.0.0" { + if baseBranch != "FEATURE-BRANCH-major-release-6.0.0" { + // pull main cassettes (major release uses branch specific casssettes as primary ones) bucketPath := fmt.Sprintf("gs://ci-vcr-cassettes/%sfixtures/*", version.BucketPath()) if err := vt.fetchBucketPath(bucketPath, cassettePath); err != nil { fmt.Println("Error fetching cassettes: ", err) From c8d4f427e148d0c1a72869183303e787d8d93b08 Mon Sep 17 00:00:00 2001 From: z-nand Date: Tue, 25 Jun 2024 14:15:34 -0400 Subject: [PATCH 211/356] Add support for links in Monitoring AlertPolicy (#11017) Co-authored-by: Zhenhua Li --- mmv1/products/monitoring/AlertPolicy.yaml | 22 +++++++++++++++++++ .../resource_monitoring_alert_policy_test.go | 18 ++++++++++++++- 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/mmv1/products/monitoring/AlertPolicy.yaml b/mmv1/products/monitoring/AlertPolicy.yaml index 52e00415e3c2..ca0bf80921e5 100644 --- a/mmv1/products/monitoring/AlertPolicy.yaml +++ b/mmv1/products/monitoring/AlertPolicy.yaml @@ -983,6 +983,7 @@ properties: - documentation.0.content - documentation.0.mime_type - documentation.0.subject + - documentation.0.links description: | The text of the documentation, interpreted according to mimeType. 
The content may not exceed 8,192 Unicode characters and may not @@ -994,6 +995,7 @@ properties: - documentation.0.content - documentation.0.mime_type - documentation.0.subject + - documentation.0.links default_value: text/markdown description: | The format of the content field. Presently, only the value @@ -1004,8 +1006,28 @@ properties: - documentation.0.content - documentation.0.mime_type - documentation.0.subject + - documentation.0.links description: | The subject line of the notification. The subject line may not exceed 10,240 bytes. In notifications generated by this policy the contents of the subject line after variable expansion will be truncated to 255 bytes or shorter at the latest UTF-8 character boundary. + - !ruby/object:Api::Type::Array + name: links + at_least_one_of: + - documentation.0.content + - documentation.0.mime_type + - documentation.0.subject + - documentation.0.links + description: | + Links to content such as playbooks, repositories, and other resources. This field can contain up to 3 entries. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: displayName + description: | + A short display name for the link. The display name must not be empty or exceed 63 characters. Example: "playbook". + - !ruby/object:Api::Type::String + name: url + description: | + The url of a webpage. A url can be templatized by using variables in the path or the query parameters. The total length of a URL should not exceed 2083 characters before and after variable expansion. Example: "https://my_domain.com/playbook?name=${resource.name}". 
diff --git a/mmv1/third_party/terraform/services/monitoring/resource_monitoring_alert_policy_test.go b/mmv1/third_party/terraform/services/monitoring/resource_monitoring_alert_policy_test.go index c21ebca4a15d..2cf6d3c5194c 100644 --- a/mmv1/third_party/terraform/services/monitoring/resource_monitoring_alert_policy_test.go +++ b/mmv1/third_party/terraform/services/monitoring/resource_monitoring_alert_policy_test.go @@ -329,6 +329,10 @@ resource "google_monitoring_alert_policy" "full" { content = "test content" mime_type = "text/markdown" subject = "test subject" + links { + display_name = "link display name" + url = "http://mydomain.com" + } } } `, alertName, conditionName1, conditionName2) @@ -360,6 +364,14 @@ resource "google_monitoring_alert_policy" "mql" { content = "test content" mime_type = "text/markdown" subject = "test subject" + links { + display_name = "link display name" + url = "http://mydomain.com" + } + links { + display_name = "link display name2" + url = "http://mydomain2.com" + } } } `, alertName, conditionName) @@ -395,7 +407,7 @@ resource "google_monitoring_alert_policy" "log" { documentation { content = "test content" mime_type = "text/markdown" - subject = "test subject" + subject = "test subject" } } `, alertName, conditionName) @@ -457,6 +469,10 @@ resource "google_monitoring_alert_policy" "promql" { content = "test content" mime_type = "text/markdown" subject = "test subject" + links { + display_name = "link display name" + url = "http://mydomain.com" + } } } `, alertName, conditionName) From 167ad5f2228223e948b669d64767dbff650f1fc3 Mon Sep 17 00:00:00 2001 From: Mauricio Alvarez Leon <65101411+BBBmau@users.noreply.github.com> Date: Tue, 25 Jun 2024 12:58:23 -0700 Subject: [PATCH 212/356] Move diff suppress funcs used by a single services into that service's package (#9962) --- mmv1/products/cloudscheduler/Job.yaml | 2 +- mmv1/products/compute/Disk.yaml | 2 +- mmv1/products/compute/ForwardingRule.yaml | 4 +- 
.../compute/GlobalForwardingRule.yaml | 4 +- .../compute/ManagedSslCertificate.yaml | 4 +- .../compute/NetworkEndpointGroup.yaml | 4 +- mmv1/products/compute/RegionDisk.yaml | 2 +- .../compute/RegionSslCertificate.yaml | 3 +- mmv1/products/compute/Route.yaml | 3 +- mmv1/products/compute/SslCertificate.yaml | 2 +- mmv1/products/compute/go_Disk.yaml | 2 +- mmv1/products/compute/go_ForwardingRule.yaml | 4 +- .../compute/go_GlobalForwardingRule.yaml | 4 +- .../compute/go_ManagedSslCertificate.yaml | 5 +- .../compute/go_NetworkEndpointGroup.yaml | 5 +- mmv1/products/compute/go_RegionDisk.yaml | 2 +- .../compute/go_RegionSslCertificate.yaml | 2 +- mmv1/products/compute/go_Route.yaml | 3 +- mmv1/products/compute/go_SslCertificate.yaml | 2 +- mmv1/products/pubsub/Subscription.yaml | 2 +- mmv1/products/pubsub/go_Subscription.yaml | 2 +- .../constants/compute_certificate.go.erb | 5 + .../constants/compute_forwarding_rule.go.erb | 51 +++ .../constants/compute_instance.go.erb | 0 .../compute_managed_ssl_certificate.go.erb | 7 + .../compute_network_endpoint_group.go.erb | 9 + .../terraform/constants/compute_route.go.erb | 12 + .../constants/dataproc_cluster.go.erb | 10 + mmv1/templates/terraform/constants/disk.erb | 7 + .../constants/go/compute_certificate.go.tmpl | 5 + .../go/compute_forwarding_rule.go.tmpl | 51 +++ .../terraform/constants/go/disk.tmpl | 7 + .../terraform/constants/go/scheduler.tmpl | 12 + .../constants/go/subscription.go.tmpl | 22 ++ .../constants/pubsub_subscription.go.erb | 0 .../terraform/constants/scheduler.erb | 12 + .../terraform/constants/subscription.go.erb | 22 ++ .../resource_cloud_scheduler_job_test.go | 40 ++ .../go/resource_compute_instance.go.tmpl | 26 +- ...resource_compute_instance_template.go.tmpl | 2 +- ...rce_compute_instance_template_test.go.tmpl | 3 +- .../go/resource_compute_instance_test.go.tmpl | 2 +- ...e_compute_region_instance_template.go.tmpl | 2 +- ...pute_region_instance_template_test.go.tmpl | 3 +- 
...compute_global_forwarding_rule_test.go.erb | 139 +++++++ .../compute/resource_compute_instance.go.erb | 26 +- .../resource_compute_instance_template.go.erb | 2 +- ...urce_compute_instance_template_test.go.erb | 47 ++- .../resource_compute_instance_test.go.erb | 2 +- ...ce_compute_region_instance_template.go.erb | 2 +- ...mpute_region_instance_template_test.go.erb | 3 +- .../resource_compute_ssl_certificate_test.go | 40 ++ .../go/resource_container_cluster.go.tmpl | 11 +- ...source_container_cluster_migratev1.go.tmpl | 2 +- .../resource_container_cluster.go.erb | 11 +- ...esource_container_cluster_migratev1.go.erb | 2 +- .../resource_container_cluster_test.go.erb | 44 +++ .../dataproc/resource_dataproc_cluster.go | 13 +- .../resource_dataproc_cluster_migrate.go | 2 +- .../data_source_google_logging_sink_test.go | 35 ++ .../logging/logging_exclusion_folder.go | 8 +- .../resource_logging_folder_exclusion_test.go | 39 ++ .../services/logging/resource_logging_sink.go | 7 +- .../resource_pubsub_subscription_test.go | 33 ++ .../tpgresource/common_diff_suppress.go.erb | 191 --------- .../tpgresource/common_diff_suppress_test.go | 361 ------------------ .../terraform/tpgresource/utils_test.go | 87 ----- .../common_diff_suppress.go | 66 ---- 68 files changed, 793 insertions(+), 753 deletions(-) create mode 100644 mmv1/templates/terraform/constants/compute_certificate.go.erb create mode 100644 mmv1/templates/terraform/constants/compute_instance.go.erb create mode 100644 mmv1/templates/terraform/constants/compute_managed_ssl_certificate.go.erb create mode 100644 mmv1/templates/terraform/constants/compute_network_endpoint_group.go.erb create mode 100644 mmv1/templates/terraform/constants/compute_route.go.erb create mode 100644 mmv1/templates/terraform/constants/dataproc_cluster.go.erb create mode 100644 mmv1/templates/terraform/constants/go/compute_certificate.go.tmpl create mode 100644 mmv1/templates/terraform/constants/pubsub_subscription.go.erb diff --git 
a/mmv1/products/cloudscheduler/Job.yaml b/mmv1/products/cloudscheduler/Job.yaml index f65975609557..bd22c694415c 100644 --- a/mmv1/products/cloudscheduler/Job.yaml +++ b/mmv1/products/cloudscheduler/Job.yaml @@ -343,7 +343,7 @@ properties: description: | The full URI path that the request will be sent to. required: true - diff_suppress_func: 'tpgresource.LastSlashDiffSuppress' + diff_suppress_func: 'LastSlashDiffSuppress' - !ruby/object:Api::Type::String name: httpMethod description: | diff --git a/mmv1/products/compute/Disk.yaml b/mmv1/products/compute/Disk.yaml index f8737babfc7d..2f794dbc11e9 100644 --- a/mmv1/products/compute/Disk.yaml +++ b/mmv1/products/compute/Disk.yaml @@ -362,7 +362,7 @@ properties: disk interfaces are automatically determined on attachment. description: | Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. - diff_suppress_func: 'tpgresource.AlwaysDiffSuppress' + diff_suppress_func: AlwaysDiffSuppress - !ruby/object:Api::Type::String name: 'sourceDisk' description: | diff --git a/mmv1/products/compute/ForwardingRule.yaml b/mmv1/products/compute/ForwardingRule.yaml index 4e6f3b6655ab..b28d4ad67374 100644 --- a/mmv1/products/compute/ForwardingRule.yaml +++ b/mmv1/products/compute/ForwardingRule.yaml @@ -312,7 +312,7 @@ properties: When reading an `IPAddress`, the API always returns the IP address number. default_from_api: true - diff_suppress_func: 'tpgresource.InternalIpDiffSuppress' + diff_suppress_func: InternalIpDiffSuppress - !ruby/object:Api::Type::Enum name: 'IPProtocol' description: | @@ -430,7 +430,7 @@ properties: cannot have overlapping `portRange`s. @pattern: \d+(?:-\d+)? 
- diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' + diff_suppress_func: PortRangeDiffSuppress default_from_api: true - !ruby/object:Api::Type::Array name: 'ports' diff --git a/mmv1/products/compute/GlobalForwardingRule.yaml b/mmv1/products/compute/GlobalForwardingRule.yaml index af9a88d4d512..f5d3dfde458b 100644 --- a/mmv1/products/compute/GlobalForwardingRule.yaml +++ b/mmv1/products/compute/GlobalForwardingRule.yaml @@ -264,7 +264,7 @@ properties: When reading an `IPAddress`, the API always returns the IP address number. default_from_api: true - diff_suppress_func: 'tpgresource.InternalIpDiffSuppress' + diff_suppress_func: InternalIpDiffSuppress - !ruby/object:Api::Type::Enum name: 'IPProtocol' description: | @@ -441,7 +441,7 @@ properties: cannot have overlapping `portRange`s. @pattern: \d+(?:-\d+)? - diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' + diff_suppress_func: PortRangeDiffSuppress # This is a multi-resource resource reference (TargetHttp(s)Proxy, # TargetSslProxy, TargetTcpProxy, TargetVpnGateway, TargetPool, # TargetInstance) diff --git a/mmv1/products/compute/ManagedSslCertificate.yaml b/mmv1/products/compute/ManagedSslCertificate.yaml index 1975c56387a5..030079063920 100644 --- a/mmv1/products/compute/ManagedSslCertificate.yaml +++ b/mmv1/products/compute/ManagedSslCertificate.yaml @@ -74,6 +74,8 @@ docs: !ruby/object:Provider::Terraform::Docs certificates may entail some downtime while the certificate provisions. In conclusion: Be extremely cautious. +custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: templates/terraform/constants/compute_managed_ssl_certificate.go.erb examples: - !ruby/object:Provider::Terraform::Examples name: 'managed_ssl_certificate_basic' @@ -131,7 +133,7 @@ properties: there can be up to 100 domains in this list. 
max_size: 100 required: true - diff_suppress_func: 'tpgresource.AbsoluteDomainSuppress' + diff_suppress_func: 'AbsoluteDomainSuppress' item_type: Api::Type::String - !ruby/object:Api::Type::Enum name: 'type' diff --git a/mmv1/products/compute/NetworkEndpointGroup.yaml b/mmv1/products/compute/NetworkEndpointGroup.yaml index f37a3365d013..dfda899db1d6 100644 --- a/mmv1/products/compute/NetworkEndpointGroup.yaml +++ b/mmv1/products/compute/NetworkEndpointGroup.yaml @@ -55,6 +55,8 @@ async: !ruby/object:Api::OpAsync error: !ruby/object:Api::OpAsync::Error path: 'error/errors' message: 'message' +custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: templates/terraform/constants/compute_network_endpoint_group.go.erb examples: - !ruby/object:Provider::Terraform::Examples name: 'network_endpoint_group' @@ -139,7 +141,7 @@ properties: imports: 'selfLink' description: | Optional subnetwork to which all network endpoints in the NEG belong. - diff_suppress_func: 'tpgresource.CompareOptionalSubnet' + diff_suppress_func: 'compareOptionalSubnet' custom_expand: 'templates/terraform/custom_expand/resourceref_with_validation.go.erb' - !ruby/object:Api::Type::Integer name: 'defaultPort' diff --git a/mmv1/products/compute/RegionDisk.yaml b/mmv1/products/compute/RegionDisk.yaml index d421a950923f..e03aad8c4431 100644 --- a/mmv1/products/compute/RegionDisk.yaml +++ b/mmv1/products/compute/RegionDisk.yaml @@ -308,7 +308,7 @@ properties: disk interfaces are automatically determined on attachment. description: | Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. 
- diff_suppress_func: 'tpgresource.AlwaysDiffSuppress' + diff_suppress_func: AlwaysDiffSuppress - !ruby/object:Api::Type::String name: 'sourceDisk' description: | diff --git a/mmv1/products/compute/RegionSslCertificate.yaml b/mmv1/products/compute/RegionSslCertificate.yaml index ba2941812207..e9d8b451803b 100644 --- a/mmv1/products/compute/RegionSslCertificate.yaml +++ b/mmv1/products/compute/RegionSslCertificate.yaml @@ -78,6 +78,7 @@ examples: ignore_read_extra: - 'name_prefix' custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: templates/terraform/constants/go/compute_certificate.go.tmpl extra_schema_entry: templates/terraform/extra_schema_entry/ssl_certificate.erb parameters: - !ruby/object:Api::Type::ResourceRef @@ -141,4 +142,4 @@ properties: sensitive: true ignore_read: true custom_flatten: 'templates/terraform/custom_flatten/sha256.erb' - diff_suppress_func: 'tpgresource.Sha256DiffSuppress' + diff_suppress_func: 'sha256DiffSuppress' diff --git a/mmv1/products/compute/Route.yaml b/mmv1/products/compute/Route.yaml index 9b6b70de1f4d..3c54de91e78f 100644 --- a/mmv1/products/compute/Route.yaml +++ b/mmv1/products/compute/Route.yaml @@ -95,6 +95,7 @@ examples: backend_name: 'compute-backend' route_name: 'route-ilb' custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: templates/terraform/constants/compute_route.go.erb decoder: templates/terraform/decoders/route.erb extra_schema_entry: templates/terraform/extra_schema_entry/route.erb docs: !ruby/object:Provider::Terraform::Docs @@ -252,4 +253,4 @@ properties: - next_hop_ip - next_hop_vpn_tunnel - next_hop_ilb - diff_suppress_func: 'tpgresource.CompareIpAddressOrSelfLinkOrResourceName' + diff_suppress_func: 'CompareIpAddressOrSelfLinkOrResourceName' diff --git a/mmv1/products/compute/SslCertificate.yaml b/mmv1/products/compute/SslCertificate.yaml index f00bbfe1d91c..dc9e40dc16ed 100644 --- a/mmv1/products/compute/SslCertificate.yaml +++ 
b/mmv1/products/compute/SslCertificate.yaml @@ -128,4 +128,4 @@ properties: sensitive: true ignore_read: true custom_flatten: 'templates/terraform/custom_flatten/sha256.erb' - diff_suppress_func: 'tpgresource.Sha256DiffSuppress' + diff_suppress_func: 'sha256DiffSuppress' diff --git a/mmv1/products/compute/go_Disk.yaml b/mmv1/products/compute/go_Disk.yaml index 9b0f045ee0fe..5cfc9920a63e 100644 --- a/mmv1/products/compute/go_Disk.yaml +++ b/mmv1/products/compute/go_Disk.yaml @@ -347,7 +347,7 @@ properties: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. min_version: 'beta' url_param_only: true - diff_suppress_func: 'tpgresource.AlwaysDiffSuppress' + diff_suppress_func: AlwaysDiffSuppress default_value: SCSI deprecation_message: '`interface` is deprecated and will be removed in a future major release. This field is no longer used and can be safely removed from your configurations; disk interfaces are automatically determined on attachment.' - name: 'sourceDisk' diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml index 2b84b92a45a4..ca3d6425cf88 100644 --- a/mmv1/products/compute/go_ForwardingRule.yaml +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -299,7 +299,7 @@ properties: When reading an `IPAddress`, the API always returns the IP address number. default_from_api: true - diff_suppress_func: 'tpgresource.InternalIpDiffSuppress' + diff_suppress_func: InternalIpDiffSuppress - name: 'IPProtocol' type: Enum description: | @@ -412,7 +412,7 @@ properties: @pattern: \d+(?:-\d+)? 
default_from_api: true - diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' + diff_suppress_func: PortRangeDiffSuppress - name: 'ports' type: Array description: | diff --git a/mmv1/products/compute/go_GlobalForwardingRule.yaml b/mmv1/products/compute/go_GlobalForwardingRule.yaml index c85cfd9c7525..c61a5b1e78b0 100644 --- a/mmv1/products/compute/go_GlobalForwardingRule.yaml +++ b/mmv1/products/compute/go_GlobalForwardingRule.yaml @@ -255,7 +255,7 @@ properties: When reading an `IPAddress`, the API always returns the IP address number. default_from_api: true - diff_suppress_func: 'tpgresource.InternalIpDiffSuppress' + diff_suppress_func: InternalIpDiffSuppress - name: 'IPProtocol' type: Enum description: | @@ -433,7 +433,7 @@ properties: cannot have overlapping `portRange`s. @pattern: \d+(?:-\d+)? - diff_suppress_func: 'tpgresource.PortRangeDiffSuppress' + diff_suppress_func: PortRangeDiffSuppress - name: 'subnetwork' type: ResourceRef description: | diff --git a/mmv1/products/compute/go_ManagedSslCertificate.yaml b/mmv1/products/compute/go_ManagedSslCertificate.yaml index 5f1f840af51f..580fed2333d3 100644 --- a/mmv1/products/compute/go_ManagedSslCertificate.yaml +++ b/mmv1/products/compute/go_ManagedSslCertificate.yaml @@ -65,7 +65,8 @@ async: path: 'error/errors' message: 'message' collection_url_key: 'items' -custom_code: +custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: templates/terraform/constants/compute_managed_ssl_certificate.go.erb examples: - name: 'managed_ssl_certificate_basic' primary_resource_id: 'default' @@ -120,7 +121,7 @@ properties: Domains for which a managed SSL certificate will be valid. Currently, there can be up to 100 domains in this list. 
required: true - diff_suppress_func: 'tpgresource.AbsoluteDomainSuppress' + diff_suppress_func: 'AbsoluteDomainSuppress' item_type: type: String max_size: 100 diff --git a/mmv1/products/compute/go_NetworkEndpointGroup.yaml b/mmv1/products/compute/go_NetworkEndpointGroup.yaml index 433530c951d0..3ffa4af09e61 100644 --- a/mmv1/products/compute/go_NetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_NetworkEndpointGroup.yaml @@ -56,7 +56,8 @@ async: path: 'error/errors' message: 'message' collection_url_key: 'items' -custom_code: +custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: templates/terraform/constants/compute_network_endpoint_group.go.erb examples: - name: 'network_endpoint_group' primary_resource_id: 'neg' @@ -137,7 +138,7 @@ properties: type: ResourceRef description: | Optional subnetwork to which all network endpoints in the NEG belong. - diff_suppress_func: 'tpgresource.CompareOptionalSubnet' + diff_suppress_func: 'compareOptionalSubnet' custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' resource: 'Subnetwork' imports: 'selfLink' diff --git a/mmv1/products/compute/go_RegionDisk.yaml b/mmv1/products/compute/go_RegionDisk.yaml index 3a4d7e04b8af..084e0ff3a5ac 100644 --- a/mmv1/products/compute/go_RegionDisk.yaml +++ b/mmv1/products/compute/go_RegionDisk.yaml @@ -295,7 +295,7 @@ properties: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. min_version: 'beta' url_param_only: true - diff_suppress_func: 'tpgresource.AlwaysDiffSuppress' + diff_suppress_func: AlwaysDiffSuppress default_value: SCSI deprecation_message: '`interface` is deprecated and will be removed in a future major release. This field is no longer used and can be safely removed from your configurations; disk interfaces are automatically determined on attachment.' 
- name: 'sourceDisk' diff --git a/mmv1/products/compute/go_RegionSslCertificate.yaml b/mmv1/products/compute/go_RegionSslCertificate.yaml index a365f1cacf13..029e4331c90f 100644 --- a/mmv1/products/compute/go_RegionSslCertificate.yaml +++ b/mmv1/products/compute/go_RegionSslCertificate.yaml @@ -131,5 +131,5 @@ properties: immutable: true ignore_read: true sensitive: true - diff_suppress_func: 'tpgresource.Sha256DiffSuppress' + diff_suppress_func: 'sha256DiffSuppress' custom_flatten: 'templates/terraform/custom_flatten/go/sha256.tmpl' diff --git a/mmv1/products/compute/go_Route.yaml b/mmv1/products/compute/go_Route.yaml index ef5b1a44c013..c53dfd1dd17e 100644 --- a/mmv1/products/compute/go_Route.yaml +++ b/mmv1/products/compute/go_Route.yaml @@ -72,6 +72,7 @@ async: message: 'message' collection_url_key: 'items' custom_code: + constants: templates/terraform/constants/compute_route.go.erb extra_schema_entry: 'templates/terraform/extra_schema_entry/go/route.tmpl' decoder: 'templates/terraform/decoders/go/route.tmpl' error_retry_predicates: @@ -253,4 +254,4 @@ properties: - 'next_hop_ip' - 'next_hop_vpn_tunnel' - 'next_hop_ilb' - diff_suppress_func: 'tpgresource.CompareIpAddressOrSelfLinkOrResourceName' + diff_suppress_func: 'CompareIpAddressOrSelfLinkOrResourceName' diff --git a/mmv1/products/compute/go_SslCertificate.yaml b/mmv1/products/compute/go_SslCertificate.yaml index 97bc3263f0a8..d57ea5e11265 100644 --- a/mmv1/products/compute/go_SslCertificate.yaml +++ b/mmv1/products/compute/go_SslCertificate.yaml @@ -119,5 +119,5 @@ properties: immutable: true ignore_read: true sensitive: true - diff_suppress_func: 'tpgresource.Sha256DiffSuppress' + diff_suppress_func: 'sha256DiffSuppress' custom_flatten: 'templates/terraform/custom_flatten/go/sha256.tmpl' diff --git a/mmv1/products/pubsub/Subscription.yaml b/mmv1/products/pubsub/Subscription.yaml index d70884b88cfa..bba44db6cfd5 100644 --- a/mmv1/products/pubsub/Subscription.yaml +++ 
b/mmv1/products/pubsub/Subscription.yaml @@ -302,7 +302,7 @@ properties: - v1beta1: uses the push format defined in the v1beta1 Pub/Sub API. - v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API. - diff_suppress_func: 'tpgresource.IgnoreMissingKeyInMap("x-goog-version")' + diff_suppress_func: 'IgnoreMissingKeyInMap("x-goog-version")' - !ruby/object:Api::Type::NestedObject name: 'noWrapper' custom_flatten: 'templates/terraform/custom_flatten/pubsub_no_wrapper_write_metadata_flatten.go.erb' diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index 02ab2a488f57..9ec82cb69821 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -303,7 +303,7 @@ properties: - v1beta1: uses the push format defined in the v1beta1 Pub/Sub API. - v1 or v1beta2: uses the push format defined in the v1 Pub/Sub API. - diff_suppress_func: 'tpgresource.IgnoreMissingKeyInMap("x-goog-version")' + diff_suppress_func: 'IgnoreMissingKeyInMap("x-goog-version")' - name: 'noWrapper' type: NestedObject description: | diff --git a/mmv1/templates/terraform/constants/compute_certificate.go.erb b/mmv1/templates/terraform/constants/compute_certificate.go.erb new file mode 100644 index 000000000000..f5f6aae3c09d --- /dev/null +++ b/mmv1/templates/terraform/constants/compute_certificate.go.erb @@ -0,0 +1,5 @@ +// sha256DiffSuppress +// if old is the hex-encoded sha256 sum of new, treat them as equal +func sha256DiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return hex.EncodeToString(sha256.New().Sum([]byte(old))) == new +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/compute_forwarding_rule.go.erb b/mmv1/templates/terraform/constants/compute_forwarding_rule.go.erb index 92afd4d61d0a..19580c9b79b3 100644 --- a/mmv1/templates/terraform/constants/compute_forwarding_rule.go.erb +++ b/mmv1/templates/terraform/constants/compute_forwarding_rule.go.erb @@ 
-14,3 +14,54 @@ func forwardingRuleCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v } return nil } + +// Port range '80' and '80-80' is equivalent. +// `old` is read from the server and always has the full range format (e.g. '80-80', '1024-2048'). +// `new` can be either a single port or a port range. +func PortRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return old == new+"-"+new +} + +// Suppresses diff for IPv4 and IPv6 different formats. +// It also suppresses diffs if an IP is changing to a reference. +func InternalIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + addr_equality := false + netmask_equality := false + + addr_netmask_old := strings.Split(old, "/") + addr_netmask_new := strings.Split(new, "/") + + // Check if old or new are IPs (with or without netmask) + var addr_old net.IP + if net.ParseIP(addr_netmask_old[0]) == nil { + addr_old = net.ParseIP(old) + } else { + addr_old = net.ParseIP(addr_netmask_old[0]) + } + var addr_new net.IP + if net.ParseIP(addr_netmask_new[0]) == nil { + addr_new = net.ParseIP(new) + } else { + addr_new = net.ParseIP(addr_netmask_new[0]) + } + + if addr_old != nil { + if addr_new == nil { + // old is an IP and new is a reference + addr_equality = true + } else { + // old and new are IP addresses + addr_equality = bytes.Equal(addr_old, addr_new) + } + } + + // If old and new both have a netmask compare them, otherwise suppress + // This is not technically correct but prevents the permadiff described in https://github.com/hashicorp/terraform-provider-google/issues/16400 + if (len(addr_netmask_old)) == 2 && (len(addr_netmask_new) == 2) { + netmask_equality = addr_netmask_old[1] == addr_netmask_new[1] + } else { + netmask_equality = true + } + + return addr_equality && netmask_equality +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/compute_instance.go.erb b/mmv1/templates/terraform/constants/compute_instance.go.erb new file mode 100644 
index 000000000000..e69de29bb2d1 diff --git a/mmv1/templates/terraform/constants/compute_managed_ssl_certificate.go.erb b/mmv1/templates/terraform/constants/compute_managed_ssl_certificate.go.erb new file mode 100644 index 000000000000..e80b7c1f9075 --- /dev/null +++ b/mmv1/templates/terraform/constants/compute_managed_ssl_certificate.go.erb @@ -0,0 +1,7 @@ +// For managed SSL certs, if new is an absolute FQDN (trailing '.') but old isn't, treat them as equals. +func AbsoluteDomainSuppress(k, old, new string, _ *schema.ResourceData) bool { + if strings.HasPrefix(k, "managed.0.domains.") { + return old == strings.TrimRight(new, ".") || new == strings.TrimRight(old, ".") + } + return false +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/compute_network_endpoint_group.go.erb b/mmv1/templates/terraform/constants/compute_network_endpoint_group.go.erb new file mode 100644 index 000000000000..ad1828dfe457 --- /dev/null +++ b/mmv1/templates/terraform/constants/compute_network_endpoint_group.go.erb @@ -0,0 +1,9 @@ +// Use this method when subnet is optioanl and auto_create_subnetworks = true +// API sometimes choose a subnet so the diff needs to be ignored +func compareOptionalSubnet(_, old, new string, _ *schema.ResourceData) bool { + if tpgresource.IsEmptyValue(reflect.ValueOf(new)) { + return true + } + // otherwise compare as self links + return tpgresource.CompareSelfLinkOrResourceName("", old, new, nil) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/compute_route.go.erb b/mmv1/templates/terraform/constants/compute_route.go.erb new file mode 100644 index 000000000000..915d204a137f --- /dev/null +++ b/mmv1/templates/terraform/constants/compute_route.go.erb @@ -0,0 +1,12 @@ +// Use this method when the field accepts either an IP address or a +// self_link referencing a resource (such as google_compute_route's +// next_hop_ilb) +func CompareIpAddressOrSelfLinkOrResourceName(_, old, new string, _ 
*schema.ResourceData) bool { + // if we can parse `new` as an IP address, then compare as strings + if net.ParseIP(new) != nil { + return new == old + } + + // otherwise compare as self links + return tpgresource.CompareSelfLinkOrResourceName("", old, new, nil) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/dataproc_cluster.go.erb b/mmv1/templates/terraform/constants/dataproc_cluster.go.erb new file mode 100644 index 000000000000..2761e8c4a5e2 --- /dev/null +++ b/mmv1/templates/terraform/constants/dataproc_cluster.go.erb @@ -0,0 +1,10 @@ +// Suppress diffs for values that are equivalent except for their use of the words "location" +// compared to "region" or "zone" +func locationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return locationDiffSuppressHelper(old, new) || locationDiffSuppressHelper(new, old) +} + +func locationDiffSuppressHelper(a, b string) bool { + return strings.Replace(a, "/locations/", "/regions/", 1) == b || + strings.Replace(a, "/locations/", "/zones/", 1) == b +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/disk.erb b/mmv1/templates/terraform/constants/disk.erb index 52cc7aecd070..409d59f3fb99 100644 --- a/mmv1/templates/terraform/constants/disk.erb +++ b/mmv1/templates/terraform/constants/disk.erb @@ -17,6 +17,13 @@ func hyperDiskIopsUpdateDiffSupress(_ context.Context, d *schema.ResourceDiff, m } <% end -%> +<% unless version == "ga" -%> +// Suppress all diffs, used for Disk.Interface which is a nonfunctional field +func AlwaysDiffSuppress(_, _, _ string, _ *schema.ResourceData) bool { + return true +} +<% end -%> + // diffsupress for beta and to check change in source_disk attribute func sourceDiskDiffSupress(_, old, new string, _ *schema.ResourceData) bool { s1 := strings.TrimPrefix(old, "https://www.googleapis.com/compute/beta") diff --git a/mmv1/templates/terraform/constants/go/compute_certificate.go.tmpl 
b/mmv1/templates/terraform/constants/go/compute_certificate.go.tmpl new file mode 100644 index 000000000000..f5f6aae3c09d --- /dev/null +++ b/mmv1/templates/terraform/constants/go/compute_certificate.go.tmpl @@ -0,0 +1,5 @@ +// sha256DiffSuppress +// if old is the hex-encoded sha256 sum of new, treat them as equal +func sha256DiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return hex.EncodeToString(sha256.New().Sum([]byte(old))) == new +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/compute_forwarding_rule.go.tmpl b/mmv1/templates/terraform/constants/go/compute_forwarding_rule.go.tmpl index 92afd4d61d0a..19580c9b79b3 100644 --- a/mmv1/templates/terraform/constants/go/compute_forwarding_rule.go.tmpl +++ b/mmv1/templates/terraform/constants/go/compute_forwarding_rule.go.tmpl @@ -14,3 +14,54 @@ func forwardingRuleCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, v } return nil } + +// Port range '80' and '80-80' is equivalent. +// `old` is read from the server and always has the full range format (e.g. '80-80', '1024-2048'). +// `new` can be either a single port or a port range. +func PortRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return old == new+"-"+new +} + +// Suppresses diff for IPv4 and IPv6 different formats. +// It also suppresses diffs if an IP is changing to a reference. 
+func InternalIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + addr_equality := false + netmask_equality := false + + addr_netmask_old := strings.Split(old, "/") + addr_netmask_new := strings.Split(new, "/") + + // Check if old or new are IPs (with or without netmask) + var addr_old net.IP + if net.ParseIP(addr_netmask_old[0]) == nil { + addr_old = net.ParseIP(old) + } else { + addr_old = net.ParseIP(addr_netmask_old[0]) + } + var addr_new net.IP + if net.ParseIP(addr_netmask_new[0]) == nil { + addr_new = net.ParseIP(new) + } else { + addr_new = net.ParseIP(addr_netmask_new[0]) + } + + if addr_old != nil { + if addr_new == nil { + // old is an IP and new is a reference + addr_equality = true + } else { + // old and new are IP addresses + addr_equality = bytes.Equal(addr_old, addr_new) + } + } + + // If old and new both have a netmask compare them, otherwise suppress + // This is not technically correct but prevents the permadiff described in https://github.com/hashicorp/terraform-provider-google/issues/16400 + if (len(addr_netmask_old)) == 2 && (len(addr_netmask_new) == 2) { + netmask_equality = addr_netmask_old[1] == addr_netmask_new[1] + } else { + netmask_equality = true + } + + return addr_equality && netmask_equality +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/disk.tmpl b/mmv1/templates/terraform/constants/go/disk.tmpl index 0c554b930b78..4e1c27e01cb1 100644 --- a/mmv1/templates/terraform/constants/go/disk.tmpl +++ b/mmv1/templates/terraform/constants/go/disk.tmpl @@ -17,6 +17,13 @@ func hyperDiskIopsUpdateDiffSupress(_ context.Context, d *schema.ResourceDiff, m } {{- end }} +<% if version == "ga" -%> +// Suppress all diffs, used for Disk.Interface which is a nonfunctional field +func AlwaysDiffSuppress(_, _, _ string, _ *schema.ResourceData) bool { + return true +} +<% end -%> + // diffsupress for beta and to check change in source_disk attribute func sourceDiskDiffSupress(_, old, new string, _ 
*schema.ResourceData) bool { s1 := strings.TrimPrefix(old, "https://www.googleapis.com/compute/beta") diff --git a/mmv1/templates/terraform/constants/go/scheduler.tmpl b/mmv1/templates/terraform/constants/go/scheduler.tmpl index e509161d7110..0764f1dfd8e4 100644 --- a/mmv1/templates/terraform/constants/go/scheduler.tmpl +++ b/mmv1/templates/terraform/constants/go/scheduler.tmpl @@ -16,7 +16,19 @@ func validateAuthHeaders(_ context.Context, diff *schema.ResourceDiff, v interfa return nil } +// Suppress diffs in below cases +// "https://hello-rehvs75zla-uc.a.run.app/" -> "https://hello-rehvs75zla-uc.a.run.app" +// "https://hello-rehvs75zla-uc.a.run.app" -> "https://hello-rehvs75zla-uc.a.run.app/" +func LastSlashDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + if last := len(new) - 1; last >= 0 && new[last] == '/' { + new = new[:last] + } + if last := len(old) - 1; last >= 0 && old[last] == '/' { + old = old[:last] + } + return new == old +} func authHeaderDiffSuppress(k, old, new string, d *schema.ResourceData) bool { // If generating an `oauth_token` and `scope` is not provided in the configuration, diff --git a/mmv1/templates/terraform/constants/go/subscription.go.tmpl b/mmv1/templates/terraform/constants/go/subscription.go.tmpl index 1fb3cf747795..336517ee07a7 100644 --- a/mmv1/templates/terraform/constants/go/subscription.go.tmpl +++ b/mmv1/templates/terraform/constants/go/subscription.go.tmpl @@ -22,3 +22,25 @@ func comparePubsubSubscriptionExpirationPolicy(_, old, new string, _ *schema.Res } return trimmedNew == trimmedOld } + +func IgnoreMissingKeyInMap(key string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + log.Printf("[DEBUG] - suppressing diff %q with old %q, new %q", k, old, new) + if strings.HasSuffix(k, ".%") { + oldNum, err := strconv.Atoi(old) + if err != nil { + log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", old) + return false + } + newNum, 
err := strconv.Atoi(new) + if err != nil { + log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", new) + return false + } + return oldNum+1 == newNum + } else if strings.HasSuffix(k, "." + key) { + return old == "" + } + return false + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/pubsub_subscription.go.erb b/mmv1/templates/terraform/constants/pubsub_subscription.go.erb new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/mmv1/templates/terraform/constants/scheduler.erb b/mmv1/templates/terraform/constants/scheduler.erb index e509161d7110..0764f1dfd8e4 100644 --- a/mmv1/templates/terraform/constants/scheduler.erb +++ b/mmv1/templates/terraform/constants/scheduler.erb @@ -16,7 +16,19 @@ func validateAuthHeaders(_ context.Context, diff *schema.ResourceDiff, v interfa return nil } +// Suppress diffs in below cases +// "https://hello-rehvs75zla-uc.a.run.app/" -> "https://hello-rehvs75zla-uc.a.run.app" +// "https://hello-rehvs75zla-uc.a.run.app" -> "https://hello-rehvs75zla-uc.a.run.app/" +func LastSlashDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + if last := len(new) - 1; last >= 0 && new[last] == '/' { + new = new[:last] + } + if last := len(old) - 1; last >= 0 && old[last] == '/' { + old = old[:last] + } + return new == old +} func authHeaderDiffSuppress(k, old, new string, d *schema.ResourceData) bool { // If generating an `oauth_token` and `scope` is not provided in the configuration, diff --git a/mmv1/templates/terraform/constants/subscription.go.erb b/mmv1/templates/terraform/constants/subscription.go.erb index 187ca2e22d69..4b40b1748bd7 100644 --- a/mmv1/templates/terraform/constants/subscription.go.erb +++ b/mmv1/templates/terraform/constants/subscription.go.erb @@ -24,3 +24,25 @@ func comparePubsubSubscriptionExpirationPolicy(_, old, new string, _ *schema.Res } return trimmedNew == trimmedOld } + +func IgnoreMissingKeyInMap(key string) 
schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + log.Printf("[DEBUG] - suppressing diff %q with old %q, new %q", k, old, new) + if strings.HasSuffix(k, ".%") { + oldNum, err := strconv.Atoi(old) + if err != nil { + log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", old) + return false + } + newNum, err := strconv.Atoi(new) + if err != nil { + log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", new) + return false + } + return oldNum+1 == newNum + } else if strings.HasSuffix(k, "." + key) { + return old == "" + } + return false + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/cloudscheduler/resource_cloud_scheduler_job_test.go b/mmv1/third_party/terraform/services/cloudscheduler/resource_cloud_scheduler_job_test.go index afc24a42df70..26711bb51297 100644 --- a/mmv1/third_party/terraform/services/cloudscheduler/resource_cloud_scheduler_job_test.go +++ b/mmv1/third_party/terraform/services/cloudscheduler/resource_cloud_scheduler_job_test.go @@ -5,6 +5,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/cloudscheduler" ) func TestAccCloudSchedulerJob_schedulerPausedExample(t *testing.T) { @@ -37,6 +38,45 @@ func TestAccCloudSchedulerJob_schedulerPausedExample(t *testing.T) { }) } +func TestUnitCloudSchedulerJob_LastSlashDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "slash to no slash": { + Old: "https://hello-rehvs75zla-uc.a.run.app/", + New: "https://hello-rehvs75zla-uc.a.run.app", + ExpectDiffSuppress: true, + }, + "no slash to slash": { + Old: "https://hello-rehvs75zla-uc.a.run.app", + New: "https://hello-rehvs75zla-uc.a.run.app/", + ExpectDiffSuppress: true, + }, + "slash to slash": { + Old: 
"https://hello-rehvs75zla-uc.a.run.app/", + New: "https://hello-rehvs75zla-uc.a.run.app/", + ExpectDiffSuppress: true, + }, + "no slash to no slash": { + Old: "https://hello-rehvs75zla-uc.a.run.app", + New: "https://hello-rehvs75zla-uc.a.run.app", + ExpectDiffSuppress: true, + }, + "different domains": { + Old: "https://x.a.run.app/", + New: "https://y.a.run.app", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if cloudscheduler.LastSlashDiffSuppress("uri", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + func testAccCloudSchedulerJob_schedulerPaused(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_cloud_scheduler_job" "job" { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl index 045b0be451fa..aadfe28a5828 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl @@ -28,6 +28,30 @@ import ( {{- end }} ) +func IpCidrRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // The range may be a: + // A) single IP address (e.g. 10.2.3.4) + // B) CIDR format string (e.g. 10.1.2.0/24) + // C) netmask (e.g. /24) + // + // For A) and B), no diff to suppress, they have to match completely. + // For C), The API picks a network IP address and this creates a diff of the form: + // network_interface.0.alias_ip_range.0.ip_cidr_range: "10.128.1.0/24" => "/24" + // We should only compare the mask portion for this case. 
+ if len(new) > 0 && new[0] == '/' { + oldNetmaskStartPos := strings.LastIndex(old, "/") + + if oldNetmaskStartPos != -1 { + oldNetmask := old[strings.LastIndex(old, "/"):] + if oldNetmask == new { + return true + } + } + } + + return false +} + var ( bootDiskKeys = []string{ "boot_disk.0.auto_delete", @@ -417,7 +441,7 @@ func ResourceComputeInstance() *schema.Resource { "ip_cidr_range": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, + DiffSuppressFunc: IpCidrRangeDiffSuppress, Description: `The IP CIDR range represented by this alias IP range.`, }, "subnetwork_range_name": { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl index cd2c9d8bfaf9..b2feb876803c 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl @@ -523,7 +523,7 @@ Google Cloud KMS.`, Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, + DiffSuppressFunc: IpCidrRangeDiffSuppress, Description: `The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. At the time of writing only a netmask (e.g. 
/24) may be supplied, with a CIDR format resulting in an API error.`, }, "subnetwork_range_name": { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl index a2c8876a0639..ba0a4fbbe2d1 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" "github.com/hashicorp/terraform-provider-google/google/tpgresource" {{ if eq $.TargetVersionName `ga` }} @@ -1859,7 +1860,7 @@ func testAccCheckComputeInstanceTemplateHasAliasIpRange(instanceTemplate *comput return func(s *terraform.State) error { for _, networkInterface := range instanceTemplate.Properties.NetworkInterfaces { for _, aliasIpRange := range networkInterface.AliasIpRanges { - if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { + if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgcompute.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { return nil } } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl index 27e65e9281ec..3761400e3f07 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl +++ 
b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl @@ -4310,7 +4310,7 @@ func testAccCheckComputeInstanceHasAliasIpRange(instance *compute.Instance, subn return func(s *terraform.State) error { for _, networkInterface := range instance.NetworkInterfaces { for _, aliasIpRange := range networkInterface.AliasIpRanges { - if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { + if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgcompute.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { return nil } } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl index 68df06e180b7..c88a923a92f4 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl @@ -493,7 +493,7 @@ Google Cloud KMS.`, Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, + DiffSuppressFunc: IpCidrRangeDiffSuppress, Description: `The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. At the time of writing only a netmask (e.g. 
/24) may be supplied, with a CIDR format resulting in an API error.`, }, "subnetwork_range_name": { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl index f66690696fcb..1e4d118736e2 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl @@ -15,6 +15,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-provider-google/google/tpgresource" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" {{ if eq $.TargetVersionName `ga` }} @@ -1618,7 +1619,7 @@ func testAccCheckComputeRegionInstanceTemplateHasAliasIpRange(instanceTemplate * return func(s *terraform.State) error { for _, networkInterface := range instanceTemplate.Properties.NetworkInterfaces { for _, aliasIpRange := range networkInterface.AliasIpRanges { - if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { + if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgcompute.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { return nil } } diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_global_forwarding_rule_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_global_forwarding_rule_test.go.erb index e4f0036ae180..cbf00c941c03 100644 --- 
a/mmv1/third_party/terraform/services/compute/resource_compute_global_forwarding_rule_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_global_forwarding_rule_test.go.erb @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/compute" ) func TestAccComputeGlobalForwardingRule_updateTarget(t *testing.T) { @@ -164,6 +165,144 @@ func TestAccComputeGlobalForwardingRule_internalLoadBalancing(t *testing.T) { } <% end -%> +func TestUnitComputeGlobalForwardingRule_PortRangeDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "different single values": { + Old: "80-80", + New: "443", + ExpectDiffSuppress: false, + }, + "different ranges": { + Old: "80-80", + New: "443-444", + ExpectDiffSuppress: false, + }, + "same single values": { + Old: "80-80", + New: "80", + ExpectDiffSuppress: true, + }, + "same ranges": { + Old: "80-80", + New: "80-80", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if compute.PortRangeDiffSuppress("ports", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + +func TestUnitComputeGlobalForwardingRule_InternalIpDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "suppress - same long and short ipv6 IPs without netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "2600:1900:4020:31cd:8000::", + ExpectDiffSuppress: true, + }, + "suppress - long and short ipv6 IPs with netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0/96", + New: "2600:1900:4020:31cd:8000::/96", + ExpectDiffSuppress: true, + }, + "suppress - long ipv6 IP with netmask and short ipv6 IP without netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0/96", + New: 
"2600:1900:4020:31cd:8000::", + ExpectDiffSuppress: true, + }, + "suppress - long ipv6 IP without netmask and short ipv6 IP with netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "2600:1900:4020:31cd:8000::/96", + ExpectDiffSuppress: true, + }, + "suppress - long ipv6 IP with netmask and reference": { + Old: "2600:1900:4020:31cd:8000:0:0:0/96", + New: "projects/project_id/regions/region/addresses/address-name", + ExpectDiffSuppress: true, + }, + "suppress - long ipv6 IP without netmask and reference": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "projects/project_id/regions/region/addresses/address-name", + ExpectDiffSuppress: true, + }, + "do not suppress - ipv6 IPs different netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0/96", + New: "2600:1900:4020:31cd:8000:0:0:0/95", + ExpectDiffSuppress: false, + }, + "do not suppress - reference and ipv6 IP with netmask": { + Old: "projects/project_id/regions/region/addresses/address-name", + New: "2600:1900:4020:31cd:8000:0:0:0/96", + ExpectDiffSuppress: false, + }, + "do not suppress - ipv6 IPs - 1": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "2600:1900:4020:31cd:8001::", + ExpectDiffSuppress: false, + }, + "do not suppress - ipv6 IPs - 2": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "2600:1900:4020:31cd:8000:0:0:8000", + ExpectDiffSuppress: false, + }, + "suppress - ipv4 IPs": { + Old: "1.2.3.4", + New: "1.2.3.4", + ExpectDiffSuppress: true, + }, + "suppress - ipv4 IP without netmask and ipv4 IP with netmask": { + Old: "1.2.3.4", + New: "1.2.3.4/24", + ExpectDiffSuppress: true, + }, + "suppress - ipv4 IP without netmask and reference": { + Old: "1.2.3.4", + New: "projects/project_id/regions/region/addresses/address-name", + ExpectDiffSuppress: true, + }, + "do not suppress - reference and ipv4 IP without netmask": { + Old: "projects/project_id/regions/region/addresses/address-name", + New: "1.2.3.4", + ExpectDiffSuppress: false, + }, + "do not suppress - different ipv4 IPs": { + Old: 
"1.2.3.4", + New: "1.2.3.5", + ExpectDiffSuppress: false, + }, + "do not suppress - ipv4 IPs different netmask": { + Old: "1.2.3.4/24", + New: "1.2.3.5/25", + ExpectDiffSuppress: false, + }, + "do not suppress - different references": { + Old: "projects/project_id/regions/region/addresses/address-name", + New: "projects/project_id/regions/region/addresses/address-name-1", + ExpectDiffSuppress: false, + }, + "do not suppress - same references": { + Old: "projects/project_id/regions/region/addresses/address-name", + New: "projects/project_id/regions/region/addresses/address-name", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if compute.InternalIpDiffSuppress("ipv4/v6_compare", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + func testAccComputeGlobalForwardingRule_httpProxy(fr, targetProxy, proxy, proxy2, backend, hc, urlmap string) string { return fmt.Sprintf(` resource "google_compute_global_forwarding_rule" "forwarding_rule" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index f292b032a776..4d3fec856166 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -29,6 +29,30 @@ import ( <% end -%> ) +func IpCidrRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // The range may be a: + // A) single IP address (e.g. 10.2.3.4) + // B) CIDR format string (e.g. 10.1.2.0/24) + // C) netmask (e.g. /24) + // + // For A) and B), no diff to suppress, they have to match completely. 
+ // For C), The API picks a network IP address and this creates a diff of the form: + // network_interface.0.alias_ip_range.0.ip_cidr_range: "10.128.1.0/24" => "/24" + // We should only compare the mask portion for this case. + if len(new) > 0 && new[0] == '/' { + oldNetmaskStartPos := strings.LastIndex(old, "/") + + if oldNetmaskStartPos != -1 { + oldNetmask := old[strings.LastIndex(old, "/"):] + if oldNetmask == new { + return true + } + } + } + + return false +} + var ( bootDiskKeys = []string{ "boot_disk.0.auto_delete", @@ -418,7 +442,7 @@ func ResourceComputeInstance() *schema.Resource { "ip_cidr_range": { Type: schema.TypeString, Required: true, - DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, + DiffSuppressFunc: IpCidrRangeDiffSuppress, Description: `The IP CIDR range represented by this alias IP range.`, }, "subnetwork_range_name": { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb index a76fdd9f75aa..44d17cd0b2a7 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb @@ -524,7 +524,7 @@ Google Cloud KMS.`, Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, + DiffSuppressFunc: IpCidrRangeDiffSuppress, Description: `The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. At the time of writing only a netmask (e.g. 
/24) may be supplied, with a CIDR format resulting in an API error.`, }, "subnetwork_range_name": { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb index c7adb60d0e11..bd30c505102e 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" "github.com/hashicorp/terraform-provider-google/google/tpgresource" <% if version == "ga" -%> @@ -1534,6 +1535,50 @@ func TestAccComputeInstanceTemplate_resourceManagerTags(t *testing.T) { }) } +func TestUnitComputeInstanceTemplate_IpCidrRangeDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "single ip address": { + Old: "10.2.3.4", + New: "10.2.3.5", + ExpectDiffSuppress: false, + }, + "cidr format string": { + Old: "10.1.2.0/24", + New: "10.1.3.0/24", + ExpectDiffSuppress: false, + }, + "netmask same mask": { + Old: "10.1.2.0/24", + New: "/24", + ExpectDiffSuppress: true, + }, + "netmask different mask": { + Old: "10.1.2.0/24", + New: "/32", + ExpectDiffSuppress: false, + }, + "add netmask": { + Old: "", + New: "/24", + ExpectDiffSuppress: false, + }, + "remove netmask": { + Old: "/24", + New: "", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if tpgcompute.IpCidrRangeDiffSuppress("ip_cidr_range", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + func 
testAccCheckComputeInstanceTemplateDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) @@ -1860,7 +1905,7 @@ func testAccCheckComputeInstanceTemplateHasAliasIpRange(instanceTemplate *comput return func(s *terraform.State) error { for _, networkInterface := range instanceTemplate.Properties.NetworkInterfaces { for _, aliasIpRange := range networkInterface.AliasIpRanges { - if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { + if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgcompute.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { return nil } } diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index dde0a9397a76..c7d960b15729 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -4311,7 +4311,7 @@ func testAccCheckComputeInstanceHasAliasIpRange(instance *compute.Instance, subn return func(s *terraform.State) error { for _, networkInterface := range instance.NetworkInterfaces { for _, aliasIpRange := range networkInterface.AliasIpRanges { - if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { + if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgcompute.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { return nil } } diff --git 
a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb index 46817b0944e8..b6f33aba7985 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb @@ -494,7 +494,7 @@ Google Cloud KMS.`, Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: tpgresource.IpCidrRangeDiffSuppress, + DiffSuppressFunc: IpCidrRangeDiffSuppress, Description: `The IP CIDR range represented by this alias IP range. This IP CIDR range must belong to the specified subnetwork and cannot contain IP addresses reserved by system or used by other network interfaces. At the time of writing only a netmask (e.g. /24) may be supplied, with a CIDR format resulting in an API error.`, }, "subnetwork_range_name": { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb index bbba450d3d5a..c70b9ae57e3e 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-provider-google/google/tpgresource" + tpgcompute "github.com/hashicorp/terraform-provider-google/google/services/compute" transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" <% if version == "ga" -%> @@ -1619,7 +1620,7 @@ func testAccCheckComputeRegionInstanceTemplateHasAliasIpRange(instanceTemplate * return func(s *terraform.State) error 
{ for _, networkInterface := range instanceTemplate.Properties.NetworkInterfaces { for _, aliasIpRange := range networkInterface.AliasIpRanges { - if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { + if aliasIpRange.SubnetworkRangeName == subnetworkRangeName && (aliasIpRange.IpCidrRange == iPCidrRange || tpgcompute.IpCidrRangeDiffSuppress("ip_cidr_range", aliasIpRange.IpCidrRange, iPCidrRange, nil)) { return nil } } diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_ssl_certificate_test.go b/mmv1/third_party/terraform/services/compute/resource_compute_ssl_certificate_test.go index 84031c22e724..2df10cc9c932 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_ssl_certificate_test.go +++ b/mmv1/third_party/terraform/services/compute/resource_compute_ssl_certificate_test.go @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/compute" ) func TestAccComputeSslCertificate_no_name(t *testing.T) { @@ -36,6 +37,45 @@ func TestAccComputeSslCertificate_no_name(t *testing.T) { }) } +func TestUnitComputeManagedSslCertificate_AbsoluteDomainSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "new trailing dot": { + Old: "sslcert.tf-test.club", + New: "sslcert.tf-test.club.", + ExpectDiffSuppress: true, + }, + "old trailing dot": { + Old: "sslcert.tf-test.club.", + New: "sslcert.tf-test.club", + ExpectDiffSuppress: true, + }, + "same trailing dot": { + Old: "sslcert.tf-test.club.", + New: "sslcert.tf-test.club.", + ExpectDiffSuppress: false, + }, + "different trailing dot": { + Old: "sslcert.tf-test.club.", + 
New: "sslcert.tf-test.clubs.", + ExpectDiffSuppress: false, + }, + "different no trailing dot": { + Old: "sslcert.tf-test.club", + New: "sslcert.tf-test.clubs", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if compute.AbsoluteDomainSuppress("managed.0.domains.", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + func testAccCheckComputeSslCertificateExists(t *testing.T, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl index 8abea04b38ca..b6279ad6f280 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl @@ -27,6 +27,15 @@ import ( {{- end }} ) +// Single-digit hour is equivalent to hour with leading zero e.g. suppress diff 1:00 => 01:00. +// Assume either value could be in either format. 
+func Rfc3339TimeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if (len(old) == 4 && "0"+old == new) || (len(new) == 4 && "0"+new == old) { + return true + } + return false +} + var ( instanceGroupManagerURL = regexp.MustCompile(fmt.Sprintf("projects/(%s)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)", verify.ProjectRegex)) @@ -1022,7 +1031,7 @@ func ResourceContainerCluster() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: verify.ValidateRFC3339Time, - DiffSuppressFunc: tpgresource.Rfc3339TimeDiffSuppress, + DiffSuppressFunc: Rfc3339TimeDiffSuppress, }, "duration": { Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl index e61fc211f072..9c2a03316054 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl @@ -792,7 +792,7 @@ func resourceContainerClusterResourceV1() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: verify.ValidateRFC3339Time, - DiffSuppressFunc: tpgresource.Rfc3339TimeDiffSuppress, + DiffSuppressFunc: Rfc3339TimeDiffSuppress, }, "duration": { Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index c128e2604441..bf16ba801320 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -28,6 +28,15 @@ import ( <% end -%> ) +// Single-digit hour is equivalent to hour with leading zero e.g. suppress diff 1:00 => 01:00. +// Assume either value could be in either format. 
+func Rfc3339TimeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if (len(old) == 4 && "0"+old == new) || (len(new) == 4 && "0"+new == old) { + return true + } + return false +} + var ( instanceGroupManagerURL = regexp.MustCompile(fmt.Sprintf("projects/(%s)/zones/([a-z0-9-]*)/instanceGroupManagers/([^/]*)", verify.ProjectRegex)) @@ -1023,7 +1032,7 @@ func ResourceContainerCluster() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: verify.ValidateRFC3339Time, - DiffSuppressFunc: tpgresource.Rfc3339TimeDiffSuppress, + DiffSuppressFunc: Rfc3339TimeDiffSuppress, }, "duration": { Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb index 5e73abe2175f..108d576fa96a 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb @@ -793,7 +793,7 @@ func resourceContainerClusterResourceV1() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: verify.ValidateRFC3339Time, - DiffSuppressFunc: tpgresource.Rfc3339TimeDiffSuppress, + DiffSuppressFunc: Rfc3339TimeDiffSuppress, }, "duration": { Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb index 5a917f5cf280..1f018f2a937a 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_test.go.erb @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" 
"github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/container" ) func TestAccContainerCluster_basic(t *testing.T) { @@ -597,6 +598,49 @@ func TestAccContainerCluster_withAuthenticatorGroupsConfig(t *testing.T) { }) } +func TestUnitContainerCluster_Rfc3339TimeDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "same time, format changed to have leading zero": { + Old: "2:00", + New: "02:00", + ExpectDiffSuppress: true, + }, + "same time, format changed not to have leading zero": { + Old: "02:00", + New: "2:00", + ExpectDiffSuppress: true, + }, + "different time, both without leading zero": { + Old: "2:00", + New: "3:00", + ExpectDiffSuppress: false, + }, + "different time, old with leading zero, new without": { + Old: "02:00", + New: "3:00", + ExpectDiffSuppress: false, + }, + "different time, new with leading zero, oldwithout": { + Old: "2:00", + New: "03:00", + ExpectDiffSuppress: false, + }, + "different time, both with leading zero": { + Old: "02:00", + New: "03:00", + ExpectDiffSuppress: false, + }, + } + for tn, tc := range cases { + if container.Rfc3339TimeDiffSuppress("time", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Errorf("bad: %s, '%s' => '%s' expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + <% unless version == 'ga' -%> func testAccContainerCluster_enableMultiNetworking(clusterName string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go index 0b3d660053d1..0abed2ab6332 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster.go @@ -130,6 +130,17 @@ func diskConfigKeys(configName string) []string { } } +// Suppress diffs 
for values that are equivalent except for their use of the words "location" +// compared to "region" or "zone" +func LocationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return LocationDiffSuppressHelper(old, new) || LocationDiffSuppressHelper(new, old) +} + +func LocationDiffSuppressHelper(a, b string) bool { + return strings.Replace(a, "/locations/", "/regions/", 1) == b || + strings.Replace(a, "/locations/", "/zones/", 1) == b +} + func resourceDataprocLabelDiffSuppress(k, old, new string, d *schema.ResourceData) bool { if strings.HasPrefix(k, resourceDataprocGoogleProvidedLabelPrefix) && new == "" { return true @@ -1430,7 +1441,7 @@ by Dataproc`, Type: schema.TypeString, Required: true, Description: `The autoscaling policy used by the cluster.`, - DiffSuppressFunc: tpgresource.LocationDiffSuppress, + DiffSuppressFunc: LocationDiffSuppress, }, }, }, diff --git a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_migrate.go b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_migrate.go index 768d639642b1..0e503027fbc5 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_migrate.go +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_migrate.go @@ -1241,7 +1241,7 @@ by Dataproc`, Type: schema.TypeString, Required: true, Description: `The autoscaling policy used by the cluster.`, - DiffSuppressFunc: tpgresource.LocationDiffSuppress, + DiffSuppressFunc: LocationDiffSuppress, }, }, }, diff --git a/mmv1/third_party/terraform/services/logging/data_source_google_logging_sink_test.go b/mmv1/third_party/terraform/services/logging/data_source_google_logging_sink_test.go index 8818dfa7ac75..2cefb0789866 100644 --- a/mmv1/third_party/terraform/services/logging/data_source_google_logging_sink_test.go +++ b/mmv1/third_party/terraform/services/logging/data_source_google_logging_sink_test.go @@ -6,6 +6,7 @@ import ( 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/logging" ) func TestAccDataSourceGoogleLoggingSink_basic(t *testing.T) { @@ -38,6 +39,40 @@ func TestAccDataSourceGoogleLoggingSink_basic(t *testing.T) { }) } +func TestUnitLoggingSink_OptionalSurroundingSpacesSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "surrounding spaces": { + Old: "value", + New: " value ", + ExpectDiffSuppress: true, + }, + "no surrounding spaces": { + Old: "value", + New: "value", + ExpectDiffSuppress: true, + }, + "one space each": { + Old: " value", + New: "value ", + ExpectDiffSuppress: true, + }, + "different values": { + Old: " different", + New: "values ", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if logging.OptionalSurroundingSpacesSuppress("filter", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + func testAccDataSourceGoogleLoggingSink_basic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_logging_project_sink" "basic" { diff --git a/mmv1/third_party/terraform/services/logging/logging_exclusion_folder.go b/mmv1/third_party/terraform/services/logging/logging_exclusion_folder.go index e891c3b999e8..20286027c9ef 100644 --- a/mmv1/third_party/terraform/services/logging/logging_exclusion_folder.go +++ b/mmv1/third_party/terraform/services/logging/logging_exclusion_folder.go @@ -11,12 +11,18 @@ import ( "google.golang.org/api/logging/v2" ) +func OptionalPrefixSuppress(prefix string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + return prefix+old == new || prefix+new == old + } +} + var FolderLoggingExclusionSchema 
= map[string]*schema.Schema{ "folder": { Type: schema.TypeString, Required: true, ForceNew: true, - DiffSuppressFunc: tpgresource.OptionalPrefixSuppress("folders/"), + DiffSuppressFunc: OptionalPrefixSuppress("folders/"), }, } diff --git a/mmv1/third_party/terraform/services/logging/resource_logging_folder_exclusion_test.go b/mmv1/third_party/terraform/services/logging/resource_logging_folder_exclusion_test.go index 1c1498fd94d2..52d770106ba4 100644 --- a/mmv1/third_party/terraform/services/logging/resource_logging_folder_exclusion_test.go +++ b/mmv1/third_party/terraform/services/logging/resource_logging_folder_exclusion_test.go @@ -34,6 +34,45 @@ func TestAccLoggingFolderExclusion(t *testing.T) { } } +func TestUnitLoggingFolder_OptionalPrefixSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + Prefix string + ExpectDiffSuppress bool + }{ + "with same prefix": { + Old: "my-folder", + New: "folders/my-folder", + Prefix: "folders/", + ExpectDiffSuppress: true, + }, + "with different prefix": { + Old: "folders/my-folder", + New: "organizations/my-folder", + Prefix: "folders/", + ExpectDiffSuppress: false, + }, + "same without prefix": { + Old: "my-folder", + New: "my-folder", + Prefix: "folders/", + ExpectDiffSuppress: false, + }, + "different without prefix": { + Old: "my-folder", + New: "my-new-folder", + Prefix: "folders/", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if logging.OptionalPrefixSuppress(tc.Prefix)("folder", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + func testAccLoggingFolderExclusion_basic(t *testing.T) { org := envvar.GetTestOrgFromEnv(t) exclusionName := "tf-test-exclusion-" + acctest.RandString(t, 10) diff --git a/mmv1/third_party/terraform/services/logging/resource_logging_sink.go b/mmv1/third_party/terraform/services/logging/resource_logging_sink.go index 7f1b43538b49..65fcdae6f3a7 100644 
--- a/mmv1/third_party/terraform/services/logging/resource_logging_sink.go +++ b/mmv1/third_party/terraform/services/logging/resource_logging_sink.go @@ -5,10 +5,13 @@ import ( "strings" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-google/google/tpgresource" "google.golang.org/api/logging/v2" ) +func OptionalSurroundingSpacesSuppress(k, old, new string, d *schema.ResourceData) bool { + return strings.TrimSpace(old) == strings.TrimSpace(new) +} + func resourceLoggingSinkSchema() map[string]*schema.Schema { return map[string]*schema.Schema{ "name": { @@ -27,7 +30,7 @@ func resourceLoggingSinkSchema() map[string]*schema.Schema { "filter": { Type: schema.TypeString, Optional: true, - DiffSuppressFunc: tpgresource.OptionalSurroundingSpacesSuppress, + DiffSuppressFunc: OptionalSurroundingSpacesSuppress, Description: `The filter to apply when exporting logs. Only log entries that match the filter are exported.`, }, diff --git a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go index e368b8d1fa35..c142112b69e0 100644 --- a/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go +++ b/mmv1/third_party/terraform/services/pubsub/resource_pubsub_subscription_test.go @@ -365,6 +365,39 @@ func TestAccPubsubSubscription_pollOnCreate(t *testing.T) { }) } +func TestUnitPubsubSubscription_IgnoreMissingKeyInMap(t *testing.T) { + cases := map[string]struct { + Old, New string + Key string + ExpectDiffSuppress bool + }{ + "missing key in map": { + Old: "", + New: "v1", + Key: "x-goog-version", + ExpectDiffSuppress: true, + }, + "different values": { + Old: "v1", + New: "v2", + Key: "x-goog-version", + ExpectDiffSuppress: false, + }, + "same values": { + Old: "v1", + New: "v1", + Key: "x-goog-version", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if 
pubsub.IgnoreMissingKeyInMap(tc.Key)("push_config.0.attributes."+tc.Key, tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + func testAccPubsubSubscription_emptyTTL(topic, subscription string) string { return fmt.Sprintf(` resource "google_pubsub_topic" "foo" { diff --git a/mmv1/third_party/terraform/tpgresource/common_diff_suppress.go.erb b/mmv1/third_party/terraform/tpgresource/common_diff_suppress.go.erb index 3d2346a82c6f..4f28d140e426 100644 --- a/mmv1/third_party/terraform/tpgresource/common_diff_suppress.go.erb +++ b/mmv1/third_party/terraform/tpgresource/common_diff_suppress.go.erb @@ -4,52 +4,14 @@ package tpgresource import ( - "crypto/sha256" - "log" - "encoding/hex" - "net" "reflect" "regexp" - "strconv" "strings" "time" - "bytes" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func OptionalPrefixSuppress(prefix string) schema.SchemaDiffSuppressFunc { - return func(k, old, new string, d *schema.ResourceData) bool { - return prefix+old == new || prefix+new == old - } -} - -func IgnoreMissingKeyInMap(key string) schema.SchemaDiffSuppressFunc { - return func(k, old, new string, d *schema.ResourceData) bool { - log.Printf("[DEBUG] - suppressing diff %q with old %q, new %q", k, old, new) - if strings.HasSuffix(k, ".%") { - oldNum, err := strconv.Atoi(old) - if err != nil { - log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", old) - return false - } - newNum, err := strconv.Atoi(new) - if err != nil { - log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", new) - return false - } - return oldNum+1 == newNum - } else if strings.HasSuffix(k, "." 
+ key) { - return old == "" - } - return false - } -} - -func OptionalSurroundingSpacesSuppress(k, old, new string, d *schema.ResourceData) bool { - return strings.TrimSpace(old) == strings.TrimSpace(new) -} - func EmptyOrDefaultStringSuppress(defaultVal string) schema.SchemaDiffSuppressFunc { return func(k, old, new string, d *schema.ResourceData) bool { return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) @@ -61,56 +23,10 @@ func EmptyOrFalseSuppressBoolean(k, old, new string, d *schema.ResourceData) boo return (o == nil && !n.(bool)) } -func IpCidrRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // The range may be a: - // A) single IP address (e.g. 10.2.3.4) - // B) CIDR format string (e.g. 10.1.2.0/24) - // C) netmask (e.g. /24) - // - // For A) and B), no diff to suppress, they have to match completely. - // For C), The API picks a network IP address and this creates a diff of the form: - // network_interface.0.alias_ip_range.0.ip_cidr_range: "10.128.1.0/24" => "/24" - // We should only compare the mask portion for this case. - if len(new) > 0 && new[0] == '/' { - oldNetmaskStartPos := strings.LastIndex(old, "/") - - if oldNetmaskStartPos != -1 { - oldNetmask := old[strings.LastIndex(old, "/"):] - if oldNetmask == new { - return true - } - } - } - - return false -} - -// Sha256DiffSuppress -// if old is the hex-encoded sha256 sum of new, treat them as equal -func Sha256DiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - return hex.EncodeToString(sha256.New().Sum([]byte(old))) == new -} - func CaseDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { return strings.ToUpper(old) == strings.ToUpper(new) } -// Port range '80' and '80-80' is equivalent. -// `old` is read from the server and always has the full range format (e.g. '80-80', '1024-2048'). -// `new` can be either a single port or a port range. 
-func PortRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return old == new+"-"+new -} - -// Single-digit hour is equivalent to hour with leading zero e.g. suppress diff 1:00 => 01:00. -// Assume either value could be in either format. -func Rfc3339TimeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - if (len(old) == 4 && "0"+old == new) || (len(new) == 4 && "0"+new == old) { - return true - } - return false -} - func EmptyOrUnsetBlockDiffSuppress(k, old, new string, d *schema.ResourceData) bool { o, n := d.GetChange(strings.TrimSuffix(k, ".#")) return EmptyOrUnsetBlockDiffSuppressLogic(k, old, new, o, n) @@ -144,25 +60,6 @@ func EmptyOrUnsetBlockDiffSuppressLogic(k, old, new string, o, n interface{}) bo return true } -// Suppress diffs for values that are equivalent except for their use of the words "location" -// compared to "region" or "zone" -func LocationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return LocationDiffSuppressHelper(old, new) || LocationDiffSuppressHelper(new, old) -} - -func LocationDiffSuppressHelper(a, b string) bool { - return strings.Replace(a, "/locations/", "/regions/", 1) == b || - strings.Replace(a, "/locations/", "/zones/", 1) == b -} - -// For managed SSL certs, if new is an absolute FQDN (trailing '.') but old isn't, treat them as equals. -func AbsoluteDomainSuppress(k, old, new string, _ *schema.ResourceData) bool { - if strings.HasPrefix(k, "managed.0.domains.") { - return old == strings.TrimRight(new, ".") || new == strings.TrimRight(old, ".") - } - return false -} - func TimestampDiffSuppress(format string) schema.SchemaDiffSuppressFunc { return func(_, old, new string, _ *schema.ResourceData) bool { oldT, err := time.Parse(format, old) @@ -179,50 +76,6 @@ func TimestampDiffSuppress(format string) schema.SchemaDiffSuppressFunc { } } -// Suppresses diff for IPv4 and IPv6 different formats. -// It also suppresses diffs if an IP is changing to a reference. 
-func InternalIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - addr_equality := false - netmask_equality := false - - addr_netmask_old := strings.Split(old, "/") - addr_netmask_new := strings.Split(new, "/") - - // Check if old or new are IPs (with or without netmask) - var addr_old net.IP - if net.ParseIP(addr_netmask_old[0]) == nil { - addr_old = net.ParseIP(old) - } else { - addr_old = net.ParseIP(addr_netmask_old[0]) - } - var addr_new net.IP - if net.ParseIP(addr_netmask_new[0]) == nil { - addr_new = net.ParseIP(new) - } else { - addr_new = net.ParseIP(addr_netmask_new[0]) - } - - if addr_old != nil { - if addr_new == nil { - // old is an IP and new is a reference - addr_equality = true - } else { - // old and new are IP addresses - addr_equality = bytes.Equal(addr_old, addr_new) - } - } - - // If old and new both have a netmask compare them, otherwise suppress - // This is not technically correct but prevents the permadiff described in https://github.com/hashicorp/terraform-provider-google/issues/16400 - if (len(addr_netmask_old)) == 2 && (len(addr_netmask_new) == 2) { - netmask_equality = addr_netmask_old[1] == addr_netmask_new[1] - } else { - netmask_equality = true - } - - return addr_equality && netmask_equality -} - // Suppress diffs for duration format. 
ex "60.0s" and "60s" same // https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration func DurationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { @@ -237,50 +90,6 @@ func DurationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { return oDuration == nDuration } -// Use this method when the field accepts either an IP address or a -// self_link referencing a resource (such as google_compute_route's -// next_hop_ilb) -func CompareIpAddressOrSelfLinkOrResourceName(_, old, new string, _ *schema.ResourceData) bool { - // if we can parse `new` as an IP address, then compare as strings - if net.ParseIP(new) != nil { - return new == old - } - - // otherwise compare as self links - return CompareSelfLinkOrResourceName("", old, new, nil) -} - -<% unless version == "ga" -%> -// Suppress all diffs, used for Disk.Interface which is a nonfunctional field -func AlwaysDiffSuppress(_, _, _ string, _ *schema.ResourceData) bool { - return true -} -<% end -%> - -// Use this method when subnet is optioanl and auto_create_subnetworks = true -// API sometimes choose a subnet so the diff needs to be ignored -func CompareOptionalSubnet(_, old, new string, _ *schema.ResourceData) bool { - if IsEmptyValue(reflect.ValueOf(new)) { - return true - } - // otherwise compare as self links - return CompareSelfLinkOrResourceName("", old, new, nil) -} - -// Suppress diffs in below cases -// "https://hello-rehvs75zla-uc.a.run.app/" -> "https://hello-rehvs75zla-uc.a.run.app" -// "https://hello-rehvs75zla-uc.a.run.app" -> "https://hello-rehvs75zla-uc.a.run.app/" -func LastSlashDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - if last := len(new) - 1; last >= 0 && new[last] == '/' { - new = new[:last] - } - - if last := len(old) - 1; last >= 0 && old[last] == '/' { - old = old[:last] - } - return new == old -} - // Suppress diffs when the value read from api // has the project number instead of the project name func 
ProjectNumberDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { diff --git a/mmv1/third_party/terraform/tpgresource/common_diff_suppress_test.go b/mmv1/third_party/terraform/tpgresource/common_diff_suppress_test.go index e9ef36952061..3cb2ef2d21c6 100644 --- a/mmv1/third_party/terraform/tpgresource/common_diff_suppress_test.go +++ b/mmv1/third_party/terraform/tpgresource/common_diff_suppress_test.go @@ -4,112 +4,6 @@ package tpgresource import "testing" -func TestOptionalPrefixSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - Prefix string - ExpectDiffSuppress bool - }{ - "with same prefix": { - Old: "my-folder", - New: "folders/my-folder", - Prefix: "folders/", - ExpectDiffSuppress: true, - }, - "with different prefix": { - Old: "folders/my-folder", - New: "organizations/my-folder", - Prefix: "folders/", - ExpectDiffSuppress: false, - }, - "same without prefix": { - Old: "my-folder", - New: "my-folder", - Prefix: "folders/", - ExpectDiffSuppress: false, - }, - "different without prefix": { - Old: "my-folder", - New: "my-new-folder", - Prefix: "folders/", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if OptionalPrefixSuppress(tc.Prefix)("folder", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - -func TestIgnoreMissingKeyInMap(t *testing.T) { - cases := map[string]struct { - Old, New string - Key string - ExpectDiffSuppress bool - }{ - "missing key in map": { - Old: "", - New: "v1", - Key: "x-goog-version", - ExpectDiffSuppress: true, - }, - "different values": { - Old: "v1", - New: "v2", - Key: "x-goog-version", - ExpectDiffSuppress: false, - }, - "same values": { - Old: "v1", - New: "v1", - Key: "x-goog-version", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if IgnoreMissingKeyInMap(tc.Key)("push_config.0.attributes."+tc.Key, tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - 
t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - -func TestOptionalSurroundingSpacesSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "surrounding spaces": { - Old: "value", - New: " value ", - ExpectDiffSuppress: true, - }, - "no surrounding spaces": { - Old: "value", - New: "value", - ExpectDiffSuppress: true, - }, - "one space each": { - Old: " value", - New: "value ", - ExpectDiffSuppress: true, - }, - "different values": { - Old: " different", - New: "values ", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if OptionalSurroundingSpacesSuppress("filter", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - func TestCaseDiffSuppress(t *testing.T) { cases := map[string]struct { Old, New string @@ -139,118 +33,6 @@ func TestCaseDiffSuppress(t *testing.T) { } } -func TestPortRangeDiffSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "different single values": { - Old: "80-80", - New: "443", - ExpectDiffSuppress: false, - }, - "different ranges": { - Old: "80-80", - New: "443-444", - ExpectDiffSuppress: false, - }, - "same single values": { - Old: "80-80", - New: "80", - ExpectDiffSuppress: true, - }, - "same ranges": { - Old: "80-80", - New: "80-80", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if PortRangeDiffSuppress("ports", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - -func TestLocationDiffSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "locations to zones": { - Old: "projects/x/locations/y/resource/z", - New: "projects/x/zones/y/resource/z", - ExpectDiffSuppress: true, - }, - "regions to 
locations": { - Old: "projects/x/regions/y/resource/z", - New: "projects/x/locations/y/resource/z", - ExpectDiffSuppress: true, - }, - "locations to locations": { - Old: "projects/x/locations/y/resource/z", - New: "projects/x/locations/y/resource/z", - ExpectDiffSuppress: false, - }, - "zones to regions": { - Old: "projects/x/zones/y/resource/z", - New: "projects/x/regions/y/resource/z", - ExpectDiffSuppress: false, - }, - "different locations": { - Old: "projects/x/locations/a/resource/z", - New: "projects/x/locations/b/resource/z", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if LocationDiffSuppress("policy_uri", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - -func TestAbsoluteDomainSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "new trailing dot": { - Old: "sslcert.tf-test.club", - New: "sslcert.tf-test.club.", - ExpectDiffSuppress: true, - }, - "old trailing dot": { - Old: "sslcert.tf-test.club.", - New: "sslcert.tf-test.club", - ExpectDiffSuppress: true, - }, - "same trailing dot": { - Old: "sslcert.tf-test.club.", - New: "sslcert.tf-test.club.", - ExpectDiffSuppress: false, - }, - "different trailing dot": { - Old: "sslcert.tf-test.club.", - New: "sslcert.tf-test.clubs.", - ExpectDiffSuppress: false, - }, - "different no trailing dot": { - Old: "sslcert.tf-test.club", - New: "sslcert.tf-test.clubs", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if AbsoluteDomainSuppress("managed.0.domains.", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - func TestDurationDiffSuppress(t *testing.T) { cases := map[string]struct { Old, New string @@ -285,149 +67,6 @@ func TestDurationDiffSuppress(t *testing.T) { } } -func TestInternalIpDiffSuppress(t *testing.T) { - 
cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "suppress - same long and short ipv6 IPs without netmask": { - Old: "2600:1900:4020:31cd:8000:0:0:0", - New: "2600:1900:4020:31cd:8000::", - ExpectDiffSuppress: true, - }, - "suppress - long and short ipv6 IPs with netmask": { - Old: "2600:1900:4020:31cd:8000:0:0:0/96", - New: "2600:1900:4020:31cd:8000::/96", - ExpectDiffSuppress: true, - }, - "suppress - long ipv6 IP with netmask and short ipv6 IP without netmask": { - Old: "2600:1900:4020:31cd:8000:0:0:0/96", - New: "2600:1900:4020:31cd:8000::", - ExpectDiffSuppress: true, - }, - "suppress - long ipv6 IP without netmask and short ipv6 IP with netmask": { - Old: "2600:1900:4020:31cd:8000:0:0:0", - New: "2600:1900:4020:31cd:8000::/96", - ExpectDiffSuppress: true, - }, - "suppress - long ipv6 IP with netmask and reference": { - Old: "2600:1900:4020:31cd:8000:0:0:0/96", - New: "projects/project_id/regions/region/addresses/address-name", - ExpectDiffSuppress: true, - }, - "suppress - long ipv6 IP without netmask and reference": { - Old: "2600:1900:4020:31cd:8000:0:0:0", - New: "projects/project_id/regions/region/addresses/address-name", - ExpectDiffSuppress: true, - }, - "do not suppress - ipv6 IPs different netmask": { - Old: "2600:1900:4020:31cd:8000:0:0:0/96", - New: "2600:1900:4020:31cd:8000:0:0:0/95", - ExpectDiffSuppress: false, - }, - "do not suppress - reference and ipv6 IP with netmask": { - Old: "projects/project_id/regions/region/addresses/address-name", - New: "2600:1900:4020:31cd:8000:0:0:0/96", - ExpectDiffSuppress: false, - }, - "do not suppress - ipv6 IPs - 1": { - Old: "2600:1900:4020:31cd:8000:0:0:0", - New: "2600:1900:4020:31cd:8001::", - ExpectDiffSuppress: false, - }, - "do not suppress - ipv6 IPs - 2": { - Old: "2600:1900:4020:31cd:8000:0:0:0", - New: "2600:1900:4020:31cd:8000:0:0:8000", - ExpectDiffSuppress: false, - }, - "suppress - ipv4 IPs": { - Old: "1.2.3.4", - New: "1.2.3.4", - ExpectDiffSuppress: true, - }, - 
"suppress - ipv4 IP without netmask and ipv4 IP with netmask": { - Old: "1.2.3.4", - New: "1.2.3.4/24", - ExpectDiffSuppress: true, - }, - "suppress - ipv4 IP without netmask and reference": { - Old: "1.2.3.4", - New: "projects/project_id/regions/region/addresses/address-name", - ExpectDiffSuppress: true, - }, - "do not suppress - reference and ipv4 IP without netmask": { - Old: "projects/project_id/regions/region/addresses/address-name", - New: "1.2.3.4", - ExpectDiffSuppress: false, - }, - "do not suppress - different ipv4 IPs": { - Old: "1.2.3.4", - New: "1.2.3.5", - ExpectDiffSuppress: false, - }, - "do not suppress - ipv4 IPs different netmask": { - Old: "1.2.3.4/24", - New: "1.2.3.5/25", - ExpectDiffSuppress: false, - }, - "do not suppress - different references": { - Old: "projects/project_id/regions/region/addresses/address-name", - New: "projects/project_id/regions/region/addresses/address-name-1", - ExpectDiffSuppress: false, - }, - "do not suppress - same references": { - Old: "projects/project_id/regions/region/addresses/address-name", - New: "projects/project_id/regions/region/addresses/address-name", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if InternalIpDiffSuppress("ipv4/v6_compare", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - -func TestLastSlashDiffSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "slash to no slash": { - Old: "https://hello-rehvs75zla-uc.a.run.app/", - New: "https://hello-rehvs75zla-uc.a.run.app", - ExpectDiffSuppress: true, - }, - "no slash to slash": { - Old: "https://hello-rehvs75zla-uc.a.run.app", - New: "https://hello-rehvs75zla-uc.a.run.app/", - ExpectDiffSuppress: true, - }, - "slash to slash": { - Old: "https://hello-rehvs75zla-uc.a.run.app/", - New: "https://hello-rehvs75zla-uc.a.run.app/", - ExpectDiffSuppress: true, - }, - "no 
slash to no slash": { - Old: "https://hello-rehvs75zla-uc.a.run.app", - New: "https://hello-rehvs75zla-uc.a.run.app", - ExpectDiffSuppress: true, - }, - "different domains": { - Old: "https://x.a.run.app/", - New: "https://y.a.run.app", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if LastSlashDiffSuppress("uri", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - func TestEmptyOrUnsetBlockDiffSuppress(t *testing.T) { cases := map[string]struct { Key, Old, New string diff --git a/mmv1/third_party/terraform/tpgresource/utils_test.go b/mmv1/third_party/terraform/tpgresource/utils_test.go index 19222a730565..0a657aa9dcaf 100644 --- a/mmv1/third_party/terraform/tpgresource/utils_test.go +++ b/mmv1/third_party/terraform/tpgresource/utils_test.go @@ -225,93 +225,6 @@ func TestConvertStringMap(t *testing.T) { } } -func TestIpCidrRangeDiffSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ - "single ip address": { - Old: "10.2.3.4", - New: "10.2.3.5", - ExpectDiffSuppress: false, - }, - "cidr format string": { - Old: "10.1.2.0/24", - New: "10.1.3.0/24", - ExpectDiffSuppress: false, - }, - "netmask same mask": { - Old: "10.1.2.0/24", - New: "/24", - ExpectDiffSuppress: true, - }, - "netmask different mask": { - Old: "10.1.2.0/24", - New: "/32", - ExpectDiffSuppress: false, - }, - "add netmask": { - Old: "", - New: "/24", - ExpectDiffSuppress: false, - }, - "remove netmask": { - Old: "/24", - New: "", - ExpectDiffSuppress: false, - }, - } - - for tn, tc := range cases { - if tpgresource.IpCidrRangeDiffSuppress("ip_cidr_range", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - -func TestRfc3339TimeDiffSuppress(t *testing.T) { - cases := map[string]struct { - Old, New string - ExpectDiffSuppress bool - }{ 
- "same time, format changed to have leading zero": { - Old: "2:00", - New: "02:00", - ExpectDiffSuppress: true, - }, - "same time, format changed not to have leading zero": { - Old: "02:00", - New: "2:00", - ExpectDiffSuppress: true, - }, - "different time, both without leading zero": { - Old: "2:00", - New: "3:00", - ExpectDiffSuppress: false, - }, - "different time, old with leading zero, new without": { - Old: "02:00", - New: "3:00", - ExpectDiffSuppress: false, - }, - "different time, new with leading zero, oldwithout": { - Old: "2:00", - New: "03:00", - ExpectDiffSuppress: false, - }, - "different time, both with leading zero": { - Old: "02:00", - New: "03:00", - ExpectDiffSuppress: false, - }, - } - for tn, tc := range cases { - if tpgresource.Rfc3339TimeDiffSuppress("time", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { - t.Errorf("bad: %s, '%s' => '%s' expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) - } - } -} - func TestGetProject(t *testing.T) { cases := map[string]struct { ResourceConfig map[string]interface{} diff --git a/tpgtools/ignored_handwritten/common_diff_suppress.go b/tpgtools/ignored_handwritten/common_diff_suppress.go index c82d7c1fe5e4..0c4af16934ee 100644 --- a/tpgtools/ignored_handwritten/common_diff_suppress.go +++ b/tpgtools/ignored_handwritten/common_diff_suppress.go @@ -3,8 +3,6 @@ package google import ( - "crypto/sha256" - "encoding/hex" "net" "strings" "time" @@ -12,80 +10,16 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func OptionalPrefixSuppress(prefix string) schema.SchemaDiffSuppressFunc { - return func(k, old, new string, d *schema.ResourceData) bool { - return prefix+old == new || prefix+new == old - } -} - -func OptionalSurroundingSpacesSuppress(k, old, new string, d *schema.ResourceData) bool { - return strings.TrimSpace(old) == strings.TrimSpace(new) -} - func EmptyOrDefaultStringSuppress(defaultVal string) schema.SchemaDiffSuppressFunc { return func(k, old, new 
string, d *schema.ResourceData) bool { return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) } } -func IpCidrRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // The range may be a: - // A) single IP address (e.g. 10.2.3.4) - // B) CIDR format string (e.g. 10.1.2.0/24) - // C) netmask (e.g. /24) - // - // For A) and B), no diff to suppress, they have to match completely. - // For C), The API picks a network IP address and this creates a diff of the form: - // network_interface.0.alias_ip_range.0.ip_cidr_range: "10.128.1.0/24" => "/24" - // We should only compare the mask portion for this case. - if len(new) > 0 && new[0] == '/' { - oldNetmaskStartPos := strings.LastIndex(old, "/") - - if oldNetmaskStartPos != -1 { - oldNetmask := old[strings.LastIndex(old, "/"):] - if oldNetmask == new { - return true - } - } - } - - return false -} - -// Sha256DiffSuppress -// if old is the hex-encoded sha256 sum of new, treat them as equal -func Sha256DiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - return hex.EncodeToString(sha256.New().Sum([]byte(old))) == new -} - func CaseDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { return strings.ToUpper(old) == strings.ToUpper(new) } -// Port range '80' and '80-80' is equivalent. -// `old` is read from the server and always has the full range format (e.g. '80-80', '1024-2048'). -// `new` can be either a single port or a port range. -func PortRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return old == new+"-"+new -} - -// Single-digit hour is equivalent to hour with leading zero e.g. suppress diff 1:00 => 01:00. -// Assume either value could be in either format. 
-func Rfc3339TimeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - if (len(old) == 4 && "0"+old == new) || (len(new) == 4 && "0"+new == old) { - return true - } - return false -} - -// For managed SSL certs, if new is an absolute FQDN (trailing '.') but old isn't, treat them as equals. -func AbsoluteDomainSuppress(k, old, new string, _ *schema.ResourceData) bool { - if k == "managed.0.domains.0" { - return old == strings.TrimRight(new, ".") - } - return old == new -} - func TimestampDiffSuppress(format string) schema.SchemaDiffSuppressFunc { return func(_, old, new string, _ *schema.ResourceData) bool { oldT, err := time.Parse(format, old) From c3161ce03c78a071e9062612c19443af8633ab49 Mon Sep 17 00:00:00 2001 From: Dionna Amalie Glaze Date: Tue, 25 Jun 2024 16:52:56 -0700 Subject: [PATCH 213/356] Add GA support for confidentialInstanceType (#10887) --- mmv1/products/compute/Instance.yaml | 2 +- mmv1/third_party/terraform/go.mod.erb | 28 ++++---- mmv1/third_party/terraform/go.sum | 64 +++++++++---------- .../compute/compute_instance_helpers.go.erb | 4 -- ...pute_region_instance_template_test.go.tmpl | 18 ------ .../compute/resource_compute_instance.go.erb | 8 --- .../resource_compute_instance_template.go.erb | 9 --- ...urce_compute_instance_template_test.go.erb | 18 ------ .../resource_compute_instance_test.go.erb | 18 ------ ...ce_compute_region_instance_template.go.erb | 9 --- ...mpute_region_instance_template_test.go.erb | 18 ------ .../docs/r/compute_instance.html.markdown | 2 +- .../r/compute_instance_template.html.markdown | 2 +- ...ute_region_instance_template.html.markdown | 2 +- 14 files changed, 50 insertions(+), 152 deletions(-) diff --git a/mmv1/products/compute/Instance.yaml b/mmv1/products/compute/Instance.yaml index d316af142a77..72d208dcab1e 100644 --- a/mmv1/products/compute/Instance.yaml +++ b/mmv1/products/compute/Instance.yaml @@ -609,9 +609,9 @@ properties: at_least_one_of: - 
confidential_instance_config.0.enable_confidential_compute - confidential_instance_config.0.confidential_instance_type + deprecation_message: "`enableConfidentialCompute` is deprecated and will be removed in a future major release. Use `confidentialInstanceType: SEV` instead." - !ruby/object:Api::Type::Enum name: 'confidentialInstanceType' - min_version: beta description: | The confidential computing technology the instance uses. SEV is an AMD feature. One of the following values: SEV, SEV_SNP. diff --git a/mmv1/third_party/terraform/go.mod.erb b/mmv1/third_party/terraform/go.mod.erb index eca77fa58285..20470932ebef 100644 --- a/mmv1/third_party/terraform/go.mod.erb +++ b/mmv1/third_party/terraform/go.mod.erb @@ -28,18 +28,18 @@ require ( github.com/sirupsen/logrus v1.8.1 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 - golang.org/x/net v0.25.0 - golang.org/x/oauth2 v0.20.0 - google.golang.org/api v0.180.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 - google.golang.org/grpc v1.63.2 - google.golang.org/protobuf v1.34.1 + golang.org/x/net v0.26.0 + golang.org/x/oauth2 v0.21.0 + google.golang.org/api v0.185.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 + google.golang.org/grpc v1.64.0 + google.golang.org/protobuf v1.34.2 ) require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.113.0 // indirect - cloud.google.com/go/auth v0.4.1 // indirect + cloud.google.com/go v0.115.0 // indirect + cloud.google.com/go/auth v0.5.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.8 // indirect @@ -51,7 +51,7 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect - github.com/cncf/xds/go 
v0.0.0-20231128003011-0fa0005c9caa // indirect + github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect github.com/envoyproxy/go-control-plane v0.12.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/fatih/color v1.16.0 // indirect @@ -99,14 +99,14 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.23.0 // indirect + golang.org/x/crypto v0.24.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240429193739-8cf5692501f6 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/mmv1/third_party/terraform/go.sum b/mmv1/third_party/terraform/go.sum index 30534d3e3b1c..04ec71650cc1 100644 --- a/mmv1/third_party/terraform/go.sum +++ b/mmv1/third_party/terraform/go.sum @@ -1,10 +1,10 @@ bitbucket.org/creachadair/stringset v0.0.8 h1:gQqe4vs8XWgMyijfyKE6K8o4TcyGGrRXe0JvHgx5H+M= bitbucket.org/creachadair/stringset v0.0.8/go.mod h1:AgthVMyMxC/6FK1KBJ2ALdqkZObGN8hOetgpwXyMn34= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.113.0 h1:g3C70mn3lWfckKBiCVsAshabrDg01pQ0pnX1MNtnMkA= -cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8= -cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg= -cloud.google.com/go/auth 
v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= +cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14= +cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU= +cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= +cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/bigtable v1.24.0 h1:RtBERIoZZsQm3LUExDGFWgOwMEHCO04O9/pDA0KoAZI= @@ -44,8 +44,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa h1:jQCWAUqqlij9Pgj2i/PB79y4KOPYVyFYdROxgaCwdTQ= -github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa/go.mod h1:x/1Gn8zydmfq8dk6e9PdstVsDgu9RuyIIJqAaF//0IM= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 h1:DBmgJDC9dTfkVyGgipamEh2BpGYxScCH1TOF1LL1cXc= +github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50/go.mod h1:5e1+Vvlzido69INQaVO6d87Qn543Xr6nooe9Kz7oBFM= github.com/creachadair/staticfile v0.1.2/go.mod h1:a3qySzCIXEprDGxk6tSxSI+dBBdLzqeBOMhZ+o2d3pM= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= @@ -293,8 +293,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 h1:ESSUROHIBHg7USnszlcdmjBEwdMj9VUvU+OPk4yl2mc= golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8/go.mod h1:/lliqkxwWAhPjf5oSOIJup2XcqJaw8RGS6k3TGEc7GI= @@ -317,11 +317,11 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod 
h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -347,19 +347,19 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod 
h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -371,14 +371,14 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.20.0 h1:hz/CVckiOxybQvFw6h7b/q80NTr9IUQb4s1IIzW7KNY= -golang.org/x/tools v0.20.0/go.mod h1:WvitBU7JJf6A4jOdg4S1tviW9bhUxkgeCui/0JHctQg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.180.0 h1:M2D87Yo0rGBPWpo1orwfCLehUUL6E7/TYe5gvMQWDh4= -google.golang.org/api v0.180.0/go.mod 
h1:51AiyoEg1MJPSZ9zvklA8VnRILPXxn1iVen9v25XHAE= +google.golang.org/api v0.185.0 h1:ENEKk1k4jW8SmmaT6RE+ZasxmxezCrD5Vw4npvr+pAU= +google.golang.org/api v0.185.0/go.mod h1:HNfvIkJGlgrIlrbYkAm9W9IdkmKZjOTVh33YltygGbg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -387,20 +387,20 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240429193739-8cf5692501f6 h1:MTmrc2F5TZKDKXigcZetYkH04YwqtOPEQJwh4PPOgfk= -google.golang.org/genproto v0.0.0-20240429193739-8cf5692501f6/go.mod h1:2ROWwqCIx97Y7CSyp11xB8fori0wzvD6+gbacaf5c8I= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae h1:AH34z6WAGVNkllnKs5raNq3yRq93VnjBG6rpfub/jYk= -google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae/go.mod h1:FfiGhwUm6CJviekPrc0oJ+7h29e+DmWU6UtjX0ZvI7Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 h1:CUiCqkPw1nNrNQzCCG4WA65m0nAmQiwXHpub3dNyruU= +google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4/go.mod h1:EvuUDCulqGgV80RvP1BHuom+smhX4qtlhnNatHuroGQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 
h1:QW9+G6Fir4VcRXVH8x3LilNAb6cxBGLa6+GM4hRwexE= +google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 h1:Di6ANFilr+S60a4S61ZM00vLdw0IrQOSMS2/6mrnOU0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -412,8 +412,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= -google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb index 297a10389cda..b0a01710a046 100644 --- a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb +++ b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb @@ -613,9 +613,7 @@ func expandConfidentialInstanceConfig(d tpgresource.TerraformResourceData) *comp prefix := "confidential_instance_config.0" return &compute.ConfidentialInstanceConfig{ EnableConfidentialCompute: d.Get(prefix + ".enable_confidential_compute").(bool), - <% unless version == "ga" -%> ConfidentialInstanceType: d.Get(prefix + ".confidential_instance_type").(string), - <% end -%> } } @@ -626,9 +624,7 @@ func flattenConfidentialInstanceConfig(ConfidentialInstanceConfig *compute.Confi return []map[string]interface{}{{ "enable_confidential_compute": ConfidentialInstanceConfig.EnableConfidentialCompute, - <% unless version == "ga" -%> "confidential_instance_type": ConfidentialInstanceConfig.ConfidentialInstanceType, - <% end -%> }} } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl 
b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl index 1e4d118736e2..ddbcb20d5f58 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl @@ -689,9 +689,7 @@ func TestAccComputeRegionInstanceTemplate_ConfidentialInstanceConfigMain(t *test t.Parallel() var instanceTemplate compute.InstanceTemplate - {{- if ne $.TargetVersionName "ga" }} var instanceTemplate2 compute.InstanceTemplate - {{- end }} acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -703,13 +701,10 @@ func TestAccComputeRegionInstanceTemplate_ConfidentialInstanceConfigMain(t *test Check: resource.ComposeTestCheckFunc( testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, true, "SEV"), - {{- if ne $.TargetVersionName "ga" }} testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar2", &instanceTemplate2), testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, true, ""), - {{- end }} ), }, - {{- if ne $.TargetVersionName "ga" }} { Config: testAccComputeRegionInstanceTemplateConfidentialInstanceConfigNoEnable(acctest.RandString(t, 10), "AMD Milan", "SEV_SNP"), Check: resource.ComposeTestCheckFunc( @@ -719,7 +714,6 @@ func TestAccComputeRegionInstanceTemplate_ConfidentialInstanceConfigMain(t *test testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), ), }, - {{- end }} }, }) } @@ -1733,11 +1727,9 @@ func testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(inst if instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute != 
EnableConfidentialCompute { return fmt.Errorf("Wrong ConfidentialInstanceConfig EnableConfidentialCompute: expected %t, got, %t", EnableConfidentialCompute, instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute) } - {{- if ne $.TargetVersionName "ga" }} if instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType != ConfidentialInstanceType { return fmt.Errorf("Wrong ConfidentialInstanceConfig ConfidentialInstanceType: expected %s, got, %s", ConfidentialInstanceType, instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType) } - {{- end }} return nil } @@ -2884,9 +2876,7 @@ resource "google_compute_region_instance_template" "foobar" { confidential_instance_config { enable_confidential_compute = true -{{- if ne $.TargetVersionName "ga" }} confidential_instance_type = %q -{{- end }} } scheduling { @@ -2895,7 +2885,6 @@ resource "google_compute_region_instance_template" "foobar" { } -{{ if ne $.TargetVersionName `ga` -}} resource "google_compute_region_instance_template" "foobar2" { name = "tf-test-instance2-template-%s" machine_type = "n2d-standard-2" @@ -2920,15 +2909,9 @@ resource "google_compute_region_instance_template" "foobar2" { } } -{{- end }} -{{- if eq $.TargetVersionName "ga" }} -`, suffix) -{{- else }} `, suffix, confidentialInstanceType, suffix) -{{- end }} } -{{ if ne $.TargetVersionName `ga` -}} func testAccComputeRegionInstanceTemplateConfidentialInstanceConfigNoEnable(suffix string, minCpuPlatform, confidentialInstanceType string) string { return fmt.Sprintf(` data "google_compute_image" "my_image2" { @@ -2991,7 +2974,6 @@ resource "google_compute_region_instance_template" "foobar4" { } `, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) } -{{- end }} func testAccComputeRegionInstanceTemplateAdvancedMachineFeatures(suffix string) string { return fmt.Sprintf(` diff --git 
a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 4d3fec856166..68f26d8eaa4c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -1049,13 +1049,6 @@ be from 0 to 999,999,999 inclusive.`, Description: `The Confidential VM config being used by the instance. on_host_maintenance has to be set to TERMINATE or this will fail to create.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - <% if version == "ga" -%> - "enable_confidential_compute": { - Type: schema.TypeBool, - Required: true, - Description: `Defines whether the instance should have confidential compute enabled.`, - }, - <% else -%> "enable_confidential_compute": { Type: schema.TypeBool, Optional: true, @@ -1071,7 +1064,6 @@ be from 0 to 999,999,999 inclusive.`, If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, - <% end -%> }, }, }, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb index 44d17cd0b2a7..29496540192c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb @@ -899,14 +899,6 @@ be from 0 to 999,999,999 inclusive.`, Description: `The Confidential VM config being used by the instance. 
on_host_maintenance has to be set to TERMINATE or this will fail to create.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - <% if version == "ga" -%> - "enable_confidential_compute": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Defines whether the instance should have confidential compute enabled.`, - }, - <% else -%> "enable_confidential_compute": { Type: schema.TypeBool, Optional: true, @@ -924,7 +916,6 @@ be from 0 to 999,999,999 inclusive.`, If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, - <% end -%> }, }, }, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb index bd30c505102e..f1a511161174 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb @@ -780,9 +780,7 @@ func TestAccComputeInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) t.Parallel() var instanceTemplate compute.InstanceTemplate - <% unless version == "ga" -%> var instanceTemplate2 compute.InstanceTemplate - <% end -%> acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -794,13 +792,10 @@ func TestAccComputeInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, true, "SEV"), - <% unless version == "ga" -%> testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar2", &instanceTemplate2), 
testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, true, ""), - <% end -%> ), }, - <% unless version == "ga" -%> { Config: testAccComputeInstanceTemplateConfidentialInstanceConfigNoEnable(acctest.RandString(t, 10), "AMD Milan", "SEV_SNP"), Check: resource.ComposeTestCheckFunc( @@ -810,7 +805,6 @@ func TestAccComputeInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), ), }, - <% end -%> }, }) } @@ -2032,11 +2026,9 @@ func testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(instanceTe if instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute != EnableConfidentialCompute { return fmt.Errorf("Wrong ConfidentialInstanceConfig EnableConfidentialCompute: expected %t, got, %t", EnableConfidentialCompute, instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute) } - <% unless version == "ga" -%> if instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType != ConfidentialInstanceType { return fmt.Errorf("Wrong ConfidentialInstanceConfig ConfidentialInstanceType: expected %s, got, %s", ConfidentialInstanceType, instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType) } - <% end -%> return nil } @@ -3357,9 +3349,7 @@ resource "google_compute_instance_template" "foobar" { confidential_instance_config { enable_confidential_compute = true -<% unless version == "ga" -%> confidential_instance_type = %q -<% end -%> } scheduling { @@ -3368,7 +3358,6 @@ resource "google_compute_instance_template" "foobar" { } -<% unless version == "ga" -%> resource "google_compute_instance_template" "foobar2" { name = "tf-test-instance2-template-%s" machine_type = "n2d-standard-2" @@ -3392,15 +3381,9 @@ resource "google_compute_instance_template" "foobar2" { } } -<% end -%> -<% if version == "ga" -%> -`, suffix) -<% else -%> `, suffix, 
confidentialInstanceType, suffix) -<% end -%> } -<% unless version == "ga" -%> func testAccComputeInstanceTemplateConfidentialInstanceConfigNoEnable(suffix string, minCpuPlatform, confidentialInstanceType string) string { return fmt.Sprintf(` data "google_compute_image" "my_image2" { @@ -3461,7 +3444,6 @@ resource "google_compute_instance_template" "foobar4" { } `, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) } -<% end -%> func testAccComputeInstanceTemplateAdvancedMachineFeatures(suffix string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index c7d960b15729..682c7c6e8ace 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -1890,9 +1890,7 @@ func TestAccComputeInstanceConfidentialInstanceConfigMain(t *testing.T) { t.Parallel() var instance compute.Instance - <% unless version == "ga" -%> var instance2 compute.Instance - <% end -%> instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ @@ -1905,13 +1903,10 @@ func TestAccComputeInstanceConfidentialInstanceConfigMain(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, true, "SEV"), - <% unless version == "ga" -%> testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar2", &instance2), testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, true, ""), - <% end -%> ), }, - <% unless version == "ga" -%> { Config: testAccComputeInstanceConfidentialInstanceConfigNoEnable(instanceName, "AMD Milan", "SEV_SNP"), Check: resource.ComposeTestCheckFunc( @@ 
-1921,7 +1916,6 @@ func TestAccComputeInstanceConfidentialInstanceConfigMain(t *testing.T) { testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, false, "SEV_SNP"), ), }, - <% end -%> }, }) } @@ -4396,11 +4390,9 @@ func testAccCheckComputeInstanceHasConfidentialInstanceConfig(instance *compute. if instance.ConfidentialInstanceConfig.EnableConfidentialCompute != EnableConfidentialCompute { return fmt.Errorf("Wrong ConfidentialInstanceConfig EnableConfidentialCompute: expected %t, got, %t", EnableConfidentialCompute, instance.ConfidentialInstanceConfig.EnableConfidentialCompute) } - <% unless version == "ga" -%> if instance.ConfidentialInstanceConfig.ConfidentialInstanceType != ConfidentialInstanceType { return fmt.Errorf("Wrong ConfidentialInstanceConfig ConfidentialInstanceType: expected %s, got, %s", ConfidentialInstanceType, instance.ConfidentialInstanceConfig.ConfidentialInstanceType) } - <% end -%> return nil } @@ -7682,9 +7674,7 @@ resource "google_compute_instance" "foobar" { confidential_instance_config { enable_confidential_compute = true -<% unless version == "ga" -%> confidential_instance_type = %q -<% end -%> } scheduling { @@ -7693,7 +7683,6 @@ resource "google_compute_instance" "foobar" { } -<% unless version == "ga" -%> resource "google_compute_instance" "foobar2" { name = "%s2" machine_type = "n2d-standard-2" @@ -7718,15 +7707,9 @@ resource "google_compute_instance" "foobar2" { } } -<% end -%> -<% if version == "ga" -%> -`, instance) -<% else -%> `, instance, confidentialInstanceType, instance) -<% end -%> } -<% unless version == "ga" -%> func testAccComputeInstanceConfidentialInstanceConfigNoEnable(instance string, minCpuPlatform, confidentialInstanceType string) string { return fmt.Sprintf(` data "google_compute_image" "my_image2" { @@ -7789,7 +7772,6 @@ resource "google_compute_instance" "foobar4" { } `, instance, minCpuPlatform, confidentialInstanceType, instance, minCpuPlatform, confidentialInstanceType) } -<% end -%> func 
testAccComputeInstance_attributionLabelCreate(instance, add, strategy string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb index b6f33aba7985..99de89c3dd39 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb @@ -852,14 +852,6 @@ be from 0 to 999,999,999 inclusive.`, Description: `The Confidential VM config being used by the instance. on_host_maintenance has to be set to TERMINATE or this will fail to create.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - <% if version == "ga" -%> - "enable_confidential_compute": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Defines whether the instance should have confidential compute enabled.`, - }, - <% else -%> "enable_confidential_compute": { Type: schema.TypeBool, Optional: true, @@ -877,7 +869,6 @@ be from 0 to 999,999,999 inclusive.`, If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, - <% end -%> }, }, }, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb index c70b9ae57e3e..2947f3ccb095 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb @@ -690,9 +690,7 @@ func TestAccComputeRegionInstanceTemplate_ConfidentialInstanceConfigMain(t *test t.Parallel() var 
instanceTemplate compute.InstanceTemplate - <% unless version == "ga" -%> var instanceTemplate2 compute.InstanceTemplate - <% end -%> acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -704,13 +702,10 @@ func TestAccComputeRegionInstanceTemplate_ConfidentialInstanceConfigMain(t *test Check: resource.ComposeTestCheckFunc( testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar", &instanceTemplate), testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, true, "SEV"), - <% unless version == "ga" -%> testAccCheckComputeRegionInstanceTemplateExists(t, "google_compute_region_instance_template.foobar2", &instanceTemplate2), testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, true, ""), - <% end -%> ), }, - <% unless version == "ga" -%> { Config: testAccComputeRegionInstanceTemplateConfidentialInstanceConfigNoEnable(acctest.RandString(t, 10), "AMD Milan", "SEV_SNP"), Check: resource.ComposeTestCheckFunc( @@ -720,7 +715,6 @@ func TestAccComputeRegionInstanceTemplate_ConfidentialInstanceConfigMain(t *test testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), ), }, - <% end -%> }, }) } @@ -1734,11 +1728,9 @@ func testAccCheckComputeRegionInstanceTemplateHasConfidentialInstanceConfig(inst if instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute != EnableConfidentialCompute { return fmt.Errorf("Wrong ConfidentialInstanceConfig EnableConfidentialCompute: expected %t, got, %t", EnableConfidentialCompute, instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute) } - <% unless version == "ga" -%> if instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType != ConfidentialInstanceType { return fmt.Errorf("Wrong ConfidentialInstanceConfig ConfidentialInstanceType: expected %s, got, %s", 
ConfidentialInstanceType, instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType) } - <% end -%> return nil } @@ -2885,9 +2877,7 @@ resource "google_compute_region_instance_template" "foobar" { confidential_instance_config { enable_confidential_compute = true -<% unless version == "ga" -%> confidential_instance_type = %q -<% end -%> } scheduling { @@ -2896,7 +2886,6 @@ resource "google_compute_region_instance_template" "foobar" { } -<% unless version == "ga" -%> resource "google_compute_region_instance_template" "foobar2" { name = "tf-test-instance2-template-%s" machine_type = "n2d-standard-2" @@ -2921,15 +2910,9 @@ resource "google_compute_region_instance_template" "foobar2" { } } -<% end -%> -<% if version == "ga" -%> -`, suffix) -<% else -%> `, suffix, confidentialInstanceType, suffix) -<% end -%> } -<% unless version == "ga" -%> func testAccComputeRegionInstanceTemplateConfidentialInstanceConfigNoEnable(suffix string, minCpuPlatform, confidentialInstanceType string) string { return fmt.Sprintf(` data "google_compute_image" "my_image2" { @@ -2992,7 +2975,6 @@ resource "google_compute_region_instance_template" "foobar4" { } `, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) } -<% end -%> func testAccComputeRegionInstanceTemplateAdvancedMachineFeatures(suffix string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index 9f7553b529dd..04dc4c466681 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -499,7 +499,7 @@ specified, then this instance will have no external IPv6 Internet access. Struct * `enable_confidential_compute` (Optional) Defines whether the instance should have confidential compute enabled with AMD SEV. 
If enabled, [`on_host_maintenance`](#on_host_maintenance) can be set to MIGRATE if [`min_cpu_platform`](#min_cpu_platform) is set to `"AMD Milan"`. Otherwise, [`on_host_maintenance`](#on_host_maintenance) has to be set to TERMINATE or this will fail to create the VM. -* `confidential_instance_type` (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Defines the confidential computing technology the instance uses. SEV is an AMD feature. One of the following values: `SEV`, `SEV_SNP`. [`on_host_maintenance`](#on_host_maintenance) can be set to MIGRATE if [`confidential_instance_type`](#confidential_instance_type) is set to `SEV` and [`min_cpu_platform`](#min_cpu_platform) is set to `"AMD Milan"`. Otherwise, [`on_host_maintenance`](#on_host_maintenance) has to be set to TERMINATE or this will fail to create the VM. If `SEV_SNP`, currently [`min_cpu_platform`](#min_cpu_platform) has to be set to `"AMD Milan"` or this will fail to create the VM. +* `confidential_instance_type` (Optional) Defines the confidential computing technology the instance uses. SEV is an AMD feature. One of the following values: `SEV`, `SEV_SNP`. [`on_host_maintenance`](#on_host_maintenance) can be set to MIGRATE if [`confidential_instance_type`](#confidential_instance_type) is set to `SEV` and [`min_cpu_platform`](#min_cpu_platform) is set to `"AMD Milan"`. Otherwise, [`on_host_maintenance`](#on_host_maintenance) has to be set to TERMINATE or this will fail to create the VM. If `SEV_SNP`, currently [`min_cpu_platform`](#min_cpu_platform) has to be set to `"AMD Milan"` or this will fail to create the VM. 
The `advanced_machine_features` block supports: diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown index e1030a25b6d9..e984cd86b4ee 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown @@ -662,7 +662,7 @@ The `specific_reservation` block supports: * `enable_confidential_compute` (Optional) Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, [`on_host_maintenance`](#on_host_maintenance) can be set to MIGRATE if [`min_cpu_platform`](#min_cpu_platform) is set to `"AMD Milan"`. Otherwise, [`on_host_maintenance`](#on_host_maintenance) has to be set to TERMINATE or this will fail to create the VM. -* `confidential_instance_type` (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Defines the confidential computing technology the instance uses. SEV is an AMD feature. One of the following values: `SEV`, `SEV_SNP`. [`on_host_maintenance`](#on_host_maintenance) can be set to MIGRATE if [`confidential_instance_type`](#confidential_instance_type) is set to `SEV` and [`min_cpu_platform`](#min_cpu_platform) is set to `"AMD Milan"`. Otherwise, [`on_host_maintenance`](#on_host_maintenance) has to be set to TERMINATE or this will fail to create the VM. If `SEV_SNP`, currently [`min_cpu_platform`](#min_cpu_platform) has to be set to `"AMD Milan"` or this will fail to create the VM. +* `confidential_instance_type` (Optional) Defines the confidential computing technology the instance uses. SEV is an AMD feature. One of the following values: `SEV`, `SEV_SNP`. 
[`on_host_maintenance`](#on_host_maintenance) can be set to MIGRATE if [`confidential_instance_type`](#confidential_instance_type) is set to `SEV` and [`min_cpu_platform`](#min_cpu_platform) is set to `"AMD Milan"`. Otherwise, [`on_host_maintenance`](#on_host_maintenance) has to be set to TERMINATE or this will fail to create the VM. If `SEV_SNP`, currently [`min_cpu_platform`](#min_cpu_platform) has to be set to `"AMD Milan"` or this will fail to create the VM. The `network_performance_config` block supports: diff --git a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown index daf0291d5b01..02d67574847d 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_region_instance_template.html.markdown @@ -663,7 +663,7 @@ The `specific_reservation` block supports: * `enable_confidential_compute` (Optional) Defines whether the instance should have confidential compute enabled with AMD SEV. If enabled, [`on_host_maintenance`](#on_host_maintenance) can be set to MIGRATE if [`min_cpu_platform`](#min_cpu_platform) is set to `"AMD Milan"`. Otherwise, [`on_host_maintenance`](#on_host_maintenance) has to be set to TERMINATE or this will fail to create the VM. -* `confidential_instance_type` (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Defines the confidential computing technology the instance uses. SEV is an AMD feature. One of the following values: `SEV`, `SEV_SNP`. [`on_host_maintenance`](#on_host_maintenance) can be set to MIGRATE if [`confidential_instance_type`](#confidential_instance_type) is set to `SEV` and [`min_cpu_platform`](#min_cpu_platform) is set to `"AMD Milan"`. Otherwise, [`on_host_maintenance`](#on_host_maintenance) has to be set to TERMINATE or this will fail to create the VM. 
If `SEV_SNP`, currently [`min_cpu_platform`](#min_cpu_platform) has to be set to `"AMD Milan"` or this will fail to create the VM. +* `confidential_instance_type` (Optional) Defines the confidential computing technology the instance uses. SEV is an AMD feature. One of the following values: `SEV`, `SEV_SNP`. [`on_host_maintenance`](#on_host_maintenance) can be set to MIGRATE if [`confidential_instance_type`](#confidential_instance_type) is set to `SEV` and [`min_cpu_platform`](#min_cpu_platform) is set to `"AMD Milan"`. Otherwise, [`on_host_maintenance`](#on_host_maintenance) has to be set to TERMINATE or this will fail to create the VM. If `SEV_SNP`, currently [`min_cpu_platform`](#min_cpu_platform) has to be set to `"AMD Milan"` or this will fail to create the VM. The `network_performance_config` block supports: From 0836fbe77e64a07242c695326680a72bfe3861af Mon Sep 17 00:00:00 2001 From: Julio Castillo Date: Wed, 26 Jun 2024 02:58:06 +0200 Subject: [PATCH 214/356] Add support for GCS managed folders (#10786) Co-authored-by: Stephen Lewis (Burrows) --- mmv1/products/storage/ManagedFolder.yaml | 83 +++ .../storage_managed_folder_basic.tf.erb | 10 + .../storage_managed_folder.tf.erb | 2 + .../provider/provider_mmv1_resources.go.erb | 4 + .../storage/iam_storage_managed_folder.go | 184 +++++ .../iam_storage_managed_folder_test.go | 651 ++++++++++++++++++ .../storage_managed_folder_iam.html.markdown | 203 ++++++ 7 files changed, 1137 insertions(+) create mode 100644 mmv1/products/storage/ManagedFolder.yaml create mode 100644 mmv1/templates/terraform/examples/storage_managed_folder_basic.tf.erb create mode 100644 mmv1/templates/terraform/iam/example_config_body/storage_managed_folder.tf.erb create mode 100644 mmv1/third_party/terraform/services/storage/iam_storage_managed_folder.go create mode 100644 mmv1/third_party/terraform/services/storage/iam_storage_managed_folder_test.go create mode 100644 
mmv1/third_party/terraform/website/docs/r/storage_managed_folder_iam.html.markdown diff --git a/mmv1/products/storage/ManagedFolder.yaml b/mmv1/products/storage/ManagedFolder.yaml new file mode 100644 index 000000000000..41bde5e0855e --- /dev/null +++ b/mmv1/products/storage/ManagedFolder.yaml @@ -0,0 +1,83 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'ManagedFolder' +kind: 'storage#managedFolder' +base_url: 'b/{{bucket}}/managedFolders' +self_link: 'b/{{bucket}}/managedFolders/{{%name}}' +id_format: '{{bucket}}/{{name}}' +delete_url: 'b/{{bucket}}/managedFolders/{{%name}}' +has_self_link: true +immutable: true +skip_sweeper: true # Skipping sweeper since this is a child resource. +description: | + A Google Cloud Storage Managed Folder. + + You can apply Identity and Access Management (IAM) policies to + managed folders to grant principals access only to the objects + within the managed folder, which lets you more finely control access + for specific data sets and tables within a bucket. You can nest + managed folders up to 15 levels deep, including the parent managed + folder. + + Managed folders can only be created in buckets that have uniform + bucket-level access enabled. 
+references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': 'https://cloud.google.com/storage/docs/managed-folders' + api: 'https://cloud.google.com/storage/docs/json_api/v1/managedFolder' +# iam_policy: handwritten in mmv1/third_party/terraform/services/storage/iam_storage_managed_folder.go +import_format: + - '{{bucket}}/managedFolders/{{%name}}' + - '{{bucket}}/{{%name}}' +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'storage_managed_folder_basic' + primary_resource_id: 'folder' + vars: + bucket_name: 'my-bucket' +parameters: + - !ruby/object:Api::Type::ResourceRef + name: 'bucket' + resource: 'Bucket' + imports: 'name' + description: 'The name of the bucket that contains the managed folder.' + required: true + - !ruby/object:Api::Type::String + name: 'name' + description: | + The name of the managed folder expressed as a path. Must include + trailing '/'. For example, `example_dir/example_dir2/`. + required: true + # The API returns values with trailing slashes, even if not + # provided. Enforcing trailing slashes prevents diffs and ensures + # consistent output. + validation: !ruby/object:Provider::Terraform::Validation + regex: '/$' +properties: + - !ruby/object:Api::Type::String + name: createTime + description: | + The timestamp at which this managed folder was created. + output: true + - !ruby/object:Api::Type::String + name: updateTime + description: | + The timestamp at which this managed folder was most recently updated. + output: true + - !ruby/object:Api::Type::String + name: metageneration + description: | + The metadata generation of the managed folder. 
+ output: true diff --git a/mmv1/templates/terraform/examples/storage_managed_folder_basic.tf.erb b/mmv1/templates/terraform/examples/storage_managed_folder_basic.tf.erb new file mode 100644 index 000000000000..4f349c62e0ab --- /dev/null +++ b/mmv1/templates/terraform/examples/storage_managed_folder_basic.tf.erb @@ -0,0 +1,10 @@ +resource "google_storage_bucket" "bucket" { + name = "<%= ctx[:vars]['bucket_name'] %>" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "<%= ctx[:primary_resource_id] %>" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} diff --git a/mmv1/templates/terraform/iam/example_config_body/storage_managed_folder.tf.erb b/mmv1/templates/terraform/iam/example_config_body/storage_managed_folder.tf.erb new file mode 100644 index 000000000000..2c5d96e844fb --- /dev/null +++ b/mmv1/templates/terraform/iam/example_config_body/storage_managed_folder.tf.erb @@ -0,0 +1,2 @@ + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 9e26302c84c0..a301a030551d 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -252,6 +252,7 @@ var handwrittenIAMDatasources = map[string]*schema.Resource{ "google_kms_crypto_key_iam_policy": tpgiamresource.DataSourceIamPolicy(kms.IamKmsCryptoKeySchema, kms.NewKmsCryptoKeyIamUpdater), "google_spanner_instance_iam_policy": tpgiamresource.DataSourceIamPolicy(spanner.IamSpannerInstanceSchema, spanner.NewSpannerInstanceIamUpdater), "google_spanner_database_iam_policy": tpgiamresource.DataSourceIamPolicy(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater), + "google_storage_managed_folder_iam_policy": 
tpgiamresource.DataSourceIamPolicy(storage.StorageManagedFolderIamSchema, storage.StorageManagedFolderIamUpdaterProducer), "google_organization_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater), "google_project_iam_policy": tpgiamresource.DataSourceIamPolicy(resourcemanager.IamProjectSchema, resourcemanager.NewProjectIamUpdater), "google_pubsub_subscription_iam_policy": tpgiamresource.DataSourceIamPolicy(pubsub.IamPubsubSubscriptionSchema, pubsub.NewPubsubSubscriptionIamUpdater), @@ -426,6 +427,9 @@ var handwrittenIAMResources = map[string]*schema.Resource{ "google_spanner_database_iam_binding": tpgiamresource.ResourceIamBinding(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater, spanner.SpannerDatabaseIdParseFunc), "google_spanner_database_iam_member": tpgiamresource.ResourceIamMember(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater, spanner.SpannerDatabaseIdParseFunc), "google_spanner_database_iam_policy": tpgiamresource.ResourceIamPolicy(spanner.IamSpannerDatabaseSchema, spanner.NewSpannerDatabaseIamUpdater, spanner.SpannerDatabaseIdParseFunc), + "google_storage_managed_folder_iam_binding": tpgiamresource.ResourceIamBinding(storage.StorageManagedFolderIamSchema, storage.StorageManagedFolderIamUpdaterProducer, storage.StorageManagedFolderIdParseFunc), + "google_storage_managed_folder_iam_member": tpgiamresource.ResourceIamMember(storage.StorageManagedFolderIamSchema, storage.StorageManagedFolderIamUpdaterProducer, storage.StorageManagedFolderIdParseFunc), + "google_storage_managed_folder_iam_policy": tpgiamresource.ResourceIamPolicy(storage.StorageManagedFolderIamSchema, storage.StorageManagedFolderIamUpdaterProducer, storage.StorageManagedFolderIdParseFunc), "google_organization_iam_binding": tpgiamresource.ResourceIamBinding(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), 
"google_organization_iam_member": tpgiamresource.ResourceIamMember(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), "google_organization_iam_policy": tpgiamresource.ResourceIamPolicy(resourcemanager.IamOrganizationSchema, resourcemanager.NewOrganizationIamUpdater, resourcemanager.OrgIdParseFunc), diff --git a/mmv1/third_party/terraform/services/storage/iam_storage_managed_folder.go b/mmv1/third_party/terraform/services/storage/iam_storage_managed_folder.go new file mode 100644 index 000000000000..925a8e2131bb --- /dev/null +++ b/mmv1/third_party/terraform/services/storage/iam_storage_managed_folder.go @@ -0,0 +1,184 @@ +package storage + +import ( + "fmt" + "net/url" + + "github.com/hashicorp/errwrap" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "google.golang.org/api/cloudresourcemanager/v1" + + "github.com/hashicorp/terraform-provider-google/google/tpgiamresource" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +var StorageManagedFolderIamSchema = map[string]*schema.Schema{ + "bucket": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "managed_folder": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, + ValidateFunc: verify.ValidateRegexp(`/$`), + }, +} + +type StorageManagedFolderIamUpdater struct { + bucket string + managedFolder string + d tpgresource.TerraformResourceData + Config *transport_tpg.Config +} + +func StorageManagedFolderIamUpdaterProducer(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (tpgiamresource.ResourceIamUpdater, error) { + values := make(map[string]string) + + if v, ok := d.GetOk("bucket"); ok { + values["bucket"] = v.(string) + } + + if v, ok := 
d.GetOk("managed_folder"); ok { + values["managed_folder"] = v.(string) + } + + u := &StorageManagedFolderIamUpdater{ + bucket: values["bucket"], + managedFolder: values["managed_folder"], + d: d, + Config: config, + } + + if err := d.Set("bucket", u.bucket); err != nil { + return nil, fmt.Errorf("Error setting bucket: %s", err) + } + if err := d.Set("managed_folder", u.managedFolder); err != nil { + return nil, fmt.Errorf("Error setting managed_folder: %s", err) + } + + return u, nil +} + +func StorageManagedFolderIdParseFunc(d *schema.ResourceData, config *transport_tpg.Config) error { + values := make(map[string]string) + + m, err := tpgresource.GetImportIdQualifiers([]string{"(?P[^/]+)/managedFolders/(?P.+)", "(?P[^/]+)/(?P.+)"}, d, config, d.Id()) + if err != nil { + return err + } + + for k, v := range m { + values[k] = v + } + + u := &StorageManagedFolderIamUpdater{ + bucket: values["bucket"], + managedFolder: values["managed_folder"], + d: d, + Config: config, + } + if err := d.Set("bucket", u.bucket); err != nil { + return fmt.Errorf("Error setting bucket: %s", err) + } + if err := d.Set("managed_folder", u.managedFolder); err != nil { + return fmt.Errorf("Error setting managed_folder: %s", err) + } + d.SetId(u.GetResourceId()) + return nil +} + +func (u *StorageManagedFolderIamUpdater) GetResourceIamPolicy() (*cloudresourcemanager.Policy, error) { + url, err := u.qualifyManagedFolderUrl("iam") + if err != nil { + return nil, err + } + + var obj map[string]interface{} + url, err = transport_tpg.AddQueryParams(url, map[string]string{"optionsRequestedPolicyVersion": fmt.Sprintf("%d", tpgiamresource.IamPolicyVersion)}) + if err != nil { + return nil, err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return nil, err + } + + policy, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "GET", + RawURL: url, + UserAgent: userAgent, + Body: obj, + }) + if err 
!= nil { + return nil, errwrap.Wrapf(fmt.Sprintf("Error retrieving IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + out := &cloudresourcemanager.Policy{} + err = tpgresource.Convert(policy, out) + if err != nil { + return nil, errwrap.Wrapf("Cannot convert a policy to a resource manager policy: {{err}}", err) + } + + return out, nil +} + +func (u *StorageManagedFolderIamUpdater) SetResourceIamPolicy(policy *cloudresourcemanager.Policy) error { + json, err := tpgresource.ConvertToMap(policy) + if err != nil { + return err + } + + obj := json + + url, err := u.qualifyManagedFolderUrl("iam") + if err != nil { + return err + } + + userAgent, err := tpgresource.GenerateUserAgentString(u.d, u.Config.UserAgent) + if err != nil { + return err + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: u.Config, + Method: "PUT", + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: u.d.Timeout(schema.TimeoutCreate), + }) + if err != nil { + return errwrap.Wrapf(fmt.Sprintf("Error setting IAM policy for %s: {{err}}", u.DescribeResource()), err) + } + + return nil +} + +func (u *StorageManagedFolderIamUpdater) qualifyManagedFolderUrl(methodIdentifier string) (string, error) { + urlTemplate := fmt.Sprintf("{{StorageBasePath}}b/%s/managedFolders/%s/%s", u.bucket, url.PathEscape(u.managedFolder), methodIdentifier) + url, err := tpgresource.ReplaceVars(u.d, u.Config, urlTemplate) + if err != nil { + return "", err + } + return url, nil +} + +func (u *StorageManagedFolderIamUpdater) GetResourceId() string { + return fmt.Sprintf("b/%s/managedFolders/%s", u.bucket, u.managedFolder) +} + +func (u *StorageManagedFolderIamUpdater) GetMutexKey() string { + return fmt.Sprintf("iam-storage-managedfolder-%s", u.GetResourceId()) +} + +func (u *StorageManagedFolderIamUpdater) DescribeResource() string { + return fmt.Sprintf("storage managedfolder %q", u.GetResourceId()) +} diff --git 
a/mmv1/third_party/terraform/services/storage/iam_storage_managed_folder_test.go b/mmv1/third_party/terraform/services/storage/iam_storage_managed_folder_test.go new file mode 100644 index 000000000000..d965ef054a2b --- /dev/null +++ b/mmv1/third_party/terraform/services/storage/iam_storage_managed_folder_test.go @@ -0,0 +1,651 @@ +package storage_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" +) + +func TestAccStorageManagedFolderIamBindingGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/storage.objectViewer", + "admin_role": "roles/storage.admin", + "condition_title": "expires_after_2019_12_31", + "condition_expr": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + "condition_desc": "Expiring at midnight of 2019-12-31", + "condition_title_no_desc": "expires_after_2019_12_31-no-description", + "condition_expr_no_desc": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageManagedFolderIamBinding_basicGenerated(context), + }, + { + ResourceName: "google_storage_managed_folder_iam_binding.foo", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + // Test Iam Binding update + Config: testAccStorageManagedFolderIamBinding_updateGenerated(context), + }, + { + ResourceName: "google_storage_managed_folder_iam_binding.foo", + 
ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccStorageManagedFolderIamMemberGenerated(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/storage.objectViewer", + "admin_role": "roles/storage.admin", + "condition_title": "expires_after_2019_12_31", + "condition_expr": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + "condition_desc": "Expiring at midnight of 2019-12-31", + "condition_title_no_desc": "expires_after_2019_12_31-no-description", + "condition_expr_no_desc": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + // Test Iam Member creation (no update for member, no need to test) + Config: testAccStorageManagedFolderIamMember_basicGenerated(context), + }, + { + ResourceName: "google_storage_managed_folder_iam_member.foo", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer user:admin@hashicorptest.com", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccStorageManagedFolderIamPolicyGenerated(t *testing.T) { + t.Parallel() + + // This may skip test, so do it first + sa := envvar.GetTestServiceAccountFromEnv(t) + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/storage.objectViewer", + "admin_role": "roles/storage.admin", + "condition_title": "expires_after_2019_12_31", + "condition_expr": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + "condition_desc": "Expiring at midnight of 2019-12-31", + 
"condition_title_no_desc": "expires_after_2019_12_31-no-description", + "condition_expr_no_desc": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + } + context["service_account"] = sa + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageManagedFolderIamPolicy_basicGenerated(context), + Check: resource.TestCheckResourceAttrSet("data.google_storage_managed_folder_iam_policy.foo", "policy_data"), + }, + { + ResourceName: "google_storage_managed_folder_iam_policy.foo", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccStorageManagedFolderIamPolicy_emptyBinding(context), + }, + { + ResourceName: "google_storage_managed_folder_iam_policy.foo", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccStorageManagedFolderIamBindingGenerated_withCondition(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/storage.objectViewer", + "admin_role": "roles/storage.admin", + "condition_title": "expires_after_2019_12_31", + "condition_expr": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + "condition_desc": "Expiring at midnight of 2019-12-31", + "condition_title_no_desc": "expires_after_2019_12_31-no-description", + "condition_expr_no_desc": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: 
testAccStorageManagedFolderIamBinding_withConditionGenerated(context), + }, + { + ResourceName: "google_storage_managed_folder_iam_binding.foo", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer %s", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"]), context["condition_title"]), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccStorageManagedFolderIamBindingGenerated_withAndWithoutCondition(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/storage.objectViewer", + "admin_role": "roles/storage.admin", + "condition_title": "expires_after_2019_12_31", + "condition_expr": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + "condition_desc": "Expiring at midnight of 2019-12-31", + "condition_title_no_desc": "expires_after_2019_12_31-no-description", + "condition_expr_no_desc": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageManagedFolderIamBinding_withAndWithoutConditionGenerated(context), + }, + { + ResourceName: "google_storage_managed_folder_iam_binding.foo", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_storage_managed_folder_iam_binding.foo2", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer %s", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"]), context["condition_title"]), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: 
"google_storage_managed_folder_iam_binding.foo3", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer %s", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"]), context["condition_title_no_desc"]), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccStorageManagedFolderIamMemberGenerated_withCondition(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/storage.objectViewer", + "admin_role": "roles/storage.admin", + "condition_title": "expires_after_2019_12_31", + "condition_expr": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + "condition_desc": "Expiring at midnight of 2019-12-31", + "condition_title_no_desc": "expires_after_2019_12_31-no-description", + "condition_expr_no_desc": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageManagedFolderIamMember_withConditionGenerated(context), + }, + { + ResourceName: "google_storage_managed_folder_iam_member.foo", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer user:admin@hashicorptest.com %s", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"]), context["condition_title"]), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccStorageManagedFolderIamMemberGenerated_withAndWithoutCondition(t *testing.T) { + // Multiple fine-grained resources + acctest.SkipIfVcr(t) + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/storage.objectViewer", + "admin_role": "roles/storage.admin", + "condition_title": "expires_after_2019_12_31", + "condition_expr": `request.time < 
timestamp(\"2020-01-01T00:00:00Z\")`, + "condition_desc": "Expiring at midnight of 2019-12-31", + "condition_title_no_desc": "expires_after_2019_12_31-no-description", + "condition_expr_no_desc": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageManagedFolderIamMember_withAndWithoutConditionGenerated(context), + }, + { + ResourceName: "google_storage_managed_folder_iam_member.foo", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer user:admin@hashicorptest.com", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"])), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_storage_managed_folder_iam_member.foo2", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer user:admin@hashicorptest.com %s", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"]), context["condition_title"]), + ImportState: true, + ImportStateVerify: true, + }, + { + ResourceName: "google_storage_managed_folder_iam_member.foo3", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/ roles/storage.objectViewer user:admin@hashicorptest.com %s", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"]), context["condition_title_no_desc"]), + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccStorageManagedFolderIamPolicyGenerated_withCondition(t *testing.T) { + t.Parallel() + + // This may skip test, so do it first + sa := envvar.GetTestServiceAccountFromEnv(t) + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + "role": "roles/storage.objectViewer", + "admin_role": "roles/storage.admin", + "condition_title": "expires_after_2019_12_31", + "condition_expr": `request.time < 
timestamp(\"2020-01-01T00:00:00Z\")`, + "condition_desc": "Expiring at midnight of 2019-12-31", + "condition_title_no_desc": "expires_after_2019_12_31-no-description", + "condition_expr_no_desc": `request.time < timestamp(\"2020-01-01T00:00:00Z\")`, + } + context["service_account"] = sa + + // Test should have 3 bindings: one with a description and one without, and a third for an admin role. Any < chars are converted to a unicode character by the API. + expectedPolicyData := acctest.Nprintf(`{"bindings":[{"members":["serviceAccount:%{service_account}"],"role":"%{admin_role}"},{"condition":{"description":"%{condition_desc}","expression":"%{condition_expr}","title":"%{condition_title}"},"members":["user:admin@hashicorptest.com"],"role":"%{role}"},{"condition":{"expression":"%{condition_expr}","title":"%{condition_title}-no-description"},"members":["user:admin@hashicorptest.com"],"role":"%{role}"}]}`, context) + expectedPolicyData = strings.Replace(expectedPolicyData, "<", "\\u003c", -1) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccStorageManagedFolderIamPolicy_withConditionGenerated(context), + Check: resource.ComposeAggregateTestCheckFunc( + // TODO(SarahFrench) - uncomment once https://github.com/GoogleCloudPlatform/magic-modules/pull/6466 merged + // resource.TestCheckResourceAttr("data.google_iam_policy.foo", "policy_data", expectedPolicyData), + resource.TestCheckResourceAttr("google_storage_managed_folder_iam_policy.foo", "policy_data", expectedPolicyData), + resource.TestCheckResourceAttrWith("data.google_iam_policy.foo", "policy_data", tpgresource.CheckGoogleIamPolicy), + ), + }, + { + ResourceName: "google_storage_managed_folder_iam_policy.foo", + ImportStateId: fmt.Sprintf("b/%s/managedFolders/managed/folder/name/", fmt.Sprintf("tf-test-my-bucket%s", context["random_suffix"])), + ImportState: 
true, + ImportStateVerify: true, + }, + }, + }) +} + +func testAccStorageManagedFolderIamMember_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +resource "google_storage_managed_folder_iam_member" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "%{role}" + member = "user:admin@hashicorptest.com" +} +`, context) +} + +func testAccStorageManagedFolderIamPolicy_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + } + binding { + role = "%{admin_role}" + members = ["serviceAccount:%{service_account}"] + } +} + +resource "google_storage_managed_folder_iam_policy" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + policy_data = data.google_iam_policy.foo.policy_data +} + +data "google_storage_managed_folder_iam_policy" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + depends_on = [ + google_storage_managed_folder_iam_policy.foo + ] +} +`, context) +} + +func testAccStorageManagedFolderIamPolicy_emptyBinding(context map[string]interface{}) string { + return acctest.Nprintf(` +resource 
"google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +data "google_iam_policy" "foo" { +} + +resource "google_storage_managed_folder_iam_policy" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} + +func testAccStorageManagedFolderIamBinding_basicGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +resource "google_storage_managed_folder_iam_binding" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} +`, context) +} + +func testAccStorageManagedFolderIamBinding_updateGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +resource "google_storage_managed_folder_iam_binding" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "%{role}" + members = ["user:admin@hashicorptest.com", "user:gterraformtest1@gmail.com"] +} +`, context) +} + +func 
testAccStorageManagedFolderIamBinding_withConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +resource "google_storage_managed_folder_iam_binding" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + condition { + title = "%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } +} +`, context) +} + +func testAccStorageManagedFolderIamBinding_withAndWithoutConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +resource "google_storage_managed_folder_iam_binding" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] +} + +resource "google_storage_managed_folder_iam_binding" "foo2" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + condition { + title = "%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } +} + +resource "google_storage_managed_folder_iam_binding" "foo3" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = 
google_storage_managed_folder.folder.name + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + condition { + # Check that lack of description doesn't cause any issues + # Relates to issue : https://github.com/hashicorp/terraform-provider-google/issues/8701 + title = "%{condition_title_no_desc}" + expression = "%{condition_expr_no_desc}" + } +} +`, context) +} + +func testAccStorageManagedFolderIamMember_withConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +resource "google_storage_managed_folder_iam_member" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "%{role}" + member = "user:admin@hashicorptest.com" + condition { + title = "%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } +} +`, context) +} + +func testAccStorageManagedFolderIamMember_withAndWithoutConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +resource "google_storage_managed_folder_iam_member" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "%{role}" + member = "user:admin@hashicorptest.com" +} + +resource "google_storage_managed_folder_iam_member" "foo2" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = 
google_storage_managed_folder.folder.name + role = "%{role}" + member = "user:admin@hashicorptest.com" + condition { + title = "%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } +} + +resource "google_storage_managed_folder_iam_member" "foo3" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "%{role}" + member = "user:admin@hashicorptest.com" + condition { + # Check that lack of description doesn't cause any issues + # Relates to issue : https://github.com/hashicorp/terraform-provider-google/issues/8701 + title = "%{condition_title_no_desc}" + expression = "%{condition_expr_no_desc}" + } +} +`, context) +} + +func testAccStorageManagedFolderIamPolicy_withConditionGenerated(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_storage_bucket" "bucket" { + name = "tf-test-my-bucket%{random_suffix}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "folder" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} + +data "google_iam_policy" "foo" { + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + condition { + # Check that lack of description doesn't cause any issues + # Relates to issue : https://github.com/hashicorp/terraform-provider-google/issues/8701 + title = "%{condition_title_no_desc}" + expression = "%{condition_expr_no_desc}" + } + } + binding { + role = "%{role}" + members = ["user:admin@hashicorptest.com"] + condition { + title = "%{condition_title}" + description = "%{condition_desc}" + expression = "%{condition_expr}" + } + } + binding { + role = "%{admin_role}" + members = ["serviceAccount:%{service_account}"] + } +} + +resource "google_storage_managed_folder_iam_policy" "foo" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + 
policy_data = data.google_iam_policy.foo.policy_data +} +`, context) +} diff --git a/mmv1/third_party/terraform/website/docs/r/storage_managed_folder_iam.html.markdown b/mmv1/third_party/terraform/website/docs/r/storage_managed_folder_iam.html.markdown new file mode 100644 index 000000000000..92894e372454 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/r/storage_managed_folder_iam.html.markdown @@ -0,0 +1,203 @@ +--- +subcategory: "Cloud Storage" +description: |- + Collection of resources to manage IAM policy for Cloud Storage ManagedFolder +--- + +# IAM policy for Cloud Storage ManagedFolder +Three different resources help you manage your IAM policy for Cloud Storage ManagedFolder. Each of these resources serves a different use case: + +* `google_storage_managed_folder_iam_policy`: Authoritative. Sets the IAM policy for the managedfolder and replaces any existing policy already attached. +* `google_storage_managed_folder_iam_binding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the managedfolder are preserved. +* `google_storage_managed_folder_iam_member`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the managedfolder are preserved. + +A data source can be used to retrieve policy data in the event that you do not need creation. + +* `google_storage_managed_folder_iam_policy`: Retrieves the IAM policy for the managedfolder + +~> **Note:** `google_storage_managed_folder_iam_policy` **cannot** be used in conjunction with `google_storage_managed_folder_iam_binding` and `google_storage_managed_folder_iam_member` or they will fight over what your policy should be. + +~> **Note:** `google_storage_managed_folder_iam_binding` resources **can be** used in conjunction with `google_storage_managed_folder_iam_member` resources **only if** they do not grant privilege to the same role. 
+ +~> **Note:** This resource supports IAM Conditions but they have some known limitations which can be found [here](https://cloud.google.com/iam/docs/conditions-overview#limitations). Please review this article if you are having issues with IAM Conditions. + + +## google_storage_managed_folder_iam_policy + +```hcl +data "google_iam_policy" "admin" { + binding { + role = "roles/storage.admin" + members = [ + "user:jane@example.com", + ] + } +} + +resource "google_storage_managed_folder_iam_policy" "policy" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + policy_data = data.google_iam_policy.admin.policy_data +} +``` + +With IAM Conditions: + +```hcl +data "google_iam_policy" "admin" { + binding { + role = "roles/storage.admin" + members = [ + "user:jane@example.com", + ] + + condition { + title = "expires_after_2019_12_31" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } + } +} + +resource "google_storage_managed_folder_iam_policy" "policy" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + policy_data = data.google_iam_policy.admin.policy_data +} +``` +## google_storage_managed_folder_iam_binding + +```hcl +resource "google_storage_managed_folder_iam_binding" "binding" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "roles/storage.admin" + members = [ + "user:jane@example.com", + ] +} +``` + +With IAM Conditions: + +```hcl +resource "google_storage_managed_folder_iam_binding" "binding" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "roles/storage.admin" + members = [ + "user:jane@example.com", + ] + + condition { + title = "expires_after_2019_12_31" + description = "Expiring at midnight of 
2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +``` +## google_storage_managed_folder_iam_member + +```hcl +resource "google_storage_managed_folder_iam_member" "member" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "roles/storage.admin" + member = "user:jane@example.com" +} +``` + +With IAM Conditions: + +```hcl +resource "google_storage_managed_folder_iam_member" "member" { + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name + role = "roles/storage.admin" + member = "user:jane@example.com" + + condition { + title = "expires_after_2019_12_31" + description = "Expiring at midnight of 2019-12-31" + expression = "request.time < timestamp(\"2020-01-01T00:00:00Z\")" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `bucket` - (Required) The name of the bucket that contains the managed folder. Used to find the parent resource to bind the IAM policy to +* `managed_folder` - (Required) Used to find the parent resource to bind the IAM policy to + +* `member/members` - (Required) Identities that will be granted the privilege in `role`. + Each entry can have one of the following values: + * **allUsers**: A special identifier that represents anyone who is on the internet; with or without a Google account. + * **allAuthenticatedUsers**: A special identifier that represents anyone who is authenticated with a Google account or a service account. + * **user:{emailid}**: An email address that represents a specific Google account. For example, alice@gmail.com or joe@example.com. + * **serviceAccount:{emailid}**: An email address that represents a service account. For example, my-other-app@appspot.gserviceaccount.com. + * **group:{emailid}**: An email address that represents a Google group. For example, admins@example.com. 
+ * **domain:{domain}**: A G Suite domain (primary, instead of alias) name that represents all the users of that domain. For example, google.com or example.com. + * **projectOwner:projectid**: Owners of the given project. For example, "projectOwner:my-example-project" + * **projectEditor:projectid**: Editors of the given project. For example, "projectEditor:my-example-project" + * **projectViewer:projectid**: Viewers of the given project. For example, "projectViewer:my-example-project" + +* `role` - (Required) The role that should be applied. Only one + `google_storage_managed_folder_iam_binding` can be used per role. Note that custom roles must be of the format + `[projects|organizations]/{parent-name}/roles/{role-name}`. + +* `policy_data` - (Required only by `google_storage_managed_folder_iam_policy`) The policy data generated by + a `google_iam_policy` data source. + +* `condition` - (Optional) An [IAM Condition](https://cloud.google.com/iam/docs/conditions-overview) for a given binding. + Structure is documented below. + +--- + +The `condition` block supports: + +* `expression` - (Required) Textual representation of an expression in Common Expression Language syntax. + +* `title` - (Required) A title for the expression, i.e. a short string describing its purpose. + +* `description` - (Optional) An optional description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + +~> **Warning:** Terraform considers the `role` and condition contents (`title`+`description`+`expression`) as the + identifier for the binding. This means that if any part of the condition is changed out-of-band, Terraform will + consider it to be an entirely different resource and will treat it as such. +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are +exported: + +* `etag` - (Computed) The etag of the IAM policy. 
+ +## Import + +For all import syntaxes, the "resource in question" can take any of the following forms: + +* b/{{bucket}}/managedFolders/{{managed_folder}} +* {{bucket}}/{{managed_folder}} + +Any variables not passed in the import command will be taken from the provider configuration. + +Cloud Storage managedfolder IAM resources can be imported using the resource identifiers, role, and member. + +IAM member imports use space-delimited identifiers: the resource in question, the role, and the member identity, e.g. +``` +$ terraform import google_storage_managed_folder_iam_member.editor "b/{{bucket}}/managedFolders/{{managed_folder}} roles/storage.objectViewer user:jane@example.com" +``` + +IAM binding imports use space-delimited identifiers: the resource in question and the role, e.g. +``` +$ terraform import google_storage_managed_folder_iam_binding.editor "b/{{bucket}}/managedFolders/{{managed_folder}} roles/storage.objectViewer" +``` + +IAM policy imports use the identifier of the resource in question, e.g. +``` +$ terraform import google_storage_managed_folder_iam_policy.editor b/{{bucket}}/managedFolders/{{managed_folder}} +``` + +-> **Custom Roles**: If you're importing an IAM resource with a custom role, make sure to use the + full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. 
From eb25cea968f4f164ece76e48e61fab7013076458 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Wed, 26 Jun 2024 07:50:16 -0500 Subject: [PATCH 215/356] go rewrite - all remaining compute documentation diffs (#11046) --- mmv1/api/resource.go | 13 ++-- mmv1/api/resource/docs.go | 4 +- mmv1/api/resource/examples.go | 1 + mmv1/description-copy.go | 4 ++ mmv1/products/beyondcorp/AppConnection.yaml | 4 +- .../compute/GlobalForwardingRule.yaml | 3 +- mmv1/products/compute/RegionUrlMap.yaml | 4 +- mmv1/products/compute/ResourcePolicy.yaml | 3 +- mmv1/products/compute/UrlMap.yaml | 4 +- .../compute/go_GlobalForwardingRule.yaml | 2 +- mmv1/products/compute/go_RegionUrlMap.yaml | 36 ++++++++++ mmv1/products/compute/go_ResourcePolicy.yaml | 2 +- mmv1/products/compute/go_Subnetwork.yaml | 2 + mmv1/products/compute/go_UrlMap.yaml | 53 ++++++++++++++ mmv1/products/datafusion/go_Instance.yaml | 51 ++++++++++++- ...udfunctions2_runtime_update_policy.go.tmpl | 15 ++++ .../examples/go/cloudfunctions2_abiu.tf.tmpl | 72 +++++++++++++++++++ .../go/cloudfunctions2_abiu_on_deploy.tf.tmpl | 72 +++++++++++++++++++ .../go/data_fusion_instance_psc.tf.tmpl | 39 ++++++++++ .../go/healthcare_dataset_cmek.tf.tmpl | 36 ++++++++++ ...n_network_endpoint_group_appengine.tf.tmpl | 2 +- ...ork_endpoint_group_appengine_empty.tf.tmpl | 2 +- ...on_network_endpoint_group_cloudrun.tf.tmpl | 2 +- ...on_network_endpoint_group_appengine.tf.erb | 2 +- ...work_endpoint_group_appengine_empty.tf.erb | 2 +- ...ion_network_endpoint_group_cloudrun.tf.erb | 2 +- .../property_documentation.html.markdown.tmpl | 2 +- .../terraform/resource.html.markdown.erb | 2 +- .../terraform/resource.html.markdown.tmpl | 25 ++++--- 29 files changed, 420 insertions(+), 41 deletions(-) create mode 100644 mmv1/templates/terraform/encoders/go/cloudfunctions2_runtime_update_policy.go.tmpl create mode 100644 mmv1/templates/terraform/examples/go/cloudfunctions2_abiu.tf.tmpl create mode 100644 
mmv1/templates/terraform/examples/go/cloudfunctions2_abiu_on_deploy.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/data_fusion_instance_psc.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/healthcare_dataset_cmek.tf.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 865a4877728b..07659cb582e6 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -16,8 +16,8 @@ import ( "fmt" "maps" "regexp" - "strings" "sort" + "strings" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/product" "github.com/GoogleCloudPlatform/magic-modules/mmv1/api/resource" @@ -948,10 +948,7 @@ func ImportIdFormats(importFormat, identity []string, baseUrl string) []string { var idFormats []string if len(importFormat) == 0 { underscoredBaseUrl := baseUrl - // TODO Q2: underscore base url needed? - // underscored_base_url = base_url.gsub( - // /{{[[:word:]]+}}/, &:underscore - // ) + if len(identity) == 0 { idFormats = []string{fmt.Sprintf("%s/{{name}}", underscoredBaseUrl)} } else { @@ -960,7 +957,7 @@ func ImportIdFormats(importFormat, identity []string, baseUrl string) []string { transformedIdentity = append(transformedIdentity, fmt.Sprintf("{{%s}}", id)) } identityPath := strings.Join(transformedIdentity, "/") - idFormats = []string{fmt.Sprintf("%s/%s", underscoredBaseUrl, identityPath)} + idFormats = []string{fmt.Sprintf("%s/%s", underscoredBaseUrl, google.Underscore(identityPath))} } } else { idFormats = importFormat @@ -1032,7 +1029,7 @@ func (r Resource) IgnoreReadPropertiesToString(e resource.Examples) string { props = append(props, fmt.Sprintf("\"%s\"", google.Underscore(tp))) } for _, tp := range ignoreReadFields(r.AllUserProperties()) { - props = append(props, fmt.Sprintf("\"%s\"", tp)) + props = append(props, fmt.Sprintf("\"%s\"", google.Underscore(tp))) } slices.Sort(props) @@ -1492,7 +1489,7 @@ func (r Resource) PropertiesByCustomUpdateGroups() []UpdateGroup { UpdateId: prop.UpdateId, FingerprintName: 
prop.FingerprintName} - if slices.Contains(updateGroups, groupedProperty){ + if slices.Contains(updateGroups, groupedProperty) { continue } updateGroups = append(updateGroups, groupedProperty) diff --git a/mmv1/api/resource/docs.go b/mmv1/api/resource/docs.go index 01fec263dd87..2b81541c09c3 100644 --- a/mmv1/api/resource/docs.go +++ b/mmv1/api/resource/docs.go @@ -33,10 +33,10 @@ type Docs struct { Note string // attr_reader : - RequiredProperties string + RequiredProperties string `yaml:"required_properties"` // attr_reader : - OptionalProperties string + OptionalProperties string `yaml:"optional_properties"` // attr_reader : Attributes string diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index a1386f8f7081..c669e2e89570 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -204,6 +204,7 @@ func (e *Examples) SetHCLText() { } e.TestEnvVars = docTestEnvVars e.DocumentationHCLText = ExecuteTemplate(e, e.ConfigPath, true) + e.DocumentationHCLText = regexp.MustCompile(`\n\n$`).ReplaceAllString(e.DocumentationHCLText, "\n") // Remove region tags re1 := regexp.MustCompile(`# \[[a-zA-Z_ ]+\]\n`) diff --git a/mmv1/description-copy.go b/mmv1/description-copy.go index 0294174e95c8..1cd004b31a2d 100644 --- a/mmv1/description-copy.go +++ b/mmv1/description-copy.go @@ -161,5 +161,9 @@ func terminateText(line string) bool { } } + if regexp.MustCompile(`^\s*https:[\s$]*`).MatchString(line) { + return false + } + return regexp.MustCompile(`^\s*[a-z_]+:[\s$]*`).MatchString(line) } diff --git a/mmv1/products/beyondcorp/AppConnection.yaml b/mmv1/products/beyondcorp/AppConnection.yaml index 1ebf2c7368fb..55bdb8dc1d9b 100644 --- a/mmv1/products/beyondcorp/AppConnection.yaml +++ b/mmv1/products/beyondcorp/AppConnection.yaml @@ -95,8 +95,8 @@ properties: - !ruby/object:Api::Type::String name: 'type' description: | - The type of network connectivity used by the AppConnection. 
Refer to - https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#type + The type of network connectivity used by the AppConnection. Refer + to https://cloud.google.com/beyondcorp/docs/reference/rest/v1/projects.locations.appConnections#type for a list of possible values. immutable: true - !ruby/object:Api::Type::NestedObject diff --git a/mmv1/products/compute/GlobalForwardingRule.yaml b/mmv1/products/compute/GlobalForwardingRule.yaml index f5d3dfde458b..a8c365e59bb6 100644 --- a/mmv1/products/compute/GlobalForwardingRule.yaml +++ b/mmv1/products/compute/GlobalForwardingRule.yaml @@ -25,8 +25,7 @@ description: | balancing. Global forwarding rules can only be used for HTTP load balancing. - For more information, see - https://cloud.google.com/compute/docs/load-balancing/http/ + For more information, see https://cloud.google.com/compute/docs/load-balancing/http/ async: !ruby/object:Api::OpAsync operation: !ruby/object:Api::OpAsync::Operation kind: 'compute#operation' diff --git a/mmv1/products/compute/RegionUrlMap.yaml b/mmv1/products/compute/RegionUrlMap.yaml index 2d2645f08398..2e31eea20ab9 100644 --- a/mmv1/products/compute/RegionUrlMap.yaml +++ b/mmv1/products/compute/RegionUrlMap.yaml @@ -749,7 +749,7 @@ properties: * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, - example: disconnects, reset, read timeout, connection failure, and refused + for example: disconnects, reset, read timeout, connection failure, and refused streams. * gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. 
@@ -1197,7 +1197,7 @@ properties: - 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, - example: disconnects, reset, read timeout, connection failure, and refused + for example: disconnects, reset, read timeout, connection failure, and refused streams. - gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. diff --git a/mmv1/products/compute/ResourcePolicy.yaml b/mmv1/products/compute/ResourcePolicy.yaml index 8e645187a41e..e6ba60cf598c 100644 --- a/mmv1/products/compute/ResourcePolicy.yaml +++ b/mmv1/products/compute/ResourcePolicy.yaml @@ -138,8 +138,7 @@ properties: description: | Time within the window to start the operations. It must be in an hourly format "HH:MM", - where HH : [00-23] and MM : [00] GMT. - eg: 21:00 + where HH : [00-23] and MM : [00] GMT. eg: 21:00 required: true validation: !ruby/object:Provider::Terraform::Validation function: 'verify.ValidateHourlyOnly' diff --git a/mmv1/products/compute/UrlMap.yaml b/mmv1/products/compute/UrlMap.yaml index 5b0cda5141d1..054b6427feb1 100644 --- a/mmv1/products/compute/UrlMap.yaml +++ b/mmv1/products/compute/UrlMap.yaml @@ -703,7 +703,7 @@ properties: * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, - example: disconnects, reset, read timeout, connection failure, and refused + for example: disconnects, reset, read timeout, connection failure, and refused streams. * gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. 
@@ -1415,7 +1415,7 @@ properties: * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, - example: disconnects, reset, read timeout, connection failure, and refused + for example: disconnects, reset, read timeout, connection failure, and refused streams. * gateway-error: Similar to 5xx, but only applies to response codes 502, 503 or 504. diff --git a/mmv1/products/compute/go_GlobalForwardingRule.yaml b/mmv1/products/compute/go_GlobalForwardingRule.yaml index c61a5b1e78b0..9ebea92c0892 100644 --- a/mmv1/products/compute/go_GlobalForwardingRule.yaml +++ b/mmv1/products/compute/go_GlobalForwardingRule.yaml @@ -21,7 +21,7 @@ description: | balancing. Global forwarding rules can only be used for HTTP load balancing. - For more information, see + For more information, see https://cloud.google.com/compute/docs/load-balancing/http/ docs: base_url: 'projects/{{project}}/global/forwardingRules' has_self_link: true diff --git a/mmv1/products/compute/go_RegionUrlMap.yaml b/mmv1/products/compute/go_RegionUrlMap.yaml index f1f796d6c24c..0094adf584ad 100644 --- a/mmv1/products/compute/go_RegionUrlMap.yaml +++ b/mmv1/products/compute/go_RegionUrlMap.yaml @@ -753,6 +753,24 @@ properties: * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, + for example: disconnects, reset, read timeout, connection failure, and refused + streams. + * gateway-error: Similar to 5xx, but only applies to response codes + 502, 503 or 504. + * connect-failure: Loadbalancer will retry on failures + connecting to backend services, for example due to connection timeouts. + * retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. + Currently the only retriable error supported is 409. 
+ * refused-stream: Loadbalancer will retry if the backend service resets the stream with a + REFUSED_STREAM error code. This reset type indicates that it is safe to retry. + * cancelled: Loadbalancer will retry if the gRPC status code in the response + header is set to cancelled + * deadline-exceeded: Loadbalancer will retry if the + gRPC status code in the response header is set to deadline-exceeded + * resource-exhausted: Loadbalancer will retry if the gRPC status code in the response + header is set to resource-exhausted + * unavailable: Loadbalancer will retry if the gRPC status code in + the response header is set to unavailable item_type: type: String - name: 'timeout' @@ -1196,6 +1214,24 @@ properties: - 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, + for example: disconnects, reset, read timeout, connection failure, and refused + streams. + - gateway-error: Similar to 5xx, but only applies to response codes + 502, 503 or 504. + - connect-failure: Loadbalancer will retry on failures + connecting to backend services, for example due to connection timeouts. + - retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. + Currently the only retriable error supported is 409. + - refused-stream: Loadbalancer will retry if the backend service resets the stream with a + REFUSED_STREAM error code. This reset type indicates that it is safe to retry. 
+ - cancelled: Loadbalancer will retry if the gRPC status code in the response + header is set to cancelled + - deadline-exceeded: Loadbalancer will retry if the + gRPC status code in the response header is set to deadline-exceeded + - resource-exhausted: Loadbalancer will retry if the gRPC status code in the response + header is set to resource-exhausted + - unavailable: Loadbalancer will retry if + the gRPC status code in the response header is set to unavailable item_type: type: String - name: 'timeout' diff --git a/mmv1/products/compute/go_ResourcePolicy.yaml b/mmv1/products/compute/go_ResourcePolicy.yaml index 1e689c492b05..b44f07dfc4b1 100644 --- a/mmv1/products/compute/go_ResourcePolicy.yaml +++ b/mmv1/products/compute/go_ResourcePolicy.yaml @@ -134,7 +134,7 @@ properties: description: | Time within the window to start the operations. It must be in an hourly format "HH:MM", - where HH : [00-23] and MM : [00] GMT. + where HH : [00-23] and MM : [00] GMT. eg: 21:00 required: true validation: function: 'verify.ValidateHourlyOnly' diff --git a/mmv1/products/compute/go_Subnetwork.yaml b/mmv1/products/compute/go_Subnetwork.yaml index 4184e5a30d5a..2778d4992a3d 100644 --- a/mmv1/products/compute/go_Subnetwork.yaml +++ b/mmv1/products/compute/go_Subnetwork.yaml @@ -342,6 +342,8 @@ properties: type: String description: | Export filter used to define which VPC flow logs should be logged, as as CEL expression. See + https://cloud.google.com/vpc/docs/flow-logs#filtering for details on how to format this field. + The default value is 'true', which evaluates to include everything. 
at_least_one_of: - 'log_config.0.aggregation_interval' - 'log_config.0.flow_sampling' diff --git a/mmv1/products/compute/go_UrlMap.yaml b/mmv1/products/compute/go_UrlMap.yaml index dee0cd5963de..1890eafba34a 100644 --- a/mmv1/products/compute/go_UrlMap.yaml +++ b/mmv1/products/compute/go_UrlMap.yaml @@ -365,6 +365,11 @@ properties: type: ResourceRef description: | The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + compute/v1/projects/project/global/backendBuckets/myBackendBucket + global/backendBuckets/myBackendBucket + If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. + If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). resource: 'BackendBucket' imports: 'selfLink' - name: 'headerAction' @@ -524,6 +529,12 @@ properties: description: | The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + compute/v1/projects/project/global/backendBuckets/myBackendBucket + global/backendBuckets/myBackendBucket + + If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. 
+ If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). resource: 'BackendBucket' imports: 'selfLink' - name: 'routeAction' @@ -702,6 +713,24 @@ properties: * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, + for example: disconnects, reset, read timeout, connection failure, and refused + streams. + * gateway-error: Similar to 5xx, but only applies to response codes + 502, 503 or 504. + * connect-failure: Loadbalancer will retry on failures + connecting to backend services, for example due to connection timeouts. + * retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. + Currently the only retriable error supported is 409. + * refused-stream: Loadbalancer will retry if the backend service resets the stream with a + REFUSED_STREAM error code. This reset type indicates that it is safe to retry. + * cancelled: Loadbalancer will retry if the gRPC status code in the response + header is set to cancelled + * deadline-exceeded: Loadbalancer will retry if the + gRPC status code in the response header is set to deadline-exceeded + * resource-exhausted: Loadbalancer will retry if the gRPC status code in the response + header is set to resource-exhausted + * unavailable: Loadbalancer will retry if + the gRPC status code in the response header is set to unavailable item_type: type: String - name: 'timeout' @@ -1417,6 +1446,24 @@ properties: * 5xx: Loadbalancer will attempt a retry if the backend service responds with any 5xx response code, or if the backend service does not respond at all, + for example: disconnects, reset, read timeout, connection failure, and refused + streams. + * gateway-error: Similar to 5xx, but only applies to response codes + 502, 503 or 504. 
+ * connect-failure: Loadbalancer will retry on failures + connecting to backend services, for example due to connection timeouts. + * retriable-4xx: Loadbalancer will retry for retriable 4xx response codes. + Currently the only retriable error supported is 409. + * refused-stream: Loadbalancer will retry if the backend service resets the stream with a + REFUSED_STREAM error code. This reset type indicates that it is safe to retry. + * cancelled: Loadbalancer will retry if the gRPC status code in the response + header is set to cancelled + * deadline-exceeded: Loadbalancer will retry if the + gRPC status code in the response header is set to deadline-exceeded + * resource-exhausted: Loadbalancer will retry if the gRPC status code in the response + header is set to resource-exhausted + * unavailable: Loadbalancer will retry if the gRPC status code in + the response header is set to unavailable item_type: type: String - name: 'timeout' @@ -2086,6 +2133,12 @@ properties: description: | The full or partial URL to the BackendBucket resource that contains the custom error content. Examples are: + https://www.googleapis.com/compute/v1/projects/project/global/backendBuckets/myBackendBucket + compute/v1/projects/project/global/backendBuckets/myBackendBucket + global/backendBuckets/myBackendBucket + + If errorService is not specified at lower levels like pathMatcher, pathRule and routeRule, an errorService specified at a higher level in the UrlMap will be used. If UrlMap.defaultCustomErrorResponsePolicy contains one or more errorResponseRules[], it must specify errorService. + If load balancer cannot reach the backendBucket, a simple Not Found Error will be returned, with the original response code (or overrideResponseCode if configured). 
resource: 'BackendBucket' imports: 'selfLink' - name: 'test' diff --git a/mmv1/products/datafusion/go_Instance.yaml b/mmv1/products/datafusion/go_Instance.yaml index f76d7bc4950f..a4db1f7fef0f 100644 --- a/mmv1/products/datafusion/go_Instance.yaml +++ b/mmv1/products/datafusion/go_Instance.yaml @@ -68,6 +68,16 @@ examples: prober_test_run: '' test_vars_overrides: 'prober_test_run': '`options = { prober_test_run = "true" }`' + - name: 'data_fusion_instance_psc' + primary_resource_id: 'psc_instance' + vars: + instance_name: 'psc-instance' + network_name: 'datafusion-psc-network' + subnet_name: 'datafusion-psc-subnet' + attachment_name: 'datafusion-psc-attachment' + prober_test_run: '' + test_vars_overrides: + 'prober_test_run': '`options = { prober_test_run = "true" }`' - name: 'data_fusion_instance_cmek' primary_resource_id: 'cmek' vars: @@ -238,7 +248,6 @@ properties: description: | The IP range in CIDR notation to use for the managed Data Fusion instance nodes. This range must not overlap with any other ranges used in the Data Fusion instance network. - required: true immutable: true - name: 'network' type: String @@ -246,8 +255,46 @@ properties: Name of the network in the project with which the tenant project will be peered for executing pipelines. In case of shared VPC where the network resides in another host project the network should specified in the form of projects/{host-project-id}/global/networks/{network} - required: true immutable: true + - name: 'connectionType' + type: Enum + description: | + Optional. Type of connection for establishing private IP connectivity between the Data Fusion customer project VPC and + the corresponding tenant project from a predefined list of available connection modes. + If this field is unspecified for a private instance, VPC peering is used. + immutable: true + enum_values: + - 'VPC_PEERING' + - 'PRIVATE_SERVICE_CONNECT_INTERFACES' + - name: 'privateServiceConnectConfig' + type: NestedObject + description: | + Optional. 
Configuration for Private Service Connect. + This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES. + immutable: true + properties: + - name: 'networkAttachment' + type: String + description: | + Optional. The reference to the network attachment used to establish private connectivity. + It will be of the form projects/{project-id}/regions/{region}/networkAttachments/{network-attachment-id}. + This is required only when using connection type PRIVATE_SERVICE_CONNECT_INTERFACES. + immutable: true + - name: 'unreachableCidrBlock' + type: String + description: | + Optional. Input only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. + The size of this block should be at least /25. This range should not overlap with the primary address range of any subnetwork used by the network attachment. + This range can be used for other purposes in the consumer VPC as long as there is no requirement for CDF to reach destinations using these addresses. + If this value is not provided, the server chooses a non RFC 1918 address range. The format of this field is governed by RFC 4632. + immutable: true + ignore_read: true + - name: 'effectiveUnreachableCidrBlock' + type: String + description: | + Output only. The CIDR block to which the CDF instance can't route traffic to in the consumer project VPC. + The size of this block is /25. The format of this field is governed by RFC 4632. 
+ output: true - name: 'zone' type: String description: | diff --git a/mmv1/templates/terraform/encoders/go/cloudfunctions2_runtime_update_policy.go.tmpl b/mmv1/templates/terraform/encoders/go/cloudfunctions2_runtime_update_policy.go.tmpl new file mode 100644 index 000000000000..db4ef3e273ee --- /dev/null +++ b/mmv1/templates/terraform/encoders/go/cloudfunctions2_runtime_update_policy.go.tmpl @@ -0,0 +1,15 @@ +if obj == nil || obj["buildConfig"] == nil { + return obj, nil +} + +build_config := obj["buildConfig"].(map[string]interface{}) + +// Automatic Update policy is the default from API, unset it if the data +// contains the on-deploy policy. +if build_config["onDeployUpdatePolicy"] != nil { + delete(build_config, "automaticUpdatePolicy") +} + +obj["buildConfig"] = build_config + +return obj, nil diff --git a/mmv1/templates/terraform/examples/go/cloudfunctions2_abiu.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudfunctions2_abiu.tf.tmpl new file mode 100644 index 000000000000..b850585bc62c --- /dev/null +++ b/mmv1/templates/terraform/examples/go/cloudfunctions2_abiu.tf.tmpl @@ -0,0 +1,72 @@ +locals { + project = "{{index $.TestEnvVars "project"}}" # Google Cloud Platform Project ID +} + +resource "google_service_account" "account" { + provider = google-beta + account_id = "{{index $.Vars "service_account"}}" + display_name = "Test Service Account" +} + +resource "google_pubsub_topic" "topic" { + provider = google-beta + name = "{{index $.Vars "topic"}}" +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + name = "${local.project}-{{index $.Vars "bucket_name"}}" # Every bucket name must be globally unique + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + provider = google-beta + name = "function-source.zip" + bucket = google_storage_bucket.bucket.name + source = "{{index $.Vars "zip_path"}}" # Add path to the zipped function source code +} + +resource 
"google_cloudfunctions2_function" "{{$.PrimaryResourceId}}" { + provider = google-beta + name = "{{index $.Vars "function"}}" + location = "europe-west6" + description = "a new function" + + build_config { + runtime = "nodejs16" + entry_point = "helloPubSub" # Set the entry point + environment_variables = { + BUILD_CONFIG_TEST = "build_test" + } + source { + storage_source { + bucket = google_storage_bucket.bucket.name + object = google_storage_bucket_object.object.name + } + } + automatic_update_policy {} + } + + service_config { + max_instance_count = 3 + min_instance_count = 1 + available_memory = "4Gi" + timeout_seconds = 60 + max_instance_request_concurrency = 80 + available_cpu = "4" + environment_variables = { + SERVICE_CONFIG_TEST = "config_test" + } + ingress_settings = "ALLOW_INTERNAL_ONLY" + all_traffic_on_latest_revision = true + service_account_email = google_service_account.account.email + } + + event_trigger { + trigger_region = "us-central1" + event_type = "google.cloud.pubsub.topic.v1.messagePublished" + pubsub_topic = google_pubsub_topic.topic.id + retry_policy = "RETRY_POLICY_RETRY" + } +} diff --git a/mmv1/templates/terraform/examples/go/cloudfunctions2_abiu_on_deploy.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudfunctions2_abiu_on_deploy.tf.tmpl new file mode 100644 index 000000000000..f90f726cfb65 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/cloudfunctions2_abiu_on_deploy.tf.tmpl @@ -0,0 +1,72 @@ +locals { + project = "{{index $.TestEnvVars "project"}}" # Google Cloud Platform Project ID +} + +resource "google_service_account" "account" { + provider = google-beta + account_id = "{{index $.Vars "service_account"}}" + display_name = "Test Service Account" +} + +resource "google_pubsub_topic" "topic" { + provider = google-beta + name = "{{index $.Vars "topic"}}" +} + +resource "google_storage_bucket" "bucket" { + provider = google-beta + name = "${local.project}-{{index $.Vars "bucket_name"}}" # Every bucket name must be 
globally unique + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "object" { + provider = google-beta + name = "function-source.zip" + bucket = google_storage_bucket.bucket.name + source = "{{index $.Vars "zip_path"}}" # Add path to the zipped function source code +} + +resource "google_cloudfunctions2_function" "{{$.PrimaryResourceId}}" { + provider = google-beta + name = "{{index $.Vars "function"}}" + location = "europe-west6" + description = "a new function" + + build_config { + runtime = "nodejs16" + entry_point = "helloPubSub" # Set the entry point + environment_variables = { + BUILD_CONFIG_TEST = "build_test" + } + source { + storage_source { + bucket = google_storage_bucket.bucket.name + object = google_storage_bucket_object.object.name + } + } + on_deploy_update_policy {} + } + + service_config { + max_instance_count = 3 + min_instance_count = 1 + available_memory = "4Gi" + timeout_seconds = 60 + max_instance_request_concurrency = 80 + available_cpu = "4" + environment_variables = { + SERVICE_CONFIG_TEST = "config_test" + } + ingress_settings = "ALLOW_INTERNAL_ONLY" + all_traffic_on_latest_revision = true + service_account_email = google_service_account.account.email + } + + event_trigger { + trigger_region = "us-central1" + event_type = "google.cloud.pubsub.topic.v1.messagePublished" + pubsub_topic = google_pubsub_topic.topic.id + retry_policy = "RETRY_POLICY_RETRY" + } +} diff --git a/mmv1/templates/terraform/examples/go/data_fusion_instance_psc.tf.tmpl b/mmv1/templates/terraform/examples/go/data_fusion_instance_psc.tf.tmpl new file mode 100644 index 000000000000..222d89b2f0f9 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/data_fusion_instance_psc.tf.tmpl @@ -0,0 +1,39 @@ +resource "google_data_fusion_instance" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "instance_name"}}" + region = "us-central1" + type = "BASIC" + private_instance = true + + network_config { + connection_type = 
"PRIVATE_SERVICE_CONNECT_INTERFACES" + private_service_connect_config { + network_attachment = google_compute_network_attachment.psc.id + unreachable_cidr_block = "192.168.0.0/25" + } + } + + {{index $.Vars "prober_test_run"}} +} + +resource "google_compute_network" "psc" { + name = "{{index $.Vars "network_name"}}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "psc" { + name = "{{index $.Vars "subnet_name"}}" + region = "us-central1" + + network = google_compute_network.psc.id + ip_cidr_range = "10.0.0.0/16" +} + +resource "google_compute_network_attachment" "psc" { + name = "{{index $.Vars "attachment_name"}}" + region = "us-central1" + connection_preference = "ACCEPT_AUTOMATIC" + + subnetworks = [ + google_compute_subnetwork.psc.self_link + ] +} diff --git a/mmv1/templates/terraform/examples/go/healthcare_dataset_cmek.tf.tmpl b/mmv1/templates/terraform/examples/go/healthcare_dataset_cmek.tf.tmpl new file mode 100644 index 000000000000..a7f2a6244c99 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/healthcare_dataset_cmek.tf.tmpl @@ -0,0 +1,36 @@ +data "google_project" "project" {} + +resource "google_healthcare_dataset" "default" { + name = "{{index $.Vars "dataset_name"}}" + location = "us-central1" + time_zone = "UTC" + + encryption_spec { + kms_key_name = google_kms_crypto_key.crypto_key.id + } + + depends_on = [ + google_kms_crypto_key_iam_binding.healthcare_cmek_keyuser + ] +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "{{index $.Vars "key_name"}}" + key_ring = google_kms_key_ring.key_ring.id + purpose = "ENCRYPT_DECRYPT" +} + +resource "google_kms_key_ring" "key_ring" { + name = "{{index $.Vars "keyring_name"}}" + location = "us-central1" +} + +resource "google_kms_crypto_key_iam_binding" "healthcare_cmek_keyuser" { + crypto_key_id = google_kms_crypto_key.crypto_key.id + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + members = [ + 
"serviceAccount:service-${data.google_project.project.number}@gcp-sa-healthcare.iam.gserviceaccount.com", + ] +} + + diff --git a/mmv1/templates/terraform/examples/go/region_network_endpoint_group_appengine.tf.tmpl b/mmv1/templates/terraform/examples/go/region_network_endpoint_group_appengine.tf.tmpl index fd1feccc8d47..457a35337e4a 100644 --- a/mmv1/templates/terraform/examples/go/region_network_endpoint_group_appengine.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_network_endpoint_group_appengine.tf.tmpl @@ -67,4 +67,4 @@ resource "google_storage_bucket_object" "{{$.PrimaryResourceId}}" { name = "hello-world.zip" bucket = google_storage_bucket.{{$.PrimaryResourceId}}.name source = "./test-fixtures/hello-world.zip" -} +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/region_network_endpoint_group_appengine_empty.tf.tmpl b/mmv1/templates/terraform/examples/go/region_network_endpoint_group_appengine_empty.tf.tmpl index f05b51e45174..0a221538ac12 100644 --- a/mmv1/templates/terraform/examples/go/region_network_endpoint_group_appengine_empty.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_network_endpoint_group_appengine_empty.tf.tmpl @@ -5,4 +5,4 @@ resource "google_compute_region_network_endpoint_group" "{{$.PrimaryResourceId}} region = "us-central1" app_engine { } -} +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/region_network_endpoint_group_cloudrun.tf.tmpl b/mmv1/templates/terraform/examples/go/region_network_endpoint_group_cloudrun.tf.tmpl index 14af0ae70d32..e6c5c2b58715 100644 --- a/mmv1/templates/terraform/examples/go/region_network_endpoint_group_cloudrun.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_network_endpoint_group_cloudrun.tf.tmpl @@ -24,4 +24,4 @@ resource "google_cloud_run_service" "{{$.PrimaryResourceId}}" { percent = 100 latest_revision = true } -} +} \ No newline at end of file diff --git 
a/mmv1/templates/terraform/examples/region_network_endpoint_group_appengine.tf.erb b/mmv1/templates/terraform/examples/region_network_endpoint_group_appengine.tf.erb index 848721eea4a5..2dea773b58e7 100644 --- a/mmv1/templates/terraform/examples/region_network_endpoint_group_appengine.tf.erb +++ b/mmv1/templates/terraform/examples/region_network_endpoint_group_appengine.tf.erb @@ -67,4 +67,4 @@ resource "google_storage_bucket_object" "<%= ctx[:primary_resource_id] %>" { name = "hello-world.zip" bucket = google_storage_bucket.<%= ctx[:primary_resource_id] %>.name source = "./test-fixtures/hello-world.zip" -} +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/region_network_endpoint_group_appengine_empty.tf.erb b/mmv1/templates/terraform/examples/region_network_endpoint_group_appengine_empty.tf.erb index fe794ed86e7b..3bcdffc62c20 100644 --- a/mmv1/templates/terraform/examples/region_network_endpoint_group_appengine_empty.tf.erb +++ b/mmv1/templates/terraform/examples/region_network_endpoint_group_appengine_empty.tf.erb @@ -5,4 +5,4 @@ resource "google_compute_region_network_endpoint_group" "<%= ctx[:primary_resour region = "us-central1" app_engine { } -} +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/region_network_endpoint_group_cloudrun.tf.erb b/mmv1/templates/terraform/examples/region_network_endpoint_group_cloudrun.tf.erb index 7dd4bad620a7..9198102b398e 100644 --- a/mmv1/templates/terraform/examples/region_network_endpoint_group_cloudrun.tf.erb +++ b/mmv1/templates/terraform/examples/region_network_endpoint_group_cloudrun.tf.erb @@ -24,4 +24,4 @@ resource "google_cloud_run_service" "<%= ctx[:primary_resource_id] %>" { percent = 100 latest_revision = true } -} +} \ No newline at end of file diff --git a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl index 92d35d242d26..d7cb4c524fee 100644 --- 
a/mmv1/templates/terraform/property_documentation.html.markdown.tmpl +++ b/mmv1/templates/terraform/property_documentation.html.markdown.tmpl @@ -27,7 +27,7 @@ Default value is `{{ $.ItemType.DefaultValue }}`. {{- end }} Each value may be one of: {{ $.ItemType.EnumValuesToString "`" false }}. - {{- else if and ($.IsA "Enum") (and (not $.Output) (not (and $.ItemType $.ItemType.SkipDocsValues)))}} + {{- else if and ($.IsA "Enum") (and (not $.Output) (not $.SkipDocsValues))}} {{- if $.DefaultValue }} Default value is `{{ $.DefaultValue }}`. {{- end }} diff --git a/mmv1/templates/terraform/resource.html.markdown.erb b/mmv1/templates/terraform/resource.html.markdown.erb index cefd14b6a765..e13e38760a62 100644 --- a/mmv1/templates/terraform/resource.html.markdown.erb +++ b/mmv1/templates/terraform/resource.html.markdown.erb @@ -73,7 +73,7 @@ To get more information about <%= object.name -%>, see: <% end # object...api.nil? -%> <% if !object.references.guides.empty? -%> * How-to Guides -<% object.references.guides.each do |title, link| -%> +<% object.references.guides.sort.each do |title, link| -%> * [<%= title -%>](<%= link -%>) <% end # object...guides.each -%> <% end # object...guides.empty? 
-%> diff --git a/mmv1/templates/terraform/resource.html.markdown.tmpl b/mmv1/templates/terraform/resource.html.markdown.tmpl index 5ebe9a3680d0..68c0fd3ad6ec 100644 --- a/mmv1/templates/terraform/resource.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource.html.markdown.tmpl @@ -53,12 +53,13 @@ To get more information about {{$.Name}}, see: * [{{$title}}]({{$link}}) {{- end }} {{- end }} + {{- if gt (len $.Examples) 0}} {{ "" }} + {{- end }} {{- else }} {{ "" }} {{- end }} {{- if $.Docs.Warning}} - ~> **Warning:** {{$.Docs.Warning}} {{- end }} {{- if $.Docs.Note}} @@ -106,8 +107,9 @@ The following arguments are supported: {{- end}} {{- end }} - - - - -{{ range $p := $.RootProperties }} +{{ "" }} +{{ "" }} +{{- range $p := $.RootProperties }} {{- if and (not $p.Required) (not $p.Output) }} {{- trimTemplate "property_documentation.html.markdown.tmpl" $p -}} {{- end }} @@ -115,18 +117,23 @@ The following arguments are supported: {{- if or (contains $.BaseUrl "{{project}}") (contains $.CreateUrl "{{project}}")}} * `project` - (Optional) The ID of the project in which the resource belongs. If it is not provided, the provider project is used. +{{ "" }} {{- end }} -{{ range $f := $.VirtualFields }} +{{- range $f := $.VirtualFields }} * `{{$f.Name}}` - (Optional) {{$f.Description}} {{- end }} -{{ if $.Docs.OptionalProperties }} +{{- if not $.Docs.OptionalProperties }} +{{ "" }} +{{- end }} +{{- if $.Docs.OptionalProperties }} {{ $.Docs.OptionalProperties }} {{- end }} {{- range $p := $.AllUserProperties }} {{- if and (not $p.Required) (not $p.Output) }} {{- trimTemplate "nested_property_documentation.html.markdown.tmpl" $p -}} -{{- end}} + {{- end}} {{- end }} +{{- "" }} ## Attributes Reference In addition to the arguments listed above, the following computed attributes are exported: @@ -166,7 +173,7 @@ This resource does not support import. 
{{$.Name}} can be imported using any of these accepted formats: {{ range $idFormat := $.ImportIdFormatsFromResource }} -* `{{$idFormat}}` +* `{{replaceAll $idFormat "%" "" }}` {{- end }} @@ -174,7 +181,7 @@ In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashico ```tf import { - id = "{{ index $.ImportIdFormatsFromResource 0 }}" + id = "{{replaceAll (index $.ImportIdFormatsFromResource 0) "%" "" }}" to = {{$.TerraformName}}.default } ``` @@ -183,7 +190,7 @@ When using the [`terraform import` command](https://developer.hashicorp.com/terr ``` {{- range $idFormat := $.ImportIdFormatsFromResource }} -$ terraform import {{$.TerraformName}}.default {{$idFormat}} +$ terraform import {{$.TerraformName}}.default {{replaceAll $idFormat "%" "" }} {{- end }} ``` {{ end }} From 72a564e567828ccf959d90d26927dc49c696a280 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Wed, 26 Jun 2024 16:44:01 +0100 Subject: [PATCH 216/356] Make sweeper for google_vmwareengine_cluster handwritten, add iteration over loop of regions/locations (#11003) --- mmv1/products/vmwareengine/Cluster.yaml | 2 + .../resource_vmwareengine_cluster_sweeper.go | 174 ++++++++++++++++++ 2 files changed, 176 insertions(+) create mode 100644 mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_sweeper.go diff --git a/mmv1/products/vmwareengine/Cluster.yaml b/mmv1/products/vmwareengine/Cluster.yaml index e2fa5c8053c3..6112c8298d0d 100644 --- a/mmv1/products/vmwareengine/Cluster.yaml +++ b/mmv1/products/vmwareengine/Cluster.yaml @@ -49,6 +49,8 @@ async: !ruby/object:Api::OpAsync import_format: ["{{%parent}}/clusters/{{name}}"] id_format: "{{parent}}/clusters/{{name}}" autogen_async: true +# There is a handwritten sweeper that provides a list of locations to sweep +skip_sweeper: true examples: - !ruby/object:Provider::Terraform::Examples name: "vmware_engine_cluster_basic" diff --git 
a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_sweeper.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_sweeper.go new file mode 100644 index 000000000000..71afd7f546f9 --- /dev/null +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_cluster_sweeper.go @@ -0,0 +1,174 @@ +package vmwareengine + +import ( + "context" + "fmt" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/sweeper" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func init() { + sweeper.AddTestSweepers("VmwareengineCluster", testSweepVmwareengineCluster) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepVmwareengineCluster(region string) error { + resourceName := "VmwareengineCluster" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sweeper.SharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := envvar.GetTestBillingAccountFromEnv(t) + + // List of location values includes: + // * zones used for this resource type's acc tests in the past + // * the 'region' passed to the sweeper + locations := []string{region, "us-central1-a", "us-central1-b", "southamerica-west1-a", "southamerica-west1-b", "me-west1-a", "me-west1-b"} + log.Printf("[INFO][SWEEPER_LOG] Sweeping will include these locations: %v.", locations) + for _, location := range locations { + log.Printf("[INFO][SWEEPER_LOG] Beginning the process of sweeping location '%s'.", 
location) + + // Setup variables to replace in list template + d := &tpgresource.ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": location, + "location": location, + "zone": location, + "billing_account": billingId, + }, + } + + log.Printf("[INFO][SWEEPER_LOG] looking for parent resources in location '%s'.", location) + privateCloudNames, err := listPrivateCloudsInLocation(d, config) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error finding parental resources in location %s: %s", location, err) + continue + } + for _, parent := range privateCloudNames { + + // `parent` will be string of form projects/my-project/locations/us-central1-a/privateClouds/my-cloud + listUrl := fmt.Sprintf("https://vmwareengine.googleapis.com/v1/projects/%s/clusters", parent) + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + continue + } + + resourceList, ok := res["clusters"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + continue + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + continue + } + + name := tpgresource.GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !sweeper.IsSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://vmwareengine.googleapis.com/v1/{{parent}}/clusters/{{name}}" + deleteUrl, err := tpgresource.ReplaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + continue + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "DELETE", + Project: config.Project, + RawURL: deleteUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + } + } + return nil +} + +func listPrivateCloudsInLocation(d *tpgresource.ResourceDataMock, config *transport_tpg.Config) ([]string, error) { + listTemplate := strings.Split("https://vmwareengine.googleapis.com/v1/projects/{{project}}/locations/{{location}}/privateClouds", "?")[0] + listUrl, err := tpgresource.ReplaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil, err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: config.Project, + RawURL: listUrl, + UserAgent: config.UserAgent, + }) + if err != nil { + 
log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil, err + } + + resourceList, ok := res["privateClouds"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil, fmt.Errorf("nothing found in response") + } + + rl := resourceList.([]interface{}) + privateCloudNames := []string{} + for _, r := range rl { + resource := r.(map[string]interface{}) + if name, ok := resource["name"]; ok { + privateCloudNames = append(privateCloudNames, name.(string)) + } + + } + return privateCloudNames, nil +} From 32ed20cf417ff8c1ee86a713b03d39147d9c2da2 Mon Sep 17 00:00:00 2001 From: liaoaohaha Date: Wed, 26 Jun 2024 08:48:42 -0700 Subject: [PATCH 217/356] add sqlAssertion rule (#11043) Co-authored-by: Sarah French <15078782+SarahFrench@users.noreply.github.com> --- mmv1/products/dataplex/Datascan.yaml | 10 ++++++++++ .../examples/dataplex_datascan_full_quality.tf.erb | 7 +++++++ 2 files changed, 17 insertions(+) diff --git a/mmv1/products/dataplex/Datascan.yaml b/mmv1/products/dataplex/Datascan.yaml index c3900509e163..4e2a40eac23e 100644 --- a/mmv1/products/dataplex/Datascan.yaml +++ b/mmv1/products/dataplex/Datascan.yaml @@ -428,6 +428,16 @@ properties: required: true description: | The SQL expression. + - !ruby/object:Api::Type::NestedObject + name: 'sqlAssertion' + description: | + Table rule which evaluates whether any row matches invalid state. + properties: + - !ruby/object:Api::Type::String + name: 'sqlStatement' + required: true + description: | + The SQL Statement. 
- !ruby/object:Api::Type::NestedObject name: 'dataProfileSpec' allow_empty_object: true diff --git a/mmv1/templates/terraform/examples/dataplex_datascan_full_quality.tf.erb b/mmv1/templates/terraform/examples/dataplex_datascan_full_quality.tf.erb index a9088a138411..55a1d3c8f4e6 100644 --- a/mmv1/templates/terraform/examples/dataplex_datascan_full_quality.tf.erb +++ b/mmv1/templates/terraform/examples/dataplex_datascan_full_quality.tf.erb @@ -94,6 +94,13 @@ resource "google_dataplex_datascan" "<%= ctx[:primary_resource_id] %>" { sql_expression = "COUNT(*) > 0" } } + + rules { + dimension = "VALIDITY" + sql_assertion { + sql_statement = "select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null" + } + } } From 0879d50c2bcb55964c2de4b93bee607a6f9cc18c Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Wed, 26 Jun 2024 09:12:22 -0700 Subject: [PATCH 218/356] Fix Go rewrite diffs for ForceNew, ExactlyOneOf, AtLeastOneOf, and ConflictsWith (#11047) --- mmv1/api/type.go | 87 +++++++++++++++++-- mmv1/provider/terraform.go | 33 ------- .../terraform/schema_property.go.tmpl | 8 +- 3 files changed, 82 insertions(+), 46 deletions(-) diff --git a/mmv1/api/type.go b/mmv1/api/type.go index 8059e57beead..b3c00d744c7a 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -293,12 +293,12 @@ func (t *Type) SetDefault(r *Resource) { switch { case t.IsA("Array"): t.ItemType.ParentName = t.Name - t.ItemType.ParentMetadata = t.ParentMetadata + t.ItemType.ParentMetadata = t t.ItemType.SetDefault(r) case t.IsA("Map"): t.KeyExpander = "tpgresource.ExpandString" t.ValueType.ParentName = t.Name - t.ValueType.ParentMetadata = t.ParentMetadata + t.ValueType.ParentMetadata = t t.ValueType.SetDefault(r) case t.IsA("NestedObject"): if t.Name == "" { @@ -443,7 +443,11 @@ func (t *Type) GetPrefix() string { t.Prefix = fmt.Sprintf("%s%s", nestedPrefix, t.ResourceMetadata.ResourceName()) } else { - t.Prefix = fmt.Sprintf("%s%s", t.ParentMetadata.GetPrefix(), 
t.ParentMetadata.TitlelizeProperty()) + if t.ParentMetadata != nil && (t.ParentMetadata.IsA("Array") || t.ParentMetadata.IsA("Map")) { + t.Prefix = t.ParentMetadata.GetPrefix() + } else { + t.Prefix = fmt.Sprintf("%s%s", t.ParentMetadata.GetPrefix(), t.ParentMetadata.TitlelizeProperty()) + } } } return t.Prefix @@ -562,7 +566,7 @@ func (t Type) AtLeastOneOfList() []string { // Returns list of properties that needs exactly one of their fields set. // func (t *Type) exactly_one_of_list() { func (t Type) ExactlyOneOfList() []string { - if t.ResourceMetadata == nil || t.Parent() != nil { + if t.ResourceMetadata == nil { return []string{} } @@ -1326,13 +1330,78 @@ func (t *Type) GoLiteral(value interface{}) string { // def force_new?(property, resource) func (t *Type) IsForceNew() bool { + if t.IsA("KeyValueLabels") && t.ResourceMetadata.RootLabels() { + return false + } + + if t.IsA("KeyValueTerraformLabels") && !t.ResourceMetadata.Updatable() && !t.ResourceMetadata.RootLabels() { + return true + } + parent := t.Parent() - return (((!t.Output || t.IsA("KeyValueEffectiveLabels")) && + return (!t.Output || t.IsA("KeyValueEffectiveLabels")) && (t.Immutable || - (t.ResourceMetadata.Immutable && t.UpdateUrl == "" && !t.Immutable && + (t.ResourceMetadata.Immutable && t.UpdateUrl == "" && (parent == nil || (parent.IsForceNew() && - !(parent.FlattenObject && t.IsA("KeyValueLabels"))))))) || - (t.IsA("KeyValueTerraformLabels") && - t.ResourceMetadata.Updatable() && !t.ResourceMetadata.RootLabels())) + !(parent.FlattenObject && t.IsA("KeyValueLabels")))))) +} + +// Returns an updated path for a given Terraform field path (e.g. +// 'a_field', 'parent_field.0.child_name'). 
Returns nil if the property +// is not included in the resource's properties and removes keys that have +// been flattened +// FYI: Fields that have been renamed should use the new name, however, flattened +// fields still need to be included, ie: +// flattenedField > newParent > renameMe should be passed to this function as +// flattened_field.0.new_parent.0.im_renamed +// TODO(emilymye): Change format of input for +// exactly_one_of/at_least_one_of/etc to use camelcase, MM properities and +// convert to snake in this method +// def get_property_schema_path(schema_path, resource) +func (t *Type) GetPropertySchemaPath(schemaPath string) string { + nestedProps := t.ResourceMetadata.UserProperites() + + var pathTkns []string + for _, pname := range strings.Split(schemaPath, ".0.") { + camelPname := google.Camelize(pname, "lower") + index := slices.IndexFunc(nestedProps, func(p *Type) bool { + return p.Name == camelPname + }) + + // if we couldn't find it, see if it was renamed at the top level + if index == -1 { + index = slices.IndexFunc(nestedProps, func(p *Type) bool { + return p.Name == schemaPath + }) + } + + if index == -1 { + continue + } + + prop := nestedProps[index] + + nestedProps = prop.NestedProperties() + if !prop.FlattenObject { + pathTkns = append(pathTkns, google.Underscore(pname)) + } + } + + if len(pathTkns) == 0 || pathTkns[len(pathTkns)-1] == "" { + return "" + } + + return strings.Join(pathTkns[:], ".0.") +} + +func (t Type) GetPropertySchemaPathList(propertyList []string) []string { + var list []string + for _, path := range propertyList { + path = t.GetPropertySchemaPath(path) + if path != "" { + list = append(list, path) + } + } + return list } diff --git a/mmv1/provider/terraform.go b/mmv1/provider/terraform.go index ff16e3614ab8..e1dc95237850 100644 --- a/mmv1/provider/terraform.go +++ b/mmv1/provider/terraform.go @@ -858,39 +858,6 @@ func (t Terraform) GetMmv1ServicesInVersion(products []*api.Product) []string { // // end // -// # Returns 
an updated path for a given Terraform field path (e.g. -// # 'a_field', 'parent_field.0.child_name'). Returns nil if the property -// # is not included in the resource's properties and removes keys that have -// # been flattened -// # FYI: Fields that have been renamed should use the new name, however, flattened -// # fields still need to be included, ie: -// # flattenedField > newParent > renameMe should be passed to this function as -// # flattened_field.0.new_parent.0.im_renamed -// # TODO(emilymye): Change format of input for -// # exactly_one_of/at_least_one_of/etc to use camelcase, MM properities and -// # convert to snake in this method -// def get_property_schema_path(schema_path, resource) -// -// nested_props = resource.properties -// prop = nil -// path_tkns = schema_path.split('.0.').map do |pname| -// camel_pname = pname.camelize(:lower) -// prop = nested_props.find { |p| p.name == camel_pname } -// # if we couldn't find it, see if it was renamed at the top level -// prop = nested_props.find { |p| p.name == schema_path } if prop.nil? -// return nil if prop.nil? -// -// nested_props = prop.nested_properties || [] -// prop.flatten_object ? nil : pname.underscore -// end -// if path_tkns.empty? || path_tkns[-1].nil? -// nil -// else -// path_tkns.compact.join('.0.') -// end -// -// end -// // # Capitalize the first letter of a property name. // # E.g. "creationTimestamp" becomes "CreationTimestamp". 
// def titlelize_property(property) diff --git a/mmv1/templates/terraform/schema_property.go.tmpl b/mmv1/templates/terraform/schema_property.go.tmpl index 44f21449b2c7..202e05563b2f 100644 --- a/mmv1/templates/terraform/schema_property.go.tmpl +++ b/mmv1/templates/terraform/schema_property.go.tmpl @@ -166,16 +166,16 @@ Default value: {{ .ItemType.DefaultValue -}} Default: {{ .GoLiteral .DefaultValue -}}, {{ end -}} {{ if or .Conflicting .Conflicts -}} - ConflictsWith: {{ .GoLiteral .Conflicting -}}, + ConflictsWith: {{ .GoLiteral (.GetPropertySchemaPathList .Conflicting) -}}, {{ end -}} {{ if or .AtLeastOneOfList .AtLeastOneOf -}} - AtLeastOneOf: {{ .GoLiteral .AtLeastOneOfList -}}, + AtLeastOneOf: {{ .GoLiteral (.GetPropertySchemaPathList .AtLeastOneOfList) -}}, {{ end -}} {{ if or .ExactlyOneOfList .ExactlyOneOf -}} - ExactlyOneOf: {{ .GoLiteral .ExactlyOneOfList -}}, + ExactlyOneOf: {{ .GoLiteral (.GetPropertySchemaPathList .ExactlyOneOfList) -}}, {{ end -}} {{ if or .RequiredWithList .RequiredWith -}} - RequiredWith: {{ .GoLiteral .RequiredWithList -}}, + RequiredWith: {{ .GoLiteral (.GetPropertySchemaPathList .RequiredWithList) -}}, {{ end -}} }, {{- end -}} From b53e61e2c3277e0c758b5ae0807631c14a73520d Mon Sep 17 00:00:00 2001 From: Sneha Prasad <32434989+snpd25@users.noreply.github.com> Date: Wed, 26 Jun 2024 21:50:57 +0530 Subject: [PATCH 219/356] Remove securityposture deployment basic test (#11019) Co-authored-by: Sneha Prasad --- .../securityposture/PostureDeployment.yaml | 11 +------ ...ityposture_posture_deployment_basic.tf.erb | 32 ------------------- 2 files changed, 1 insertion(+), 42 deletions(-) delete mode 100644 mmv1/templates/terraform/examples/securityposture_posture_deployment_basic.tf.erb diff --git a/mmv1/products/securityposture/PostureDeployment.yaml b/mmv1/products/securityposture/PostureDeployment.yaml index 0df1f718a997..327d55cf5cc0 100644 --- a/mmv1/products/securityposture/PostureDeployment.yaml +++ 
b/mmv1/products/securityposture/PostureDeployment.yaml @@ -32,16 +32,7 @@ autogen_async: true async: !ruby/object:Api::OpAsync operation: !ruby/object:Api::OpAsync::Operation base_url: '{{op_id}}' -examples: - - !ruby/object:Provider::Terraform::Examples - name: 'securityposture_posture_deployment_basic' - primary_resource_id: 'postureDeployment' - vars: - posture_id: "posture_1" - deployment_id: "posture_deployment_1" - test_env_vars: - org_id: :ORG_ID - project_number: :PROJECT_NUMBER + parameters: - !ruby/object:Api::Type::String name: parent diff --git a/mmv1/templates/terraform/examples/securityposture_posture_deployment_basic.tf.erb b/mmv1/templates/terraform/examples/securityposture_posture_deployment_basic.tf.erb deleted file mode 100644 index af6d2ea842cf..000000000000 --- a/mmv1/templates/terraform/examples/securityposture_posture_deployment_basic.tf.erb +++ /dev/null @@ -1,32 +0,0 @@ -resource "google_securityposture_posture" "posture_1" { - posture_id = "<%= ctx[:vars]['posture_id'] %>" - parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" - location = "global" - state = "ACTIVE" - description = "a new posture" - policy_sets { - policy_set_id = "org_policy_set" - description = "set of org policies" - policies { - policy_id = "policy_1" - constraint { - org_policy_constraint { - canned_constraint_id = "storage.uniformBucketLevelAccess" - policy_rules { - enforce = true - } - } - } - } - } -} - -resource "google_securityposture_posture_deployment" "<%= ctx[:primary_resource_id] %>" { - posture_deployment_id = "<%= ctx[:vars]['deployment_id'] %>" - parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" - location = "global" - description = "a new posture deployment" - target_resource = "projects/<%= ctx[:test_env_vars]['project_number'] %>" - posture_id = google_securityposture_posture.posture_1.name - posture_revision_id = google_securityposture_posture.posture_1.revision_id -} From 01768679676881807d028849a0093244f2da7d40 Mon Sep 17 
00:00:00 2001 From: "Stephen Lewis (Burrows)" Date: Wed, 26 Jun 2024 09:46:36 -0700 Subject: [PATCH 220/356] Update membership.go (#11056) --- .ci/magician/github/membership.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.ci/magician/github/membership.go b/.ci/magician/github/membership.go index 3ccf61b38c59..d320a8c6a1ca 100644 --- a/.ci/magician/github/membership.go +++ b/.ci/magician/github/membership.go @@ -82,6 +82,11 @@ var ( startDate: newDate(2024, 5, 22, pdtLoc), endDate: newDate(2024, 5, 28, pdtLoc), }, + { + id: "melinath", + startDate: newDate(2024, 6, 26, pdtLoc), + endDate: newDate(2024, 7, 22, pdtLoc), + }, } ) From 0d579627d849d4035cd74328db72e1c3b4dd956e Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Wed, 26 Jun 2024 11:53:33 -0500 Subject: [PATCH 221/356] go rewrite - compute whitespace and test import diffs (#11054) --- mmv1/api/resource.go | 4 +-- ...r_nat_validate_action_active_range.go.tmpl | 1 - ...er_nat_validate_action_active_range.go.erb | 1 - mmv1/templates/terraform/nested_query.go.tmpl | 2 +- .../compute_snapshot_precreate_url.go.erb | 1 - .../go/compute_snapshot_precreate_url.go.tmpl | 1 - mmv1/templates/terraform/resource.go.tmpl | 25 +++++++++++-------- .../compute_service_attachment.go.erb | 1 - .../go/compute_service_attachment.go.tmpl | 1 - 9 files changed, 17 insertions(+), 20 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 07659cb582e6..4f51075dfbf8 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -1023,13 +1023,13 @@ func (r Resource) IgnoreReadPropertiesToString(e resource.Examples) string { } } for _, tp := range e.IgnoreReadExtra { - props = append(props, fmt.Sprintf("\"%s\"", google.Underscore(tp))) + props = append(props, fmt.Sprintf("\"%s\"", tp)) } for _, tp := range r.IgnoreReadLabelsFields(r.PropertiesWithExcluded()) { props = append(props, fmt.Sprintf("\"%s\"", google.Underscore(tp))) } for _, tp := range ignoreReadFields(r.AllUserProperties()) { - props = 
append(props, fmt.Sprintf("\"%s\"", google.Underscore(tp))) + props = append(props, fmt.Sprintf("\"%s\"", tp)) } slices.Sort(props) diff --git a/mmv1/templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl b/mmv1/templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl index b84559fd5420..0844522c0620 100644 --- a/mmv1/templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl +++ b/mmv1/templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl @@ -1,5 +1,4 @@ {{- if ne $.TargetVersionName "ga" }} -// validates if the field action.source_nat_active_ranges is filled when the type is PRIVATE. natType := d.Get("type").(string) if natType == "PRIVATE" { rules := d.Get("rules").(*schema.Set) diff --git a/mmv1/templates/terraform/constants/router_nat_validate_action_active_range.go.erb b/mmv1/templates/terraform/constants/router_nat_validate_action_active_range.go.erb index 635e90f85472..0437b25d316f 100644 --- a/mmv1/templates/terraform/constants/router_nat_validate_action_active_range.go.erb +++ b/mmv1/templates/terraform/constants/router_nat_validate_action_active_range.go.erb @@ -1,5 +1,4 @@ <% unless version == 'ga' -%> -// validates if the field action.source_nat_active_ranges is filled when the type is PRIVATE. natType := d.Get("type").(string) if natType == "PRIVATE" { rules := d.Get("rules").(*schema.Set) diff --git a/mmv1/templates/terraform/nested_query.go.tmpl b/mmv1/templates/terraform/nested_query.go.tmpl index 73af9ee3db31..b1525ad40cef 100644 --- a/mmv1/templates/terraform/nested_query.go.tmpl +++ b/mmv1/templates/terraform/nested_query.go.tmpl @@ -218,7 +218,7 @@ func resource{{ $.ResourceName }}PatchDeleteEncoder(d *schema.ResourceData, meta // ListForPatch handles making API request to get parent resource and // extracting list of objects. 
-{{/* This function is similar to flattenNested...() but +{{- /* This function is similar to flattenNested...() but # 1) does an API request to read the parent resource from API (flatten takes in list from top-level Read() method, whereas this method is called in Create/Update/Delete) diff --git a/mmv1/templates/terraform/pre_create/compute_snapshot_precreate_url.go.erb b/mmv1/templates/terraform/pre_create/compute_snapshot_precreate_url.go.erb index 32227c290959..58a6aba3800c 100644 --- a/mmv1/templates/terraform/pre_create/compute_snapshot_precreate_url.go.erb +++ b/mmv1/templates/terraform/pre_create/compute_snapshot_precreate_url.go.erb @@ -1,3 +1,2 @@ url = regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sourceDiskProp.(string)) - diff --git a/mmv1/templates/terraform/pre_create/go/compute_snapshot_precreate_url.go.tmpl b/mmv1/templates/terraform/pre_create/go/compute_snapshot_precreate_url.go.tmpl index 32227c290959..58a6aba3800c 100644 --- a/mmv1/templates/terraform/pre_create/go/compute_snapshot_precreate_url.go.tmpl +++ b/mmv1/templates/terraform/pre_create/go/compute_snapshot_precreate_url.go.tmpl @@ -1,3 +1,2 @@ url = regexp.MustCompile("PRE_CREATE_REPLACE_ME").ReplaceAllLiteralString(url, sourceDiskProp.(string)) - diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index f1fce957a967..8599c80247d1 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -240,7 +240,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ if err != nil { return err } -{{if $.UpdateMask -}} +{{- if $.UpdateMask -}} url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "{{ join $.NestedQuery.Keys "." 
-}}"}) if err != nil { return err @@ -568,9 +568,9 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) {{- end}} }) if err != nil { -{{if $.ReadErrorTransform -}} +{{- if $.ReadErrorTransform -}} return transport_tpg.HandleNotFoundError({{ $.ReadErrorTransform }}(err), d, fmt.Sprintf("{{ $.ResourceName }} %q", d.Id())) -{{ else -}} +{{- else }} return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("{{ $.ResourceName }} %q", d.Id())) {{- end}} } @@ -603,7 +603,8 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) return nil } {{ end}} -{{- if $.VirtualFields -}} +{{- if $.VirtualFields }} + // Explicitly set virtual fields to default values if unset {{- range $prop := $.VirtualFields }} {{ if not (eq $prop.DefaultValue nil) -}} @@ -615,9 +616,7 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) {{- end}} {{- end}} {{- end}} -{{ if $.HasProject }} - - +{{- if $.HasProject }} if err := d.Set("project", project); err != nil { return fmt.Errorf("Error reading {{ $.Name -}}: %s", err) } @@ -754,7 +753,7 @@ func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{ {{- if $.UpdateMask }} {{ template "UpdateMask" $ -}} {{ end}} -{{ if $.CustomCode.PreUpdate -}} +{{- if $.CustomCode.PreUpdate -}}{{""}} {{ $.CustomTemplate $.CustomCode.PreUpdate true -}} {{ end}} {{ if $.NestedQuery -}} @@ -828,7 +827,7 @@ if len(updateMask) > 0 { } {{- end}} {{- end}}{{/*if not immutable*/}} -{{ if $.FieldSpecificUpdateMethods }} +{{- if $.FieldSpecificUpdateMethods }} d.Partial(true) {{ $CustomUpdateProps := $.PropertiesByCustomUpdate }} {{ range $group := $.PropertiesByCustomUpdateGroups }} @@ -864,7 +863,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings (index $CustomUpdateProps $gro {{- end}} }) if err != nil { - return transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("{{ $.ResourceName -}} %q", d.Id())) + return transport_tpg.HandleNotFoundError(err, 
d, fmt.Sprintf("{{ $.ResourceName }} %q", d.Id())) } obj["{{ $group.FingerprintName }}"] = getRes["{{ $group.FingerprintName }}"] @@ -980,7 +979,8 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings (index $CustomUpdateProps $gro {{- end}} {{- end}} } -{{ end }}{{/*range PropertiesByCustomUpdate*/}} +{{- end }}{{/*range PropertiesByCustomUpdate*/}} +{{ "" }} d.Partial(false) {{- end }}{{/*if FieldSpecificUpdateMethods*/}} @@ -1187,6 +1187,9 @@ func resource{{ $.ResourceName -}}UpdateEncoder(d *schema.ResourceData, meta int {{ template "NestedQuery" $ }} {{- end }} {{- if $.CustomCode.Decoder }} +{{- if and $.CustomCode.UpdateEncoder (not $.NestedQuery ) }} +{{ "" }} +{{- end }} func resource{{ $.ResourceName -}}Decoder(d *schema.ResourceData, meta interface{}, res map[string]interface{}) (map[string]interface{}, error) { {{ $.CustomTemplate $.CustomCode.Decoder false -}} } diff --git a/mmv1/templates/terraform/update_encoder/compute_service_attachment.go.erb b/mmv1/templates/terraform/update_encoder/compute_service_attachment.go.erb index 03b7dbdb169e..cc3abdc946a6 100644 --- a/mmv1/templates/terraform/update_encoder/compute_service_attachment.go.erb +++ b/mmv1/templates/terraform/update_encoder/compute_service_attachment.go.erb @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-%> - // need to send value in PATCH due to validation bug on api b/198329756 nameProp := d.Get("name") if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { diff --git a/mmv1/templates/terraform/update_encoder/go/compute_service_attachment.go.tmpl b/mmv1/templates/terraform/update_encoder/go/compute_service_attachment.go.tmpl index c4c547e72fa0..aca47912a356 100644 --- a/mmv1/templates/terraform/update_encoder/go/compute_service_attachment.go.tmpl +++ b/mmv1/templates/terraform/update_encoder/go/compute_service_attachment.go.tmpl @@ -10,7 +10,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -}} - // need to send value in PATCH due to validation bug on api b/198329756 nameProp := d.Get("name") if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, nameProp)) { From 24f92f9cd64e2829ef3e3fd4e7138594f0b5f56b Mon Sep 17 00:00:00 2001 From: Alessio Buraggina <28165200+tdbhacks@users.noreply.github.com> Date: Wed, 26 Jun 2024 13:17:37 -0400 Subject: [PATCH 222/356] Update destroy_scheduled_duration default value description (#11057) --- mmv1/products/kms/CryptoKey.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/products/kms/CryptoKey.yaml b/mmv1/products/kms/CryptoKey.yaml index 3bb03dcf7eb6..b1b6fc705338 100644 --- a/mmv1/products/kms/CryptoKey.yaml +++ b/mmv1/products/kms/CryptoKey.yaml @@ -149,7 +149,7 @@ properties: immutable: true description: | The period of time that versions of this key spend in the DESTROY_SCHEDULED state before transitioning to DESTROYED. - If not specified at creation time, the default duration is 24 hours. + If not specified at creation time, the default duration is 30 days. 
default_from_api: true - !ruby/object:Api::Type::Boolean name: 'importOnly' From 0a5f3df75416c3ae6ac22d4352e535e50155dcbd Mon Sep 17 00:00:00 2001 From: Lujie Zhao Date: Wed, 26 Jun 2024 13:03:29 -0700 Subject: [PATCH 223/356] Add a sleep to reduce the likelihood of eventual consistency issue (#11044) --- .../resourcemanager/resource_google_service_account_key.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mmv1/third_party/terraform/services/resourcemanager/resource_google_service_account_key.go b/mmv1/third_party/terraform/services/resourcemanager/resource_google_service_account_key.go index 074839783a65..94df73f3b15f 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/resource_google_service_account_key.go +++ b/mmv1/third_party/terraform/services/resourcemanager/resource_google_service_account_key.go @@ -145,6 +145,12 @@ func resourceGoogleServiceAccountKeyCreate(d *schema.ResourceData, meta interfac if err != nil { return err } + + // We can't guarantee complete consistency even after waiting on + // the results, so sleep for some additional time to reduce the + // likelihood of eventual consistency failures. + time.Sleep(10 * time.Second) + return resourceGoogleServiceAccountKeyRead(d, meta) } From 299de42d9eb56e21db97034377f2c6f45851743e Mon Sep 17 00:00:00 2001 From: sushilchaskar28 Date: Wed, 26 Jun 2024 13:30:06 -0700 Subject: [PATCH 224/356] Modify doc for `sql_statement` in `google_dataplex_datascan` (#11045) Co-authored-by: sushilchaskar --- mmv1/products/dataplex/Datascan.yaml | 2 +- .../terraform/examples/dataplex_datascan_full_quality.tf.erb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mmv1/products/dataplex/Datascan.yaml b/mmv1/products/dataplex/Datascan.yaml index 4e2a40eac23e..c1c71bed0dc2 100644 --- a/mmv1/products/dataplex/Datascan.yaml +++ b/mmv1/products/dataplex/Datascan.yaml @@ -437,7 +437,7 @@ properties: name: 'sqlStatement' required: true description: | - The SQL Statement. 
+ The SQL statement. - !ruby/object:Api::Type::NestedObject name: 'dataProfileSpec' allow_empty_object: true diff --git a/mmv1/templates/terraform/examples/dataplex_datascan_full_quality.tf.erb b/mmv1/templates/terraform/examples/dataplex_datascan_full_quality.tf.erb index 55a1d3c8f4e6..54a73f986c39 100644 --- a/mmv1/templates/terraform/examples/dataplex_datascan_full_quality.tf.erb +++ b/mmv1/templates/terraform/examples/dataplex_datascan_full_quality.tf.erb @@ -94,7 +94,7 @@ resource "google_dataplex_datascan" "<%= ctx[:primary_resource_id] %>" { sql_expression = "COUNT(*) > 0" } } - + rules { dimension = "VALIDITY" sql_assertion { From 015939976ec0c44fd83b98cdff12e3cc6adbc67d Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Wed, 26 Jun 2024 14:26:34 -0700 Subject: [PATCH 225/356] go rewrite - refresh custom templates (#11058) --- .../terraform/constants/go/compute_instance.go.tmpl | 0 .../go/compute_managed_ssl_certificate.go.tmpl | 7 +++++++ .../go/compute_network_endpoint_group.go.tmpl | 9 +++++++++ .../terraform/constants/go/compute_route.go.tmpl | 12 ++++++++++++ .../terraform/constants/go/dataproc_cluster.go.tmpl | 10 ++++++++++ mmv1/templates/terraform/constants/go/disk.tmpl | 4 ++-- .../constants/go/pubsub_subscription.go.tmpl | 0 .../go/dataplex_datascan_full_quality.tf.tmpl | 7 +++++++ .../examples/go/storage_managed_folder_basic.tf.tmpl | 10 ++++++++++ .../go/storage_managed_folder.tf.tmpl | 2 ++ 10 files changed, 59 insertions(+), 2 deletions(-) create mode 100644 mmv1/templates/terraform/constants/go/compute_instance.go.tmpl create mode 100644 mmv1/templates/terraform/constants/go/compute_managed_ssl_certificate.go.tmpl create mode 100644 mmv1/templates/terraform/constants/go/compute_network_endpoint_group.go.tmpl create mode 100644 mmv1/templates/terraform/constants/go/compute_route.go.tmpl create mode 100644 mmv1/templates/terraform/constants/go/dataproc_cluster.go.tmpl create mode 100644 
mmv1/templates/terraform/constants/go/pubsub_subscription.go.tmpl create mode 100644 mmv1/templates/terraform/examples/go/storage_managed_folder_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/iam/example_config_body/go/storage_managed_folder.tf.tmpl diff --git a/mmv1/templates/terraform/constants/go/compute_instance.go.tmpl b/mmv1/templates/terraform/constants/go/compute_instance.go.tmpl new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/mmv1/templates/terraform/constants/go/compute_managed_ssl_certificate.go.tmpl b/mmv1/templates/terraform/constants/go/compute_managed_ssl_certificate.go.tmpl new file mode 100644 index 000000000000..e80b7c1f9075 --- /dev/null +++ b/mmv1/templates/terraform/constants/go/compute_managed_ssl_certificate.go.tmpl @@ -0,0 +1,7 @@ +// For managed SSL certs, if new is an absolute FQDN (trailing '.') but old isn't, treat them as equals. +func AbsoluteDomainSuppress(k, old, new string, _ *schema.ResourceData) bool { + if strings.HasPrefix(k, "managed.0.domains.") { + return old == strings.TrimRight(new, ".") || new == strings.TrimRight(old, ".") + } + return false +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/compute_network_endpoint_group.go.tmpl b/mmv1/templates/terraform/constants/go/compute_network_endpoint_group.go.tmpl new file mode 100644 index 000000000000..ad1828dfe457 --- /dev/null +++ b/mmv1/templates/terraform/constants/go/compute_network_endpoint_group.go.tmpl @@ -0,0 +1,9 @@ +// Use this method when subnet is optioanl and auto_create_subnetworks = true +// API sometimes choose a subnet so the diff needs to be ignored +func compareOptionalSubnet(_, old, new string, _ *schema.ResourceData) bool { + if tpgresource.IsEmptyValue(reflect.ValueOf(new)) { + return true + } + // otherwise compare as self links + return tpgresource.CompareSelfLinkOrResourceName("", old, new, nil) +} \ No newline at end of file diff --git 
a/mmv1/templates/terraform/constants/go/compute_route.go.tmpl b/mmv1/templates/terraform/constants/go/compute_route.go.tmpl new file mode 100644 index 000000000000..915d204a137f --- /dev/null +++ b/mmv1/templates/terraform/constants/go/compute_route.go.tmpl @@ -0,0 +1,12 @@ +// Use this method when the field accepts either an IP address or a +// self_link referencing a resource (such as google_compute_route's +// next_hop_ilb) +func CompareIpAddressOrSelfLinkOrResourceName(_, old, new string, _ *schema.ResourceData) bool { + // if we can parse `new` as an IP address, then compare as strings + if net.ParseIP(new) != nil { + return new == old + } + + // otherwise compare as self links + return tpgresource.CompareSelfLinkOrResourceName("", old, new, nil) +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/dataproc_cluster.go.tmpl b/mmv1/templates/terraform/constants/go/dataproc_cluster.go.tmpl new file mode 100644 index 000000000000..2761e8c4a5e2 --- /dev/null +++ b/mmv1/templates/terraform/constants/go/dataproc_cluster.go.tmpl @@ -0,0 +1,10 @@ +// Suppress diffs for values that are equivalent except for their use of the words "location" +// compared to "region" or "zone" +func locationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + return locationDiffSuppressHelper(old, new) || locationDiffSuppressHelper(new, old) +} + +func locationDiffSuppressHelper(a, b string) bool { + return strings.Replace(a, "/locations/", "/regions/", 1) == b || + strings.Replace(a, "/locations/", "/zones/", 1) == b +} \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/disk.tmpl b/mmv1/templates/terraform/constants/go/disk.tmpl index 4e1c27e01cb1..a3947c725250 100644 --- a/mmv1/templates/terraform/constants/go/disk.tmpl +++ b/mmv1/templates/terraform/constants/go/disk.tmpl @@ -17,12 +17,12 @@ func hyperDiskIopsUpdateDiffSupress(_ context.Context, d *schema.ResourceDiff, m } {{- end }} -<% if version == "ga" -%> +{{ if 
ne $.TargetVersionName `ga` -}} // Suppress all diffs, used for Disk.Interface which is a nonfunctional field func AlwaysDiffSuppress(_, _, _ string, _ *schema.ResourceData) bool { return true } -<% end -%> +{{- end }} // diffsupress for beta and to check change in source_disk attribute func sourceDiskDiffSupress(_, old, new string, _ *schema.ResourceData) bool { diff --git a/mmv1/templates/terraform/constants/go/pubsub_subscription.go.tmpl b/mmv1/templates/terraform/constants/go/pubsub_subscription.go.tmpl new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/mmv1/templates/terraform/examples/go/dataplex_datascan_full_quality.tf.tmpl b/mmv1/templates/terraform/examples/go/dataplex_datascan_full_quality.tf.tmpl index 9640959c3742..2a1a2f423ec5 100644 --- a/mmv1/templates/terraform/examples/go/dataplex_datascan_full_quality.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dataplex_datascan_full_quality.tf.tmpl @@ -94,6 +94,13 @@ resource "google_dataplex_datascan" "{{$.PrimaryResourceId}}" { sql_expression = "COUNT(*) > 0" } } + + rules { + dimension = "VALIDITY" + sql_assertion { + sql_statement = "select * from bigquery-public-data.austin_bikeshare.bikeshare_stations where station_id is null" + } + } } diff --git a/mmv1/templates/terraform/examples/go/storage_managed_folder_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_managed_folder_basic.tf.tmpl new file mode 100644 index 000000000000..940ed198c082 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/storage_managed_folder_basic.tf.tmpl @@ -0,0 +1,10 @@ +resource "google_storage_bucket" "bucket" { + name = "{{index $.Vars "bucket_name"}}" + location = "EU" + uniform_bucket_level_access = true +} + +resource "google_storage_managed_folder" "{{$.PrimaryResourceId}}" { + bucket = google_storage_bucket.bucket.name + name = "managed/folder/name/" +} diff --git a/mmv1/templates/terraform/iam/example_config_body/go/storage_managed_folder.tf.tmpl 
b/mmv1/templates/terraform/iam/example_config_body/go/storage_managed_folder.tf.tmpl new file mode 100644 index 000000000000..2c5d96e844fb --- /dev/null +++ b/mmv1/templates/terraform/iam/example_config_body/go/storage_managed_folder.tf.tmpl @@ -0,0 +1,2 @@ + bucket = google_storage_managed_folder.folder.bucket + managed_folder = google_storage_managed_folder.folder.name From 7381c52e624297d4328b71d0a0948f44f9d87dec Mon Sep 17 00:00:00 2001 From: Matheus Guilherme Souza Aleixo <82680416+matheusaleixo-cit@users.noreply.github.com> Date: Wed, 26 Jun 2024 20:19:03 -0300 Subject: [PATCH 226/356] Added missing changes to "compute_security_policy" contribution docs (#11024) Co-authored-by: Stephen Lewis (Burrows) --- .../r/compute_security_policy.html.markdown | 26 +++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/r/compute_security_policy.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_security_policy.html.markdown index 76bb154d9a0a..194dcc7989f4 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_security_policy.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_security_policy.html.markdown @@ -260,6 +260,11 @@ The following arguments are supported: such as `origin.ip`, `source.region_code` and `contents` in the request header. Structure is [documented below](#nested_expr). +* `expr_options` - + (Optional) + The configuration options available when specifying a user defined CEVAL expression (i.e., 'expr'). + Structure is [documented below](#nested_expr_options). + The `config` block supports: * `src_ip_ranges` - (Required) Set of IP addresses or ranges (IPV4 or IPV6) in CIDR notation @@ -271,6 +276,23 @@ The following arguments are supported: * `expression` - (Required) Textual representation of an expression in Common Expression Language syntax. The application context of the containing message determines which well-known feature set of CEL is supported. 
+The `expr_options` block supports: + +* `recaptcha_options` - + (Required) + reCAPTCHA configuration options to be applied for the rule. If the rule does not evaluate reCAPTCHA tokens, this field has no effect. + Structure is [documented below](#nested_recaptcha_options). + +The `recaptcha_options` block supports: + +* `action_token_site_keys` - + (Optional) + A list of site keys to be used during the validation of reCAPTCHA action-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. + +* `session_token_site_keys` - + (Optional) + A list of site keys to be used during the validation of reCAPTCHA session-tokens. The provided site keys need to be created from reCAPTCHA API under the same project where the security policy is created. + The `preconfigured_waf_config` block supports: * `exclusion` - (Optional) An exclusion to apply during preconfigured WAF evaluation. Structure is [documented below](#nested_exclusion). @@ -328,6 +350,8 @@ The following arguments are supported: * `HTTP_PATH`: The URL path of the HTTP request. The key value is truncated to the first 128 bytes * `SNI`: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to `ALL` on a HTTP session. * `REGION_CODE`: The country/region from which the request originates. + * `TLS_JA3_FINGERPRINT`: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. + * `USER_IP`: The IP address of the originating client, which is resolved based on "user_ip_request_headers" configured with the securitypolicy. If there is no "user_ip_request_headers" configuration or an IP address cannot be resolved from it, the key type defaults to IP. 
* `enforce_on_key_name` - (Optional) Rate limit key name applicable only for the following key types: @@ -355,6 +379,8 @@ The following arguments are supported: * `HTTP_PATH`: The URL path of the HTTP request. The key value is truncated to the first 128 bytes * `SNI`: Server name indication in the TLS session of the HTTPS request. The key value is truncated to the first 128 bytes. The key type defaults to `ALL` on a HTTP session. * `REGION_CODE`: The country/region from which the request originates. + * `TLS_JA3_FINGERPRINT`: JA3 TLS/SSL fingerprint if the client connects using HTTPS, HTTP/2 or HTTP/3. If not available, the key type defaults to ALL. + * `USER_IP`: The IP address of the originating client, which is resolved based on "user_ip_request_headers" configured with the securitypolicy. If there is no "user_ip_request_headers" configuration or an IP address cannot be resolved from it, the key type defaults to IP. * `exceed_redirect_options` - (Optional) Parameters defining the redirect action that is used as the exceed action. Cannot be specified if the exceed action is not redirect. Structure is [documented below](#nested_exceed_redirect_options). 
From 5f8bad02e3de871ca4a232dfcfc1c159b00e3024 Mon Sep 17 00:00:00 2001 From: Serhii P <22973227+serpro69@users.noreply.github.com> Date: Thu, 27 Jun 2024 16:17:50 +0200 Subject: [PATCH 227/356] Fix docs link to predefined metadata keys (#11065) --- .../terraform/website/docs/r/compute_instance.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index 04dc4c466681..25baeb72ef4c 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -128,8 +128,8 @@ The following arguments are supported: * `metadata` - (Optional) Metadata key/value pairs to make available from within the instance. Ssh keys attached in the Cloud Console will be removed. - Add them to your config in order to keep them attached to your instance. A - list of default metadata values (e.g. ssh-keys) can be found [here](https://cloud.google.com/compute/docs/metadata/default-metadata-values) + Add them to your config in order to keep them attached to your instance. + A list of predefined metadata keys (e.g. ssh-keys) can be found [here](https://cloud.google.com/compute/docs/metadata/predefined-metadata-keys) -> Depending on the OS you choose for your instance, some metadata keys have special functionality. 
Most linux-based images will run the content of From 3f2918a1fd53571b182a3ffc697cd4189d347f79 Mon Sep 17 00:00:00 2001 From: Eric Pang Date: Thu, 27 Jun 2024 10:23:36 -0400 Subject: [PATCH 228/356] Add Secure Source Manager Repository resource (#10840) --- .../securesourcemanager/Repository.yaml | 156 ++++++++++++++++++ ...ure_source_manager_repository_basic.tf.erb | 10 ++ ...e_manager_repository_initial_config.tf.erb | 18 ++ 3 files changed, 184 insertions(+) create mode 100644 mmv1/products/securesourcemanager/Repository.yaml create mode 100644 mmv1/templates/terraform/examples/secure_source_manager_repository_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/secure_source_manager_repository_initial_config.tf.erb diff --git a/mmv1/products/securesourcemanager/Repository.yaml b/mmv1/products/securesourcemanager/Repository.yaml new file mode 100644 index 000000000000..2f0382ca2ac5 --- /dev/null +++ b/mmv1/products/securesourcemanager/Repository.yaml @@ -0,0 +1,156 @@ +# # Copyright 2024 Google Inc. +# # Licensed under the Apache License, Version 2.0 (the "License"); +# # you may not use this file except in compliance with the License. +# # You may obtain a copy of the License at +# # +# # http://www.apache.org/licenses/LICENSE-2.0 +# # +# # Unless required by applicable law or agreed to in writing, software +# # distributed under the License is distributed on an "AS IS" BASIS, +# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# # See the License for the specific language governing permissions and +# # limitations under the License. + +--- !ruby/object:Api::Resource +name: 'Repository' +base_url: 'projects/{{project}}/locations/{{location}}/repositories?repository_id={{repository_id}}' +self_link: 'projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}' +immutable: true +description: 'Repositories store source code. 
It supports all Git SCM client commands and has built-in pull requests and issue tracking. Both HTTPS and SSH authentication are supported.' +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': 'https://cloud.google.com/secure-source-manager/docs/overview' +import_format: ['projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}', '{{repository_id}}'] +autogen_async: true +async: !ruby/object:Api::OpAsync + actions: ['create', 'delete'] + operation: !ruby/object:Api::OpAsync::Operation + path: 'name' + base_url: '{{op_id}}' + wait_ms: 1000 + result: !ruby/object:Api::OpAsync::Result + path: 'response' + status: !ruby/object:Api::OpAsync::Status + path: 'done' + complete: true + allowed: + - true + - false + error: !ruby/object:Api::OpAsync::Error + path: 'error' + message: 'message' +iam_policy: !ruby/object:Api::Resource::IamPolicy + parent_resource_attribute: 'repository_id' + method_name_separator: ':' + allowed_iam_role: 'roles/securesourcemanager.repoAdmin' + import_format: ['projects/{{project}}/locations/{{location}}/repositories/{{repository_id}}', '{{repository_id}}'] +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'secure_source_manager_repository_basic' + primary_resource_id: 'default' + primary_resource_name: "fmt.Sprintf(\"tf-test-my-repository%s\", + context[\"random_suffix\"\ + ])" + vars: + repository_id: 'my-repository' + instance_id: 'my-instance' + - !ruby/object:Provider::Terraform::Examples + name: 'secure_source_manager_repository_initial_config' + primary_resource_id: 'default' + primary_resource_name: "fmt.Sprintf(\"tf-test-my-repository%s\", + context[\"random_suffix\"\ + ])" + vars: + repository_id: 'my-repository' + instance_id: 'my-instance' +parameters: + - !ruby/object:Api::Type::String + name: 'location' + description: | + The location for the Repository. 
+ required: true + url_param_only: true + - !ruby/object:Api::Type::String + name: 'repository_id' + description: | + The ID for the Repository. + required: true + url_param_only: true +properties: + - !ruby/object:Api::Type::String + name: 'name' + description: | + The resource name for the Repository. + output: true + - !ruby/object:Api::Type::String + name: 'description' + description: | + Description of the repository, which cannot exceed 500 characters. + - !ruby/object:Api::Type::String + name: 'instance' + description: | + The name of the instance in which the repository is hosted. + required: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - !ruby/object:Api::Type::String + name: 'uid' + description: | + Unique identifier of the repository. + output: true + - !ruby/object:Api::Type::Time + name: 'createTime' + description: | + Time the repository was created in UTC. + output: true + - !ruby/object:Api::Type::Time + name: 'updateTime' + description: | + Time the repository was updated in UTC. + output: true + - !ruby/object:Api::Type::NestedObject + name: 'uris' + description: | + URIs for the repository. + output: true + properties: + - !ruby/object:Api::Type::String + name: 'html' + description: | + HTML is the URI for the user to view the repository in a browser. + output: true + - !ruby/object:Api::Type::String + name: 'gitHttps' + description: + git_https is the git HTTPS URI for git operations. + output: true + - !ruby/object:Api::Type::String + name: 'api' + description: | + API is the URI for API access. + output: true + - !ruby/object:Api::Type::NestedObject + name: 'initialConfig' + description: | + Initial configurations for the repository. + ignore_read: true + properties: + - !ruby/object:Api::Type::String + name: 'defaultBranch' + description: | + Default branch name of the repository. + - !ruby/object:Api::Type::Array + name: 'gitignores' + description: | + List of gitignore template names user can choose from. 
+ Valid values can be viewed at https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.repositories#initialconfig. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'license' + description: | + License template name user can choose from. + Valid values can be viewed at https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.repositories#initialconfig. + - !ruby/object:Api::Type::String + name: 'readme' + description: | + README template name. + Valid values can be viewed at https://cloud.google.com/secure-source-manager/docs/reference/rest/v1/projects.locations.repositories#initialconfig. diff --git a/mmv1/templates/terraform/examples/secure_source_manager_repository_basic.tf.erb b/mmv1/templates/terraform/examples/secure_source_manager_repository_basic.tf.erb new file mode 100644 index 000000000000..6cc59eeacc15 --- /dev/null +++ b/mmv1/templates/terraform/examples/secure_source_manager_repository_basic.tf.erb @@ -0,0 +1,10 @@ +resource "google_secure_source_manager_instance" "instance" { + location = "us-central1" + instance_id = "<%= ctx[:vars]['instance_id'] %>" +} + +resource "google_secure_source_manager_repository" "<%= ctx[:primary_resource_id] %>" { + location = "us-central1" + repository_id = "<%= ctx[:vars]['repository_id'] %>" + instance = google_secure_source_manager_instance.instance.name +} diff --git a/mmv1/templates/terraform/examples/secure_source_manager_repository_initial_config.tf.erb b/mmv1/templates/terraform/examples/secure_source_manager_repository_initial_config.tf.erb new file mode 100644 index 000000000000..b8264bb9240f --- /dev/null +++ b/mmv1/templates/terraform/examples/secure_source_manager_repository_initial_config.tf.erb @@ -0,0 +1,18 @@ +resource "google_secure_source_manager_instance" "instance" { + location = "us-central1" + instance_id = "<%= ctx[:vars]['instance_id'] %>" +} + +resource "google_secure_source_manager_repository" "<%= 
ctx[:primary_resource_id] %>" { + location = "us-central1" + repository_id = "<%= ctx[:vars]['repository_id'] %>" + instance = google_secure_source_manager_instance.instance.name + + description = "This is a test repository" + initial_config { + default_branch = "main" + gitignores = ["python"] + license = "mit" + readme = "default" + } +} From 9565eef72a86e565b4fa86786b3e202f807f29ce Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Thu, 27 Jun 2024 09:17:21 -0700 Subject: [PATCH 229/356] Fix Go rewrite diffs for default_value and ExtraSchemaEntry (#11060) --- mmv1/api/resource.go | 10 +++++----- mmv1/products/compute/RegionSslCertificate.yaml | 2 +- mmv1/products/compute/go_Address.yaml | 2 +- mmv1/products/compute/go_Autoscaler.yaml | 8 ++++---- mmv1/products/compute/go_BackendService.yaml | 4 ++-- mmv1/products/compute/go_Disk.yaml | 6 +++--- mmv1/products/compute/go_ForwardingRule.yaml | 6 +++--- mmv1/products/compute/go_GlobalAddress.yaml | 2 +- .../compute/go_GlobalForwardingRule.yaml | 6 +++--- mmv1/products/compute/go_HaVpnGateway.yaml | 2 +- mmv1/products/compute/go_HealthCheck.yaml | 16 ++++++++-------- mmv1/products/compute/go_HttpHealthCheck.yaml | 2 +- mmv1/products/compute/go_HttpsHealthCheck.yaml | 2 +- mmv1/products/compute/go_Image.yaml | 2 +- mmv1/products/compute/go_Instance.yaml | 2 +- .../compute/go_InterconnectAttachment.yaml | 2 +- .../compute/go_ManagedSslCertificate.yaml | 6 +++--- mmv1/products/compute/go_Network.yaml | 2 +- .../compute/go_NetworkEndpointGroup.yaml | 6 +++--- mmv1/products/compute/go_NodeGroup.yaml | 2 +- mmv1/products/compute/go_NodeTemplate.yaml | 2 +- .../compute/go_OrganizationSecurityPolicy.yaml | 2 +- .../go_OrganizationSecurityPolicyRule.yaml | 2 +- mmv1/products/compute/go_PacketMirroring.yaml | 2 +- mmv1/products/compute/go_PerInstanceConfig.yaml | 8 ++++---- mmv1/products/compute/go_RegionAutoscaler.yaml | 6 +++--- .../compute/go_RegionBackendService.yaml | 8 ++++---- mmv1/products/compute/go_RegionDisk.yaml | 6 
+++--- mmv1/products/compute/go_RegionHealthCheck.yaml | 16 ++++++++-------- .../compute/go_RegionNetworkEndpointGroup.yaml | 2 +- .../compute/go_RegionPerInstanceConfig.yaml | 8 ++++---- .../compute/go_RegionSslCertificate.yaml | 1 + mmv1/products/compute/go_RegionSslPolicy.yaml | 4 ++-- .../compute/go_RegionTargetTcpProxy.yaml | 2 +- mmv1/products/compute/go_Reservation.yaml | 2 +- mmv1/products/compute/go_ResourcePolicy.yaml | 2 +- mmv1/products/compute/go_Route.yaml | 2 +- mmv1/products/compute/go_Router.yaml | 2 +- mmv1/products/compute/go_RouterNat.yaml | 2 +- mmv1/products/compute/go_SslPolicy.yaml | 4 ++-- mmv1/products/compute/go_Subnetwork.yaml | 6 +++--- mmv1/products/compute/go_TargetHttpsProxy.yaml | 2 +- mmv1/products/compute/go_TargetInstance.yaml | 2 +- mmv1/products/compute/go_TargetSslProxy.yaml | 2 +- mmv1/products/compute/go_TargetTcpProxy.yaml | 2 +- mmv1/templates/terraform/resource.go.tmpl | 14 ++++++++------ .../unordered_list_customize_diff.go.tmpl | 8 ++++---- .../terraform/yaml_conversion_field.erb | 2 +- 48 files changed, 107 insertions(+), 104 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 4f51075dfbf8..5a91ac950662 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -1467,8 +1467,8 @@ func (r Resource) propertiesWithCustomUpdate(properties []*Type) []*Type { }) } -func (r Resource) PropertiesByCustomUpdate() map[UpdateGroup][]*Type { - customUpdateProps := r.propertiesWithCustomUpdate(r.RootProperties()) +func (r Resource) PropertiesByCustomUpdate(properties []*Type) map[UpdateGroup][]*Type { + customUpdateProps := r.propertiesWithCustomUpdate(properties) groupedCustomUpdateProps := map[UpdateGroup][]*Type{} for _, prop := range customUpdateProps { groupedProperty := UpdateGroup{UpdateUrl: prop.UpdateUrl, @@ -1499,11 +1499,11 @@ func (r Resource) PropertiesByCustomUpdateGroups() []UpdateGroup { } func (r Resource) FieldSpecificUpdateMethods() bool { - return (len(r.PropertiesByCustomUpdate()) > 
0) + return (len(r.PropertiesByCustomUpdate(r.RootProperties())) > 0) } -func (r Resource) CustomUpdatePropertiesByKey(updateUrl string, updateId string, fingerprintName string, updateVerb string) []*Type { - groupedProperties := r.PropertiesByCustomUpdate() +func (r Resource) CustomUpdatePropertiesByKey(properties []*Type, updateUrl string, updateId string, fingerprintName string, updateVerb string) []*Type { + groupedProperties := r.PropertiesByCustomUpdate(properties) groupedProperty := UpdateGroup{UpdateUrl: updateUrl, UpdateVerb: updateVerb, UpdateId: updateId, diff --git a/mmv1/products/compute/RegionSslCertificate.yaml b/mmv1/products/compute/RegionSslCertificate.yaml index e9d8b451803b..c81793ce98ca 100644 --- a/mmv1/products/compute/RegionSslCertificate.yaml +++ b/mmv1/products/compute/RegionSslCertificate.yaml @@ -78,7 +78,7 @@ examples: ignore_read_extra: - 'name_prefix' custom_code: !ruby/object:Provider::Terraform::CustomCode - constants: templates/terraform/constants/go/compute_certificate.go.tmpl + constants: templates/terraform/constants/compute_certificate.go.erb extra_schema_entry: templates/terraform/extra_schema_entry/ssl_certificate.erb parameters: - !ruby/object:Api::Type::ResourceRef diff --git a/mmv1/products/compute/go_Address.yaml b/mmv1/products/compute/go_Address.yaml index beca1b1ebe45..5a280c74e32b 100644 --- a/mmv1/products/compute/go_Address.yaml +++ b/mmv1/products/compute/go_Address.yaml @@ -114,7 +114,7 @@ properties: The type of address to reserve. Note: if you set this argument's value as `INTERNAL` you need to leave the `network_tier` argument unset in that resource block. 
custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' - default_value: EXTERNAL + default_value: "EXTERNAL" enum_values: - 'INTERNAL' - 'EXTERNAL' diff --git a/mmv1/products/compute/go_Autoscaler.yaml b/mmv1/products/compute/go_Autoscaler.yaml index 703a28c379a3..2625bed10ec8 100644 --- a/mmv1/products/compute/go_Autoscaler.yaml +++ b/mmv1/products/compute/go_Autoscaler.yaml @@ -151,7 +151,7 @@ properties: type: String description: | Defines operating mode for this policy. - default_value: ON + default_value: "ON" - name: 'scaleDownControl' type: NestedObject description: | @@ -262,7 +262,7 @@ properties: - OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' - default_value: NONE + default_value: "NONE" - name: 'metric' type: Array description: | @@ -359,7 +359,7 @@ properties: TimeSeries are returned upon the query execution, the autoscaler will sum their respective values to obtain its scaling value. min_version: 'beta' - default_value: resource.type = gce_instance + default_value: "resource.type = gce_instance" - name: 'loadBalancingUtilization' type: NestedObject description: | @@ -398,7 +398,7 @@ properties: type: String description: | The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. - default_value: UTC + default_value: "UTC" - name: 'durationSec' type: Integer description: | diff --git a/mmv1/products/compute/go_BackendService.yaml b/mmv1/products/compute/go_BackendService.yaml index c13d11c4ee35..274582b20829 100644 --- a/mmv1/products/compute/go_BackendService.yaml +++ b/mmv1/products/compute/go_BackendService.yaml @@ -150,7 +150,7 @@ properties: for an explanation of load balancing modes. 
From version 6.0.0 default value will be UTILIZATION to match default GCP value. - default_value: UTILIZATION + default_value: "UTILIZATION" enum_values: - 'UTILIZATION' - 'RATE' @@ -771,7 +771,7 @@ properties: load balancing cannot be used with the other. For more information, refer to [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). immutable: true - default_value: EXTERNAL + default_value: "EXTERNAL" enum_values: - 'EXTERNAL' - 'INTERNAL_SELF_MANAGED' diff --git a/mmv1/products/compute/go_Disk.yaml b/mmv1/products/compute/go_Disk.yaml index 5cfc9920a63e..08a4bf0ea163 100644 --- a/mmv1/products/compute/go_Disk.yaml +++ b/mmv1/products/compute/go_Disk.yaml @@ -347,8 +347,8 @@ properties: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. min_version: 'beta' url_param_only: true - diff_suppress_func: AlwaysDiffSuppress - default_value: SCSI + diff_suppress_func: 'AlwaysDiffSuppress' + default_value: "SCSI" deprecation_message: '`interface` is deprecated and will be removed in a future major release. This field is no longer used and can be safely removed from your configurations; disk interfaces are automatically determined on attachment.' 
- name: 'sourceDisk' type: String @@ -384,7 +384,7 @@ properties: diff_suppress_func: 'tpgresource.CompareResourceNames' custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' - default_value: pd-standard + default_value: "pd-standard" resource: 'DiskType' imports: 'selfLink' - name: 'image' diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml index ca3d6425cf88..2944456aca31 100644 --- a/mmv1/products/compute/go_ForwardingRule.yaml +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -299,7 +299,7 @@ properties: When reading an `IPAddress`, the API always returns the IP address number. default_from_api: true - diff_suppress_func: InternalIpDiffSuppress + diff_suppress_func: 'InternalIpDiffSuppress' - name: 'IPProtocol' type: Enum description: | @@ -344,7 +344,7 @@ properties: For more information about forwarding rules, refer to [Forwarding rule concepts](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts). - default_value: EXTERNAL + default_value: "EXTERNAL" enum_values: - 'EXTERNAL' - 'EXTERNAL_MANAGED' @@ -412,7 +412,7 @@ properties: @pattern: \d+(?:-\d+)? default_from_api: true - diff_suppress_func: PortRangeDiffSuppress + diff_suppress_func: 'PortRangeDiffSuppress' - name: 'ports' type: Array description: | diff --git a/mmv1/products/compute/go_GlobalAddress.yaml b/mmv1/products/compute/go_GlobalAddress.yaml index 2ae8cfaa6a10..a53c5b276399 100644 --- a/mmv1/products/compute/go_GlobalAddress.yaml +++ b/mmv1/products/compute/go_GlobalAddress.yaml @@ -130,7 +130,7 @@ properties: * EXTERNAL indicates public/external single IP address. * INTERNAL indicates internal IP ranges belonging to some network. 
diff_suppress_func: 'tpgresource.EmptyOrDefaultStringSuppress("EXTERNAL")' - default_value: EXTERNAL + default_value: "EXTERNAL" enum_values: - 'EXTERNAL' - 'INTERNAL' diff --git a/mmv1/products/compute/go_GlobalForwardingRule.yaml b/mmv1/products/compute/go_GlobalForwardingRule.yaml index 9ebea92c0892..b9fe030fcdc3 100644 --- a/mmv1/products/compute/go_GlobalForwardingRule.yaml +++ b/mmv1/products/compute/go_GlobalForwardingRule.yaml @@ -255,7 +255,7 @@ properties: When reading an `IPAddress`, the API always returns the IP address number. default_from_api: true - diff_suppress_func: InternalIpDiffSuppress + diff_suppress_func: 'InternalIpDiffSuppress' - name: 'IPProtocol' type: Enum description: | @@ -308,7 +308,7 @@ properties: For more information about forwarding rules, refer to [Forwarding rule concepts](https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts). - default_value: EXTERNAL + default_value: "EXTERNAL" enum_values: - 'EXTERNAL' - 'EXTERNAL_MANAGED' @@ -433,7 +433,7 @@ properties: cannot have overlapping `portRange`s. @pattern: \d+(?:-\d+)? - diff_suppress_func: PortRangeDiffSuppress + diff_suppress_func: 'PortRangeDiffSuppress' - name: 'subnetwork' type: ResourceRef description: | diff --git a/mmv1/products/compute/go_HaVpnGateway.yaml b/mmv1/products/compute/go_HaVpnGateway.yaml index c62e2447443f..4545e037be24 100644 --- a/mmv1/products/compute/go_HaVpnGateway.yaml +++ b/mmv1/products/compute/go_HaVpnGateway.yaml @@ -127,7 +127,7 @@ properties: If not specified, IPV4_ONLY will be used. 
immutable: true custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' - default_value: IPV4_ONLY + default_value: "IPV4_ONLY" enum_values: - 'IPV4_ONLY' - 'IPV4_IPV6' diff --git a/mmv1/products/compute/go_HealthCheck.yaml b/mmv1/products/compute/go_HealthCheck.yaml index aa04399b16e3..20c697a78af4 100644 --- a/mmv1/products/compute/go_HealthCheck.yaml +++ b/mmv1/products/compute/go_HealthCheck.yaml @@ -213,7 +213,7 @@ properties: - 'http_health_check.0.port_name' - 'http_health_check.0.proxy_header' - 'http_health_check.0.port_specification' - default_value: / + default_value: "/" - name: 'response' type: String description: | @@ -267,7 +267,7 @@ properties: - 'http_health_check.0.port_name' - 'http_health_check.0.proxy_header' - 'http_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' @@ -338,7 +338,7 @@ properties: - 'https_health_check.0.port_name' - 'https_health_check.0.proxy_header' - 'https_health_check.0.port_specification' - default_value: / + default_value: "/" - name: 'response' type: String description: | @@ -392,7 +392,7 @@ properties: - 'https_health_check.0.port_name' - 'https_health_check.0.proxy_header' - 'https_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' @@ -499,7 +499,7 @@ properties: - 'tcp_health_check.0.port_name' - 'tcp_health_check.0.proxy_header' - 'tcp_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' @@ -605,7 +605,7 @@ properties: - 'ssl_health_check.0.port_name' - 'ssl_health_check.0.proxy_header' - 'ssl_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' @@ -675,7 +675,7 @@ properties: - 'http2_health_check.0.port_name' - 'http2_health_check.0.proxy_header' - 'http2_health_check.0.port_specification' - default_value: / + default_value: "/" - name: 
'response' type: String description: | @@ -729,7 +729,7 @@ properties: - 'http2_health_check.0.port_name' - 'http2_health_check.0.proxy_header' - 'http2_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' diff --git a/mmv1/products/compute/go_HttpHealthCheck.yaml b/mmv1/products/compute/go_HttpHealthCheck.yaml index dca967b94c9c..0a7780b177a1 100644 --- a/mmv1/products/compute/go_HttpHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpHealthCheck.yaml @@ -107,7 +107,7 @@ properties: description: | The request path of the HTTP health check request. The default value is /. - default_value: / + default_value: "/" - name: 'timeoutSec' type: Integer description: | diff --git a/mmv1/products/compute/go_HttpsHealthCheck.yaml b/mmv1/products/compute/go_HttpsHealthCheck.yaml index a186293003ad..6a29961e22bb 100644 --- a/mmv1/products/compute/go_HttpsHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpsHealthCheck.yaml @@ -107,7 +107,7 @@ properties: description: | The request path of the HTTPS health check request. The default value is /. - default_value: / + default_value: "/" - name: 'timeoutSec' type: Integer description: | diff --git a/mmv1/products/compute/go_Image.yaml b/mmv1/products/compute/go_Image.yaml index 40505e568024..731e4028534d 100644 --- a/mmv1/products/compute/go_Image.yaml +++ b/mmv1/products/compute/go_Image.yaml @@ -219,7 +219,7 @@ properties: should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created. 
- default_value: TAR + default_value: "TAR" enum_values: - 'TAR' - name: 'sha1' diff --git a/mmv1/products/compute/go_Instance.yaml b/mmv1/products/compute/go_Instance.yaml index 49eb97d03c47..c8b2b4856e99 100644 --- a/mmv1/products/compute/go_Instance.yaml +++ b/mmv1/products/compute/go_Instance.yaml @@ -593,13 +593,13 @@ properties: at_least_one_of: - 'confidential_instance_config.0.enable_confidential_compute' - 'confidential_instance_config.0.confidential_instance_type' + deprecation_message: '`enableConfidentialCompute` is deprecated and will be removed in a future major release. Use `confidentialInstanceType: SEV` instead.' - name: 'confidentialInstanceType' type: Enum description: | The confidential computing technology the instance uses. SEV is an AMD feature. One of the following values: SEV, SEV_SNP. If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required. - min_version: 'beta' at_least_one_of: - 'confidential_instance_config.0.enable_confidential_compute' - 'confidential_instance_config.0.confidential_instance_type' diff --git a/mmv1/products/compute/go_InterconnectAttachment.yaml b/mmv1/products/compute/go_InterconnectAttachment.yaml index b991a7353f58..f422c5b8fbb3 100644 --- a/mmv1/products/compute/go_InterconnectAttachment.yaml +++ b/mmv1/products/compute/go_InterconnectAttachment.yaml @@ -300,7 +300,7 @@ properties: attachment must be created with this option. 
immutable: true custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'IPSEC' diff --git a/mmv1/products/compute/go_ManagedSslCertificate.yaml b/mmv1/products/compute/go_ManagedSslCertificate.yaml index 580fed2333d3..c9d0d3457913 100644 --- a/mmv1/products/compute/go_ManagedSslCertificate.yaml +++ b/mmv1/products/compute/go_ManagedSslCertificate.yaml @@ -65,8 +65,8 @@ async: path: 'error/errors' message: 'message' collection_url_key: 'items' -custom_code: !ruby/object:Provider::Terraform::CustomCode - constants: templates/terraform/constants/compute_managed_ssl_certificate.go.erb +custom_code: + constants: 'templates/terraform/constants/go/compute_managed_ssl_certificate.go.tmpl' examples: - name: 'managed_ssl_certificate_basic' primary_resource_id: 'default' @@ -130,7 +130,7 @@ properties: description: | Enum field whose value is always `MANAGED` - used to signal to the API which type this is. - default_value: MANAGED + default_value: "MANAGED" enum_values: - 'MANAGED' - name: 'subjectAlternativeNames' diff --git a/mmv1/products/compute/go_Network.yaml b/mmv1/products/compute/go_Network.yaml index 28cd90a53fe6..9a8704c9fa3d 100644 --- a/mmv1/products/compute/go_Network.yaml +++ b/mmv1/products/compute/go_Network.yaml @@ -175,7 +175,7 @@ properties: Set the order that Firewall Rules and Firewall Policies are evaluated. 
update_url: 'projects/{{project}}/global/networks/{{name}}' update_verb: 'PATCH' - default_value: AFTER_CLASSIC_FIREWALL + default_value: "AFTER_CLASSIC_FIREWALL" enum_values: - 'BEFORE_CLASSIC_FIREWALL' - 'AFTER_CLASSIC_FIREWALL' diff --git a/mmv1/products/compute/go_NetworkEndpointGroup.yaml b/mmv1/products/compute/go_NetworkEndpointGroup.yaml index 3ffa4af09e61..e4d0610c43c1 100644 --- a/mmv1/products/compute/go_NetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_NetworkEndpointGroup.yaml @@ -56,8 +56,8 @@ async: path: 'error/errors' message: 'message' collection_url_key: 'items' -custom_code: !ruby/object:Provider::Terraform::CustomCode - constants: templates/terraform/constants/compute_network_endpoint_group.go.erb +custom_code: + constants: 'templates/terraform/constants/go/compute_network_endpoint_group.go.tmpl' examples: - name: 'network_endpoint_group' primary_resource_id: 'neg' @@ -112,7 +112,7 @@ properties: CONNECTION balancing modes. Possible values include: GCE_VM_IP, GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_IP_PORT, INTERNET_FQDN_PORT, SERVERLESS, and PRIVATE_SERVICE_CONNECT. - default_value: GCE_VM_IP_PORT + default_value: "GCE_VM_IP_PORT" enum_values: - 'GCE_VM_IP' - 'GCE_VM_IP_PORT' diff --git a/mmv1/products/compute/go_NodeGroup.yaml b/mmv1/products/compute/go_NodeGroup.yaml index 3e46390a3e91..afef7468796c 100644 --- a/mmv1/products/compute/go_NodeGroup.yaml +++ b/mmv1/products/compute/go_NodeGroup.yaml @@ -122,7 +122,7 @@ properties: type: String description: | Specifies how to handle instances when a node in the group undergoes maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT. 
- default_value: DEFAULT + default_value: "DEFAULT" - name: 'maintenanceWindow' type: NestedObject description: | diff --git a/mmv1/products/compute/go_NodeTemplate.yaml b/mmv1/products/compute/go_NodeTemplate.yaml index d14bff5502ae..2b09bff673b5 100644 --- a/mmv1/products/compute/go_NodeTemplate.yaml +++ b/mmv1/products/compute/go_NodeTemplate.yaml @@ -149,7 +149,7 @@ properties: type: Enum description: | CPU overcommit. - default_value: NONE + default_value: "NONE" enum_values: - 'ENABLED' - 'NONE' diff --git a/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml b/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml index 644b09c48415..c9033fe12929 100644 --- a/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml +++ b/mmv1/products/compute/go_OrganizationSecurityPolicy.yaml @@ -88,6 +88,6 @@ properties: is "FIREWALL". min_version: 'beta' immutable: true - default_value: FIREWALL + default_value: "FIREWALL" enum_values: - 'FIREWALL' diff --git a/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml b/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml index 0b02fc55bfc2..54fa3324ecf7 100644 --- a/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml +++ b/mmv1/products/compute/go_OrganizationSecurityPolicyRule.yaml @@ -90,7 +90,7 @@ properties: Preconfigured versioned expression. For organization security policy rules, the only supported type is "FIREWALL". min_version: 'beta' - default_value: FIREWALL + default_value: "FIREWALL" enum_values: - 'FIREWALL' - name: 'config' diff --git a/mmv1/products/compute/go_PacketMirroring.yaml b/mmv1/products/compute/go_PacketMirroring.yaml index 04554d993989..065c5979be0c 100644 --- a/mmv1/products/compute/go_PacketMirroring.yaml +++ b/mmv1/products/compute/go_PacketMirroring.yaml @@ -138,7 +138,7 @@ properties: - name: 'direction' type: Enum description: Direction of traffic to mirror. 
- default_value: BOTH + default_value: "BOTH" enum_values: - 'INGRESS' - 'EGRESS' diff --git a/mmv1/products/compute/go_PerInstanceConfig.yaml b/mmv1/products/compute/go_PerInstanceConfig.yaml index 4db8981e8176..3ca02f701b76 100644 --- a/mmv1/products/compute/go_PerInstanceConfig.yaml +++ b/mmv1/products/compute/go_PerInstanceConfig.yaml @@ -166,7 +166,7 @@ properties: type: Enum description: | The mode of the disk. - default_value: READ_WRITE + default_value: "READ_WRITE" enum_values: - 'READ_ONLY' - 'READ_WRITE' @@ -178,7 +178,7 @@ properties: `NEVER` - detach the disk when the VM is deleted, but do not delete the disk. `ON_PERMANENT_INSTANCE_DELETION` will delete the stateful disk when the VM is permanently deleted from the instance group. - default_value: NEVER + default_value: "NEVER" enum_values: - 'NEVER' - 'ON_PERMANENT_INSTANCE_DELETION' @@ -195,7 +195,7 @@ properties: type: Enum description: | These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. - default_value: NEVER + default_value: "NEVER" enum_values: - 'NEVER' - 'ON_PERMANENT_INSTANCE_DELETION' @@ -224,7 +224,7 @@ properties: type: Enum description: | These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. 
- default_value: NEVER + default_value: "NEVER" enum_values: - 'NEVER' - 'ON_PERMANENT_INSTANCE_DELETION' diff --git a/mmv1/products/compute/go_RegionAutoscaler.yaml b/mmv1/products/compute/go_RegionAutoscaler.yaml index 0bc0b9cfcb30..3284e5faa1bd 100644 --- a/mmv1/products/compute/go_RegionAutoscaler.yaml +++ b/mmv1/products/compute/go_RegionAutoscaler.yaml @@ -138,7 +138,7 @@ properties: type: String description: | Defines operating mode for this policy. - default_value: ON + default_value: "ON" - name: 'scaleDownControl' type: NestedObject description: | @@ -247,7 +247,7 @@ properties: - OPTIMIZE_AVAILABILITY. Predictive autoscaling improves availability by monitoring daily and weekly load patterns and scaling out ahead of anticipated demand. custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' - default_value: NONE + default_value: "NONE" - name: 'metric' type: Array description: | @@ -380,7 +380,7 @@ properties: type: String description: | The time zone to be used when interpreting the schedule. The value of this field must be a time zone name from the tz database: http://en.wikipedia.org/wiki/Tz_database. - default_value: UTC + default_value: "UTC" - name: 'durationSec' type: Integer description: | diff --git a/mmv1/products/compute/go_RegionBackendService.yaml b/mmv1/products/compute/go_RegionBackendService.yaml index cab122136a1b..f01a23bc1d77 100644 --- a/mmv1/products/compute/go_RegionBackendService.yaml +++ b/mmv1/products/compute/go_RegionBackendService.yaml @@ -149,7 +149,7 @@ properties: for an explanation of load balancing modes. From version 6.0.0 default value will be UTILIZATION to match default GCP value. - default_value: CONNECTION + default_value: "CONNECTION" enum_values: - 'UTILIZATION' - 'RATE' @@ -777,7 +777,7 @@ properties: balancing cannot be used with the other(s). For more information, refer to [Choosing a load balancer](https://cloud.google.com/load-balancing/docs/backend-service). 
immutable: true - default_value: INTERNAL + default_value: "INTERNAL" enum_values: - 'EXTERNAL' - 'EXTERNAL_MANAGED' @@ -1188,7 +1188,7 @@ properties: `PER_SESSION`: The Connection Tracking is performed as per the configured Session Affinity. It matches the configured Session Affinity. - default_value: PER_CONNECTION + default_value: "PER_CONNECTION" enum_values: - 'PER_CONNECTION' - 'PER_SESSION' @@ -1211,7 +1211,7 @@ properties: If set to `ALWAYS_PERSIST`, existing connections always persist on unhealthy backends regardless of protocol and session affinity. It is generally not recommended to use this mode overriding the default. - default_value: DEFAULT_FOR_PROTOCOL + default_value: "DEFAULT_FOR_PROTOCOL" enum_values: - 'DEFAULT_FOR_PROTOCOL' - 'NEVER_PERSIST' diff --git a/mmv1/products/compute/go_RegionDisk.yaml b/mmv1/products/compute/go_RegionDisk.yaml index 084e0ff3a5ac..15c02435d62c 100644 --- a/mmv1/products/compute/go_RegionDisk.yaml +++ b/mmv1/products/compute/go_RegionDisk.yaml @@ -286,7 +286,7 @@ properties: create the disk. Provide this when creating the disk. custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' custom_expand: 'templates/terraform/custom_expand/go/resourceref_with_validation.go.tmpl' - default_value: pd-standard + default_value: "pd-standard" resource: 'RegionDiskType' imports: 'selfLink' - name: 'interface' @@ -295,8 +295,8 @@ properties: Specifies the disk interface to use for attaching this disk, which is either SCSI or NVME. The default is SCSI. min_version: 'beta' url_param_only: true - diff_suppress_func: AlwaysDiffSuppress - default_value: SCSI + diff_suppress_func: 'AlwaysDiffSuppress' + default_value: "SCSI" deprecation_message: '`interface` is deprecated and will be removed in a future major release. This field is no longer used and can be safely removed from your configurations; disk interfaces are automatically determined on attachment.' 
- name: 'sourceDisk' type: String diff --git a/mmv1/products/compute/go_RegionHealthCheck.yaml b/mmv1/products/compute/go_RegionHealthCheck.yaml index 3249ecc45d3d..061556ba80b1 100644 --- a/mmv1/products/compute/go_RegionHealthCheck.yaml +++ b/mmv1/products/compute/go_RegionHealthCheck.yaml @@ -220,7 +220,7 @@ properties: - 'http_health_check.0.port_name' - 'http_health_check.0.proxy_header' - 'http_health_check.0.port_specification' - default_value: / + default_value: "/" - name: 'response' type: String description: | @@ -274,7 +274,7 @@ properties: - 'http_health_check.0.port_name' - 'http_health_check.0.proxy_header' - 'http_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' @@ -345,7 +345,7 @@ properties: - 'https_health_check.0.port_name' - 'https_health_check.0.proxy_header' - 'https_health_check.0.port_specification' - default_value: / + default_value: "/" - name: 'response' type: String description: | @@ -399,7 +399,7 @@ properties: - 'https_health_check.0.port_name' - 'https_health_check.0.proxy_header' - 'https_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' @@ -506,7 +506,7 @@ properties: - 'tcp_health_check.0.port_name' - 'tcp_health_check.0.proxy_header' - 'tcp_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' @@ -612,7 +612,7 @@ properties: - 'ssl_health_check.0.port_name' - 'ssl_health_check.0.proxy_header' - 'ssl_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' @@ -682,7 +682,7 @@ properties: - 'http2_health_check.0.port_name' - 'http2_health_check.0.proxy_header' - 'http2_health_check.0.port_specification' - default_value: / + default_value: "/" - name: 'response' type: String description: | @@ -736,7 +736,7 @@ properties: - 'http2_health_check.0.port_name' - 
'http2_health_check.0.proxy_header' - 'http2_health_check.0.port_specification' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' diff --git a/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml index ba12edad1066..57d6aa26deda 100644 --- a/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml @@ -136,7 +136,7 @@ properties: type: Enum description: | Type of network endpoints in this network endpoint group. Defaults to SERVERLESS. - default_value: SERVERLESS + default_value: "SERVERLESS" enum_values: - 'SERVERLESS' - 'PRIVATE_SERVICE_CONNECT' diff --git a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml index da471721b90c..492502f2190f 100644 --- a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml +++ b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml @@ -167,7 +167,7 @@ properties: type: Enum description: | The mode of the disk. - default_value: READ_WRITE + default_value: "READ_WRITE" enum_values: - 'READ_ONLY' - 'READ_WRITE' @@ -179,7 +179,7 @@ properties: `NEVER` - detach the disk when the VM is deleted, but do not delete the disk. `ON_PERMANENT_INSTANCE_DELETION` will delete the stateful disk when the VM is permanently deleted from the instance group. - default_value: NEVER + default_value: "NEVER" enum_values: - 'NEVER' - 'ON_PERMANENT_INSTANCE_DELETION' @@ -196,7 +196,7 @@ properties: type: Enum description: | These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. 
- default_value: NEVER + default_value: "NEVER" enum_values: - 'NEVER' - 'ON_PERMANENT_INSTANCE_DELETION' @@ -225,7 +225,7 @@ properties: type: Enum description: | These stateful IPs will never be released during autohealing, update or VM instance recreate operations. This flag is used to configure if the IP reservation should be deleted after it is no longer used by the group, e.g. when the given instance or the whole group is deleted. - default_value: NEVER + default_value: "NEVER" enum_values: - 'NEVER' - 'ON_PERMANENT_INSTANCE_DELETION' diff --git a/mmv1/products/compute/go_RegionSslCertificate.yaml b/mmv1/products/compute/go_RegionSslCertificate.yaml index 029e4331c90f..0d16bbc8dd59 100644 --- a/mmv1/products/compute/go_RegionSslCertificate.yaml +++ b/mmv1/products/compute/go_RegionSslCertificate.yaml @@ -50,6 +50,7 @@ async: collection_url_key: 'items' custom_code: extra_schema_entry: 'templates/terraform/extra_schema_entry/go/ssl_certificate.tmpl' + constants: 'templates/terraform/constants/go/compute_certificate.go.tmpl' examples: - name: 'region_ssl_certificate_basic' primary_resource_id: 'default' diff --git a/mmv1/products/compute/go_RegionSslPolicy.yaml b/mmv1/products/compute/go_RegionSslPolicy.yaml index 4a4bab982358..c19fa6181e55 100644 --- a/mmv1/products/compute/go_RegionSslPolicy.yaml +++ b/mmv1/products/compute/go_RegionSslPolicy.yaml @@ -91,7 +91,7 @@ properties: See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) for information on what cipher suites each profile provides. If `CUSTOM` is used, the `custom_features` attribute **must be set**. - default_value: COMPATIBLE + default_value: "COMPATIBLE" enum_values: - 'COMPATIBLE' - 'MODERN' @@ -102,7 +102,7 @@ properties: description: | The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. 
- default_value: TLS_1_0 + default_value: "TLS_1_0" enum_values: - 'TLS_1_0' - 'TLS_1_1' diff --git a/mmv1/products/compute/go_RegionTargetTcpProxy.yaml b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml index 81901cf43476..f1c24ee41313 100644 --- a/mmv1/products/compute/go_RegionTargetTcpProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml @@ -95,7 +95,7 @@ properties: description: | Specifies the type of proxy header to append before sending data to the backend. - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' diff --git a/mmv1/products/compute/go_Reservation.yaml b/mmv1/products/compute/go_Reservation.yaml index 518522620377..1ae6cf037ade 100644 --- a/mmv1/products/compute/go_Reservation.yaml +++ b/mmv1/products/compute/go_Reservation.yaml @@ -232,7 +232,7 @@ properties: description: | The disk interface to use for attaching this disk. immutable: true - default_value: SCSI + default_value: "SCSI" enum_values: - 'SCSI' - 'NVME' diff --git a/mmv1/products/compute/go_ResourcePolicy.yaml b/mmv1/products/compute/go_ResourcePolicy.yaml index b44f07dfc4b1..cec8edde5c7e 100644 --- a/mmv1/products/compute/go_ResourcePolicy.yaml +++ b/mmv1/products/compute/go_ResourcePolicy.yaml @@ -215,7 +215,7 @@ properties: description: | Specifies the behavior to apply to scheduled snapshots when the source disk is deleted. 
- default_value: KEEP_AUTO_SNAPSHOTS + default_value: "KEEP_AUTO_SNAPSHOTS" enum_values: - 'KEEP_AUTO_SNAPSHOTS' - 'APPLY_RETENTION_POLICY' diff --git a/mmv1/products/compute/go_Route.yaml b/mmv1/products/compute/go_Route.yaml index c53dfd1dd17e..b1bfaa4647d2 100644 --- a/mmv1/products/compute/go_Route.yaml +++ b/mmv1/products/compute/go_Route.yaml @@ -72,8 +72,8 @@ async: message: 'message' collection_url_key: 'items' custom_code: - constants: templates/terraform/constants/compute_route.go.erb extra_schema_entry: 'templates/terraform/extra_schema_entry/go/route.tmpl' + constants: 'templates/terraform/constants/go/compute_route.go.tmpl' decoder: 'templates/terraform/decoders/go/route.tmpl' error_retry_predicates: diff --git a/mmv1/products/compute/go_Router.yaml b/mmv1/products/compute/go_Router.yaml index b41f54f27de3..63e95ff1f3d0 100644 --- a/mmv1/products/compute/go_Router.yaml +++ b/mmv1/products/compute/go_Router.yaml @@ -126,7 +126,7 @@ properties: type: Enum description: | User-specified flag to indicate which mode to use for advertisement. - default_value: DEFAULT + default_value: "DEFAULT" enum_values: - 'DEFAULT' - 'CUSTOM' diff --git a/mmv1/products/compute/go_RouterNat.yaml b/mmv1/products/compute/go_RouterNat.yaml index 8e6afb9e5415..820cf11b61f6 100644 --- a/mmv1/products/compute/go_RouterNat.yaml +++ b/mmv1/products/compute/go_RouterNat.yaml @@ -448,7 +448,7 @@ properties: If `PRIVATE` NAT used for private IP translation. 
min_version: 'beta' immutable: true - default_value: PUBLIC + default_value: "PUBLIC" enum_values: - 'PUBLIC' - 'PRIVATE' diff --git a/mmv1/products/compute/go_SslPolicy.yaml b/mmv1/products/compute/go_SslPolicy.yaml index 6d2ecd29f45d..9641c0832a70 100644 --- a/mmv1/products/compute/go_SslPolicy.yaml +++ b/mmv1/products/compute/go_SslPolicy.yaml @@ -89,7 +89,7 @@ properties: See the [official documentation](https://cloud.google.com/compute/docs/load-balancing/ssl-policies#profilefeaturesupport) for information on what cipher suites each profile provides. If `CUSTOM` is used, the `custom_features` attribute **must be set**. - default_value: COMPATIBLE + default_value: "COMPATIBLE" enum_values: - 'COMPATIBLE' - 'MODERN' @@ -100,7 +100,7 @@ properties: description: | The minimum version of SSL protocol that can be used by the clients to establish a connection with the load balancer. - default_value: TLS_1_0 + default_value: "TLS_1_0" enum_values: - 'TLS_1_0' - 'TLS_1_1' diff --git a/mmv1/products/compute/go_Subnetwork.yaml b/mmv1/products/compute/go_Subnetwork.yaml index 2778d4992a3d..b1e6108aed1b 100644 --- a/mmv1/products/compute/go_Subnetwork.yaml +++ b/mmv1/products/compute/go_Subnetwork.yaml @@ -292,7 +292,7 @@ properties: - 'log_config.0.flow_sampling' - 'log_config.0.metadata' - 'log_config.0.filterExpr' - default_value: INTERVAL_5_SEC + default_value: "INTERVAL_5_SEC" enum_values: - 'INTERVAL_5_SEC' - 'INTERVAL_30_SEC' @@ -325,7 +325,7 @@ properties: - 'log_config.0.flow_sampling' - 'log_config.0.metadata' - 'log_config.0.filterExpr' - default_value: INCLUDE_ALL_METADATA + default_value: "INCLUDE_ALL_METADATA" enum_values: - 'EXCLUDE_ALL_METADATA' - 'INCLUDE_ALL_METADATA' @@ -349,7 +349,7 @@ properties: - 'log_config.0.flow_sampling' - 'log_config.0.metadata' - 'log_config.0.filterExpr' - default_value: true + default_value: "true" - name: 'stackType' type: Enum description: | diff --git a/mmv1/products/compute/go_TargetHttpsProxy.yaml 
b/mmv1/products/compute/go_TargetHttpsProxy.yaml index 0b758b752c8c..296503b25d2b 100644 --- a/mmv1/products/compute/go_TargetHttpsProxy.yaml +++ b/mmv1/products/compute/go_TargetHttpsProxy.yaml @@ -120,7 +120,7 @@ properties: update_url: 'projects/{{project}}/global/targetHttpsProxies/{{name}}/setQuicOverride' update_verb: 'POST' custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'ENABLE' diff --git a/mmv1/products/compute/go_TargetInstance.yaml b/mmv1/products/compute/go_TargetInstance.yaml index 99d92de1ee4d..c61038f4b112 100644 --- a/mmv1/products/compute/go_TargetInstance.yaml +++ b/mmv1/products/compute/go_TargetInstance.yaml @@ -130,7 +130,7 @@ properties: NAT option controlling how IPs are NAT'ed to the instance. Currently only NO_NAT (default value) is supported. immutable: true - default_value: NO_NAT + default_value: "NO_NAT" enum_values: - 'NO_NAT' - name: 'securityPolicy' diff --git a/mmv1/products/compute/go_TargetSslProxy.yaml b/mmv1/products/compute/go_TargetSslProxy.yaml index 1f294f259f28..e5dba61992b5 100644 --- a/mmv1/products/compute/go_TargetSslProxy.yaml +++ b/mmv1/products/compute/go_TargetSslProxy.yaml @@ -88,7 +88,7 @@ properties: the backend. update_url: 'projects/{{project}}/global/targetSslProxies/{{name}}/setProxyHeader' update_verb: 'POST' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' diff --git a/mmv1/products/compute/go_TargetTcpProxy.yaml b/mmv1/products/compute/go_TargetTcpProxy.yaml index b75fff706297..bc8d89f4d45c 100644 --- a/mmv1/products/compute/go_TargetTcpProxy.yaml +++ b/mmv1/products/compute/go_TargetTcpProxy.yaml @@ -87,7 +87,7 @@ properties: the backend. 
update_url: 'projects/{{project}}/global/targetTcpProxies/{{name}}/setProxyHeader' update_verb: 'POST' - default_value: NONE + default_value: "NONE" enum_values: - 'NONE' - 'PROXY_V1' diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 8599c80247d1..84f1248fa77d 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -149,8 +149,10 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { }, {{- end}} {{- end}} -{{/* TODO Q2 function to compile custom code lines ($.CustomCode.extra_schema_entry) */}} -{{- if $.HasProject -}} +{{- if $.CustomCode.ExtraSchemaEntry }} + {{ $.CustomTemplate $.CustomCode.ExtraSchemaEntry false -}} +{{- end}} +{{ if $.HasProject -}} "project": { Type: schema.TypeString, Optional: true, @@ -609,7 +611,7 @@ func resource{{ $.ResourceName -}}Read(d *schema.ResourceData, meta interface{}) {{- range $prop := $.VirtualFields }} {{ if not (eq $prop.DefaultValue nil) -}} if _, ok := d.GetOkExists("{{ $prop.Name -}}"); !ok { - if err := d.Set("{{ $prop.Name -}}", {{ $prop.DefaultValue -}}); err != nil { + if err := d.Set("{{ $prop.Name -}}", {{ $prop.GoLiteral $prop.DefaultValue -}}); err != nil { return fmt.Errorf("Error setting {{ $prop.Name -}}: %s", err) } } @@ -829,7 +831,7 @@ if len(updateMask) > 0 { {{- end}}{{/*if not immutable*/}} {{- if $.FieldSpecificUpdateMethods }} d.Partial(true) -{{ $CustomUpdateProps := $.PropertiesByCustomUpdate }} +{{ $CustomUpdateProps := $.PropertiesByCustomUpdate $.RootProperties }} {{ range $group := $.PropertiesByCustomUpdateGroups }} if d.HasChange("{{ join ($.PropertyNamesToStrings (index $CustomUpdateProps $group)) "\") || d.HasChange(\""}}") { obj := make(map[string]interface{}) @@ -869,7 +871,7 @@ if d.HasChange("{{ join ($.PropertyNamesToStrings (index $CustomUpdateProps $gro obj["{{ $group.FingerprintName }}"] = getRes["{{ $group.FingerprintName }}"] {{ end }}{{/*if FingerprintName*/}} -{{ range 
$propsByKey := $.CustomUpdatePropertiesByKey $group.UpdateUrl $group.UpdateId $group.FingerprintName $group.UpdateVerb }} +{{ range $propsByKey := $.CustomUpdatePropertiesByKey $.AllUserProperties $group.UpdateUrl $group.UpdateId $group.FingerprintName $group.UpdateVerb }} {{ $propsByKey.ApiName -}}Prop, err := expand{{ if $.NestedQuery -}}Nested{{ end }}{{ $.ResourceName -}}{{ camelize $propsByKey.Name "upper" -}}({{ if $propsByKey.FlattenObject }}nil{{else}}d.Get("{{underscore $propsByKey.Name}}"){{ end }}, d, config) if err != nil { return err @@ -1150,7 +1152,7 @@ func resource{{ $.ResourceName }}Import(d *schema.ResourceData, meta interface{} // Explicitly set virtual fields to default values on import {{- range $vf := $.VirtualFields }} {{- if not (eq $vf.DefaultValue nil) }} - if err := d.Set("{{ $vf.Name }}", {{ $vf.DefaultValue }}); err != nil { + if err := d.Set("{{ $vf.Name }}", {{ $vf.GoLiteral $vf.DefaultValue }}); err != nil { return nil, fmt.Errorf("Error setting {{ $vf.Name }}: %s", err) } {{- end }} diff --git a/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl b/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl index f05a76bf00fe..98e788c85227 100644 --- a/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl +++ b/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl @@ -1,5 +1,5 @@ {{- define "UnorderedListCustomizeDiff" }} -keys := diff.GetChangedKeysPrefix({{ underscore $.Name }}) +keys := diff.GetChangedKeysPrefix("{{ underscore $.Name }}") if len(keys) == 0 { return nil } @@ -28,11 +28,11 @@ for i := 0; i < count; i++ { } } -oldSet := schema.NewSet(schema.HashResource(Resource{{ $.ResourceMetadata.ResourceName }}().Schema[{{ underscore $.Name }}].Elem.(*schema.Resource)), old) -newSet := schema.NewSet(schema.HashResource(Resource{{ $.ResourceMetadata.ResourceName }}().Schema[{{ underscore $.Name }}].Elem.(*schema.Resource)), new) +oldSet := schema.NewSet(schema.HashResource(Resource{{ 
$.ResourceMetadata.ResourceName }}().Schema["{{ underscore $.Name }}"].Elem.(*schema.Resource)), old) +newSet := schema.NewSet(schema.HashResource(Resource{{ $.ResourceMetadata.ResourceName }}().Schema["{{ underscore $.Name }}"].Elem.(*schema.Resource)), new) if oldSet.Equal(newSet) { - if err := diff.Clear({{ underscore $.Name }}); err != nil { + if err := diff.Clear("{{ underscore $.Name }}"); err != nil { return err } } diff --git a/mmv1/templates/terraform/yaml_conversion_field.erb b/mmv1/templates/terraform/yaml_conversion_field.erb index 3d9b5d39cf1a..ef751e24e860 100644 --- a/mmv1/templates/terraform/yaml_conversion_field.erb +++ b/mmv1/templates/terraform/yaml_conversion_field.erb @@ -151,7 +151,7 @@ <% end -%> <% end -%> <% unless property.default_value.nil? -%> - default_value: <%= property.default_value %> + default_value: <%= go_literal(property.default_value) %> <% end -%> <% unless property.deprecation_message.nil? -%> deprecation_message: '<%= property.deprecation_message %>' From c3ce5c0a8d1505a81050326d6ef117a4dfd77d17 Mon Sep 17 00:00:00 2001 From: hyperbola Date: Fri, 28 Jun 2024 00:55:26 +0800 Subject: [PATCH 230/356] Fix typo in `google_pubsub_subscription` terraform manual page (#11068) --- mmv1/products/pubsub/Subscription.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/products/pubsub/Subscription.yaml b/mmv1/products/pubsub/Subscription.yaml index bba44db6cfd5..8ce56cc77c90 100644 --- a/mmv1/products/pubsub/Subscription.yaml +++ b/mmv1/products/pubsub/Subscription.yaml @@ -140,7 +140,7 @@ properties: - !ruby/object:Api::Type::String name: 'table' description: | - The name of the table to which to write data, of the form {projectId}:{datasetId}.{tableId} + The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} required: true - !ruby/object:Api::Type::Boolean name: 'useTopicSchema' From 68982811939bc750fb567c339c80b52b67ae569f Mon Sep 17 00:00:00 2001 From: Obada Alabbadi 
<76101898+obada-ab@users.noreply.github.com> Date: Thu, 27 Jun 2024 19:58:56 +0300 Subject: [PATCH 231/356] Migrate BigQuery Reservation Assignment from DCL to MMv1 (#10894) --- .../bigqueryreservation/Assignment.yaml | 90 +++++++++++++++++++ ...gquery_reservation_assignment_basic.tf.erb | 13 +++ ...igquery_reservation_assignment_full.tf.erb | 14 +++ .../bigquery_reservation_assignment.go.erb | 20 +++++ .../bigqueryreservation/assignment.yaml | 4 - .../bigqueryreservation/beta/assignment.yaml | 4 - .../beta/tpgtools_product.yaml | 9 -- .../samples/assignment/basic.tf.tmpl | 13 --- .../samples/assignment/basic.yaml | 7 -- .../samples/assignment/meta.yaml | 3 - .../bigqueryreservation/tpgtools_product.yaml | 9 -- 11 files changed, 137 insertions(+), 49 deletions(-) create mode 100644 mmv1/products/bigqueryreservation/Assignment.yaml create mode 100644 mmv1/templates/terraform/examples/bigquery_reservation_assignment_basic.tf.erb create mode 100644 mmv1/templates/terraform/examples/bigquery_reservation_assignment_full.tf.erb create mode 100644 mmv1/templates/terraform/pre_create/bigquery_reservation_assignment.go.erb delete mode 100644 tpgtools/overrides/bigqueryreservation/assignment.yaml delete mode 100644 tpgtools/overrides/bigqueryreservation/beta/assignment.yaml delete mode 100644 tpgtools/overrides/bigqueryreservation/beta/tpgtools_product.yaml delete mode 100644 tpgtools/overrides/bigqueryreservation/samples/assignment/basic.tf.tmpl delete mode 100644 tpgtools/overrides/bigqueryreservation/samples/assignment/basic.yaml delete mode 100644 tpgtools/overrides/bigqueryreservation/samples/assignment/meta.yaml delete mode 100644 tpgtools/overrides/bigqueryreservation/tpgtools_product.yaml diff --git a/mmv1/products/bigqueryreservation/Assignment.yaml b/mmv1/products/bigqueryreservation/Assignment.yaml new file mode 100644 index 000000000000..b4d077bdc1ff --- /dev/null +++ b/mmv1/products/bigqueryreservation/Assignment.yaml @@ -0,0 +1,90 @@ +# Copyright 2024 Google 
Inc. +# Licensed under the Apache License, Version 2.0 (the License); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: ReservationAssignment +base_url: 'projects/{{project}}/locations/{{location}}/reservations/{{reservation}}/assignments' +create_url: 'projects/{{project}}/locations/{{location}}/reservations/{{reservation}}/assignments' +self_link: 'projects/{{project}}/locations/{{location}}/reservations/{{reservation}}/assignments' +delete_url: 'projects/{{project}}/locations/{{location}}/reservations/{{reservation}}/assignments/{{name}}' +id_format: 'projects/{{project}}/locations/{{location}}/reservations/{{reservation}}/assignments/{{name}}' +import_format: + [ + 'projects/{{project}}/locations/{{location}}/reservations/{{reservation}}/assignments/{{name}}', + ] +nested_query: !ruby/object:Api::Resource::NestedQuery + keys: + - assignments +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Work with reservation assignments': 'https://cloud.google.com/bigquery/docs/reservations-assignments' + api: 'https://cloud.google.com/bigquery/docs/reference/reservations/rest/v1/projects.locations.reservations.assignments' +legacy_long_form_project: true +description: | + The BigqueryReservation Assignment resource. 
+immutable: true +custom_code: !ruby/object:Provider::Terraform::CustomCode + pre_create: templates/terraform/pre_create/bigquery_reservation_assignment.go.erb +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'bigquery_reservation_assignment_basic' + primary_resource_id: 'assignment' + vars: + reservation_name: "example-reservation" + test_env_vars: + project: :PROJECT_NAME + - !ruby/object:Provider::Terraform::Examples + name: 'bigquery_reservation_assignment_full' + primary_resource_id: 'assignment' + skip_docs: true + vars: + reservation_name: "example-reservation" + test_env_vars: + project: :PROJECT_NAME +parameters: + - !ruby/object:Api::Type::String + name: location + description: The location for the resource + url_param_only: true + immutable: true + default_from_api: true + - !ruby/object:Api::Type::ResourceRef + name: reservation + resource: reservation + imports: name + description: The reservation for the resource + url_param_only: true + required: true + immutable: true +properties: + - !ruby/object:Api::Type::String + name: name + description: Output only. The resource name of the assignment. + output: true + custom_flatten: 'templates/terraform/custom_flatten/name_from_self_link.erb' + - !ruby/object:Api::Type::String + name: assignee + description: The resource which will use the reservation. E.g. projects/myproject, folders/123, organizations/456. + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + - !ruby/object:Api::Type::String + name: jobType + description: | + Types of job, which could be specified when using the reservation. Possible values: JOB_TYPE_UNSPECIFIED, PIPELINE, QUERY + required: true + - !ruby/object:Api::Type::String + name: state + description: | + Assignment will remain in PENDING state if no active capacity commitment is present. It will become ACTIVE when some capacity commitment becomes active. 
+ Possible values: STATE_UNSPECIFIED, PENDING, ACTIVE + output: true diff --git a/mmv1/templates/terraform/examples/bigquery_reservation_assignment_basic.tf.erb b/mmv1/templates/terraform/examples/bigquery_reservation_assignment_basic.tf.erb new file mode 100644 index 000000000000..0aac775c6040 --- /dev/null +++ b/mmv1/templates/terraform/examples/bigquery_reservation_assignment_basic.tf.erb @@ -0,0 +1,13 @@ +resource "google_bigquery_reservation" "basic" { + name = "<%= ctx[:vars]['reservation_name'] %>" + project = "<%= ctx[:test_env_vars]['project'] %>" + location = "us-central1" + slot_capacity = 0 + ignore_idle_slots = false +} + +resource "google_bigquery_reservation_assignment" "<%= ctx[:primary_resource_id] %>" { + assignee = "projects/<%= ctx[:test_env_vars]['project'] %>" + job_type = "PIPELINE" + reservation = google_bigquery_reservation.basic.id +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/bigquery_reservation_assignment_full.tf.erb b/mmv1/templates/terraform/examples/bigquery_reservation_assignment_full.tf.erb new file mode 100644 index 000000000000..7944dc71ea68 --- /dev/null +++ b/mmv1/templates/terraform/examples/bigquery_reservation_assignment_full.tf.erb @@ -0,0 +1,14 @@ +resource "google_bigquery_reservation" "basic" { + name = "<%= ctx[:vars]['reservation_name'] %>" + project = "<%= ctx[:test_env_vars]['project'] %>" + location = "us-central1" + slot_capacity = 0 + ignore_idle_slots = false +} + +resource "google_bigquery_reservation_assignment" "<%= ctx[:primary_resource_id] %>" { + assignee = "projects/<%= ctx[:test_env_vars]['project'] %>" + job_type = "QUERY" + location = "us-central1" + reservation = google_bigquery_reservation.basic.id +} \ No newline at end of file diff --git a/mmv1/templates/terraform/pre_create/bigquery_reservation_assignment.go.erb b/mmv1/templates/terraform/pre_create/bigquery_reservation_assignment.go.erb new file mode 100644 index 000000000000..c4a0cdfda0f2 --- /dev/null +++ 
b/mmv1/templates/terraform/pre_create/bigquery_reservation_assignment.go.erb @@ -0,0 +1,20 @@ + if _, ok := d.GetOkExists("location"); !ok { + // Extract location from parent reservation. + reservation := d.Get("reservation").(string) + + tableRef := regexp.MustCompile("projects/(.+)/locations/(.+)/reservations/(.+)") + if parts := tableRef.FindStringSubmatch(reservation); parts != nil { + err := d.Set("location", parts[2]) + if err != nil { + return err + } + } + + if strings.Contains(url, "locations//") { + // re-compute url now that location must be set + url = strings.ReplaceAll(url, "/locations//", "/locations/"+d.Get("location").(string)+"/") + if err != nil { + return err + } + } + } diff --git a/tpgtools/overrides/bigqueryreservation/assignment.yaml b/tpgtools/overrides/bigqueryreservation/assignment.yaml deleted file mode 100644 index af851d79a0c9..000000000000 --- a/tpgtools/overrides/bigqueryreservation/assignment.yaml +++ /dev/null @@ -1,4 +0,0 @@ -- type: CUSTOMIZE_DIFF - details: - functions: - - tpgresource.DefaultProviderProject diff --git a/tpgtools/overrides/bigqueryreservation/beta/assignment.yaml b/tpgtools/overrides/bigqueryreservation/beta/assignment.yaml deleted file mode 100644 index af851d79a0c9..000000000000 --- a/tpgtools/overrides/bigqueryreservation/beta/assignment.yaml +++ /dev/null @@ -1,4 +0,0 @@ -- type: CUSTOMIZE_DIFF - details: - functions: - - tpgresource.DefaultProviderProject diff --git a/tpgtools/overrides/bigqueryreservation/beta/tpgtools_product.yaml b/tpgtools/overrides/bigqueryreservation/beta/tpgtools_product.yaml deleted file mode 100644 index 94e8bdc9919a..000000000000 --- a/tpgtools/overrides/bigqueryreservation/beta/tpgtools_product.yaml +++ /dev/null @@ -1,9 +0,0 @@ -## product level overrides - -## Skip base path generation... 
already generated by magic modules -- type: PRODUCT_BASE_PATH - details: - skip: true -- type: PRODUCT_DOCS_SECTION - details: - docssection: BigQuery Reservation \ No newline at end of file diff --git a/tpgtools/overrides/bigqueryreservation/samples/assignment/basic.tf.tmpl b/tpgtools/overrides/bigqueryreservation/samples/assignment/basic.tf.tmpl deleted file mode 100644 index 3a59b5ccb669..000000000000 --- a/tpgtools/overrides/bigqueryreservation/samples/assignment/basic.tf.tmpl +++ /dev/null @@ -1,13 +0,0 @@ -resource "google_bigquery_reservation" "basic" { - name = "tf-test-my-reservation%{random_suffix}" - project = "{{project}}" - location = "us-central1" - slot_capacity = 0 - ignore_idle_slots = false -} - -resource "google_bigquery_reservation_assignment" "primary" { - assignee = "projects/{{project}}" - job_type = "PIPELINE" - reservation = google_bigquery_reservation.basic.id -} \ No newline at end of file diff --git a/tpgtools/overrides/bigqueryreservation/samples/assignment/basic.yaml b/tpgtools/overrides/bigqueryreservation/samples/assignment/basic.yaml deleted file mode 100644 index 0281b96065c4..000000000000 --- a/tpgtools/overrides/bigqueryreservation/samples/assignment/basic.yaml +++ /dev/null @@ -1,7 +0,0 @@ -updates: -variables: - - name: "project" - type: "project" - - name: "reservation" - type: "resource_name" - diff --git a/tpgtools/overrides/bigqueryreservation/samples/assignment/meta.yaml b/tpgtools/overrides/bigqueryreservation/samples/assignment/meta.yaml deleted file mode 100644 index 33eeb9b6691a..000000000000 --- a/tpgtools/overrides/bigqueryreservation/samples/assignment/meta.yaml +++ /dev/null @@ -1,3 +0,0 @@ - -ignore_read: - - "reservation" \ No newline at end of file diff --git a/tpgtools/overrides/bigqueryreservation/tpgtools_product.yaml b/tpgtools/overrides/bigqueryreservation/tpgtools_product.yaml deleted file mode 100644 index d909522fa961..000000000000 --- a/tpgtools/overrides/bigqueryreservation/tpgtools_product.yaml +++ 
/dev/null @@ -1,9 +0,0 @@ -## product level overrides - -## Skip base path generation... already generated by magic modules -- type: PRODUCT_BASE_PATH - details: - skip: true -- type: PRODUCT_DOCS_SECTION - details: - docssection: BigQuery Reservation \ No newline at end of file From 536bda8fdfcac33c0481785bfe41a33a3732c414 Mon Sep 17 00:00:00 2001 From: Jon Buckley Date: Thu, 27 Jun 2024 13:04:08 -0400 Subject: [PATCH 232/356] Logging bucket index_configs documentation fixes (#11048) --- .../r/logging_billing_account_bucket_config.html.markdown | 8 ++++---- .../docs/r/logging_folder_bucket_config.html.markdown | 8 ++++---- .../r/logging_organization_bucket_config.html.markdown | 8 ++++---- .../docs/r/logging_project_bucket_config.html.markdown | 8 ++++---- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/mmv1/third_party/terraform/website/docs/r/logging_billing_account_bucket_config.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_billing_account_bucket_config.html.markdown index 7f3e127e6298..2f481874d40f 100644 --- a/mmv1/third_party/terraform/website/docs/r/logging_billing_account_bucket_config.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/logging_billing_account_bucket_config.html.markdown @@ -36,9 +36,9 @@ resource "google_logging_billing_account_bucket_config" "example-billing-account retention_days = 30 bucket_id = "_Default" - index_configs = { - file_path = "jsonPayload.request.status" - type = "INDEX_TYPE_STRING" + index_configs { + field_path = "jsonPayload.request.status" + type = "INDEX_TYPE_STRING" } } ``` @@ -62,7 +62,7 @@ The following arguments are supported: The `index_configs` block supports: * `field_path` - The LogEntry field path to index. - Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation]( https://cloud.google.com/logging/docs/view/advanced-queries#indexed-fields) for details. 
+ Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation](https://cloud.google.com/logging/docs/analyze/custom-index) for details. * `type` - The type of data in this index. Allowed types include `INDEX_TYPE_UNSPECIFIED`, `INDEX_TYPE_STRING` and `INDEX_TYPE_INTEGER`. diff --git a/mmv1/third_party/terraform/website/docs/r/logging_folder_bucket_config.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_folder_bucket_config.html.markdown index e6dd1094a285..e73a8ebd95cf 100644 --- a/mmv1/third_party/terraform/website/docs/r/logging_folder_bucket_config.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/logging_folder_bucket_config.html.markdown @@ -26,9 +26,9 @@ resource "google_logging_folder_bucket_config" "basic" { retention_days = 30 bucket_id = "_Default" - index_configs = { - file_path = "jsonPayload.request.status" - type = "INDEX_TYPE_STRING" + index_configs { + field_path = "jsonPayload.request.status" + type = "INDEX_TYPE_STRING" } } ``` @@ -52,7 +52,7 @@ The following arguments are supported: The `index_configs` block supports: * `field_path` - The LogEntry field path to index. - Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation]( https://cloud.google.com/logging/docs/view/advanced-queries#indexed-fields) for details. + Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation](https://cloud.google.com/logging/docs/analyze/custom-index) for details. * `type` - The type of data in this index. Allowed types include `INDEX_TYPE_UNSPECIFIED`, `INDEX_TYPE_STRING` and `INDEX_TYPE_INTEGER`. 
diff --git a/mmv1/third_party/terraform/website/docs/r/logging_organization_bucket_config.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_organization_bucket_config.html.markdown index 6dcefaa42ea9..9255da93ea42 100644 --- a/mmv1/third_party/terraform/website/docs/r/logging_organization_bucket_config.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/logging_organization_bucket_config.html.markdown @@ -25,9 +25,9 @@ resource "google_logging_organization_bucket_config" "basic" { retention_days = 30 bucket_id = "_Default" - index_configs = { - file_path = "jsonPayload.request.status" - type = "INDEX_TYPE_STRING" + index_configs { + field_path = "jsonPayload.request.status" + type = "INDEX_TYPE_STRING" } } ``` @@ -51,7 +51,7 @@ The following arguments are supported: The `index_configs` block supports: * `field_path` - The LogEntry field path to index. - Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation]( https://cloud.google.com/logging/docs/view/advanced-queries#indexed-fields) for details. + Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation](https://cloud.google.com/logging/docs/analyze/custom-index) for details. * `type` - The type of data in this index. Allowed types include `INDEX_TYPE_UNSPECIFIED`, `INDEX_TYPE_STRING` and `INDEX_TYPE_INTEGER`. 
diff --git a/mmv1/third_party/terraform/website/docs/r/logging_project_bucket_config.html.markdown b/mmv1/third_party/terraform/website/docs/r/logging_project_bucket_config.html.markdown index a829b5c0628f..2e57e1993f01 100644 --- a/mmv1/third_party/terraform/website/docs/r/logging_project_bucket_config.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/logging_project_bucket_config.html.markdown @@ -102,9 +102,9 @@ resource "google_logging_project_bucket_config" "example-project-bucket-index-co retention_days = 30 bucket_id = "custom-bucket" - index_configs = { - file_path = "jsonPayload.request.status" - type = "INDEX_TYPE_STRING" + index_configs { + field_path = "jsonPayload.request.status" + type = "INDEX_TYPE_STRING" } } ``` @@ -156,7 +156,7 @@ See [Enabling CMEK for Logging Buckets](https://cloud.google.com/logging/docs/ro The `index_configs` block supports: * `field_path` - The LogEntry field path to index. -Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation]( https://cloud.google.com/logging/docs/view/advanced-queries#indexed-fields) for details. +Note that some paths are automatically indexed, and other paths are not eligible for indexing. See [indexing documentation](https://cloud.google.com/logging/docs/analyze/custom-index) for details. * `type` - The type of data in this index. Allowed types include `INDEX_TYPE_UNSPECIFIED`, `INDEX_TYPE_STRING` and `INDEX_TYPE_INTEGER`. 
From fe588b3874f6419190200d283d39f7387b46eb6b Mon Sep 17 00:00:00 2001 From: haiyanmeng Date: Thu, 27 Jun 2024 13:06:21 -0400 Subject: [PATCH 233/356] Add support for Config Sync enabled field in ConfigManagement Fleet-level default config (#11030) --- mmv1/products/gkehub2/Feature.yaml | 3 +++ .../services/gkehub2/resource_gke_hub_feature_test.go.erb | 5 +++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/mmv1/products/gkehub2/Feature.yaml b/mmv1/products/gkehub2/Feature.yaml index 0dd00735c910..86a76db27cfb 100644 --- a/mmv1/products/gkehub2/Feature.yaml +++ b/mmv1/products/gkehub2/Feature.yaml @@ -286,6 +286,9 @@ properties: - !ruby/object:Api::Type::String name: sourceFormat description: 'Specifies whether the Config Sync Repo is in hierarchical or unstructured mode' + - !ruby/object:Api::Type::Boolean + name: enabled + description: 'Enables the installation of ConfigSync. If set to true, ConfigSync resources will be created and the other ConfigSync fields will be applied if exist. If set to false, all other ConfigSync fields will be ignored, ConfigSync resources will be deleted. If omitted, ConfigSync resources will be managed depends on the presence of the git or oci field.' - !ruby/object:Api::Type::Boolean name: preventDrift description: 'Set to true to enable the Config Sync admission webhook to prevent drifts. If set to `false`, disables the Config Sync admission webhook and does not prevent drifts.' 
diff --git a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb index 990e5b3f93af..e75d70ca3813 100644 --- a/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb +++ b/mmv1/third_party/terraform/services/gkehub2/resource_gke_hub_feature_test.go.erb @@ -531,8 +531,9 @@ resource "google_gke_hub_feature" "feature" { fleet_default_member_config { configmanagement { version = "1.16.1" - config_sync { - prevent_drift = true + config_sync { + enabled = true + prevent_drift = true source_format = "unstructured" oci { sync_repo = "us-central1-docker.pkg.dev/corp-gke-build-artifacts/acm/configs:latest" From 4238773ea79ed8c1e8f3f8ee9831f5a1399a0ead Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Thu, 27 Jun 2024 20:08:58 +0100 Subject: [PATCH 234/356] Update Repository.yaml to fix rake error (#11070) --- .../securesourcemanager/Repository.yaml | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/mmv1/products/securesourcemanager/Repository.yaml b/mmv1/products/securesourcemanager/Repository.yaml index 2f0382ca2ac5..5756b37c8d73 100644 --- a/mmv1/products/securesourcemanager/Repository.yaml +++ b/mmv1/products/securesourcemanager/Repository.yaml @@ -1,15 +1,15 @@ -# # Copyright 2024 Google Inc. -# # Licensed under the Apache License, Version 2.0 (the "License"); -# # you may not use this file except in compliance with the License. -# # You may obtain a copy of the License at -# # -# # http://www.apache.org/licenses/LICENSE-2.0 -# # -# # Unless required by applicable law or agreed to in writing, software -# # distributed under the License is distributed on an "AS IS" BASIS, -# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# # See the License for the specific language governing permissions and -# # limitations under the License. +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. --- !ruby/object:Api::Resource name: 'Repository' From 2f97bccc871a7034ec65c58c418956f4b60cb083 Mon Sep 17 00:00:00 2001 From: Iris Chen <10179943+iyabchen@users.noreply.github.com> Date: Thu, 27 Jun 2024 13:58:46 -0700 Subject: [PATCH 235/356] remove comment like #[START #[END in examples .tf.tmpl files (#11051) --- .../go/compute_packet_mirroring_full.tf.tmpl | 2 -- .../examples/go/compute_reservation.tf.tmpl | 2 -- .../go/dns_managed_zone_basic.tf.tmpl | 2 -- .../go/dns_managed_zone_private.tf.tmpl | 2 -- ...ns_managed_zone_private_forwarding.tf.tmpl | 2 -- .../go/dns_managed_zone_private_gke.tf.tmpl | 2 -- .../dns_managed_zone_private_peering.tf.tmpl | 2 -- .../go/dns_managed_zone_quickstart.tf.tmpl | 2 -- ...dns_managed_zone_service_directory.tf.tmpl | 2 -- .../examples/go/dns_policy_basic.tf.tmpl | 2 -- .../examples/go/dns_record_set_basic.tf.tmpl | 2 -- .../go/dns_response_policy_basic.tf.tmpl | 2 -- .../go/dns_response_policy_rule_basic.tf.tmpl | 2 -- ...xternal_cdn_lb_with_backend_bucket.tf.tmpl | 20 ------------- .../go/external_http_lb_mig_backend.tf.tmpl | 18 ------------ ..._http_lb_mig_backend_custom_header.tf.tmpl | 2 -- .../external_ssl_proxy_lb_mig_backend.tf.tmpl | 2 -- .../external_tcp_proxy_lb_mig_backend.tf.tmpl | 2 -- .../examples/go/external_vpn_gateway.tf.tmpl | 2 
-- .../go/firewall_with_target_tags.tf.tmpl | 2 -- .../go/flask_google_cloud_quickstart.tf.tmpl | 8 ------ ..._internal_http_lb_with_mig_backend.tf.tmpl | 2 -- .../go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl | 2 -- .../go/instance_custom_hostname.tf.tmpl | 6 ++-- .../go/instance_settings_basic.tf.tmpl | 2 -- .../instance_virtual_display_enabled.tf.tmpl | 6 ++-- .../go/int_https_lb_https_redirect.tf.tmpl | 4 +-- .../internal_http_lb_with_mig_backend.tf.tmpl | 2 -- ...ternal_tcp_udp_lb_with_mig_backend.tf.tmpl | 4 +-- ..._custom_firewall_enforcement_order.tf.tmpl | 2 -- .../examples/go/network_custom_mtu.tf.tmpl | 2 -- ...gement_connectivity_test_addresses.tf.tmpl | 2 -- ...gement_connectivity_test_instances.tf.tmpl | 2 -- ..._services_lb_route_extension_basic.tf.tmpl | 4 --- ...ervices_lb_traffic_extension_basic.tf.tmpl | 3 -- ...rivate_service_connect_google_apis.tf.tmpl | 6 ---- .../go/privateca_capool_all_fields.tf.tmpl | 2 -- .../go/privateca_capool_basic.tf.tmpl | 2 -- ...vateca_certificate_authority_basic.tf.tmpl | 2 -- ...teca_certificate_authority_byo_key.tf.tmpl | 2 -- ...a_certificate_authority_custom_ski.tf.tmpl | 2 -- ..._certificate_authority_subordinate.tf.tmpl | 2 -- .../go/privateca_certificate_config.tf.tmpl | 4 +-- .../go/privateca_certificate_csr.tf.tmpl | 2 -- .../privateca_certificate_custom_ski.tf.tmpl | 2 -- ...privateca_certificate_no_authority.tf.tmpl | 2 -- ...rivateca_certificate_with_template.tf.tmpl | 2 -- .../examples/go/privateca_quickstart.tf.tmpl | 2 -- .../go/privateca_template_basic.tf.tmpl | 2 -- .../go/region_autoscaler_basic.tf.tmpl | 2 -- .../go/region_target_tcp_proxy_basic.tf.tmpl | 4 +-- ...region_url_map_path_template_match.tf.tmpl | 2 -- ...gional_external_http_load_balancer.tf.tmpl | 28 ------------------- .../examples/go/spot_instance_basic.tf.tmpl | 4 +-- .../examples/go/sql_database_basic.tf.tmpl | 2 -- .../go/sql_database_deletion_policy.tf.tmpl | 2 -- .../go/sql_database_instance_my_sql.tf.tmpl | 4 --- 
.../go/sql_database_instance_postgres.tf.tmpl | 4 --- .../sql_database_instance_sqlserver.tf.tmpl | 4 --- .../examples/go/sql_instance_cmek.tf.tmpl | 14 ---------- .../examples/go/sql_instance_ha.tf.tmpl | 6 ---- .../go/sql_instance_iam_condition.tf.tmpl | 2 -- .../examples/go/sql_instance_labels.tf.tmpl | 6 ---- .../examples/go/sql_instance_pitr.tf.tmpl | 4 --- .../examples/go/sql_instance_ssl_cert.tf.tmpl | 10 ------- ..._mysql_instance_authorized_network.tf.tmpl | 2 -- .../go/sql_mysql_instance_backup.tf.tmpl | 2 -- ...sql_mysql_instance_backup_location.tf.tmpl | 2 -- ...ql_mysql_instance_backup_retention.tf.tmpl | 2 -- .../go/sql_mysql_instance_clone.tf.tmpl | 4 --- .../go/sql_mysql_instance_flags.tf.tmpl | 2 -- .../go/sql_mysql_instance_public_ip.tf.tmpl | 2 -- .../go/sql_mysql_instance_pvp.tf.tmpl | 2 -- .../go/sql_mysql_instance_replica.tf.tmpl | 4 --- ...stgres_instance_authorized_network.tf.tmpl | 2 -- .../go/sql_postgres_instance_backup.tf.tmpl | 2 -- ..._postgres_instance_backup_location.tf.tmpl | 2 -- ...postgres_instance_backup_retention.tf.tmpl | 2 -- .../go/sql_postgres_instance_clone.tf.tmpl | 4 --- .../go/sql_postgres_instance_flags.tf.tmpl | 2 -- .../sql_postgres_instance_public_ip.tf.tmpl | 2 -- .../go/sql_postgres_instance_pvp.tf.tmpl | 2 -- .../go/sql_postgres_instance_replica.tf.tmpl | 4 --- ...server_instance_authorized_network.tf.tmpl | 2 -- .../go/sql_sqlserver_instance_backup.tf.tmpl | 2 -- ...sqlserver_instance_backup_location.tf.tmpl | 2 -- ...qlserver_instance_backup_retention.tf.tmpl | 2 -- .../go/sql_sqlserver_instance_clone.tf.tmpl | 4 --- .../go/sql_sqlserver_instance_flags.tf.tmpl | 2 -- .../sql_sqlserver_instance_public_ip.tf.tmpl | 2 -- .../go/sql_sqlserver_instance_replica.tf.tmpl | 4 --- .../go/sql_sqlserver_vm_instance.tf.tmpl | 4 --- .../examples/go/storage_hmac_key.tf.tmpl | 4 +-- .../go/storage_make_data_public.tf.tmpl | 2 -- .../examples/go/storage_new_bucket.tf.tmpl | 8 ------ .../storage_object_lifecycle_setting.tf.tmpl 
| 2 -- .../go/storage_pubsub_notifications.tf.tmpl | 2 -- .../go/storage_static_website.tf.tmpl | 6 ---- .../go/target_grpc_proxy_basic.tf.tmpl | 2 -- .../go/target_http_proxy_basic.tf.tmpl | 2 -- ...http_proxy_http_keep_alive_timeout.tf.tmpl | 2 -- .../target_http_proxy_https_redirect.tf.tmpl | 2 -- .../go/target_https_proxy_basic.tf.tmpl | 2 -- ...ttps_proxy_http_keep_alive_timeout.tf.tmpl | 2 -- .../go/target_https_proxy_mtls.tf.tmpl | 2 -- .../go/target_ssl_proxy_basic.tf.tmpl | 2 -- .../go/target_tcp_proxy_basic.tf.tmpl | 2 -- .../go/url_map_bucket_and_service.tf.tmpl | 2 -- .../go/url_map_header_based_routing.tf.tmpl | 2 -- .../url_map_parameter_based_routing.tf.tmpl | 2 -- .../go/url_map_path_template_match.tf.tmpl | 2 -- .../go/url_map_traffic_director_path.tf.tmpl | 2 -- ..._map_traffic_director_path_partial.tf.tmpl | 2 -- .../go/url_map_traffic_director_route.tf.tmpl | 2 -- ...map_traffic_director_route_partial.tf.tmpl | 2 -- 115 files changed, 10 insertions(+), 373 deletions(-) diff --git a/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl index 5cabd103b4cd..4a82a492c4cd 100644 --- a/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl @@ -54,7 +54,6 @@ resource "google_compute_forwarding_rule" "default" { network_tier = "PREMIUM" } -# [START compute_vm_packet_mirror] resource "google_compute_packet_mirroring" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mirroring_name"}}" description = "bar" @@ -76,4 +75,3 @@ resource "google_compute_packet_mirroring" "{{$.PrimaryResourceId}}" { direction = "BOTH" } } -# [END compute_vm_packet_mirror] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl index 802269430c8c..c98518869326 100644 
--- a/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl @@ -1,4 +1,3 @@ -# [START compute_reservation_create_local_reservation] resource "google_compute_reservation" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "gce_reservation_local"}}" @@ -17,4 +16,3 @@ resource "google_compute_reservation" "{{$.PrimaryResourceId}}" { } } -# [END compute_reservation_create_local_reservation] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl index 7f7a458ed13a..59fd73af0a75 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_basic] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "example-zone" dns_name = "example-${random_id.rnd.hex}.com." @@ -11,4 +10,3 @@ resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { resource "random_id" "rnd" { byte_length = 4 } -# [END dns_managed_zone_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl index 5c3a1784d3aa..2f48709d0e95 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "private.example.com." 
@@ -28,4 +27,3 @@ resource "google_compute_network" "network-2" { name = "{{index $.Vars "network_2_name"}}" auto_create_subnetworks = false } -# [END dns_managed_zone_private] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl index a8667c1d827d..0194bfb8f74f 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private_forwarding] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "private.example.com." @@ -37,4 +36,3 @@ resource "google_compute_network" "network-2" { name = "{{index $.Vars "network_2_name"}}" auto_create_subnetworks = false } -# [END dns_managed_zone_private_forwarding] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl index bb062e78c832..6a4a3cc8d941 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private_gke] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "private.example.com." 
@@ -67,4 +66,3 @@ resource "google_container_cluster" "cluster-1" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END dns_managed_zone_private_gke] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl index f8515d19c5b8..891a4512eadd 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private_peering] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "peering.example.com." @@ -28,4 +27,3 @@ resource "google_compute_network" "network-target" { name = "{{index $.Vars "network_target_name"}}" auto_create_subnetworks = false } -# [END dns_managed_zone_private_peering] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl index 6d0a5f6bebc2..580fe7096c3b 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_domain_tutorial] # to setup a web-server resource "google_compute_instance" "default" { name = "{{index $.Vars "dns_compute_instance"}}" @@ -53,4 +52,3 @@ resource "google_dns_record_set" "default" { google_compute_instance.default.network_interface.0.access_config.0.nat_ip ] } -# [END dns_domain_tutorial] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl index 5fc030a212d7..916242554cd1 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_service_directory] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { provider = google-beta @@ -28,4 +27,3 @@ resource "google_compute_network" "network" { name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = false } -# [END dns_managed_zone_service_directory] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl index 7129c4992bfe..86ceb05b04d9 100644 --- a/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_policy_basic] resource "google_dns_policy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "policy_name"}}" enable_inbound_forwarding = true @@ -32,4 +31,3 @@ resource "google_compute_network" "network-2" { name = "{{index $.Vars "network_2_name"}}" auto_create_subnetworks = false } -# [END dns_policy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl index 57e48b8efff9..4ac1dc34847f 100644 --- a/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_record_set_basic] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sample_zone"}}" dns_name = "{{index $.Vars "sample_zone"}}.hashicorptest.com." 
@@ -12,4 +11,3 @@ resource "google_dns_record_set" "default" { rrdatas = ["10.0.0.1", "10.1.0.1"] ttl = 86400 } -# [END dns_record_set_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl index ac7e4c75ef2f..ab8da5ec4b1c 100644 --- a/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_response_policy_basic] resource "google_compute_network" "network-1" { name = "{{index $.Vars "network_1_name"}}" auto_create_subnetworks = false @@ -69,4 +68,3 @@ resource "google_dns_response_policy" "{{$.PrimaryResourceId}}" { gke_cluster_name = google_container_cluster.cluster-1.id } } -# [END dns_response_policy_basic] diff --git a/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl index c187913e052c..7e8e38c88372 100644 --- a/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_response_policy_rule_basic] resource "google_compute_network" "network-1" { name = "{{index $.Vars "network_1_name"}}" auto_create_subnetworks = false @@ -35,4 +34,3 @@ resource "google_dns_response_policy_rule" "{{$.PrimaryResourceId}}" { } } -# [END dns_response_policy_rule_basic] diff --git a/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl b/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl index 1e8e402edba8..d0abe1648d10 100644 --- a/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl @@ -1,6 +1,5 @@ # CDN load balancer with Cloud 
bucket as backend -# [START cloudloadbalancing_cdn_with_backend_bucket_cloud_storage_bucket] # Cloud Storage bucket resource "google_storage_bucket" "default" { name = "{{index $.Vars "my_bucket"}}" @@ -16,18 +15,14 @@ resource "google_storage_bucket" "default" { } } -# [END cloudloadbalancing_cdn_with_backend_bucket_cloud_storage_bucket] -# [START cloudloadbalancing_cdn_with_backend_bucket_make_public] # make bucket public resource "google_storage_bucket_iam_member" "default" { bucket = google_storage_bucket.default.name role = "roles/storage.objectViewer" member = "allUsers" } -# [END cloudloadbalancing_cdn_with_backend_bucket_make_public] -# [START cloudloadbalancing_cdn_with_backend_bucket_index_page] resource "google_storage_bucket_object" "index_page" { name = "{{index $.Vars "index_page"}}" bucket = google_storage_bucket.default.name @@ -37,9 +32,7 @@ resource "google_storage_bucket_object" "index_page" { EOT } -# [END cloudloadbalancing_cdn_with_backend_bucket_index_page] -# [START cloudloadbalancing_cdn_with_backend_bucket_error_page] resource "google_storage_bucket_object" "error_page" { name = "{{index $.Vars "404_page"}}" bucket = google_storage_bucket.default.name @@ -49,9 +42,7 @@ resource "google_storage_bucket_object" "error_page" { EOT } -# [END cloudloadbalancing_cdn_with_backend_bucket_error_page] -# [START cloudloadbalancing_cdn_with_backend_bucket_image] # image object for testing, try to access http:///test.jpg resource "google_storage_bucket_object" "test_image" { name = "{{index $.Vars "test_object"}}" @@ -65,16 +56,12 @@ resource "google_storage_bucket_object" "test_image" { bucket = google_storage_bucket.default.name } -# [END cloudloadbalancing_cdn_with_backend_bucket_image] -# [START cloudloadbalancing_cdn_with_backend_bucket_ip_address] # reserve IP address resource "google_compute_global_address" "default" { name = "{{index $.Vars "example_ip"}}" } -# [END cloudloadbalancing_cdn_with_backend_bucket_ip_address] -# [START 
cloudloadbalancing_cdn_with_backend_bucket_forwarding_rule] # forwarding rule resource "google_compute_global_forwarding_rule" "default" { name = "{{index $.Vars "http_lb_forwarding_rule"}}" @@ -84,25 +71,19 @@ resource "google_compute_global_forwarding_rule" "default" { target = google_compute_target_http_proxy.default.id ip_address = google_compute_global_address.default.id } -# [END cloudloadbalancing_cdn_with_backend_bucket_forwarding_rule] -# [START cloudloadbalancing_cdn_with_backend_bucket_http_proxy] # http proxy resource "google_compute_target_http_proxy" "default" { name = "{{index $.Vars "http_lb_proxy"}}" url_map = google_compute_url_map.default.id } -# [END cloudloadbalancing_cdn_with_backend_bucket_http_proxy] -# [START cloudloadbalancing_cdn_with_backend_bucket_url_map] # url map resource "google_compute_url_map" "default" { name = "{{index $.Vars "http_lb"}}" default_service = google_compute_backend_bucket.default.id } -# [END cloudloadbalancing_cdn_with_backend_bucket_url_map] -# [START cloudloadbalancing_cdn_with_backend_bucket_backend_bucket] # backend bucket with CDN policy with default ttl settings resource "google_compute_backend_bucket" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "cat_backend_bucket"}}" @@ -118,4 +99,3 @@ resource "google_compute_backend_bucket" "{{$.PrimaryResourceId}}" { serve_while_stale = 86400 } } -# [END cloudloadbalancing_cdn_with_backend_bucket_backend_bucket] diff --git a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl index 3e3c2ea58296..3e0de516a7e5 100644 --- a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # External HTTP load balancer with an CDN-enabled managed instance group backend -# [START cloudloadbalancing_ext_http_gce_instance_template] resource 
"google_compute_instance_template" "default" { name = "{{index $.Vars "lb_backend_template"}}" disk { @@ -37,9 +36,7 @@ resource "google_compute_instance_template" "default" { } tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_http_gce_instance_template] -# [START cloudloadbalancing_ext_http_gce_instance_mig] resource "google_compute_instance_group_manager" "default" { name = "{{index $.Vars "lb_backend_example"}}" zone = "us-east1-b" @@ -54,10 +51,8 @@ resource "google_compute_instance_group_manager" "default" { base_instance_name = "vm" target_size = 2 } -# [END cloudloadbalancing_ext_http_gce_instance_mig] -# [START cloudloadbalancing_ext_http_gce_instance_firewall_rule] resource "google_compute_firewall" "default" { name = "{{index $.Vars "fw_allow_health_check"}}" direction = "INGRESS" @@ -70,16 +65,12 @@ resource "google_compute_firewall" "default" { protocol = "tcp" } } -# [END cloudloadbalancing_ext_http_gce_instance_firewall_rule] -# [START cloudloadbalancing_ext_http_gce_instance_ip_address] resource "google_compute_global_address" "default" { name = "{{index $.Vars "lb_ipv4_1"}}" ip_version = "IPV4" } -# [END cloudloadbalancing_ext_http_gce_instance_ip_address] -# [START cloudloadbalancing_ext_http_gce_instance_health_check] resource "google_compute_health_check" "default" { name = "{{index $.Vars "http_basic_check"}}" check_interval_sec = 5 @@ -93,9 +84,7 @@ resource "google_compute_health_check" "default" { timeout_sec = 5 unhealthy_threshold = 2 } -# [END cloudloadbalancing_ext_http_gce_instance_health_check] -# [START cloudloadbalancing_ext_http_gce_instance_backend_service] resource "google_compute_backend_service" "default" { name = "{{index $.Vars "web_backend_service"}}" connection_draining_timeout_sec = 0 @@ -111,23 +100,17 @@ resource "google_compute_backend_service" "default" { capacity_scaler = 1.0 } } -# [END cloudloadbalancing_ext_http_gce_instance_backend_service] -# [START cloudloadbalancing_ext_http_gce_instance_url_map] 
resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "web_map_http"}}" default_service = google_compute_backend_service.default.id } -# [END cloudloadbalancing_ext_http_gce_instance_url_map] -# [START cloudloadbalancing_ext_http_gce_instance_target_http_proxy] resource "google_compute_target_http_proxy" "default" { name = "{{index $.Vars "http_lb_proxy"}}" url_map = google_compute_url_map.default.id } -# [END cloudloadbalancing_ext_http_gce_instance_target_http_proxy] -# [START cloudloadbalancing_ext_http_gce_instance_forwarding_rule] resource "google_compute_global_forwarding_rule" "default" { name = "{{index $.Vars "http_content_rule"}}" ip_protocol = "TCP" @@ -136,4 +119,3 @@ resource "google_compute_global_forwarding_rule" "default" { target = google_compute_target_http_proxy.default.id ip_address = google_compute_global_address.default.id } -# [END cloudloadbalancing_ext_http_gce_instance_forwarding_rule] diff --git a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl index 1f81adddbce2..3929838d30e8 100644 --- a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl @@ -1,7 +1,6 @@ # External HTTP load balancer with a CDN-enabled managed instance group backend # and custom request and response headers -# [START cloudloadbalancing_ext_http_gce_custom_header] # VPC resource "google_compute_network" "default" { name = "{{index $.Vars "xlb_network_name"}}" @@ -154,4 +153,3 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_http_gce_custom_header] diff --git a/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl 
b/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl index 19cd2b27e99a..b4e551c989ca 100644 --- a/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # External SSL proxy load balancer with managed instance group backend -# [START cloudloadbalancing_ext_ssl_proxy_lb] # VPC resource "google_compute_network" "default" { name = "{{index $.Vars "ssl_proxy_xlb_network"}}" @@ -184,5 +183,4 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_ssl_proxy_lb] diff --git a/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl index 08408558c900..dde8ee80f76b 100644 --- a/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # External TCP proxy load balancer with managed instance group backend -# [START cloudloadbalancing_ext_tcp_proxy_lb] # VPC resource "google_compute_network" "default" { name = "{{index $.Vars "tcp_proxy_xlb_network"}}" @@ -142,4 +141,3 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_tcp_proxy_lb] diff --git a/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl b/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl index 0eae785631a7..9eeb7bf49af5 100644 --- a/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudvpn_ha_external] resource "google_compute_ha_vpn_gateway" "ha_gateway" { region = "us-central1" name = "{{index $.Vars "ha_vpn_gateway_name"}}" @@ -100,4 +99,3 @@ resource 
"google_compute_router_peer" "router1_peer2" { advertised_route_priority = 100 interface = google_compute_router_interface.router1_interface2.name } -# [END cloudvpn_ha_external] diff --git a/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl b/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl index 748888614305..44b17b9699ed 100644 --- a/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl @@ -1,4 +1,3 @@ -# [START vpc_firewall_create] resource "google_compute_firewall" "{{$.PrimaryResourceId}}" { project = "{{index $.TestEnvVars "project"}}" name = "{{index $.Vars "firewall_name"}}" @@ -13,4 +12,3 @@ resource "google_compute_firewall" "{{$.PrimaryResourceId}}" { source_tags = ["foo"] target_tags = ["web"] } -# [END vpc_firewall_create] diff --git a/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl b/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl index 7419704f987f..fc23157f4b37 100644 --- a/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl @@ -1,4 +1,3 @@ -# [START compute_flask_quickstart_vm] # Create a single Compute Engine instance resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "flask_vm"}}" @@ -26,9 +25,7 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } } -# [END compute_flask_quickstart_vm] -# [START vpc_flask_quickstart_ssh_fw] resource "google_compute_firewall" "ssh" { name = "{{index $.Vars "allow_ssh"}}" allow { @@ -41,10 +38,8 @@ resource "google_compute_firewall" "ssh" { source_ranges = ["0.0.0.0/0"] target_tags = ["ssh"] } -# [END vpc_flask_quickstart_ssh_fw] -# [START vpc_flask_quickstart_5000_fw] resource "google_compute_firewall" "flask" { name = "{{index $.Vars "flask_app_firewall"}}" network = 
"default" @@ -55,12 +50,10 @@ resource "google_compute_firewall" "flask" { } source_ranges = ["0.0.0.0/0"] } -# [END vpc_flask_quickstart_5000_fw] # Create new multi-region storage bucket in the US # with versioning enabled -# [START storage_bucket_tf_with_versioning] resource "google_storage_bucket" "default" { name = "{{index $.Vars "bucket_tfstate"}}" force_destroy = false @@ -70,4 +63,3 @@ resource "google_storage_bucket" "default" { enabled = true } } -# [END storage_bucket_tf_with_versioning] diff --git a/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl index e02dd389ad34..5a11a850cf9b 100644 --- a/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # Global Internal HTTP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "gilb_network" { name = "{{index $.Vars "gilb_network_name"}}" @@ -182,4 +181,3 @@ resource "google_compute_instance" "vm-test" { } } } -# [END cloudloadbalancing_int_http_gce] diff --git a/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl b/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl index cc7826b64ce9..b048fc49571a 100644 --- a/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudvpn_ha_gcp_to_gcp] resource "google_compute_ha_vpn_gateway" "{{$.PrimaryResourceId}}" { region = "us-central1" name = "{{index $.Vars "ha_vpn_gateway1_name"}}" @@ -178,4 +177,3 @@ resource "google_compute_router_peer" "router2_peer2" { advertised_route_priority = 100 interface = google_compute_router_interface.router2_interface2.name } -# 
[END cloudvpn_ha_gcp_to_gcp] diff --git a/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl b/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl index b67c592a6cb0..297ed227261c 100644 --- a/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl @@ -1,13 +1,12 @@ -# [START compute_custom_hostname_instance_create] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "custom_hostname_instance_name"}}" machine_type = "f1-micro" zone = "us-central1-c" - # Set a custom hostname below + # Set a custom hostname below hostname = "hashicorptest.com" - + boot_disk { initialize_params { image = "debian-cloud/debian-11" @@ -21,4 +20,3 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } -# [END compute_custom_hostname_instance_create] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl index a05e7c8ab91d..3ef1792a394a 100644 --- a/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START instance_settings_basic] resource "google_compute_instance_settings" "{{$.PrimaryResourceId}}" { zone = "us-east7-b" @@ -9,4 +8,3 @@ resource "google_compute_instance_settings" "{{$.PrimaryResourceId}}" { } } -# [END instance_settings_basic] diff --git a/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl b/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl index a1ea2d5cbccb..49560ea75cda 100644 --- a/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl @@ -1,13 +1,12 @@ -# [START 
compute_instance_virtual_display_enabled] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "instance_virtual_display"}}" machine_type = "f1-micro" zone = "us-central1-c" - + # Set the below to true to enable virtual display enable_display = true - + boot_disk { initialize_params { image = "debian-cloud/debian-11" @@ -21,4 +20,3 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } -# [END compute_instance_virtual_display_enabled] diff --git a/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl b/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl index 325ae36b4a6e..4e721cf60f77 100644 --- a/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl @@ -1,6 +1,5 @@ # Internal HTTPS load balancer with HTTP-to-HTTPS redirect -# [START cloudloadbalancing_int_https_with_redirect] # VPC network resource "google_compute_network" "default" { @@ -88,7 +87,7 @@ resource "google_compute_region_ssl_certificate" "default" { name_prefix = "my-certificate-" private_key = tls_private_key.default.private_key_pem certificate = tls_self_signed_cert.default.cert_pem - region = "europe-west1" + region = "europe-west1" lifecycle { create_before_destroy = true } @@ -283,4 +282,3 @@ resource "google_compute_region_url_map" "{{$.PrimaryResourceId}}" { } } } -# [END cloudloadbalancing_int_https_with_redirect] diff --git a/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl index 17b0e5512661..d46b31a22a11 100644 --- a/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # Internal HTTP load balancer with a managed instance group backend -# [START 
cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "ilb_network" { name = "{{index $.Vars "ilb_network_name"}}" @@ -188,4 +187,3 @@ resource "google_compute_instance" "vm-test" { } } } -# [END cloudloadbalancing_int_http_gce] diff --git a/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl index b6fb1079cf3b..9d028444c3f9 100644 --- a/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # Internal TCP/UDP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_tcp_udp_gce] # VPC resource "google_compute_network" "ilb_network" { name = "{{index $.Vars "ilb_network_name"}}" @@ -129,7 +128,7 @@ resource "google_compute_firewall" "fw_hc" { target_tags = ["allow-health-check"] } -# allow communication within the subnet +# allow communication within the subnet resource "google_compute_firewall" "fw_ilb_to_backends" { name = "{{index $.Vars "fw_allow_ilb_to_backends_name"}}" provider = google-beta @@ -177,4 +176,3 @@ resource "google_compute_instance" "vm_test" { } } } -# [END cloudloadbalancing_int_tcp_udp_gce] diff --git a/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl b/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl index 622d27933c18..d621d2a084bf 100644 --- a/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl @@ -1,8 +1,6 @@ -# [START vpc_auto_create] resource "google_compute_network" "{{$.PrimaryResourceId}}" { project = "{{index $.TestEnvVars "project"}}" name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = true 
network_firewall_policy_enforcement_order = "BEFORE_CLASSIC_FIREWALL" } -# [END vpc_auto_create] diff --git a/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl b/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl index 3e3dc85f6d18..0a075376d536 100644 --- a/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl @@ -1,8 +1,6 @@ -# [START vpc_auto_create] resource "google_compute_network" "{{$.PrimaryResourceId}}" { project = "{{index $.TestEnvVars "project"}}" name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = true mtu = 1460 } -# [END vpc_auto_create] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl index 17f1a11e3825..aa9a5ce8ef3c 100644 --- a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl @@ -1,4 +1,3 @@ -# [START networkmanagement_test_addresses] resource "google_network_management_connectivity_test" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "primary_resource_name"}}" source { @@ -43,4 +42,3 @@ resource "google_compute_address" "dest-addr" { address = "10.0.43.43" region = "us-central1" } -# [END networkmanagement_test_addresses] diff --git a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl index 682cbbd3ff4e..1ab2b32cea92 100644 --- a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl @@ -1,4 +1,3 @@ -# [START networkmanagement_test_instances] 
resource "google_network_management_connectivity_test" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "primary_resource_name"}}" source { @@ -57,4 +56,3 @@ data "google_compute_image" "debian_9" { family = "debian-11" project = "debian-cloud" } -# [END networkmanagement_test_instances] diff --git a/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl index b47eec932fa1..6dfe8c28adc8 100644 --- a/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl @@ -1,5 +1,4 @@ # Internal HTTP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "ilb_network" { name = "{{index $.Vars "ilb_network_name"}}" @@ -188,9 +187,7 @@ resource "google_compute_firewall" "fw_ilb_to_backends" { google_compute_firewall.fw_iap ] } -# [END cloudloadbalancing_int_http_gce] -# [START lb_route_extension] resource "google_network_services_lb_route_extension" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "lb_route_extension_name"}}" description = "my route extension" @@ -349,4 +346,3 @@ resource "google_compute_region_backend_service" "callouts_backend" { google_compute_region_backend_service.default ] } -# [END lb_route_extension] diff --git a/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl index abc31633df6b..3b3238e80538 100644 --- a/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl @@ -177,9 +177,7 @@ resource "google_compute_firewall" "fw_ilb_to_backends" { google_compute_firewall.fw_iap ] } 
-# [END cloudloadbalancing_int_http_gce] -# [START lb_traffic_extension] resource "google_network_services_lb_traffic_extension" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "lb_traffic_extension_name"}}" description = "my traffic extension" @@ -334,4 +332,3 @@ resource "google_compute_region_backend_service" "callouts_backend" { google_compute_region_backend_service.default ] } -# [END lb_traffic_extension] diff --git a/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl b/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl index dc4e9fbca51e..7434666761aa 100644 --- a/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl @@ -1,4 +1,3 @@ -# [START vpc_subnet_private_access] resource "google_compute_network" "network" { provider = google-beta project = "{{index $.TestEnvVars "project"}}" @@ -15,9 +14,7 @@ resource "google_compute_subnetwork" "vpc_subnetwork" { network = google_compute_network.network.id private_ip_google_access = true } -# [END vpc_subnet_private_access] -# [START compute_internal_ip_private_access] resource "google_compute_global_address" "default" { provider = google-beta project = google_compute_network.network.project @@ -27,9 +24,7 @@ resource "google_compute_global_address" "default" { network = google_compute_network.network.id address = "100.100.100.106" } -# [END compute_internal_ip_private_access] -# [START compute_forwarding_rule_private_access] resource "google_compute_global_forwarding_rule" "default" { provider = google-beta project = google_compute_network.network.project @@ -43,4 +38,3 @@ resource "google_compute_global_forwarding_rule" "default" { service_directory_region = "europe-west3" } } -# [END compute_forwarding_rule_private_access] diff --git a/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl 
b/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl index f09904910df4..1fce3dc083ef 100644 --- a/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca_pool_all_fields] resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" @@ -88,4 +87,3 @@ resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { } } } -# [END privateca_create_ca_pool_all_fields] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl index d7a4806d9119..c5f76a407a83 100644 --- a/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca_pool] resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" @@ -11,4 +10,3 @@ resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { foo = "bar" } } -# [END privateca_create_ca_pool] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl index d3a96e88f282..82e718a57170 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca] resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { // This example assumes this pool already exists. 
// Pools cannot be deleted in normal test circumstances, so we depend on static pools @@ -47,4 +46,3 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { algorithm = "RSA_PKCS1_4096_SHA256" } } -# [END privateca_create_ca] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl index 2aae4fc5f1af..3179b700af9f 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca_byo_key] resource "google_project_service_identity" "privateca_sa" { service = "privateca.googleapis.com" } @@ -69,4 +68,3 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { google_kms_crypto_key_iam_member.privateca_sa_keyuser_viewer, ] } -# [END privateca_create_ca_byo_key] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl index 92deecf16d0b..6e898ab25e80 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca] resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { // This example assumes this pool already exists. 
// Pools cannot be deleted in normal test circumstances, so we depend on static pools @@ -50,4 +49,3 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { cloud_kms_key_version = "{{index $.Vars "kms_key_name"}}/cryptoKeyVersions/1" } } -# [END privateca_create_ca] diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl index 8a6ec536f0b1..f468ba2d74b3 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_subordinateca] resource "google_privateca_certificate_authority" "root-ca" { pool = "{{index $.Vars "pool_name"}}" certificate_authority_id = "{{index $.Vars "certificate_authority_id"}}-root" @@ -93,4 +92,3 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { } type = "SUBORDINATE" } -# [END privateca_create_subordinateca] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl index c7eb1742e124..88e505e72f49 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_config] resource "google_privateca_ca_pool" "default" { location = "us-central1" @@ -61,7 +60,7 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { locality = "mountain view" province = "california" street_address = "1600 amphitheatre parkway" - } + } subject_alt_name { email_addresses = ["email@example.com"] ip_addresses = ["127.0.0.1"] @@ -99,4 +98,3 @@ resource "google_privateca_certificate" 
"{{$.PrimaryResourceId}}" { } } } -# [END privateca_create_certificate_config] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl index 74e268f42ba1..e2357245e0b8 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_csr] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -55,4 +54,3 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { lifetime = "860s" pem_csr = file("test-fixtures/rsa_csr.pem") } -# [END privateca_create_certificate_csr] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl index e760da42aede..81d37cec8816 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -90,4 +89,3 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { // need to be explicitly connected to it depends_on = [google_privateca_certificate_authority.default] } -# [END privateca_create_certificate] diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl index 8e683242cda5..d09d2d1f7913 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -87,4 +86,3 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { // need to be explicitly connected to it depends_on = [google_privateca_certificate_authority.default] } -# [END privateca_create_certificate] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl index ee36989471a2..9d18151adbf7 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_template] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -131,4 +130,3 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { pem_csr = file("test-fixtures/rsa_csr.pem") certificate_template = google_privateca_certificate_template.default.id } -# [END privateca_create_certificate_template] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl index fa96c3119def..9f4328102c52 100644 --- a/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_quickstart] provider google{} provider tls{} @@ -94,4 +93,3 @@ resource "google_privateca_certificate" "default" { name = "{{index $.Vars "my_certificate"}}" pem_csr = tls_cert_request.example.cert_request_pem } -# [END privateca_quickstart] \ No newline at end of file diff 
--git a/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl index 03909a72b673..0661224a55fa 100644 --- a/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_template] resource "google_privateca_certificate_template" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" @@ -71,4 +70,3 @@ resource "google_privateca_certificate_template" "{{$.PrimaryResourceId}}" { label-one = "value-one" } } -# [END privateca_create_certificate_template] diff --git a/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl index dcb2d94988e1..0623a608d612 100644 --- a/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl @@ -14,7 +14,6 @@ resource "google_compute_region_autoscaler" "{{$.PrimaryResourceId}}" { } } -# [START compute_instance_template_basic] resource "google_compute_instance_template" "foobar" { name = "{{index $.Vars "instance_template_name"}}" machine_type = "e2-standard-4" @@ -46,7 +45,6 @@ resource "google_compute_instance_template" "foobar" { ] } } -# [END compute_instance_template_basic] resource "google_compute_target_pool" "foobar" { name = "{{index $.Vars "target_pool_name"}}" diff --git a/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl index fbdd40f1e5c9..d5d678b9a571 100644 --- a/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_region_target_tcp_proxy_basic] resource 
"google_compute_region_target_tcp_proxy" "default" { name = "{{index $.Vars "region_target_tcp_proxy_name"}}" region = "europe-west4" @@ -20,9 +19,8 @@ resource "google_compute_region_health_check" "default" { region = "europe-west4" timeout_sec = 1 check_interval_sec = 1 - + tcp_health_check { port = "80" } } -# [END cloudloadbalancing_region_target_tcp_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl b/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl index 0490b422924a..f313cfa20b62 100644 --- a/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_url_map_path_template_match] resource "google_compute_region_url_map" "{{$.PrimaryResourceId}}" { region = "us-central1" @@ -87,4 +86,3 @@ resource "google_compute_region_health_check" "default" { } } -# [END cloudloadbalancing_url_map_path_template_match] diff --git a/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl b/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl index 7c1bd95b0e04..0a3ff32857ad 100644 --- a/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl @@ -1,14 +1,10 @@ -# [START cloudloadbalancing_rllxlb_example] -# [START cloudloadbalancing_vpc_network_rllxlb_example] resource "google_compute_network" "default" { name = "{{index $.Vars "lb_network"}}" auto_create_subnetworks = false routing_mode = "REGIONAL" } -# [END cloudloadbalancing_vpc_network_rllxlb_example] -# [START cloudloadbalancing_vpc_subnetwork_rllxlb_example] resource "google_compute_subnetwork" "default" { name = "{{index $.Vars "backend_subnet"}}" ip_cidr_range = "10.1.2.0/24" @@ -18,9 +14,7 
@@ resource "google_compute_subnetwork" "default" { region = "us-west1" stack_type = "IPV4_ONLY" } -# [END cloudloadbalancing_vpc_subnetwork_rllxlb_example] -# [START cloudloadbalancing_vpc_proxy_subnetwork_rllxlb_example] resource "google_compute_subnetwork" "proxy_only" { name = "{{index $.Vars "proxy_only_subnet"}}" ip_cidr_range = "10.129.0.0/23" @@ -29,9 +23,7 @@ resource "google_compute_subnetwork" "proxy_only" { region = "us-west1" role = "ACTIVE" } -# [END cloudloadbalancing_vpc_proxy_subnetwork_rllxlb_example] -# [START cloudloadbalancing_health_firewall_rllxlb_example] resource "google_compute_firewall" "default" { name = "{{index $.Vars "fw_allow_health_check"}}" allow { @@ -43,9 +35,7 @@ resource "google_compute_firewall" "default" { source_ranges = ["130.211.0.0/22", "35.191.0.0/16"] target_tags = ["load-balanced-backend"] } -# [END cloudloadbalancing_health_firewall_rllxlb_example] -# [START cloudloadbalancing_proxy_firewall_rllxlb_example] resource "google_compute_firewall" "allow_proxy" { name = "{{index $.Vars "fw_allow_proxies"}}" allow { @@ -66,9 +56,7 @@ resource "google_compute_firewall" "allow_proxy" { source_ranges = ["10.129.0.0/23"] target_tags = ["load-balanced-backend"] } -# [END cloudloadbalancing_proxy_firewall_rllxlb_example] -# [START cloudloadbalancing_instance_template_rllxlb_example] resource "google_compute_instance_template" "default" { name = "{{index $.Vars "l7_xlb_backend_template"}}" disk { @@ -106,9 +94,7 @@ resource "google_compute_instance_template" "default" { } tags = ["load-balanced-backend"] } -# [END cloudloadbalancing_instance_template_rllxlb_example] -# [START cloudloadbalancing_instance_group_rllxlb_example] resource "google_compute_instance_group_manager" "default" { name = "{{index $.Vars "l7_xlb_backend_example"}}" zone = "us-west1-a" @@ -123,19 +109,15 @@ resource "google_compute_instance_group_manager" "default" { base_instance_name = "vm" target_size = 2 } -# [END 
cloudloadbalancing_instance_group_rllxlb_example] -# [START cloudloadbalancing_ip_address_rllxlb_example] resource "google_compute_address" "default" { name = "{{index $.Vars "address_name"}}" address_type = "EXTERNAL" network_tier = "STANDARD" region = "us-west1" } -# [END cloudloadbalancing_ip_address_rllxlb_example] -# [START cloudloadbalancing_health_check_rllxlb_example] resource "google_compute_region_health_check" "default" { name = "{{index $.Vars "l7_xlb_basic_check"}}" check_interval_sec = 5 @@ -149,9 +131,7 @@ resource "google_compute_region_health_check" "default" { timeout_sec = 5 unhealthy_threshold = 2 } -# [END cloudloadbalancing_health_check_rllxlb_example] -# [START cloudloadbalancing_backend_service_rllxlb_example] resource "google_compute_region_backend_service" "default" { name = "{{index $.Vars "l7_xlb_backend_service"}}" region = "us-west1" @@ -166,25 +146,19 @@ resource "google_compute_region_backend_service" "default" { capacity_scaler = 1.0 } } -# [END cloudloadbalancing_backend_service_rllxlb_example] -# [START cloudloadbalancing_url_map_rllxlb_example] resource "google_compute_region_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "regional_l7_xlb_map"}}" region = "us-west1" default_service = google_compute_region_backend_service.default.id } -# [END cloudloadbalancing_url_map_rllxlb_example] -# [START cloudloadbalancing_target_http_proxy_rllxlb_example] resource "google_compute_region_target_http_proxy" "default" { name = "{{index $.Vars "l7_xlb_proxy"}}" region = "us-west1" url_map = google_compute_region_url_map.default.id } -# [END cloudloadbalancing_target_http_proxy_rllxlb_example] -# [START cloudloadbalancing_forwarding_rule_rllxlb_example] resource "google_compute_forwarding_rule" "default" { name = "l7-xlb-forwarding-rule" provider = google-beta @@ -199,6 +173,4 @@ resource "google_compute_forwarding_rule" "default" { ip_address = google_compute_address.default.address network_tier = "STANDARD" } -# [END 
cloudloadbalancing_forwarding_rule_rllxlb_example] -# [END cloudloadbalancing_rllxlb_example] diff --git a/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl index 18c695305bc0..9d96f30623f6 100644 --- a/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START compute_spot_instance_create] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "spot_instance_name"}}" @@ -10,7 +9,7 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { image = "debian-cloud/debian-11" } } - + scheduling { preemptible = true automatic_restart = false @@ -26,4 +25,3 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } -# [END compute_spot_instance_create] diff --git a/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl index 9c1915799fa8..fa73d6564e15 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl @@ -1,9 +1,7 @@ -# [START cloud_sql_database_create] resource "google_sql_database" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_name"}}" instance = google_sql_database_instance.instance.name } -# [END cloud_sql_database_create] # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "instance" { diff --git a/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl index f0704eab298c..94ee726216e9 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl @@ -1,10 +1,8 @@ -# [START cloud_sql_database_create] resource "google_sql_database" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_name"}}" instance = google_sql_database_instance.instance.name deletion_policy = "ABANDON" } -# [END cloud_sql_database_create] # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "instance" { diff --git a/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl index 124e40ca8f2a..21a9c6082906 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_80_db_n1_s2] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_instance_name"}}" region = "us-central1" @@ -8,9 +7,7 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_80_db_n1_s2] -# [START cloud_sql_mysql_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -27,4 +24,3 @@ resource "google_sql_user" "user" { enable_password_verification = true } } -# [END cloud_sql_mysql_instance_user] diff --git a/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl index 2a41ff5a4017..ca0440f929fd 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_80_db_n1_s2] resource 
"google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_instance_name"}}" @@ -9,9 +8,7 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_80_db_n1_s2] -# [START cloud_sql_postgres_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -22,4 +19,3 @@ resource "google_sql_user" "user" { instance = google_sql_database_instance.instance.name password = random_password.pwd.result } -# [END cloud_sql_postgres_instance_user] diff --git a/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl index 370d70d28d3b..da41d5d0ade6 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_80_db_n1_s2] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_instance_name"}}" region = "us-central1" @@ -9,9 +8,7 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_80_db_n1_s2] -# [START cloud_sql_sqlserver_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -22,4 +19,3 @@ resource "google_sql_user" "user" { instance = google_sql_database_instance.instance.name password = random_password.pwd.result } -# [END cloud_sql_sqlserver_instance_user] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl index fa875cd2bf18..254ce6225bb3 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl @@ -1,28 +1,21 @@ -# [START cloud_sql_instance_service_identity] resource "google_project_service_identity" "gcp_sa_cloud_sql" { provider = google-beta service = "sqladmin.googleapis.com" } -# [END cloud_sql_instance_service_identity] -# [START cloud_sql_instance_keyring] resource "google_kms_key_ring" "keyring" { provider = google-beta name = "{{index $.Vars "keyring_name"}}" location = "us-central1" } -# [END cloud_sql_instance_keyring] -# [START cloud_sql_instance_key] resource "google_kms_crypto_key" "key" { provider = google-beta name = "{{index $.Vars "crypto_key_name"}}" key_ring = google_kms_key_ring.keyring.id purpose = "ENCRYPT_DECRYPT" } -# [END cloud_sql_instance_key] -# [START cloud_sql_instance_crypto_key] resource "google_kms_crypto_key_iam_member" "crypto_key" { provider = google-beta crypto_key_id = google_kms_crypto_key.key.id @@ -30,9 +23,7 @@ resource "google_kms_crypto_key_iam_member" "crypto_key" { member = "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}" } -# [END cloud_sql_instance_crypto_key] -# [START cloud_sql_mysql_instance_cmek] resource "google_sql_database_instance" "mysql_instance_with_cmek" { name = "{{index $.Vars "mysql_instance_cmek"}}" provider = google-beta @@ -44,9 +35,7 @@ resource "google_sql_database_instance" "mysql_instance_with_cmek" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_cmek] -# [START cloud_sql_postgres_instance_cmek] resource "google_sql_database_instance" "postgres_instance_with_cmek" { name = "{{index $.Vars "postgres_instance_cmek"}}" provider = google-beta @@ -58,9 +47,7 @@ resource "google_sql_database_instance" "postgres_instance_with_cmek" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_cmek] -# [START cloud_sql_sqlserver_instance_cmek] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { 
name = "{{index $.Vars "sqlserver_instance_cmek"}}" provider = google-beta @@ -73,4 +60,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_cmek] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl index 6ac502c2ddd4..4c56199b3ed4 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_ha] resource "google_sql_database_instance" "mysql_instance_ha" { name = "{{index $.Vars "mysql_instance_ha"}}" region = "asia-northeast1" @@ -14,9 +13,7 @@ resource "google_sql_database_instance" "mysql_instance_ha" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_ha] -# [START cloud_sql_postgres_instance_ha] resource "google_sql_database_instance" "postgres_instance_ha" { name = "{{index $.Vars "postgres_instance_ha"}}" region = "us-central1" @@ -32,9 +29,7 @@ resource "google_sql_database_instance" "postgres_instance_ha" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_ha] -# [START cloud_sql_sqlserver_instance_ha] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_ha"}}" region = "us-central1" @@ -50,4 +45,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_ha] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl index 9e80323369b8..b95a271bf5e6 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl @@ -6,7 +6,6 @@ resource "google_project_service_identity" "gcp_sa_cloud_sql" { service = "sqladmin.googleapis.com" } -# [START cloud_sql_instance_iam_conditions] data "google_iam_policy" "sql_iam_policy" { binding { role = "roles/cloudsql.client" @@ -25,7 +24,6 @@ resource "google_project_iam_policy" "project" { project = data.google_project.project.id policy_data = data.google_iam_policy.sql_iam_policy.policy_data } -# [END cloud_sql_instance_iam_conditions] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_iam_condition"}}" diff --git a/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl index 06a6bbc81a24..9fa98a22dad8 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_labels] resource "google_sql_database_instance" "mysql_instance_labels" { name = "{{index $.Vars "mysql_instance_labels"}}" region = "us-central1" @@ -12,9 +11,7 @@ resource "google_sql_database_instance" "mysql_instance_labels" { } deletion_protection = "false" } -# [END cloud_sql_mysql_instance_labels] -# [START cloud_sql_postgres_instance_labels] resource "google_sql_database_instance" "postgres_instance_labels" { name = "{{index $.Vars "postgres_instance_labels"}}" region = "us-central1" @@ -28,9 +25,7 @@ resource "google_sql_database_instance" "postgres_instance_labels" { } deletion_protection = "false" } -# [END cloud_sql_postgres_instance_labels] -# [START cloud_sql_sqlserver_instance_labels] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_labels"}}" region = "us-central1" @@ -45,4 +40,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } 
deletion_protection = "false" } -# [END cloud_sql_sqlserver_instance_labels] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl index a06e64956bf6..3739b999506e 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_pitr] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_pitr"}}" region = "asia-northeast1" @@ -14,9 +13,7 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_pitr] -# [START cloud_sql_postgres_instance_pitr] resource "google_sql_database_instance" "postgres_instance_pitr" { name = "{{index $.Vars "postgres_instance__pitr"}}" region = "us-central1" @@ -32,4 +29,3 @@ resource "google_sql_database_instance" "postgres_instance_pitr" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_pitr] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl index e976fd8c142a..0ac9a24a3c81 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_require_ssl] resource "google_sql_database_instance" "mysql_instance" { name = "{{index $.Vars "mysql_instance"}}" region = "asia-northeast1" @@ -11,16 +10,12 @@ resource "google_sql_database_instance" "mysql_instance" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_require_ssl] -# [START cloud_sql_mysql_instance_ssl_cert] resource "google_sql_ssl_cert" "mysql_client_cert" { common_name = 
"mysql_common_name" instance = google_sql_database_instance.mysql_instance.name } -# [END cloud_sql_mysql_instance_ssl_cert] -# [START cloud_sql_postgres_instance_require_ssl] resource "google_sql_database_instance" "postgres_instance" { name = "{{index $.Vars "postgres_instance"}}" region = "asia-northeast1" @@ -33,16 +28,12 @@ resource "google_sql_database_instance" "postgres_instance" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_require_ssl] -# [START cloud_sql_postgres_instance_ssl_cert] resource "google_sql_ssl_cert" "postgres_client_cert" { common_name = "postgres_common_name" instance = google_sql_database_instance.postgres_instance.name } -# [END cloud_sql_postgres_instance_ssl_cert] -# [START cloud_sql_sqlserver_instance_require_ssl] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance"}}" region = "asia-northeast1" @@ -56,4 +47,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_require_ssl] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl index 2e6490cd6fe4..1cd16f0e408b 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_authorized_network] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_with_authorized_network"}}" region = "us-central1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_authorized_network] diff 
--git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl index 67dfdc4fad50..fab77d0d506d 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_backup] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_backup"}}" region = "asia-northeast1" @@ -13,4 +12,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_backup] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl index 916d12bf09ac..96e138c64c2d 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_backup_location] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_backup_location"}}" region = "asia-northeast1" @@ -12,4 +11,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl index 57e630c34e2f..ee7a1d41c254 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl @@ -1,4 +1,3 @@ -# [START 
cloud_sql_mysql_instance_backup_retention] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_backup_retention"}}" region = "asia-northeast1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl index 5ea986116ac5..134192af0268 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_source] resource "google_sql_database_instance" "source" { name = "{{index $.Vars "mysql_instance_source_name"}}" region = "us-central1" @@ -8,9 +7,7 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_source] -# [START cloud_sql_mysql_instance_clone] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_clone_name"}}" region = "us-central1" @@ -20,4 +17,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_clone] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl index a2421e78283e..188242db640d 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_flags] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { database_version = "MYSQL_8_0" 
name = "{{index $.Vars "mysql_instance"}}" @@ -21,4 +20,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_flags] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl index 8165b86cd381..93d0efe450fa 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_public_ip] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { database_version = "MYSQL_5_7" name = "{{index $.Vars "mysql_public_ip_instance_name"}}" @@ -21,4 +20,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl index de0fd811c86b..feef53e47ab8 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_pvp] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_pvp_instance_name"}}" region = "asia-northeast1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_pvp] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl index 6b36e0c5c05a..9664d733332e 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_primary] resource "google_sql_database_instance" "primary" { name = "{{index $.Vars "mysql_primary_instance_name"}}" region = "europe-west4" @@ -12,9 +11,7 @@ resource "google_sql_database_instance" "primary" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_primary] -# [START cloud_sql_mysql_instance_replica] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_replica_instance_name"}}" master_instance_name = google_sql_database_instance.primary.name @@ -32,4 +29,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_replica] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl index 77bd979fa7d1..1441006a0b88 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_authorized_network] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_with_authorized_network"}}" region = "us-central1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl index 
be494aa10a38..94043bb1c485 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_backup] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_backup"}}" region = "us-central1" @@ -12,4 +11,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_backup] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl index 523fe2128d10..93476520866b 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_backup_location] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_backup_location"}}" region = "us-central1" @@ -12,4 +11,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl index 5851ba5db1b5..b797a1f8e699 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_backup_retention] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars 
"postgres_instance_backup_retention"}}" region = "us-central1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl index d0a68e4b6c9c..fd519d57b23e 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_source] resource "google_sql_database_instance" "source" { name = "{{index $.Vars "postgres_instance_source_name"}}" region = "us-central1" @@ -8,9 +7,7 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_source] -# [START cloud_sql_postgres_instance_clone] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_clone_name"}}" region = "us-central1" @@ -20,4 +17,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_clone] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl index eec472895808..eed5805b0096 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_flags] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance"}}" region = "us-central1" @@ -16,4 +15,3 @@ resource 
"google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_flags] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl index 8fbb18db8910..4ab67a49c708 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_public_ip] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { database_version = "POSTGRES_14" name = "{{index $.Vars "postgres_public_ip_instance_name"}}" @@ -19,4 +18,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl index d9036765b51c..5181bd859d46 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_pvp] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_pvp_instance_name"}}" region = "asia-northeast1" @@ -17,4 +16,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_pvp] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl index 8f205eb48d40..180fa60611e5 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_primary] resource "google_sql_database_instance" "primary" { name = "{{index $.Vars "postgres_primary_instance_name"}}" region = "europe-west4" @@ -11,9 +10,7 @@ resource "google_sql_database_instance" "primary" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_primary] -# [START cloud_sql_postgres_instance_replica] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_replica_instance_name"}}" master_instance_name = google_sql_database_instance.primary.name @@ -31,4 +28,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_replica] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl index bb36f15ae286..6cb660a5dfa9 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_authorized_network] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_with_authorized_network"}}" region = "us-central1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl 
b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl index 80b315b42fe3..ea04446a312c 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_backup] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_backup"}}" region = "us-central1" @@ -13,4 +12,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_backup] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl index 46cbacab1c67..ba2d19cc8eed 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_backup_location] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_backup_location"}}" region = "us-central1" @@ -13,4 +12,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl index 0898f0083008..bcf2751fbac2 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl @@ -1,4 +1,3 @@ -# [START 
cloud_sql_sqlserver_instance_backup_retention] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_backup_retention"}}" region = "us-central1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl index 121529a7065d..9122adc7ec14 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_source] resource "google_sql_database_instance" "source" { name = "{{index $.Vars "sqlserver_instance_source_name"}}" region = "us-central1" @@ -9,9 +8,7 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_source] -# [START cloud_sql_sqlserver_instance_clone] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_clone_name"}}" region = "us-central1" @@ -22,4 +19,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_clone] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl index 559ea293afb1..31e878f1a9fc 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_flags] resource 
"google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance"}}" region = "us-central1" @@ -21,4 +20,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_flags] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl index 789aba01520a..86b8090be47a 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_public_ip] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_public_ip_instance_name"}}" region = "europe-west4" @@ -20,4 +19,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl index 88d0acc1e8d9..93c26ef17944 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_primary] resource "google_sql_database_instance" "primary" { name = "{{index $.Vars "sqlserver_primary_instance_name"}}" region = "europe-west4" @@ -12,9 +11,7 @@ resource "google_sql_database_instance" "primary" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_primary] -# [START cloud_sql_sqlserver_instance_replica] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { 
name = "{{index $.Vars "sqlserver_replica_instance_name"}}" master_instance_name = google_sql_database_instance.primary.name @@ -32,4 +29,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_replica] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl index 24ed99f68177..c1d7a1e1d5fa 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl @@ -14,7 +14,6 @@ resource "google_compute_subnetwork" "default" { network = google_compute_network.default.id } -# [START cloud_sql_sqlserver_vm_instance] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { provider = google-beta name = "{{index $.Vars "sqlserver_vm"}}" @@ -39,9 +38,7 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { subnetwork = google_compute_subnetwork.default.id } } -# [END cloud_sql_sqlserver_vm_instance] -# [START cloud_sql_sqlserver_vm_firewall_rule] resource "google_compute_firewall" "sql_server_1433" { provider = google-beta name = "{{index $.Vars "sql_server_1433_3"}}" @@ -55,4 +52,3 @@ resource "google_compute_firewall" "sql_server_1433" { priority = 1000 source_ranges = ["0.0.0.0/0"] } -# [END cloud_sql_sqlserver_vm_firewall_rule] diff --git a/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl index 5d248640da3c..152b8b675a03 100644 --- a/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl @@ -1,11 +1,9 @@ -# [START storage_hmac_key] # Create a new service account resource "google_service_account" "service_account" { account_id = "{{index $.Vars "account_id"}}" } -#Create the HMAC key for the 
associated service account +#Create the HMAC key for the associated service account resource "google_storage_hmac_key" "{{$.PrimaryResourceId}}" { service_account_email = google_service_account.service_account.email } -# [END storage_hmac_key] diff --git a/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl index c020cc71de31..edc5c07db1f5 100644 --- a/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl @@ -5,7 +5,6 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { uniform_bucket_level_access = true } -# [START storage_make_data_public] # Make bucket public resource "google_storage_bucket_iam_member" "member" { provider = google-beta @@ -13,4 +12,3 @@ resource "google_storage_bucket_iam_member" "member" { role = "roles/storage.objectViewer" member = "allUsers" } -# [END storage_make_data_public] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl index 0e52b1e699a8..b6557318d04c 100644 --- a/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl @@ -1,4 +1,3 @@ -# [START storage_create_new_bucket_tf] # Create new storage bucket in the US multi-region # with coldline storage resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { @@ -8,9 +7,7 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { uniform_bucket_level_access = true } -# [END storage_create_new_bucket_tf] -# [START storage_upload_object_tf] # Upload files # Discussion about using tf to upload a large number of objects # https://stackoverflow.com/questions/68455132/terraform-copy-multiple-files-to-bucket-at-the-same-time-bucket-creation @@ -24,9 +21,7 @@ resource "google_storage_bucket_object" 
"default" { content_type = "text/plain" bucket = google_storage_bucket.static.id } -# [END storage_upload_object_tf] -# [START storage_get_object_metadata_tf] # Get object metadata data "google_storage_bucket_object" "default" { name = google_storage_bucket_object.default.name @@ -36,9 +31,7 @@ data "google_storage_bucket_object" "default" { output "object_metadata" { value = data.google_storage_bucket_object.default } -# [END storage_get_object_metadata_tf] -# [START storage_get_bucket_metadata_tf] # Get bucket metadata data "google_storage_bucket" "default" { name = google_storage_bucket.static.id @@ -47,5 +40,4 @@ data "google_storage_bucket" "default" { output "bucket_metadata" { value = data.google_storage_bucket.default } -# [END storage_get_bucket_metadata_tf] diff --git a/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl index b8fd64b062e1..3beadc46890e 100644 --- a/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl @@ -1,4 +1,3 @@ -# [START storage_create_lifecycle_setting_tf] resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { provider = google-beta name = "{{index $.Vars "example_bucket"}}" @@ -14,4 +13,3 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { } } } -# [END storage_create_lifecycle_setting_tf] diff --git a/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl index e586cbbb59d6..b73d62d3826c 100644 --- a/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl @@ -1,4 +1,3 @@ -# [START storage_create_pubsub_notifications_tf] // Create a Pub/Sub notification. 
resource "google_storage_notification" "notification" { provider = google-beta @@ -33,4 +32,3 @@ resource "google_pubsub_topic" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "your_topic_name"}}" provider = google-beta } -# [END storage_create_pubsub_notifications_tf] diff --git a/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl index 826b69285cca..6f7fa483a2b0 100644 --- a/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl @@ -1,4 +1,3 @@ -# [START storage_static_website_create_bucket_tf] # Create new storage bucket in the US multi-region # with coldline storage and settings for main_page_suffix and not_found_page resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { @@ -10,18 +9,14 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { not_found_page = "{{index $.Vars "main_page_suffix"}}" } } -# [END storage_static_website_create_bucket_tf] -# [START storage_static_website_make_bucket_public_tf] # Make bucket public by granting allUsers READER access resource "google_storage_bucket_access_control" "public_rule" { bucket = google_storage_bucket.static_website.id role = "READER" entity = "allUsers" } -# [END storage_static_website_make_bucket_public_tf] -# [START storage_static_website_upload_files_tf] # Upload a simple index.html page to the bucket resource "google_storage_bucket_object" "indexpage" { name = "{{index $.Vars "main_page_suffix"}}" @@ -37,4 +32,3 @@ resource "google_storage_bucket_object" "errorpage" { content_type = "text/html" bucket = google_storage_bucket.static_website.id } -# [END storage_static_website_upload_files_tf] diff --git a/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl index 99596d6b90d3..900565b2a1d1 100644 --- 
a/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_grpc_proxy_basic] resource "google_compute_target_grpc_proxy" "default" { name = "{{index $.Vars "proxy_name"}}" url_map = google_compute_url_map.urlmap.id @@ -86,4 +85,3 @@ resource "google_compute_health_check" "default" { grpc_service_name = "testservice" } } -# [END cloudloadbalancing_target_grpc_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl index 5e5b374f711c..dcce57816ab9 100644 --- a/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_http_proxy_basic] resource "google_compute_target_http_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_http_proxy_name"}}" url_map = google_compute_url_map.default.id @@ -39,4 +38,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_http_proxy_basic] diff --git a/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl b/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl index 875d95a242f9..9927dcd35ffb 100644 --- a/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_http_proxy_http_keep_alive_timeout] resource "google_compute_target_http_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_http_proxy_name"}}" http_keep_alive_timeout_sec = 610 @@ -41,4 +40,3 @@ resource 
"google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_http_proxy_http_keep_alive_timeout] diff --git a/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl b/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl index e07e79da166d..9b25f866cf5d 100644 --- a/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_http_proxy_https_redirect] resource "google_compute_target_http_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_http_proxy_name"}}" url_map = google_compute_url_map.default.id @@ -11,4 +10,3 @@ resource "google_compute_url_map" "default" { strip_query = false } } -# [END cloudloadbalancing_target_http_proxy_https_redirect] diff --git a/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl index 5cfab5833370..087268c78f28 100644 --- a/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_https_proxy_basic] resource "google_compute_target_https_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_https_proxy_name"}}" url_map = google_compute_url_map.default.id @@ -48,4 +47,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_https_proxy_basic] diff --git a/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl b/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl index fe52d17cb089..075c573196f7 100644 --- 
a/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_https_proxy_http_keep_alive_timeout] resource "google_compute_target_https_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_https_proxy_name"}}" http_keep_alive_timeout_sec = 610 @@ -50,4 +49,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_https_proxy_http_keep_alive_timeout] diff --git a/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl b/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl index 08f749b2793c..11c28dfea258 100644 --- a/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_https_proxy_mtls] data "google_project" "project" { provider = google-beta } @@ -90,4 +89,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_https_proxy_mtls] diff --git a/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl index 4167a0c8b1bc..7533c8ab2c1a 100644 --- a/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_ssl_proxy_basic] resource "google_compute_target_ssl_proxy" "default" { name = "{{index $.Vars "target_ssl_proxy_name"}}" backend_service = google_compute_backend_service.default.id @@ -25,4 +24,3 @@ resource "google_compute_health_check" "default" { port = "443" } } -# [END cloudloadbalancing_target_ssl_proxy_basic] \ No newline at end of file diff 
--git a/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl index 2c7689ab33c6..ebd96724985b 100644 --- a/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_tcp_proxy_basic] resource "google_compute_target_tcp_proxy" "default" { name = "{{index $.Vars "target_tcp_proxy_name"}}" backend_service = google_compute_backend_service.default.id @@ -21,4 +20,3 @@ resource "google_compute_health_check" "default" { port = "443" } } -# [END cloudloadbalancing_target_tcp_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl index f8b9d8efdb9a..42c461403bf6 100644 --- a/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_url_map_bucket_and_service] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -73,4 +72,3 @@ resource "google_storage_bucket" "static" { name = "{{index $.Vars "storage_bucket_name"}}" location = "US" } -# [END cloudloadbalancing_url_map_bucket_and_service] diff --git a/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl index 432b692d479d..7d58d12765c4 100644 --- a/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_header_based_routing] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars 
"url_map_name"}}" description = "header-based routing example" @@ -73,4 +72,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END trafficdirector_url_map_header_based_routing] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl index 02d79ef69565..5b9994af38f9 100644 --- a/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_parameter_based_routing] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "parameter-based routing example" @@ -73,4 +72,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END trafficdirector_url_map_parameter_based_routing] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl index 17dc901c7517..74fd0795d039 100644 --- a/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_url_map_path_template_match] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -74,4 +73,3 @@ resource "google_storage_bucket" "static" { name = "{{index $.Vars "storage_bucket_name"}}" location = "US" } -# [END cloudloadbalancing_url_map_path_template_match] diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl index 
72e92f8fb516..f401ca61a072 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_path] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -102,4 +101,3 @@ resource "google_compute_health_check" "default" { port = 80 } } -# [END trafficdirector_url_map_traffic_director_path] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl index ad2340dc3cc3..b35f1cca09fd 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_path_partial] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -72,4 +71,3 @@ resource "google_compute_health_check" "default" { } } -# [END trafficdirector_url_map_traffic_director_path_partial] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl index a711552980c2..9f75e5b169de 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_route] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -82,4 +81,3 @@ resource "google_compute_health_check" "default" { port 
= 80 } } -# [END trafficdirector_url_map_traffic_director_route] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl index c50a260f71ab..1d1ea0c74ffd 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_route_partial] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -53,4 +52,3 @@ resource "google_compute_health_check" "default" { port = 80 } } -# [END trafficdirector_url_map_traffic_director_route_partial] \ No newline at end of file From 92f7c92917a2648e11c6002bfbe640f2f20afdb6 Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Thu, 27 Jun 2024 13:59:07 -0700 Subject: [PATCH 236/356] Make downstream builder no-op when commit already built (#10647) --- .ci/gcb-push-downstream.yml | 8 ++++---- .ci/magician/cmd/generate_downstream.go | 18 ++++++++++++++++++ .ci/magician/cmd/sync_branch.go | 5 +++++ .ci/magician/cmd/wait_for_commit.go | 5 +++-- 4 files changed, 30 insertions(+), 6 deletions(-) diff --git a/.ci/gcb-push-downstream.yml b/.ci/gcb-push-downstream.yml index c2081f601a6f..f03700d5d44a 100644 --- a/.ci/gcb-push-downstream.yml +++ b/.ci/gcb-push-downstream.yml @@ -35,7 +35,7 @@ steps: id: tpg-sync waitFor: ["build-magician-binary"] args: - - wait-for-commit + - 'wait-for-commit' - 'tpg-sync' - $BRANCH_NAME - $COMMIT_SHA @@ -46,7 +46,7 @@ steps: id: tpgb-sync waitFor: ["build-magician-binary"] args: - - wait-for-commit + - 'wait-for-commit' - 'tpgb-sync' - $BRANCH_NAME - $COMMIT_SHA @@ -57,7 +57,7 @@ steps: id: tgc-sync waitFor: ["build-magician-binary"] args: - - wait-for-commit + - 'wait-for-commit' - 'tgc-sync' - 
$BRANCH_NAME - $COMMIT_SHA @@ -68,7 +68,7 @@ steps: id: tf-oics-sync waitFor: ["build-magician-binary"] args: - - wait-for-commit + - 'wait-for-commit' - 'tf-oics-sync' - $BRANCH_NAME - $COMMIT_SHA diff --git a/.ci/magician/cmd/generate_downstream.go b/.ci/magician/cmd/generate_downstream.go index 0cdc8526c682..fd28cf0978db 100644 --- a/.ci/magician/cmd/generate_downstream.go +++ b/.ci/magician/cmd/generate_downstream.go @@ -98,6 +98,24 @@ func execGenerateDownstream(baseBranch, command, repo, version, ref string, gh G baseBranch = "main" } + var syncBranchPrefix string + if repo == "terraform" { + if version == "beta" { + syncBranchPrefix = "tpgb-sync" + } else if version == "ga" { + syncBranchPrefix = "tpg-sync" + } + } else if repo == "terraform-google-conversion" { + syncBranchPrefix = "tgc-sync" + } else if repo == "tf-oics" { + syncBranchPrefix = "tf-oics-sync" + } + syncBranch := getSyncBranch(syncBranchPrefix, baseBranch) + if syncBranchHasCommit(ref, syncBranch, rnr) { + fmt.Printf("Sync branch %s already has commit %s, skipping generation\n", syncBranch, ref) + os.Exit(0) + } + mmLocalPath := filepath.Join(rnr.GetCWD(), "..", "..") mmCopyPath := filepath.Join(mmLocalPath, "..", fmt.Sprintf("mm-%s-%s-%s", repo, version, command)) if _, err := rnr.Run("cp", []string{"-rp", mmLocalPath, mmCopyPath}, nil); err != nil { diff --git a/.ci/magician/cmd/sync_branch.go b/.ci/magician/cmd/sync_branch.go index 7722cc4f4c83..d3950b268235 100644 --- a/.ci/magician/cmd/sync_branch.go +++ b/.ci/magician/cmd/sync_branch.go @@ -58,6 +58,11 @@ func execSyncBranchCmd(syncBranchPrefix, baseBranch, sha, githubToken string, ru syncBranch := getSyncBranch(syncBranchPrefix, baseBranch) fmt.Println("SYNC_BRANCH: ", syncBranch) + if syncBranchHasCommit(sha, syncBranch, runner) { + fmt.Printf("Commit %s already in sync branch %s, skipping sync\n", sha, syncBranch) + return nil + } + _, err := runner.Run("git", []string{"push", 
fmt.Sprintf("https://modular-magician:%s@github.com/GoogleCloudPlatform/magic-modules", githubToken), fmt.Sprintf("%s:%s", sha, syncBranch)}, nil) return err } diff --git a/.ci/magician/cmd/wait_for_commit.go b/.ci/magician/cmd/wait_for_commit.go index 2059885e35c8..ecf3a2a7ebb4 100644 --- a/.ci/magician/cmd/wait_for_commit.go +++ b/.ci/magician/cmd/wait_for_commit.go @@ -48,7 +48,8 @@ func execWaitForCommit(syncBranchPrefix, baseBranch, sha string, runner source.R fmt.Println("SYNC_BRANCH: ", syncBranch) if syncBranchHasCommit(sha, syncBranch, runner) { - return fmt.Errorf("found %s in history of %s - dying to avoid double-generating that commit", sha, syncBranch) + fmt.Printf("found %s in history of %s - skipping wait\n", sha, syncBranch) + return nil } for { @@ -68,7 +69,7 @@ func execWaitForCommit(syncBranchPrefix, baseBranch, sha string, runner source.R } fmt.Println("sync branch is at: ", syncHead) fmt.Println("current commit is: ", sha) - + if _, err := runner.Run("git", []string{"fetch", "origin", syncBranch}, nil); err != nil { return err } From 0b9e2683dd1750ec04a869d4ffe54ff7fe0ad292 Mon Sep 17 00:00:00 2001 From: Hamza Hassan <43001514+Hamzawy63@users.noreply.github.com> Date: Thu, 27 Jun 2024 23:01:18 +0200 Subject: [PATCH 237/356] Add allowlistedCertificates field to TrustConfig (#10906) Co-authored-by: Hamza Hassan --- .../certificatemanager/TrustConfig.yaml | 20 ++++++++++++++++--- ...ust_config_allowlisted_certificates.tf.erb | 16 +++++++++++++++ ...e_certificate_manager_trust_config_test.go | 12 +++++++++-- 3 files changed, 43 insertions(+), 5 deletions(-) create mode 100644 mmv1/templates/terraform/examples/certificate_manager_trust_config_allowlisted_certificates.tf.erb diff --git a/mmv1/products/certificatemanager/TrustConfig.yaml b/mmv1/products/certificatemanager/TrustConfig.yaml index d7395fa22948..414c639bf9da 100644 --- a/mmv1/products/certificatemanager/TrustConfig.yaml +++ b/mmv1/products/certificatemanager/TrustConfig.yaml @@ -50,8 
+50,11 @@ examples: primary_resource_id: 'default' vars: trust_config_name: 'trust-config' -custom_code: !ruby/object:Provider::Terraform::CustomCode - pre_update: templates/terraform/pre_update/certificate_manager_trust_config.go.erb + - !ruby/object:Provider::Terraform::Examples + name: 'certificate_manager_trust_config_allowlisted_certificates' + primary_resource_id: 'default' + vars: + trust_config_name: 'trust-config' parameters: - !ruby/object:Api::Type::String name: 'name' @@ -87,7 +90,6 @@ properties: - !ruby/object:Api::Type::KeyValueLabels name: 'labels' description: 'Set of label tags associated with the trust config.' - immutable: true - !ruby/object:Api::Type::String name: 'description' description: | @@ -124,3 +126,15 @@ properties: PEM intermediate certificate used for building up paths for validation. Each certificate provided in PEM format may occupy up to 5kB. sensitive: true + - !ruby/object:Api::Type::Array + name: allowlistedCertificates + description: | + Allowlisted PEM-encoded certificates. A certificate matching an allowlisted certificate is always considered valid as long as + the certificate is parseable, proof of private key possession is established, and constraints on the certificate's SAN field are met. + item_type: !ruby/object:Api::Type::NestedObject + properties: + - !ruby/object:Api::Type::String + name: 'pemCertificate' + description: | + PEM certificate that is allowlisted. The certificate can be up to 5k bytes, and must be a parseable X.509 certificate. 
+ required: true diff --git a/mmv1/templates/terraform/examples/certificate_manager_trust_config_allowlisted_certificates.tf.erb b/mmv1/templates/terraform/examples/certificate_manager_trust_config_allowlisted_certificates.tf.erb new file mode 100644 index 000000000000..5b9890dcf018 --- /dev/null +++ b/mmv1/templates/terraform/examples/certificate_manager_trust_config_allowlisted_certificates.tf.erb @@ -0,0 +1,16 @@ +resource "google_certificate_manager_trust_config" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]["trust_config_name"] %>" + description = "A sample trust config resource with allowlisted certificates" + location = "global" + + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert.pem") + } + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert2.pem") + } + + labels = { + foo = "bar" + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/certificatemanager/resource_certificate_manager_trust_config_test.go b/mmv1/third_party/terraform/services/certificatemanager/resource_certificate_manager_trust_config_test.go index b84cbb0530f6..d976cb10fa39 100644 --- a/mmv1/third_party/terraform/services/certificatemanager/resource_certificate_manager_trust_config_test.go +++ b/mmv1/third_party/terraform/services/certificatemanager/resource_certificate_manager_trust_config_test.go @@ -46,7 +46,7 @@ func testAccCertificateManagerTrustConfig_update0(context map[string]interface{} resource "google_certificate_manager_trust_config" "default" { name = "tf-test-trust-config%{random_suffix}" description = "sample description for the trust config" - location = "us-central1" + location = "global" trust_stores { trust_anchors { @@ -57,6 +57,10 @@ resource "google_certificate_manager_trust_config" "default" { } } + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert.pem") + } + labels = { "foo" = "bar" } @@ -69,7 +73,7 @@ func 
testAccCertificateManagerTrustConfig_update1(context map[string]interface{} resource "google_certificate_manager_trust_config" "default" { name = "tf-test-trust-config%{random_suffix}" description = "sample description for the trust config 2" - location = "us-central1" + location = "global" trust_stores { trust_anchors { @@ -80,6 +84,10 @@ resource "google_certificate_manager_trust_config" "default" { } } + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert.pem") + } + labels = { "bar" = "foo" } From 19fa1816cf373f84daa252027cf7e10855f37b14 Mon Sep 17 00:00:00 2001 From: Tejal Desai Date: Thu, 27 Jun 2024 14:15:02 -0700 Subject: [PATCH 238/356] feat: added new field vm_tags to the workstation config (#11015) --- .../workstations/WorkstationConfig.yaml | 11 +++ .../examples/workstation_config_basic.tf.erb | 21 +++++ ...rkstations_workstation_config_test.go.tmpl | 86 ++++++++++++++++++ ...orkstations_workstation_config_test.go.erb | 87 +++++++++++++++++++ 4 files changed, 205 insertions(+) diff --git a/mmv1/products/workstations/WorkstationConfig.yaml b/mmv1/products/workstations/WorkstationConfig.yaml index ff575c7801f5..83086c194d3b 100644 --- a/mmv1/products/workstations/WorkstationConfig.yaml +++ b/mmv1/products/workstations/WorkstationConfig.yaml @@ -73,6 +73,8 @@ examples: vars: workstation_cluster_name: 'workstation-cluster' workstation_config_name: 'workstation-config' + tag_key1: 'tag_key1' + tag_value1: 'tag_value1' - !ruby/object:Provider::Terraform::Examples name: 'workstation_config_container' min_version: beta @@ -222,6 +224,7 @@ properties: - 'host.gceInstance.accelerators' - 'host.gceInstance.boostConfigs' - 'host.gceInstance.disableSsh' + - 'host.gceInstance.vmTags' properties: - !ruby/object:Api::Type::NestedObject name: 'gceInstance' @@ -376,6 +379,14 @@ properties: description: | Number of accelerator cards exposed to the instance. 
required: true + - !ruby/object:Api::Type::KeyValuePairs + name: 'vmTags' + description: | + Resource manager tags to be bound to the VM instances backing the Workstations. + Tag keys and values have the same definition as + https://cloud.google.com/resource-manager/docs/tags/tags-overview + Keys must be in the format `tagKeys/{tag_key_id}`, and + values are in the format `tagValues/456`. - !ruby/object:Api::Type::Array name: 'persistentDirectories' description: | diff --git a/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb b/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb index 1925d7b016b6..692461e52dab 100644 --- a/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb +++ b/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb @@ -1,3 +1,21 @@ +resource "google_project" "project" { + project_id = "<%= ctx[:vars]['project_id'] %>" + name = "<%= ctx[:vars]['project_id'] %>" + org_id = "<%= ctx[:test_env_vars]['org_id'] %>" +} + +resource "google_tags_tag_key" "tag_key1" { + provider = "google-beta" + parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" + short_name = "<%= ctx[:vars]['tag_key1'] %>" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = "google-beta" + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "<%= ctx[:vars]['tag_value1'] %>" +} + resource "google_compute_network" "default" { provider = google-beta name = "<%= ctx[:vars]['workstation_cluster_name'] %>" @@ -52,6 +70,9 @@ resource "google_workstations_workstation_config" "<%= ctx[:primary_resource_id] boot_disk_size_gb = 35 disable_public_ip_addresses = true disable_ssh = false + vm_tags = { + "tagKeys/${google_tags_tag_key.tag_key1.short_name}" = "tagValues/${google_tags_tag_value.tag_value1.short_name}" + } } } } diff --git a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl 
b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl index 576a21ca1262..b3fa64133ba6 100644 --- a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl +++ b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl @@ -1292,4 +1292,90 @@ resource "google_workstations_workstation_config" "default" { } `, context) } + +func TestAccWorkstationsWorkstationConfig_vmTags(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_vmTags(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_vmTags(context map[string]interface{}) string { + return acctest.Nprintf(` + data "google_project" "project" { + provider = "google-beta" + } + + resource "google_tags_tag_key" "tag_key1" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key1%{random_suffix}" + } + + resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "tf_test_tag_value1%{random_suffix}" + } + +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = 
"tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + host { + gce_instance { + machine_type = "e2-standard-4" + boot_disk_size_gb = 35 + disable_public_ip_addresses = true + vm_tags = { + "tagKeys/${google_tags_tag_key.tag_key1.name}" = "tagValues/${google_tags_tag_value.tag_value1.name}" + } + } + } + +} +`, context) +} {{- end }} diff --git a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb index 16fb4c655cdf..7fed741b8039 100644 --- a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb +++ b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb @@ -1293,4 +1293,91 @@ resource "google_workstations_workstation_config" "default" { } `, context) } + +func TestAccWorkstationsWorkstationConfig_vmTags(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: 
testAccCheckWorkstationsWorkstationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccWorkstationsWorkstationConfig_vmTags(context), + }, + { + ResourceName: "google_workstations_workstation_cluster.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag"}, + }, + }, + }) +} + +func testAccWorkstationsWorkstationConfig_vmTags(context map[string]interface{}) string { + return acctest.Nprintf(` + data "google_project" "project" { + provider = "google-beta" + } + + resource "google_tags_tag_key" "tag_key1" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key1%{random_suffix}" + } + + resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "tf_test_tag_value1%{random_suffix}" + } + +resource "google_compute_network" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + auto_create_subnetworks = false +} + +resource "google_compute_subnetwork" "default" { + provider = google-beta + name = "tf-test-workstation-cluster%{random_suffix}" + ip_cidr_range = "10.0.0.0/24" + region = "us-central1" + network = google_compute_network.default.name +} + +resource "google_workstations_workstation_cluster" "default" { + provider = google-beta + workstation_cluster_id = "tf-test-workstation-cluster%{random_suffix}" + network = google_compute_network.default.id + subnetwork = google_compute_subnetwork.default.id + location = "us-central1" +} + +resource "google_workstations_workstation_config" "default" { + provider = google-beta + workstation_config_id = "tf-test-workstation-config%{random_suffix}" + workstation_cluster_id = google_workstations_workstation_cluster.default.workstation_cluster_id + location = "us-central1" + + host { + gce_instance { + machine_type = "e2-standard-4" + boot_disk_size_gb = 35 + 
disable_public_ip_addresses = true + vm_tags = { + "tagKeys/${google_tags_tag_key.tag_key1.name}" = "tagValues/${google_tags_tag_value.tag_value1.name}" + } + } + } + +} +`, context) +} + <% end -%> From 52459d3601276ca84da3f6a2aa5e6be10287a841 Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Thu, 27 Jun 2024 14:45:43 -0700 Subject: [PATCH 239/356] go rewrite fix nested schema template (#11071) --- mmv1/templates/terraform/resource.go.tmpl | 2 -- mmv1/templates/terraform/schema_subresource.go.tmpl | 3 ++- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 84f1248fa77d..02cb0a4878d3 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -172,9 +172,7 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { } {{- range $prop := $.AllUserProperties }} -{{if and (eq $prop.Type "Array") ($prop.IsSet) (eq $prop.ItemType.Type "NestedObject")}} {{template "SchemaSubResource" $prop}} -{{end}} {{- end}} {{- range $prop := $.UnorderedListProperties }} diff --git a/mmv1/templates/terraform/schema_subresource.go.tmpl b/mmv1/templates/terraform/schema_subresource.go.tmpl index 86e1f7a1c869..485f1425ebdb 100644 --- a/mmv1/templates/terraform/schema_subresource.go.tmpl +++ b/mmv1/templates/terraform/schema_subresource.go.tmpl @@ -13,6 +13,7 @@ # limitations under the License. 
*/}} {{define "SchemaSubResource"}} +{{if and (eq .Type "Array") (.IsSet) (eq .ItemType.Type "NestedObject")}} {{ if and (.IsSet) (eq .Type "Array") (eq .ItemType.Type "NestedObject") -}} func {{ .NamespaceProperty }}Schema() *schema.Resource { return &schema.Resource{ @@ -24,7 +25,7 @@ func {{ .NamespaceProperty }}Schema() *schema.Resource { } } {{ end -}} - +{{end}} {{ if .NestedProperties }} {{- range $prop := $.NestedProperties }} {{template "SchemaSubResource" $prop}} From c60abd77c1ff0a1fc176201561c7c2fe5ed0a262 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Thu, 27 Jun 2024 14:47:13 -0700 Subject: [PATCH 240/356] Fix Go rewrite diffs for updateMask and CustomizeDiff (#11072) --- mmv1/templates/terraform/extra_schema_entry/go/route.tmpl | 2 +- mmv1/templates/terraform/extra_schema_entry/route.erb | 2 +- mmv1/templates/terraform/resource.go.tmpl | 5 +++++ mmv1/templates/terraform/schema_property.go.tmpl | 2 +- mmv1/templates/terraform/update_mask.go.tmpl | 2 +- 5 files changed, 9 insertions(+), 4 deletions(-) diff --git a/mmv1/templates/terraform/extra_schema_entry/go/route.tmpl b/mmv1/templates/terraform/extra_schema_entry/go/route.tmpl index e395a1edcdf3..703d043d4696 100644 --- a/mmv1/templates/terraform/extra_schema_entry/go/route.tmpl +++ b/mmv1/templates/terraform/extra_schema_entry/go/route.tmpl @@ -1,4 +1,4 @@ -"next_hop_instance_zone": &schema.Schema{ +"next_hop_instance_zone": { Type: schema.TypeString, Optional: true, ForceNew: true, diff --git a/mmv1/templates/terraform/extra_schema_entry/route.erb b/mmv1/templates/terraform/extra_schema_entry/route.erb index e395a1edcdf3..703d043d4696 100644 --- a/mmv1/templates/terraform/extra_schema_entry/route.erb +++ b/mmv1/templates/terraform/extra_schema_entry/route.erb @@ -1,4 +1,4 @@ -"next_hop_instance_zone": &schema.Schema{ +"next_hop_instance_zone": { Type: schema.TypeString, Optional: true, ForceNew: true, diff --git a/mmv1/templates/terraform/resource.go.tmpl 
b/mmv1/templates/terraform/resource.go.tmpl index 02cb0a4878d3..e5820a3091a4 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -109,6 +109,11 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { {{- end }} {{- if or (and (or $.HasProject $.HasRegion $.HasZone) (not $.SkipDefaultCdiff)) $.CustomDiff }} CustomizeDiff: customdiff.All( +{{- if $.UnorderedListProperties }} +{{- range $prop := $.UnorderedListProperties }} + resource{{ $.ResourceName }}{{ camelize $prop.Name "upper" }}SetStyleDiff, +{{- end}} +{{- end}} {{- if $.CustomDiff -}} {{- range $cdiff := $.CustomDiff }} {{ $cdiff }}, diff --git a/mmv1/templates/terraform/schema_property.go.tmpl b/mmv1/templates/terraform/schema_property.go.tmpl index 202e05563b2f..57bde43a980b 100644 --- a/mmv1/templates/terraform/schema_property.go.tmpl +++ b/mmv1/templates/terraform/schema_property.go.tmpl @@ -122,7 +122,7 @@ Default value: {{ .ItemType.DefaultValue -}} DiffSuppressFunc: tpgresource.CompareSelfLinkOrResourceName, {{- end }} {{ else -}} - Type: schema.Type{{ .ItemTypeClass -}}, + Type: {{ .TFType .ItemType.Type }}, {{ end -}} }, {{ end -}} diff --git a/mmv1/templates/terraform/update_mask.go.tmpl b/mmv1/templates/terraform/update_mask.go.tmpl index 6702943ade2e..3f29c3fd10ad 100644 --- a/mmv1/templates/terraform/update_mask.go.tmpl +++ b/mmv1/templates/terraform/update_mask.go.tmpl @@ -16,7 +16,7 @@ updateMask := []string{} {{- range $key := $.GetPropertyUpdateMasksGroupKeys $.UpdateBodyProperties }} if d.HasChange("{{ $key }}") { - updateMask = append(updateMask, "{{ join (index $maskGroups $key) "\",\""}}") + updateMask = append(updateMask, "{{ join (index $maskGroups $key) "\",\n\""}}") } {{- end }} // updateMask is a URL parameter but not present in the schema, so ReplaceVars From 9c705fb79114eeb1f7fb4aa9e160304ce9443336 Mon Sep 17 00:00:00 2001 From: Pournami Rajan <167927364+pournami-rajan@users.noreply.github.com> Date: Thu, 27 Jun 2024 
15:10:36 -0700 Subject: [PATCH 241/356] Add sourceRegions field to google compute health check resource. (#10878) --- mmv1/products/compute/HealthCheck.yaml | 24 +++++++ ...te_health_check_http_source_regions.tf.erb | 12 ++++ ...e_health_check_https_source_regions.tf.erb | 12 ++++ ...ute_health_check_tcp_source_regions.tf.erb | 12 ++++ .../resource_compute_health_check_test.go.erb | 65 +++++++++++++++++++ 5 files changed, 125 insertions(+) create mode 100644 mmv1/templates/terraform/examples/compute_health_check_http_source_regions.tf.erb create mode 100644 mmv1/templates/terraform/examples/compute_health_check_https_source_regions.tf.erb create mode 100644 mmv1/templates/terraform/examples/compute_health_check_tcp_source_regions.tf.erb diff --git a/mmv1/products/compute/HealthCheck.yaml b/mmv1/products/compute/HealthCheck.yaml index f0e233c595a2..4f5b45c9b6b0 100644 --- a/mmv1/products/compute/HealthCheck.yaml +++ b/mmv1/products/compute/HealthCheck.yaml @@ -170,6 +170,30 @@ properties: The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec. default_value: 5 + - !ruby/object:Api::Type::Array + name: 'sourceRegions' + item_type: Api::Type::String + min_size: 3 + max_size: 3 + min_version: beta + description: | + The list of cloud regions from which health checks are performed. If + any regions are specified, then exactly 3 regions should be specified. + The region names must be valid names of Google Cloud regions. This can + only be set for global health check. If this list is non-empty, then + there are restrictions on what other health check fields are supported + and what other resources can use this health check: + + * SSL, HTTP2, and GRPC protocols are not supported. + + * The TCP request field is not supported. + + * The proxyHeader field for HTTP, HTTPS, and TCP is not supported. + + * The checkIntervalSec field must be at least 30. 
+ + * The health check cannot be used with BackendService nor with managed + instance group auto-healing. - !ruby/object:Api::Type::Integer name: 'unhealthyThreshold' description: | diff --git a/mmv1/templates/terraform/examples/compute_health_check_http_source_regions.tf.erb b/mmv1/templates/terraform/examples/compute_health_check_http_source_regions.tf.erb new file mode 100644 index 000000000000..84e955e6d6dc --- /dev/null +++ b/mmv1/templates/terraform/examples/compute_health_check_http_source_regions.tf.erb @@ -0,0 +1,12 @@ +resource "google_compute_health_check" "<%= ctx[:primary_resource_id] %>" { + provider = "google-beta" + name = "<%= ctx[:vars]['health_check_name'] %>" + check_interval_sec = 30 + + http_health_check { + port = 80 + port_specification = "USE_FIXED_PORT" + } + + source_regions = ["us-west1", "us-central1", "us-east5"] +} diff --git a/mmv1/templates/terraform/examples/compute_health_check_https_source_regions.tf.erb b/mmv1/templates/terraform/examples/compute_health_check_https_source_regions.tf.erb new file mode 100644 index 000000000000..44747f0ba7b3 --- /dev/null +++ b/mmv1/templates/terraform/examples/compute_health_check_https_source_regions.tf.erb @@ -0,0 +1,12 @@ +resource "google_compute_health_check" "<%= ctx[:primary_resource_id] %>" { + provider = "google-beta" + name = "<%= ctx[:vars]['health_check_name'] %>" + check_interval_sec = 30 + + https_health_check { + port = 80 + port_specification = "USE_FIXED_PORT" + } + + source_regions = ["us-west1", "us-central1", "us-east5"] +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/compute_health_check_tcp_source_regions.tf.erb b/mmv1/templates/terraform/examples/compute_health_check_tcp_source_regions.tf.erb new file mode 100644 index 000000000000..9f1da3a2e2e8 --- /dev/null +++ b/mmv1/templates/terraform/examples/compute_health_check_tcp_source_regions.tf.erb @@ -0,0 +1,12 @@ +resource "google_compute_health_check" "<%= ctx[:primary_resource_id] %>" { + 
provider = "google-beta" + name = "<%= ctx[:vars]['health_check_name'] %>" + check_interval_sec = 30 + + tcp_health_check { + port = 80 + port_specification = "USE_FIXED_PORT" + } + + source_regions = ["us-west1", "us-central1", "us-east5"] +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_health_check_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_health_check_test.go.erb index b6630fd7e94e..45adf7269b1e 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_health_check_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_health_check_test.go.erb @@ -383,3 +383,68 @@ resource "google_compute_health_check" "foobar" { `, hckName) } <% end -%> + +<% unless version == 'ga' -%> + +func TestAccComputeHealthCheck_srcRegions_update(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeHealthCheck_srcRegions(hckName), + }, + { + ResourceName: "google_compute_health_check.src_region", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeHealthCheck_srcRegions_update(hckName), + }, + { + ResourceName: "google_compute_health_check.src_region", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + +func testAccComputeHealthCheck_srcRegions(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "src_region" { + provider = "google-beta" + name = "%s" + description = "Resource created for Terraform acceptance testing" + check_interval_sec = 30 + source_regions = ["us-central1", "us-east1", "asia-south1"] + http_health_check { + port = "80" + } +} +`, 
hckName) +} + +func testAccComputeHealthCheck_srcRegions_update(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "src_region" { + provider = "google-beta" + name = "%s" + description = "Resource updated for Terraform acceptance testing" + check_interval_sec = 30 + source_regions = ["us-west1", "europe-north1", "asia-south1"] + http_health_check { + port = "80" + } +} +`, hckName) +} + +<% end -%> From f93c219bcc77fe76357941e496338649fded64a2 Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Thu, 27 Jun 2024 15:31:57 -0700 Subject: [PATCH 242/356] go rewrite workstations diffs (#11073) --- mmv1/api/resource.go | 3 --- mmv1/templates/terraform/iam_policy.go.tmpl | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 5a91ac950662..344e236f43c3 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -593,9 +593,6 @@ func buildEffectiveLabelsField(name string, labels *Type) *Type { "including the %s configured through Terraform, other clients and services.", name, name) t := "KeyValueEffectiveLabels" - if name == "annotations" { - t = "KeyValueEffectiveAnnotations" - } n := fmt.Sprintf("effective%s", strings.Title(name)) diff --git a/mmv1/templates/terraform/iam_policy.go.tmpl b/mmv1/templates/terraform/iam_policy.go.tmpl index 1c476b0631c6..fc06518313b5 100644 --- a/mmv1/templates/terraform/iam_policy.go.tmpl +++ b/mmv1/templates/terraform/iam_policy.go.tmpl @@ -97,7 +97,7 @@ func {{ $.ResourceName }}IamUpdaterProducer(d tpgresource.TerraformResourceData, values["{{ $param }}"] = v.(string) } -{{- end }} +{{ end }} {{- end }}{{- /* range $param := $.IamResourceParams */}} // We may have gotten either a long or short name, so attempt to parse long name if possible From eff19adf97250c0d62e8c417e9d7cefb60203f35 Mon Sep 17 00:00:00 2001 From: thokalavinod Date: Thu, 27 Jun 2024 22:52:36 +0000 Subject: [PATCH 243/356] Notifcation config (#10817) --- 
.../OrganizationNotificationConfig.yaml | 129 ++++++++++++++++++ mmv1/products/securitycenterv2/product.yaml | 23 ++++ ..._self_link_as_name_set_organization.go.erb | 26 ++++ ...anization_notification_config_basic.tf.erb | 15 ++ .../components/inputs/services_beta.kt | 5 + .../components/inputs/services_ga.kt | 5 + ...2_organization_notification_config_test.go | 87 ++++++++++++ 7 files changed, 290 insertions(+) create mode 100644 mmv1/products/securitycenterv2/OrganizationNotificationConfig.yaml create mode 100644 mmv1/products/securitycenterv2/product.yaml create mode 100644 mmv1/templates/terraform/custom_import/scc_v2_source_self_link_as_name_set_organization.go.erb create mode 100644 mmv1/templates/terraform/examples/scc_v2_organization_notification_config_basic.tf.erb create mode 100644 mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_organization_notification_config_test.go diff --git a/mmv1/products/securitycenterv2/OrganizationNotificationConfig.yaml b/mmv1/products/securitycenterv2/OrganizationNotificationConfig.yaml new file mode 100644 index 000000000000..1026cf8d27be --- /dev/null +++ b/mmv1/products/securitycenterv2/OrganizationNotificationConfig.yaml @@ -0,0 +1,129 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Resource +name: 'OrganizationNotificationConfig' +base_url: organizations/{{organization}}/locations/{{location}}/notificationConfigs +self_link: '{{name}}' +create_url: organizations/{{organization}}/locations/{{location}}/notificationConfigs?configId={{config_id}} +update_verb: :PATCH +update_mask: true +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets and etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v2/organizations.locations.notificationConfigs' +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'scc_v2_organization_notification_config_basic' + primary_resource_id: 'custom_organization_notification_config' + vars: + topic_name: 'my-topic' + config_id: 'my-config' + test_env_vars: + org_id: :ORG_ID +custom_code: !ruby/object:Provider::Terraform::CustomCode + custom_import: templates/terraform/custom_import/scc_v2_source_self_link_as_name_set_organization.go.erb + post_create: templates/terraform/post_create/set_computed_name.erb +parameters: + - !ruby/object:Api::Type::String + name: organization + required: true + immutable: true + url_param_only: true + description: | + The organization whose Cloud Security Command Center the Notification + Config lives in. 
+ - !ruby/object:Api::Type::String + name: configId + required: true + immutable: true + url_param_only: true + description: | + This must be unique within the organization. + - !ruby/object:Api::Type::String + name: location + immutable: true + url_param_only: true + default_value: global + description: | + location Id is provided by organization. If not provided, Use global as default. +properties: + - !ruby/object:Api::Type::String + name: name + output: true + description: | + The resource name of this notification config, in the format + `organizations/{{organization}}/notificationConfigs/{{config_id}}`. + - !ruby/object:Api::Type::String + name: description + description: | + The description of the notification config (max of 1024 characters). + validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.StringLenBetween(0, 1024)' + - !ruby/object:Api::Type::String + name: pubsubTopic + required: true + description: | + The Pub/Sub topic to send notifications to. Its format is + "projects/[project_id]/topics/[topic]". + - !ruby/object:Api::Type::String + name: serviceAccount + output: true + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + - !ruby/object:Api::Type::NestedObject + name: streamingConfig + required: true + description: | + The config for triggering streaming-based notifications. + update_mask_fields: + - 'streamingConfig.filter' + properties: + - !ruby/object:Api::Type::String + name: filter + required: true + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. 
The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. diff --git a/mmv1/products/securitycenterv2/product.yaml b/mmv1/products/securitycenterv2/product.yaml new file mode 100644 index 000000000000..beddce3f145d --- /dev/null +++ b/mmv1/products/securitycenterv2/product.yaml @@ -0,0 +1,23 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +--- !ruby/object:Api::Product +name: SecurityCenterV2 +display_name: Security Command Center (SCC)v2 API +legacy_name: scc_v2 +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://securitycenter.googleapis.com/v2/ +scopes: + - https://www.googleapis.com/auth/cloud-platform diff --git a/mmv1/templates/terraform/custom_import/scc_v2_source_self_link_as_name_set_organization.go.erb b/mmv1/templates/terraform/custom_import/scc_v2_source_self_link_as_name_set_organization.go.erb new file mode 100644 index 000000000000..b74ffa3fd24d --- /dev/null +++ b/mmv1/templates/terraform/custom_import/scc_v2_source_self_link_as_name_set_organization.go.erb @@ -0,0 +1,26 @@ +config := meta.(*transport_tpg.Config) + +// current import_formats can't import fields with forward slashes in their value +if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err +} + +stringParts := strings.Split(d.Get("name").(string), "/") +if len(stringParts) != 6 { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "organizations/{{organization}}/locations/{{location}}/notificationConfigs/{{config_id}}", + ) +} + +if err := d.Set("organization", stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting organization: %s", err) +} +if err := d.Set("location", stringParts[3]); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) +} +if err := d.Set("config_id", stringParts[5]); err != nil { + return nil, fmt.Errorf("Error setting config_id: %s", err) +} +return []*schema.ResourceData{d}, nil diff --git a/mmv1/templates/terraform/examples/scc_v2_organization_notification_config_basic.tf.erb b/mmv1/templates/terraform/examples/scc_v2_organization_notification_config_basic.tf.erb new file mode 100644 index 000000000000..c186e028d4ee --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_v2_organization_notification_config_basic.tf.erb @@ -0,0 +1,15 @@ 
+resource "google_pubsub_topic" "scc_v2_organization_notification_config" { + name = "<%= ctx[:vars]['topic_name'] %>" +} + +resource "google_scc_v2_organization_notification_config" "<%= ctx[:primary_resource_id] %>" { + config_id = "<%= ctx[:vars]['config_id'] %>" + organization = "<%= ctx[:test_env_vars]['org_id'] %>" + location = "global" + description = "My custom Cloud Security Command Center Finding Organization Notification Configuration" + pubsub_topic = google_pubsub_topic.scc_v2_organization_notification_config.id + + streaming_config { + filter = "category = \"OPEN_FIREWALL\" AND state = \"ACTIVE\"" + } +} diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt index 7804a4a11ad7..92b46f197d84 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt @@ -626,6 +626,11 @@ var ServicesListBeta = mapOf( "displayName" to "Securitycentermanagement", "path" to "./google-beta/services/securitycentermanagement" ), + "securitycenterv2" to mapOf( + "name" to "securitycenterv2", + "displayName" to "securitycenterv2", + "path" to "./google-beta/services/securitycenterv2" + ), "securityposture" to mapOf( "name" to "securityposture", "displayName" to "Securityposture", diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt index 76cf658afd9b..c0d118fdc1b6 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt @@ -621,6 +621,11 @@ var ServicesListGa = mapOf( "displayName" to "Securitycentermanagement", "path" to "./google/services/securitycentermanagement" ), + "securitycenterv2" to mapOf( + "name" to "securitycenterv2", + "displayName" to "securitycenterv2", + 
"path" to "./google/services/securitycenterv2" + ), "securityposture" to mapOf( "name" to "securityposture", "displayName" to "Securityposture", diff --git a/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_organization_notification_config_test.go b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_organization_notification_config_test.go new file mode 100644 index 000000000000..aa0211acb79b --- /dev/null +++ b/mmv1/third_party/terraform/services/securitycenterv2/resource_scc_v2_organization_notification_config_test.go @@ -0,0 +1,87 @@ +package securitycenterv2_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccSecurityCenterV2OrganizationNotificationConfig_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterV2OrganizationNotificationConfig_basic(context), + }, + { + ResourceName: "google_scc_v2_organization_notification_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "config_id", + }, + }, + { + Config: testAccSecurityCenterV2OrganizationNotificationConfig_update(context), + }, + { + ResourceName: "google_scc_v2_organization_notification_config.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "config_id", + }, + }, + }, + }) +} + +func testAccSecurityCenterV2OrganizationNotificationConfig_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_pubsub_topic" 
"scc_v2_organization_notification_config" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_scc_v2_organization_notification_config" "default" { + config_id = "tf-test-config-%{random_suffix}" + organization = "%{org_id}" + location = "global" + description = "A test organization notification config" + pubsub_topic = google_pubsub_topic.scc_v2_organization_notification_config.id + + streaming_config { + filter = "severity = \"HIGH\"" + } +} +`, context) +} + +func testAccSecurityCenterV2OrganizationNotificationConfig_update(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_pubsub_topic" "scc_v2_organization_notification_config" { + name = "tf-test-topic-%{random_suffix}" +} + +resource "google_scc_v2_organization_notification_config" "default" { + config_id = "tf-test-config-%{random_suffix}" + organization = "%{org_id}" + location = "global" + description = "An updated test organization notification config" + pubsub_topic = google_pubsub_topic.scc_v2_organization_notification_config.id + + streaming_config { + filter = "severity = \"CRITICAL\"" + } +} +`, context) +} From 7da306697a4c3487098484a54dc18750ff547a3e Mon Sep 17 00:00:00 2001 From: dfdossantos Date: Fri, 28 Jun 2024 13:54:18 +0000 Subject: [PATCH 244/356] Add support for ```google_app_engine_standard_app_version``` to TGC (#11055) --- mmv1/provider/terraform_tgc.rb | 4 +- mmv1/templates/tgc/resource_converters.go.erb | 1 + .../tgc/appengine_standard_version.go | 1014 +++++++++++++++++ ...oogle_app_engine_standard_app_version.json | 28 + ..._google_app_engine_standard_app_version.tf | 28 + 5 files changed, 1074 insertions(+), 1 deletion(-) create mode 100644 mmv1/third_party/tgc/appengine_standard_version.go create mode 100644 mmv1/third_party/tgc/tests/data/example_google_app_engine_standard_app_version.json create mode 100644 mmv1/third_party/tgc/tests/data/example_google_app_engine_standard_app_version.tf diff --git a/mmv1/provider/terraform_tgc.rb 
b/mmv1/provider/terraform_tgc.rb index 075888bfb87d..034a35d4665d 100644 --- a/mmv1/provider/terraform_tgc.rb +++ b/mmv1/provider/terraform_tgc.rb @@ -336,7 +336,9 @@ def copy_common_files(output_folder, generate_code, _generate_docs) ['converters/google/resources/logging_project_bucket_config.go', 'third_party/tgc/logging_project_bucket_config.go'], ['converters/google/resources/logging_billing_account_bucket_config.go', - 'third_party/tgc/logging_billing_account_bucket_config.go'] + 'third_party/tgc/logging_billing_account_bucket_config.go'], + ['converters/google/resources/appengine_standard_version.go', + 'third_party/tgc/appengine_standard_version.go'] ]) end diff --git a/mmv1/templates/tgc/resource_converters.go.erb b/mmv1/templates/tgc/resource_converters.go.erb index 7b27f8edc4ff..e0a3f62a3496 100644 --- a/mmv1/templates/tgc/resource_converters.go.erb +++ b/mmv1/templates/tgc/resource_converters.go.erb @@ -115,6 +115,7 @@ func ResourceConverters() map[string][]cai.ResourceConverter { "google_storage_bucket_iam_member": {resourceConverterStorageBucketIamMember()}, "google_compute_node_group": {compute.ResourceConverterComputeNodeGroup()}, "google_logging_folder_bucket_config": {resourceConverterLogFolderBucket()}, + "google_app_engine_standard_app_version": {resourceAppEngineStandardAppVersion()}, "google_logging_organization_bucket_config": {resourceConverterLogOrganizationBucket()}, "google_logging_project_bucket_config": {resourceConverterLogProjectBucket()}, "google_logging_billing_account_bucket_config": {resourceConverterLogBillingAccountBucket()}, diff --git a/mmv1/third_party/tgc/appengine_standard_version.go b/mmv1/third_party/tgc/appengine_standard_version.go new file mode 100644 index 000000000000..a86bfb70e97d --- /dev/null +++ b/mmv1/third_party/tgc/appengine_standard_version.go @@ -0,0 +1,1014 @@ +package google + +import ( + "reflect" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + 
"github.com/GoogleCloudPlatform/terraform-google-conversion/v5/tfplan2cai/converters/google/resources/cai" + "github.com/hashicorp/terraform-provider-google-beta/google-beta/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google-beta/google-beta/transport" +) + +const AppEngineVersionAssetType string = "appengine.googleapis.com/Version" + +func resourceAppEngineStandardAppVersion() cai.ResourceConverter { + return cai.ResourceConverter{ + AssetType: AppEngineVersionAssetType, + Convert: GetAppEngineVersionCaiObject, + } +} + +func GetAppEngineVersionCaiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) ([]cai.Asset, error) { + name, err := cai.AssetName(d, config, "//appengine.googleapis.com/apps/{{project}}/services/default/versions/v1") + if err != nil { + return []cai.Asset{}, err + } + if obj, err := GetAppEngineVersionApiObject(d, config); err == nil { + return []cai.Asset{{ + Name: name, + Type: AppEngineVersionAssetType, + Resource: &cai.AssetResource{ + Version: "v1", + DiscoveryDocumentURI: "https://www.googleapis.com/discovery/v1/apis/appengine/v1/rest", + DiscoveryName: "Version", + Data: obj, + }, + }}, nil + } else { + return []cai.Asset{}, err + } +} + +func GetAppEngineVersionApiObject(d tpgresource.TerraformResourceData, config *transport_tpg.Config) (map[string]interface{}, error) { + obj := make(map[string]interface{}) + + nameProp, err := expandAppEngineVersionName(d.Get("name"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("name"); !tpgresource.IsEmptyValue(reflect.ValueOf(nameProp)) && (ok || !reflect.DeepEqual(v, nameProp)) { + obj["name"] = nameProp + } + + idProp, err := expandAppEngineVersionId(d.Get("version_id"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("version_id"); !tpgresource.IsEmptyValue(reflect.ValueOf(idProp)) && (ok || !reflect.DeepEqual(v, idProp)) { + obj["id"] = idProp + } + + automaticScalingProp, err := 
expandAppEngineVersionAutomaticScaling(d.Get("automatic_scaling"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("automatic_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(automaticScalingProp)) && (ok || !reflect.DeepEqual(v, automaticScalingProp)) { + obj["automaticScaling"] = automaticScalingProp + } + + basicScalingProp, err := expandAppEngineVersionBasicScaling(d.Get("basic_scaling"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("basic_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(basicScalingProp)) && (ok || !reflect.DeepEqual(v, basicScalingProp)) { + obj["basicScaling"] = basicScalingProp + } + + manualScalingProp, err := expandAppEngineVersionManualScaling(d.Get("manual_scaling"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("manual_scaling"); !tpgresource.IsEmptyValue(reflect.ValueOf(manualScalingProp)) && (ok || !reflect.DeepEqual(v, manualScalingProp)) { + obj["manualScaling"] = manualScalingProp + } + + instanceClassProp, err := expandAppEngineVersionInstanceClass(d.Get("instance_class"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("instance_class"); !tpgresource.IsEmptyValue(reflect.ValueOf(instanceClassProp)) && (ok || !reflect.DeepEqual(v, instanceClassProp)) { + obj["instanceClass"] = instanceClassProp + } + + zonesProp, err := expandAppEngineVersionZones(d.Get("zones"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("zones"); !tpgresource.IsEmptyValue(reflect.ValueOf(zonesProp)) && (ok || !reflect.DeepEqual(v, zonesProp)) { + obj["zones"] = zonesProp + } + + runtimeProp, err := expandAppEngineVersionRuntime(d.Get("runtime"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("runtime"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeProp)) && (ok || !reflect.DeepEqual(v, runtimeProp)) { + obj["runtime"] = runtimeProp + } + 
+ threadsafeProp, err := expandAppEngineVersionThreadsafe(d.Get("threadsafe"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("threadsafe"); !tpgresource.IsEmptyValue(reflect.ValueOf(threadsafeProp)) && (ok || !reflect.DeepEqual(v, threadsafeProp)) { + obj["threadsafe"] = threadsafeProp + } + + vmProp, err := expandAppEngineVersionVm(d.Get("vm"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("vm"); !tpgresource.IsEmptyValue(reflect.ValueOf(vmProp)) && (ok || !reflect.DeepEqual(v, vmProp)) { + obj["vm"] = vmProp + } + + appEngineApisProp, err := expandAppEngineVersionAppEngineApis(d.Get("app_engine_apis"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("app_engine_apis"); !tpgresource.IsEmptyValue(reflect.ValueOf(appEngineApisProp)) && (ok || !reflect.DeepEqual(v, appEngineApisProp)) { + obj["appEngineApis"] = appEngineApisProp + } + + envProp, err := expandAppEngineVersionEnv(d.Get("env"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("env"); !tpgresource.IsEmptyValue(reflect.ValueOf(envProp)) && (ok || !reflect.DeepEqual(v, envProp)) { + obj["env"] = envProp + } + + createTimeProp, err := expandAppEngineVersionCreateTime(d.Get("create"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("create"); !tpgresource.IsEmptyValue(reflect.ValueOf(createTimeProp)) && (ok || !reflect.DeepEqual(v, createTimeProp)) { + obj["createTime"] = createTimeProp + } + + runtimeApiVersionProp, err := expandAppEngineVersionRuntimeApiVersion(d.Get("runtime_api_version"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("runtime_api_version"); !tpgresource.IsEmptyValue(reflect.ValueOf(runtimeApiVersionProp)) && (ok || !reflect.DeepEqual(v, runtimeApiVersionProp)) { + obj["runtimeApiVersion"] = runtimeApiVersionProp + } + + serviceAccountProp, err := 
expandAppEngineVersionServiceAccount(d.Get("service_account"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("service_account"); !tpgresource.IsEmptyValue(reflect.ValueOf(serviceAccountProp)) && (ok || !reflect.DeepEqual(v, serviceAccountProp)) { + obj["serviceAccount"] = serviceAccountProp + } + + handlersProp, err := expandAppEngineVersionHandlers(d.Get("handlers"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("handlers"); !tpgresource.IsEmptyValue(reflect.ValueOf(handlersProp)) && (ok || !reflect.DeepEqual(v, handlersProp)) { + obj["handlers"] = handlersProp + } + + librariesProp, err := expandAppEngineVersionLibraries(d.Get("libraries"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("libraries"); !tpgresource.IsEmptyValue(reflect.ValueOf(librariesProp)) && (ok || !reflect.DeepEqual(v, librariesProp)) { + obj["libraries"] = librariesProp + } + + deploymentProp, err := expandAppEngineVersionDeployment(d.Get("deployment"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("deployment"); !tpgresource.IsEmptyValue(reflect.ValueOf(deploymentProp)) && (ok || !reflect.DeepEqual(v, deploymentProp)) { + obj["deployment"] = deploymentProp + } + + entrypointProp, err := expandAppEngineVersionEntrypoint(d.Get("entrypoint"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("entrypoint"); !tpgresource.IsEmptyValue(reflect.ValueOf(entrypointProp)) && (ok || !reflect.DeepEqual(v, entrypointProp)) { + obj["entrypoint"] = entrypointProp + } + + envVariablesProp, err := expandAppEngineVersionEnvVariables(d.Get("env_variables"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("envVariables"); !tpgresource.IsEmptyValue(reflect.ValueOf(envVariablesProp)) && (ok || !reflect.DeepEqual(v, envVariablesProp)) { + obj["envVariables"] = envVariablesProp + } + + vpcAccessConnectorProp, 
err := expandAppEngineVersionVpcAccessConnector(d.Get("vpc_access_connector"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("vpc_access_connector"); !tpgresource.IsEmptyValue(reflect.ValueOf(vpcAccessConnectorProp)) && (ok || !reflect.DeepEqual(v, vpcAccessConnectorProp)) { + obj["vpcAccessConnector"] = vpcAccessConnectorProp + } + + inboundServicesProp, err := expandAppEngineVersionInboundServices(d.Get("inbound_services"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("inbound_services"); !tpgresource.IsEmptyValue(reflect.ValueOf(inboundServicesProp)) && (ok || !reflect.DeepEqual(v, inboundServicesProp)) { + obj["inboundServices"] = inboundServicesProp + } + + projectProp, err := expandAppEngineVersionProject(d.Get("project"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("project"); !tpgresource.IsEmptyValue(reflect.ValueOf(projectProp)) && (ok || !reflect.DeepEqual(v, projectProp)) { + obj["project"] = projectProp + } + + noopOnDestroyProp, err := expandAppEngineVersionNoopOnDestroy(d.Get("noop_on_destroy"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("noop_on_destroy"); !tpgresource.IsEmptyValue(reflect.ValueOf(noopOnDestroyProp)) && (ok || !reflect.DeepEqual(v, noopOnDestroyProp)) { + obj["noopOnDestroy"] = noopOnDestroyProp + } + + deleteServiceOnDestroyProp, err := expandAppEngineVersionDeleteServiceOnDestroy(d.Get("delete_service_on_destroy"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("delete_service_on_destroy"); !tpgresource.IsEmptyValue(reflect.ValueOf(deleteServiceOnDestroyProp)) && (ok || !reflect.DeepEqual(v, deleteServiceOnDestroyProp)) { + obj["deleteServiceOnDestroy"] = deleteServiceOnDestroyProp + } + + serviceProp, err := expandAppEngineVersionService(d.Get("service"), d, config) + if err != nil { + return nil, err + } else if v, ok := d.GetOkExists("service"); 
!tpgresource.IsEmptyValue(reflect.ValueOf(serviceProp)) && (ok || !reflect.DeepEqual(v, serviceProp)) { + obj["service"] = serviceProp + } + + return obj, nil +} + +func expandAppEngineVersionService(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionDeleteServiceOnDestroy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionNoopOnDestroy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionProject(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionInboundServices(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + return v, nil +} + +func expandAppEngineVersionVpcAccessConnector(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandAppEngineVersionName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedEgressSetting, err := expandAppEngineVersionEgressSetting(original["egress_setting"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEgressSetting); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["egressSetting"] = transformedEgressSetting + } + + return transformed, nil +} + +func 
expandAppEngineVersionEgressSetting(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionEnvVariables(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionId(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionAutomaticScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedMaxConcurrentRequests, err := expandAppEngineVersionMaxConcurrentRequests(original["max_concurrent_requests"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxConcurrentRequests); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxConcurrentRequests"] = transformedMaxConcurrentRequests + } + + transformedMaxIdleInstances, err := expandAppEngineVersionMaxIdleInstances(original["max_idle_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxIdleInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxIdleInstances"] = transformedMaxIdleInstances + } + + transformedMaxPendingLatency, err := expandAppEngineVersionMaxPendingLatency(original["max_pending_latency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxPendingLatency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + 
transformed["maxPendingLatency"] = transformedMaxPendingLatency + } + + transformedMinIdleInstances, err := expandAppEngineVersionMinIdleInstances(original["min_idle_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinIdleInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minIdleInstances"] = transformedMinIdleInstances + } + + transformedMinPendingLatency, err := expandAppEngineVersionMinPendingLatency(original["min_pending_latency"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinPendingLatency); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minPendingLatency"] = transformedMinPendingLatency + } + + transformedStandardSchedulerSettings, err := expandAppEngineVersionStandardSchedulerSettings(original["standard_scheduler_settings"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStandardSchedulerSettings); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["standardSchedulerSettings"] = transformedStandardSchedulerSettings + } + + return transformed, nil +} + +func expandAppEngineVersionStandardSchedulerSettings(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedTargetCpuUtilization, err := expandAppEngineVersionTargetCpuUtilization(original["target_cpu_utilization"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetCpuUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetCpuUtilization"] = transformedTargetCpuUtilization + } + + transformedTargetThroughputUtilization, err := 
expandAppEngineVersionTargetThroughputUtilization(original["target_throughput_utilization"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTargetThroughputUtilization); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["targetThroughputUtilization"] = transformedTargetThroughputUtilization + } + + transformedMinInstances, err := expandAppEngineVersionMinInstances(original["min_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMinInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["minInstances"] = transformedMinInstances + } + + transformedMaxInstances, err := expandAppEngineVersionMaxInstances(original["max_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxInstances"] = transformedMaxInstances + } + + return transformed, nil +} + +func expandAppEngineVersionForwardedPorts(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionInstanceTag(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionSubnetworkName(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionSessionAffinity(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionCoolDownPeriod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionAggregationWindowLength(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMaxConcurrentRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMaxIdleInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMaxTotalInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMaxPendingLatency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMinIdleInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMinTotalInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMinPendingLatency(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetRequestCountPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetConcurrentRequests(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetWriteOpsPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func 
expandAppEngineVersionTargetReadBytesPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetWriteBytesPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetReadOpsPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetSentBytesPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetSentPacketsPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetReceivedBytesPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetReceivedPacketsPerSecond(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetCpuUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionTargetThroughputUtilization(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMinInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMaxInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionBasicScaling(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedIdleTimeout, err := expandAppEngineVersionIdleTimeout(original["idle_timeout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedIdleTimeout); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["idleTimeout"] = transformedIdleTimeout + } + + transformedMaxInstances, err := expandAppEngineVersionMaxInstances(original["max_instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["maxInstances"] = transformedMaxInstances + } + + return transformed, nil +} + +func expandAppEngineVersionIdleTimeout(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionManualScaling(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInstances, err := expandAppEngineVersionInstances(original["instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["instances"] = transformedInstances + } + + return transformed, nil +} + +func expandAppEngineVersionInstances(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionInstanceClass(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionZones(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionCpu(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionDiskGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMemoryGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionKmsKeyReference(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionVolumeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionSizeGb(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionRuntime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionRuntimeChannel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionThreadsafe(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionVm(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionOperatingSystem(v interface{}, d tpgresource.TerraformResourceData, 
config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionrRuntimeVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionAppEngineApis(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionEnv(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionCreateTime(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionDiskUsageBytes(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionRuntimeApiVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionRuntimeMainExecutablePath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionServiceAccount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionCreatedBy(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionUrlRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionSecurityLevel(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionLogin(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionAuthFailAction(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionRedirectHttpResponseCode(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionStaticFilesHandler(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandAppEngineVersionPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedUploadPathRegex, err := expandAppEngineVersionUploadPathRegex(original["upload_path_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUploadPathRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["uploadPathRegex"] = transformedUploadPathRegex + } + + transformedHttpHeaders, err := expandAppEngineVersionHttpHeaders(original["http_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["httpHeaders"] = transformedHttpHeaders + } + + transformedMimeType, err := expandAppEngineVersionMimeType(original["mime_type"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMimeType); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["mimeType"] = transformedMimeType + } + 
+ transformedExpiration, err := expandAppEngineVersionExpiration(original["expiration"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExpiration); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["expiration"] = transformedExpiration + } + + transformedRequireMatchingFile, err := expandAppEngineVersionRequireMatchingFile(original["require_matching_file"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRequireMatchingFile); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["requireMatchingFile"] = transformedRequireMatchingFile + } + + transformedApplicationReadable, err := expandAppEngineVersionApplicationReadable(original["application_readable"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedApplicationReadable); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["applicationReadable"] = transformedApplicationReadable + } + + return transformed, nil +} + +func expandAppEngineVersionPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionUploadPathRegex(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionHttpHeaders(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMimeType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionExpiration(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionRequireMatchingFile(v interface{}, d tpgresource.TerraformResourceData, config 
*transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionApplicationReadable(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionScriptHandler(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedScriptPath, err := expandAppEngineVersionScriptPath(original["script_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScriptPath); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["scriptPath"] = transformedScriptPath + } + + return transformed, nil +} + +func expandAppEngineVersionHandlers(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedUrlRegex, err := expandAppEngineVersionUrlRegex(original["url_regex"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUrlRegex); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["urlRegex"] = transformedUrlRegex + } + + transformedSecurityLevel, err := expandAppEngineVersionSecurityLevel(original["security_level"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecurityLevel); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["securityLevel"] = transformedSecurityLevel + } + + transformedLogin, err := expandAppEngineVersionLogin(original["login"], d, config) + if err != nil { + return nil, 
err + } else if val := reflect.ValueOf(transformedLogin); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["login"] = transformedLogin + } + + transformedAuthFailAction, err := expandAppEngineVersionAuthFailAction(original["auth_fail_action"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedAuthFailAction); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["authFailAction"] = transformedAuthFailAction + } + + transformedRedirectHttpResponseCode, err := expandAppEngineVersionRedirectHttpResponseCode(original["redirect_http_response_code"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedRedirectHttpResponseCode); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["redirectHttpResponseCode"] = transformedRedirectHttpResponseCode + } + + transformedScript, err := expandAppEngineVersionScriptHandler(original["script"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedScript); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["script"] = transformedScript + } + + transformedStaticFiles, err := expandAppEngineVersionStaticFilesHandler(original["static_files"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStaticFiles); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["staticFiles"] = transformedStaticFiles + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandAppEngineVersionScriptPath(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionStaticFile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionMimeTypeFile(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) 
(interface{}, error) { + return v, nil +} + +func expandAppEngineVersionLibraries(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandAppEngineVersionName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedVersion, err := expandAppEngineVersionVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["version"] = transformedVersion + } + req = append(req, transformed) + } + + return req, nil +} + +func expandAppEngineVersionVersion(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionEntrypoint(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedShell, err := expandAppEngineVersionShell(original["shell"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedShell); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["shell"] = transformedShell + } + + return transformed, nil +} + +func expandAppEngineVersionShell(v 
interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionDeployment(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedFiles, err := expandAppEngineVersionFiles(original["files"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFiles); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["files"] = transformedFiles + } + + transformedZip, err := expandAppEngineVersionZip(original["zip"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedZip); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["zip"] = transformedZip + } + + return transformed, nil +} + +func expandAppEngineVersionZip(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSourceUrlProp, err := expandAppEngineVersionSourceUrl(original["source_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSourceUrlProp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sourceUrl"] = transformedSourceUrlProp + } + + filesCountProp, err := expandAppEngineVersionFilesCount(original["files_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(filesCountProp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["filesCount"] = filesCountProp + } + + return transformed, nil +} + +func 
expandAppEngineVersionSourceUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionFilesCount(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineVersionFiles(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + v = v.(*schema.Set).List() + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSourceUrlProp, err := expandAppEngineSourceUrl(original["source_url"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSourceUrlProp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sourceUrl"] = transformedSourceUrlProp + } + + transformedSha1SumProp, err := expandAppEngineSha1Sum(original["sha1_sum"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSha1SumProp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["sha1Sum"] = transformedSha1SumProp + } + + transformedNameProp, err := expandAppEngineName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedNameProp); val.IsValid() && !tpgresource.IsEmptyValue(val) { + transformed["name"] = transformedNameProp + } + + req = append(req, transformed) + } + + return req, nil +} + +func expandAppEngineSourceUrl(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineSha1Sum(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandAppEngineName(v interface{}, d 
tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/tgc/tests/data/example_google_app_engine_standard_app_version.json b/mmv1/third_party/tgc/tests/data/example_google_app_engine_standard_app_version.json new file mode 100644 index 000000000000..9bfe1a105c46 --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_app_engine_standard_app_version.json @@ -0,0 +1,28 @@ +[ + { + "name": "//appengine.googleapis.com/apps/{{.Provider.project}}/services/default/versions/v1", + "asset_type": "appengine.googleapis.com/Version", + "ancestry_path": "{{.Ancestry}}/project/{{.Provider.project}}", + "resource": { + "version": "v1", + "discovery_document_uri": "https://www.googleapis.com/discovery/v1/apis/appengine/v1/rest", + "discovery_name": "Version", + "parent": "//cloudresourcemanager.googleapis.com/projects/{{.Provider.project}}", + "data": { + "deployment": { + "zip": { + "sourceUrl": "https://storage.googleapis.com/bucket-app-engine/world.zip" + } + }, + "entrypoint": { + "shell": "python3 world.py" + }, + "id": "v1", + "project": "{{.Provider.project}}", + "runtime": "python39", + "service": "default" + } + }, + "ancestors": ["organizations/{{.OrgID}}"] + } +] \ No newline at end of file diff --git a/mmv1/third_party/tgc/tests/data/example_google_app_engine_standard_app_version.tf b/mmv1/third_party/tgc/tests/data/example_google_app_engine_standard_app_version.tf new file mode 100644 index 000000000000..2f214bf7267d --- /dev/null +++ b/mmv1/third_party/tgc/tests/data/example_google_app_engine_standard_app_version.tf @@ -0,0 +1,28 @@ +terraform { + required_providers { + google = { + source = "hashicorp/google-beta" + version = "~> {{.Provider.version}}" + } + } +} + +provider "google" { + {{if .Provider.credentials }}credentials = "{{.Provider.credentials}}"{{end}} +} + +resource "google_app_engine_standard_app_version" "my_app_v1" { + version_id = "v1" + service = "default" + 
runtime = "python39" + + entrypoint { + shell = "python3 world.py" + } + + deployment { + zip { + source_url = "https://storage.googleapis.com/bucket-app-engine/world.zip" + } + } +} \ No newline at end of file From e731d56015cd016b0ab28e063dcb664e9086610a Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Fri, 28 Jun 2024 08:30:25 -0700 Subject: [PATCH 245/356] Refresh handwritten files and fix conversion (#11075) --- mmv1/api/resource.rb | 6 +- mmv1/third_party/terraform/go/go.mod | 28 +- .../go/compute_instance_helpers.go.tmpl | 4 - ...ompute_global_forwarding_rule_test.go.tmpl | 139 ++++++++ .../go/resource_compute_instance.go.tmpl | 8 - ...resource_compute_instance_template.go.tmpl | 9 - ...rce_compute_instance_template_test.go.tmpl | 62 +++- .../go/resource_compute_instance_test.go.tmpl | 18 - ...e_compute_region_instance_template.go.tmpl | 9 - .../resource_container_cluster_test.go.tmpl | 44 +++ .../go/resource_gke_hub_feature_test.go.tmpl | 5 +- ...rkstations_workstation_config_test.go.tmpl | 3 +- .../tpgresource/go/common_diff_suppress.go | 124 +++++++ .../go/common_diff_suppress.go.tmpl | 315 ------------------ 14 files changed, 375 insertions(+), 399 deletions(-) create mode 100644 mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go delete mode 100644 mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go.tmpl diff --git a/mmv1/api/resource.rb b/mmv1/api/resource.rb index de04aafe0e83..98d2b0fe775a 100644 --- a/mmv1/api/resource.rb +++ b/mmv1/api/resource.rb @@ -396,7 +396,11 @@ def all_nested_properties(props) def convert_go_file(file) dir, base = File.split(file) base.slice! '.erb' - "#{dir}/go/#{base}.tmpl" + if dir.end_with?('terraform') + "#{dir}/#{base}.go.tmpl" + else + "#{dir}/go/#{base}.tmpl" + end end # All settable properties in the resource. 
diff --git a/mmv1/third_party/terraform/go/go.mod b/mmv1/third_party/terraform/go/go.mod index 9aeb1b3967d5..5fa8af5fb7ca 100644 --- a/mmv1/third_party/terraform/go/go.mod +++ b/mmv1/third_party/terraform/go/go.mod @@ -27,18 +27,18 @@ require ( github.com/sirupsen/logrus v1.8.1 go4.org/netipx v0.0.0-20231129151722-fdeea329fbba golang.org/x/exp v0.0.0-20240409090435-93d18d7e34b8 - golang.org/x/net v0.25.0 - golang.org/x/oauth2 v0.20.0 - google.golang.org/api v0.180.0 - google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 - google.golang.org/grpc v1.63.2 - google.golang.org/protobuf v1.34.1 + golang.org/x/net v0.26.0 + golang.org/x/oauth2 v0.21.0 + google.golang.org/api v0.185.0 + google.golang.org/genproto/googleapis/rpc v0.0.0-20240617180043-68d350f18fd4 + google.golang.org/grpc v1.64.0 + google.golang.org/protobuf v1.34.2 ) require ( bitbucket.org/creachadair/stringset v0.0.8 // indirect - cloud.google.com/go v0.113.0 // indirect - cloud.google.com/go/auth v0.4.1 // indirect + cloud.google.com/go v0.115.0 // indirect + cloud.google.com/go/auth v0.5.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.8 // indirect @@ -50,7 +50,7 @@ require ( github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cloudflare/circl v1.3.7 // indirect - github.com/cncf/xds/go v0.0.0-20231128003011-0fa0005c9caa // indirect + github.com/cncf/xds/go v0.0.0-20240318125728-8a4994d93e50 // indirect github.com/envoyproxy/go-control-plane v0.12.0 // indirect github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect github.com/fatih/color v1.16.0 // indirect @@ -98,14 +98,14 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.23.0 // indirect + 
golang.org/x/crypto v0.24.0 // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/sync v0.7.0 // indirect - golang.org/x/sys v0.20.0 // indirect - golang.org/x/text v0.15.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240429193739-8cf5692501f6 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240506185236-b8a5c65736ae // indirect + google.golang.org/genproto v0.0.0-20240617180043-68d350f18fd4 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/mmv1/third_party/terraform/services/compute/go/compute_instance_helpers.go.tmpl b/mmv1/third_party/terraform/services/compute/go/compute_instance_helpers.go.tmpl index 1b6d42dafc33..d6bced2b7fee 100644 --- a/mmv1/third_party/terraform/services/compute/go/compute_instance_helpers.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/compute_instance_helpers.go.tmpl @@ -612,9 +612,7 @@ func expandConfidentialInstanceConfig(d tpgresource.TerraformResourceData) *comp prefix := "confidential_instance_config.0" return &compute.ConfidentialInstanceConfig{ EnableConfidentialCompute: d.Get(prefix + ".enable_confidential_compute").(bool), - {{- if ne $.TargetVersionName "ga" }} ConfidentialInstanceType: d.Get(prefix + ".confidential_instance_type").(string), - {{- end }} } } @@ -625,9 +623,7 @@ func flattenConfidentialInstanceConfig(ConfidentialInstanceConfig *compute.Confi return []map[string]interface{}{{"{{"}} "enable_confidential_compute": ConfidentialInstanceConfig.EnableConfidentialCompute, - {{- if ne $.TargetVersionName "ga" }} "confidential_instance_type": ConfidentialInstanceConfig.ConfidentialInstanceType, - {{- end }} {{"}}"}} } diff --git 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_global_forwarding_rule_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_global_forwarding_rule_test.go.tmpl index 8e0191a8b4e8..8d837b9c2062 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_global_forwarding_rule_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_global_forwarding_rule_test.go.tmpl @@ -7,6 +7,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/services/compute" ) func TestAccComputeGlobalForwardingRule_updateTarget(t *testing.T) { @@ -163,6 +164,144 @@ func TestAccComputeGlobalForwardingRule_internalLoadBalancing(t *testing.T) { } {{- end }} +func TestUnitComputeGlobalForwardingRule_PortRangeDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "different single values": { + Old: "80-80", + New: "443", + ExpectDiffSuppress: false, + }, + "different ranges": { + Old: "80-80", + New: "443-444", + ExpectDiffSuppress: false, + }, + "same single values": { + Old: "80-80", + New: "80", + ExpectDiffSuppress: true, + }, + "same ranges": { + Old: "80-80", + New: "80-80", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if compute.PortRangeDiffSuppress("ports", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + +func TestUnitComputeGlobalForwardingRule_InternalIpDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "suppress - same long and short ipv6 IPs without netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "2600:1900:4020:31cd:8000::", + ExpectDiffSuppress: true, + }, + "suppress - long and short ipv6 IPs with 
netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0/96", + New: "2600:1900:4020:31cd:8000::/96", + ExpectDiffSuppress: true, + }, + "suppress - long ipv6 IP with netmask and short ipv6 IP without netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0/96", + New: "2600:1900:4020:31cd:8000::", + ExpectDiffSuppress: true, + }, + "suppress - long ipv6 IP without netmask and short ipv6 IP with netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "2600:1900:4020:31cd:8000::/96", + ExpectDiffSuppress: true, + }, + "suppress - long ipv6 IP with netmask and reference": { + Old: "2600:1900:4020:31cd:8000:0:0:0/96", + New: "projects/project_id/regions/region/addresses/address-name", + ExpectDiffSuppress: true, + }, + "suppress - long ipv6 IP without netmask and reference": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "projects/project_id/regions/region/addresses/address-name", + ExpectDiffSuppress: true, + }, + "do not suppress - ipv6 IPs different netmask": { + Old: "2600:1900:4020:31cd:8000:0:0:0/96", + New: "2600:1900:4020:31cd:8000:0:0:0/95", + ExpectDiffSuppress: false, + }, + "do not suppress - reference and ipv6 IP with netmask": { + Old: "projects/project_id/regions/region/addresses/address-name", + New: "2600:1900:4020:31cd:8000:0:0:0/96", + ExpectDiffSuppress: false, + }, + "do not suppress - ipv6 IPs - 1": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "2600:1900:4020:31cd:8001::", + ExpectDiffSuppress: false, + }, + "do not suppress - ipv6 IPs - 2": { + Old: "2600:1900:4020:31cd:8000:0:0:0", + New: "2600:1900:4020:31cd:8000:0:0:8000", + ExpectDiffSuppress: false, + }, + "suppress - ipv4 IPs": { + Old: "1.2.3.4", + New: "1.2.3.4", + ExpectDiffSuppress: true, + }, + "suppress - ipv4 IP without netmask and ipv4 IP with netmask": { + Old: "1.2.3.4", + New: "1.2.3.4/24", + ExpectDiffSuppress: true, + }, + "suppress - ipv4 IP without netmask and reference": { + Old: "1.2.3.4", + New: "projects/project_id/regions/region/addresses/address-name", + 
ExpectDiffSuppress: true, + }, + "do not suppress - reference and ipv4 IP without netmask": { + Old: "projects/project_id/regions/region/addresses/address-name", + New: "1.2.3.4", + ExpectDiffSuppress: false, + }, + "do not suppress - different ipv4 IPs": { + Old: "1.2.3.4", + New: "1.2.3.5", + ExpectDiffSuppress: false, + }, + "do not suppress - ipv4 IPs different netmask": { + Old: "1.2.3.4/24", + New: "1.2.3.5/25", + ExpectDiffSuppress: false, + }, + "do not suppress - different references": { + Old: "projects/project_id/regions/region/addresses/address-name", + New: "projects/project_id/regions/region/addresses/address-name-1", + ExpectDiffSuppress: false, + }, + "do not suppress - same references": { + Old: "projects/project_id/regions/region/addresses/address-name", + New: "projects/project_id/regions/region/addresses/address-name", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if compute.InternalIpDiffSuppress("ipv4/v6_compare", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + func testAccComputeGlobalForwardingRule_httpProxy(fr, targetProxy, proxy, proxy2, backend, hc, urlmap string) string { return fmt.Sprintf(` resource "google_compute_global_forwarding_rule" "forwarding_rule" { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl index aadfe28a5828..5789b30f00ef 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl @@ -1048,13 +1048,6 @@ be from 0 to 999,999,999 inclusive.`, Description: `The Confidential VM config being used by the instance. 
on_host_maintenance has to be set to TERMINATE or this will fail to create.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - {{- if eq $.TargetVersionName "ga" }} - "enable_confidential_compute": { - Type: schema.TypeBool, - Required: true, - Description: `Defines whether the instance should have confidential compute enabled.`, - }, - {{- else }} "enable_confidential_compute": { Type: schema.TypeBool, Optional: true, @@ -1070,7 +1063,6 @@ be from 0 to 999,999,999 inclusive.`, If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, - {{- end }} }, }, }, diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl index b2feb876803c..5937612209e1 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl @@ -898,14 +898,6 @@ be from 0 to 999,999,999 inclusive.`, Description: `The Confidential VM config being used by the instance. 
on_host_maintenance has to be set to TERMINATE or this will fail to create.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - {{- if eq $.TargetVersionName "ga" }} - "enable_confidential_compute": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Defines whether the instance should have confidential compute enabled.`, - }, - {{- else }} "enable_confidential_compute": { Type: schema.TypeBool, Optional: true, @@ -923,7 +915,6 @@ be from 0 to 999,999,999 inclusive.`, If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, - {{- end }} }, }, }, diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl index ba0a4fbbe2d1..9d4d7dfae60d 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl @@ -779,9 +779,7 @@ func TestAccComputeInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) t.Parallel() var instanceTemplate compute.InstanceTemplate - {{- if ne $.TargetVersionName "ga" }} var instanceTemplate2 compute.InstanceTemplate - {{- end }} acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -793,13 +791,10 @@ func TestAccComputeInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceTemplateExists(t, "google_compute_instance_template.foobar", &instanceTemplate), testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate, true, "SEV"), - {{- if ne $.TargetVersionName "ga" }} testAccCheckComputeInstanceTemplateExists(t, 
"google_compute_instance_template.foobar2", &instanceTemplate2), testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, true, ""), - {{- end }} ), }, - {{- if ne $.TargetVersionName "ga" }} { Config: testAccComputeInstanceTemplateConfidentialInstanceConfigNoEnable(acctest.RandString(t, 10), "AMD Milan", "SEV_SNP"), Check: resource.ComposeTestCheckFunc( @@ -809,7 +804,6 @@ func TestAccComputeInstanceTemplate_ConfidentialInstanceConfigMain(t *testing.T) testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(&instanceTemplate2, false, "SEV_SNP"), ), }, - {{- end }} }, }) } @@ -1534,6 +1528,50 @@ func TestAccComputeInstanceTemplate_resourceManagerTags(t *testing.T) { }) } +func TestUnitComputeInstanceTemplate_IpCidrRangeDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "single ip address": { + Old: "10.2.3.4", + New: "10.2.3.5", + ExpectDiffSuppress: false, + }, + "cidr format string": { + Old: "10.1.2.0/24", + New: "10.1.3.0/24", + ExpectDiffSuppress: false, + }, + "netmask same mask": { + Old: "10.1.2.0/24", + New: "/24", + ExpectDiffSuppress: true, + }, + "netmask different mask": { + Old: "10.1.2.0/24", + New: "/32", + ExpectDiffSuppress: false, + }, + "add netmask": { + Old: "", + New: "/24", + ExpectDiffSuppress: false, + }, + "remove netmask": { + Old: "/24", + New: "", + ExpectDiffSuppress: false, + }, + } + + for tn, tc := range cases { + if tpgcompute.IpCidrRangeDiffSuppress("ip_cidr_range", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Fatalf("bad: %s, '%s' => '%s' expect %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + func testAccCheckComputeInstanceTemplateDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) @@ -1987,11 +2025,9 @@ func testAccCheckComputeInstanceTemplateHasConfidentialInstanceConfig(instanceTe if 
instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute != EnableConfidentialCompute { return fmt.Errorf("Wrong ConfidentialInstanceConfig EnableConfidentialCompute: expected %t, got, %t", EnableConfidentialCompute, instanceTemplate.Properties.ConfidentialInstanceConfig.EnableConfidentialCompute) } - {{- if ne $.TargetVersionName "ga" }} if instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType != ConfidentialInstanceType { return fmt.Errorf("Wrong ConfidentialInstanceConfig ConfidentialInstanceType: expected %s, got, %s", ConfidentialInstanceType, instanceTemplate.Properties.ConfidentialInstanceConfig.ConfidentialInstanceType) } - {{- end }} return nil } @@ -3312,9 +3348,7 @@ resource "google_compute_instance_template" "foobar" { confidential_instance_config { enable_confidential_compute = true -{{- if ne $.TargetVersionName "ga" }} confidential_instance_type = %q -{{- end }} } scheduling { @@ -3323,7 +3357,6 @@ resource "google_compute_instance_template" "foobar" { } -{{ if ne $.TargetVersionName `ga` -}} resource "google_compute_instance_template" "foobar2" { name = "tf-test-instance2-template-%s" machine_type = "n2d-standard-2" @@ -3347,15 +3380,9 @@ resource "google_compute_instance_template" "foobar2" { } } -{{- end }} -{{- if eq $.TargetVersionName "ga" }} -`, suffix) -{{- else }} `, suffix, confidentialInstanceType, suffix) -{{- end }} } -{{ if ne $.TargetVersionName `ga` -}} func testAccComputeInstanceTemplateConfidentialInstanceConfigNoEnable(suffix string, minCpuPlatform, confidentialInstanceType string) string { return fmt.Sprintf(` data "google_compute_image" "my_image2" { @@ -3416,7 +3443,6 @@ resource "google_compute_instance_template" "foobar4" { } `, suffix, minCpuPlatform, confidentialInstanceType, suffix, minCpuPlatform, confidentialInstanceType) } -{{- end }} func testAccComputeInstanceTemplateAdvancedMachineFeatures(suffix string) string { return fmt.Sprintf(` diff --git 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl index 3761400e3f07..3e71b164f462 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl @@ -1889,9 +1889,7 @@ func TestAccComputeInstanceConfidentialInstanceConfigMain(t *testing.T) { t.Parallel() var instance compute.Instance - {{- if ne $.TargetVersionName "ga" }} var instance2 compute.Instance - {{- end }} instanceName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ @@ -1904,13 +1902,10 @@ func TestAccComputeInstanceConfidentialInstanceConfigMain(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar", &instance), testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance, true, "SEV"), - {{- if ne $.TargetVersionName "ga" }} testAccCheckComputeInstanceExists(t, "google_compute_instance.foobar2", &instance2), testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, true, ""), - {{- end }} ), }, - {{- if ne $.TargetVersionName "ga" }} { Config: testAccComputeInstanceConfidentialInstanceConfigNoEnable(instanceName, "AMD Milan", "SEV_SNP"), Check: resource.ComposeTestCheckFunc( @@ -1920,7 +1915,6 @@ func TestAccComputeInstanceConfidentialInstanceConfigMain(t *testing.T) { testAccCheckComputeInstanceHasConfidentialInstanceConfig(&instance2, false, "SEV_SNP"), ), }, - {{- end }} }, }) } @@ -4395,11 +4389,9 @@ func testAccCheckComputeInstanceHasConfidentialInstanceConfig(instance *compute. 
if instance.ConfidentialInstanceConfig.EnableConfidentialCompute != EnableConfidentialCompute { return fmt.Errorf("Wrong ConfidentialInstanceConfig EnableConfidentialCompute: expected %t, got, %t", EnableConfidentialCompute, instance.ConfidentialInstanceConfig.EnableConfidentialCompute) } - {{- if ne $.TargetVersionName "ga" }} if instance.ConfidentialInstanceConfig.ConfidentialInstanceType != ConfidentialInstanceType { return fmt.Errorf("Wrong ConfidentialInstanceConfig ConfidentialInstanceType: expected %s, got, %s", ConfidentialInstanceType, instance.ConfidentialInstanceConfig.ConfidentialInstanceType) } - {{- end }} return nil } @@ -7681,9 +7673,7 @@ resource "google_compute_instance" "foobar" { confidential_instance_config { enable_confidential_compute = true -{{- if ne $.TargetVersionName "ga" }} confidential_instance_type = %q -{{- end }} } scheduling { @@ -7692,7 +7682,6 @@ resource "google_compute_instance" "foobar" { } -{{ if ne $.TargetVersionName `ga` -}} resource "google_compute_instance" "foobar2" { name = "%s2" machine_type = "n2d-standard-2" @@ -7717,15 +7706,9 @@ resource "google_compute_instance" "foobar2" { } } -{{- end }} -{{- if eq $.TargetVersionName "ga" }} -`, instance) -{{- else }} `, instance, confidentialInstanceType, instance) -{{- end }} } -{{ if ne $.TargetVersionName `ga` -}} func testAccComputeInstanceConfidentialInstanceConfigNoEnable(instance string, minCpuPlatform, confidentialInstanceType string) string { return fmt.Sprintf(` data "google_compute_image" "my_image2" { @@ -7788,7 +7771,6 @@ resource "google_compute_instance" "foobar4" { } `, instance, minCpuPlatform, confidentialInstanceType, instance, minCpuPlatform, confidentialInstanceType) } -{{- end }} func testAccComputeInstance_attributionLabelCreate(instance, add, strategy string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl 
b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl index c88a923a92f4..6ffd6a99546d 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl @@ -851,14 +851,6 @@ be from 0 to 999,999,999 inclusive.`, Description: `The Confidential VM config being used by the instance. on_host_maintenance has to be set to TERMINATE or this will fail to create.`, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - {{- if eq $.TargetVersionName "ga" }} - "enable_confidential_compute": { - Type: schema.TypeBool, - Required: true, - ForceNew: true, - Description: `Defines whether the instance should have confidential compute enabled.`, - }, - {{- else }} "enable_confidential_compute": { Type: schema.TypeBool, Optional: true, @@ -876,7 +868,6 @@ be from 0 to 999,999,999 inclusive.`, If SEV_SNP, min_cpu_platform = "AMD Milan" is currently required.`, AtLeastOneOf: []string{"confidential_instance_config.0.enable_confidential_compute", "confidential_instance_config.0.confidential_instance_type"}, }, - {{- end }} }, }, }, diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl index 8f4d6808b5a9..5b289f0027f4 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_test.go.tmpl @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-google/google/acctest" "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/services/container" ) func TestAccContainerCluster_basic(t *testing.T) { @@ -596,6 
+597,49 @@ func TestAccContainerCluster_withAuthenticatorGroupsConfig(t *testing.T) { }) } +func TestUnitContainerCluster_Rfc3339TimeDiffSuppress(t *testing.T) { + cases := map[string]struct { + Old, New string + ExpectDiffSuppress bool + }{ + "same time, format changed to have leading zero": { + Old: "2:00", + New: "02:00", + ExpectDiffSuppress: true, + }, + "same time, format changed not to have leading zero": { + Old: "02:00", + New: "2:00", + ExpectDiffSuppress: true, + }, + "different time, both without leading zero": { + Old: "2:00", + New: "3:00", + ExpectDiffSuppress: false, + }, + "different time, old with leading zero, new without": { + Old: "02:00", + New: "3:00", + ExpectDiffSuppress: false, + }, + "different time, new with leading zero, oldwithout": { + Old: "2:00", + New: "03:00", + ExpectDiffSuppress: false, + }, + "different time, both with leading zero": { + Old: "02:00", + New: "03:00", + ExpectDiffSuppress: false, + }, + } + for tn, tc := range cases { + if container.Rfc3339TimeDiffSuppress("time", tc.Old, tc.New, nil) != tc.ExpectDiffSuppress { + t.Errorf("bad: %s, '%s' => '%s' expect DiffSuppress to return %t", tn, tc.Old, tc.New, tc.ExpectDiffSuppress) + } + } +} + {{ if ne $.TargetVersionName `ga` -}} func testAccContainerCluster_enableMultiNetworking(clusterName string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl index 6ade6964b38e..d64642fc0f6a 100644 --- a/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl +++ b/mmv1/third_party/terraform/services/gkehub2/go/resource_gke_hub_feature_test.go.tmpl @@ -530,8 +530,9 @@ resource "google_gke_hub_feature" "feature" { fleet_default_member_config { configmanagement { version = "1.16.1" - config_sync { - prevent_drift = true + config_sync { + enabled = true + prevent_drift = true source_format = 
"unstructured" oci { sync_repo = "us-central1-docker.pkg.dev/corp-gke-build-artifacts/acm/configs:latest" diff --git a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl index b3fa64133ba6..ddced48c78b2 100644 --- a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl +++ b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl @@ -1378,4 +1378,5 @@ resource "google_workstations_workstation_config" "default" { } `, context) } -{{- end }} + +{{ end }} diff --git a/mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go b/mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go new file mode 100644 index 000000000000..70763fdd58da --- /dev/null +++ b/mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go @@ -0,0 +1,124 @@ +// Contains common diff suppress functions. 
+ +package tpgresource + +import ( + "reflect" + "regexp" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func EmptyOrDefaultStringSuppress(defaultVal string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) + } +} + +func EmptyOrFalseSuppressBoolean(k, old, new string, d *schema.ResourceData) bool { + o, n := d.GetChange(k) + return (o == nil && !n.(bool)) +} + +func CaseDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + return strings.ToUpper(old) == strings.ToUpper(new) +} + +func EmptyOrUnsetBlockDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + o, n := d.GetChange(strings.TrimSuffix(k, ".#")) + return EmptyOrUnsetBlockDiffSuppressLogic(k, old, new, o, n) +} + +// The core logic for EmptyOrUnsetBlockDiffSuppress, in a format that is more conducive +// to unit testing. +func EmptyOrUnsetBlockDiffSuppressLogic(k, old, new string, o, n interface{}) bool { + if !strings.HasSuffix(k, ".#") { + return false + } + var l []interface{} + if old == "0" && new == "1" { + l = n.([]interface{}) + } else if new == "0" && old == "1" { + l = o.([]interface{}) + } else { + // we don't have one set and one unset, so don't suppress the diff + return false + } + + contents, ok := l[0].(map[string]interface{}) + if !ok { + return false + } + for _, v := range contents { + if !IsEmptyValue(reflect.ValueOf(v)) { + return false + } + } + return true +} + +func TimestampDiffSuppress(format string) schema.SchemaDiffSuppressFunc { + return func(_, old, new string, _ *schema.ResourceData) bool { + oldT, err := time.Parse(format, old) + if err != nil { + return false + } + + newT, err := time.Parse(format, new) + if err != nil { + return false + } + + return oldT == newT + } +} + +// Suppress diffs for duration format. 
ex "60.0s" and "60s" same +// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration +func DurationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + oDuration, err := time.ParseDuration(old) + if err != nil { + return false + } + nDuration, err := time.ParseDuration(new) + if err != nil { + return false + } + return oDuration == nDuration +} + +// Suppress diffs when the value read from api +// has the project number instead of the project name +func ProjectNumberDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { + var a2, b2 string + reN := regexp.MustCompile("projects/\\d+") + re := regexp.MustCompile("projects/[^/]+") + replacement := []byte("projects/equal") + a2 = string(reN.ReplaceAll([]byte(old), replacement)) + b2 = string(re.ReplaceAll([]byte(new), replacement)) + return a2 == b2 +} + +func IsNewResource(diff TerraformResourceDiff) bool { + name := diff.Get("name") + return name.(string) == "" +} + +func CompareCryptoKeyVersions(_, old, new string, _ *schema.ResourceData) bool { + // The API can return cryptoKeyVersions even though it wasn't specified. + // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 + + kmsKeyWithoutVersions := strings.Split(old, "/cryptoKeyVersions")[0] + if kmsKeyWithoutVersions == new { + return true + } + + return false +} + +func CidrOrSizeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + // If the user specified a size and the API returned a full cidr block, suppress. + return strings.HasPrefix(new, "/") && strings.HasSuffix(old, new) +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go.tmpl b/mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go.tmpl deleted file mode 100644 index acf087a314f0..000000000000 --- a/mmv1/third_party/terraform/tpgresource/go/common_diff_suppress.go.tmpl +++ /dev/null @@ -1,315 +0,0 @@ -// Contains common diff suppress functions. 
- -package tpgresource - -import ( - "crypto/sha256" - "log" - "encoding/hex" - "net" - "reflect" - "regexp" - "strconv" - "strings" - "time" - "bytes" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func OptionalPrefixSuppress(prefix string) schema.SchemaDiffSuppressFunc { - return func(k, old, new string, d *schema.ResourceData) bool { - return prefix+old == new || prefix+new == old - } -} - -func IgnoreMissingKeyInMap(key string) schema.SchemaDiffSuppressFunc { - return func(k, old, new string, d *schema.ResourceData) bool { - log.Printf("[DEBUG] - suppressing diff %q with old %q, new %q", k, old, new) - if strings.HasSuffix(k, ".%") { - oldNum, err := strconv.Atoi(old) - if err != nil { - log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", old) - return false - } - newNum, err := strconv.Atoi(new) - if err != nil { - log.Printf("[ERROR] could not parse %q as number, no longer attempting diff suppress", new) - return false - } - return oldNum+1 == newNum - } else if strings.HasSuffix(k, "." + key) { - return old == "" - } - return false - } -} - -func OptionalSurroundingSpacesSuppress(k, old, new string, d *schema.ResourceData) bool { - return strings.TrimSpace(old) == strings.TrimSpace(new) -} - -func EmptyOrDefaultStringSuppress(defaultVal string) schema.SchemaDiffSuppressFunc { - return func(k, old, new string, d *schema.ResourceData) bool { - return (old == "" && new == defaultVal) || (new == "" && old == defaultVal) - } -} - -func EmptyOrFalseSuppressBoolean(k, old, new string, d *schema.ResourceData) bool { - o, n := d.GetChange(k) - return (o == nil && !n.(bool)) -} - -func IpCidrRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // The range may be a: - // A) single IP address (e.g. 10.2.3.4) - // B) CIDR format string (e.g. 10.1.2.0/24) - // C) netmask (e.g. /24) - // - // For A) and B), no diff to suppress, they have to match completely. 
- // For C), The API picks a network IP address and this creates a diff of the form: - // network_interface.0.alias_ip_range.0.ip_cidr_range: "10.128.1.0/24" => "/24" - // We should only compare the mask portion for this case. - if len(new) > 0 && new[0] == '/' { - oldNetmaskStartPos := strings.LastIndex(old, "/") - - if oldNetmaskStartPos != -1 { - oldNetmask := old[strings.LastIndex(old, "/"):] - if oldNetmask == new { - return true - } - } - } - - return false -} - -// Sha256DiffSuppress -// if old is the hex-encoded sha256 sum of new, treat them as equal -func Sha256DiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - return hex.EncodeToString(sha256.New().Sum([]byte(old))) == new -} - -func CaseDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - return strings.ToUpper(old) == strings.ToUpper(new) -} - -// Port range '80' and '80-80' is equivalent. -// `old` is read from the server and always has the full range format (e.g. '80-80', '1024-2048'). -// `new` can be either a single port or a port range. -func PortRangeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return old == new+"-"+new -} - -// Single-digit hour is equivalent to hour with leading zero e.g. suppress diff 1:00 => 01:00. -// Assume either value could be in either format. -func Rfc3339TimeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - if (len(old) == 4 && "0"+old == new) || (len(new) == 4 && "0"+new == old) { - return true - } - return false -} - -func EmptyOrUnsetBlockDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - o, n := d.GetChange(strings.TrimSuffix(k, ".#")) - return EmptyOrUnsetBlockDiffSuppressLogic(k, old, new, o, n) -} - -// The core logic for EmptyOrUnsetBlockDiffSuppress, in a format that is more conducive -// to unit testing. 
-func EmptyOrUnsetBlockDiffSuppressLogic(k, old, new string, o, n interface{}) bool { - if !strings.HasSuffix(k, ".#") { - return false - } - var l []interface{} - if old == "0" && new == "1" { - l = n.([]interface{}) - } else if new == "0" && old == "1" { - l = o.([]interface{}) - } else { - // we don't have one set and one unset, so don't suppress the diff - return false - } - - contents, ok := l[0].(map[string]interface{}) - if !ok { - return false - } - for _, v := range contents { - if !IsEmptyValue(reflect.ValueOf(v)) { - return false - } - } - return true -} - -// Suppress diffs for values that are equivalent except for their use of the words "location" -// compared to "region" or "zone" -func LocationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - return LocationDiffSuppressHelper(old, new) || LocationDiffSuppressHelper(new, old) -} - -func LocationDiffSuppressHelper(a, b string) bool { - return strings.Replace(a, "/locations/", "/regions/", 1) == b || - strings.Replace(a, "/locations/", "/zones/", 1) == b -} - -// For managed SSL certs, if new is an absolute FQDN (trailing '.') but old isn't, treat them as equals. -func AbsoluteDomainSuppress(k, old, new string, _ *schema.ResourceData) bool { - if strings.HasPrefix(k, "managed.0.domains.") { - return old == strings.TrimRight(new, ".") || new == strings.TrimRight(old, ".") - } - return false -} - -func TimestampDiffSuppress(format string) schema.SchemaDiffSuppressFunc { - return func(_, old, new string, _ *schema.ResourceData) bool { - oldT, err := time.Parse(format, old) - if err != nil { - return false - } - - newT, err := time.Parse(format, new) - if err != nil { - return false - } - - return oldT == newT - } -} - -// Suppresses diff for IPv4 and IPv6 different formats. -// It also suppresses diffs if an IP is changing to a reference. 
-func InternalIpDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - addr_equality := false - netmask_equality := false - - addr_netmask_old := strings.Split(old, "/") - addr_netmask_new := strings.Split(new, "/") - - // Check if old or new are IPs (with or without netmask) - var addr_old net.IP - if net.ParseIP(addr_netmask_old[0]) == nil { - addr_old = net.ParseIP(old) - } else { - addr_old = net.ParseIP(addr_netmask_old[0]) - } - var addr_new net.IP - if net.ParseIP(addr_netmask_new[0]) == nil { - addr_new = net.ParseIP(new) - } else { - addr_new = net.ParseIP(addr_netmask_new[0]) - } - - if addr_old != nil { - if addr_new == nil { - // old is an IP and new is a reference - addr_equality = true - } else { - // old and new are IP addresses - addr_equality = bytes.Equal(addr_old, addr_new) - } - } - - // If old and new both have a netmask compare them, otherwise suppress - // This is not technically correct but prevents the permadiff described in https://github.com/hashicorp/terraform-provider-google/issues/16400 - if (len(addr_netmask_old)) == 2 && (len(addr_netmask_new) == 2) { - netmask_equality = addr_netmask_old[1] == addr_netmask_new[1] - } else { - netmask_equality = true - } - - return addr_equality && netmask_equality -} - -// Suppress diffs for duration format. 
ex "60.0s" and "60s" same -// https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#duration -func DurationDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - oDuration, err := time.ParseDuration(old) - if err != nil { - return false - } - nDuration, err := time.ParseDuration(new) - if err != nil { - return false - } - return oDuration == nDuration -} - -// Use this method when the field accepts either an IP address or a -// self_link referencing a resource (such as google_compute_route's -// next_hop_ilb) -func CompareIpAddressOrSelfLinkOrResourceName(_, old, new string, _ *schema.ResourceData) bool { - // if we can parse `new` as an IP address, then compare as strings - if net.ParseIP(new) != nil { - return new == old - } - - // otherwise compare as self links - return CompareSelfLinkOrResourceName("", old, new, nil) -} - -{{ if ne $.TargetVersionName `ga` -}} -// Suppress all diffs, used for Disk.Interface which is a nonfunctional field -func AlwaysDiffSuppress(_, _, _ string, _ *schema.ResourceData) bool { - return true -} -{{- end }} - -// Use this method when subnet is optioanl and auto_create_subnetworks = true -// API sometimes choose a subnet so the diff needs to be ignored -func CompareOptionalSubnet(_, old, new string, _ *schema.ResourceData) bool { - if IsEmptyValue(reflect.ValueOf(new)) { - return true - } - // otherwise compare as self links - return CompareSelfLinkOrResourceName("", old, new, nil) -} - -// Suppress diffs in below cases -// "https://hello-rehvs75zla-uc.a.run.app/" -> "https://hello-rehvs75zla-uc.a.run.app" -// "https://hello-rehvs75zla-uc.a.run.app" -> "https://hello-rehvs75zla-uc.a.run.app/" -func LastSlashDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - if last := len(new) - 1; last >= 0 && new[last] == '/' { - new = new[:last] - } - - if last := len(old) - 1; last >= 0 && old[last] == '/' { - old = old[:last] - } - return new == old -} - -// Suppress diffs when the value read 
from api -// has the project number instead of the project name -func ProjectNumberDiffSuppress(_, old, new string, _ *schema.ResourceData) bool { - var a2, b2 string - reN := regexp.MustCompile("projects/\\d+") - re := regexp.MustCompile("projects/[^/]+") - replacement := []byte("projects/equal") - a2 = string(reN.ReplaceAll([]byte(old), replacement)) - b2 = string(re.ReplaceAll([]byte(new), replacement)) - return a2 == b2 -} - -func IsNewResource(diff TerraformResourceDiff) bool { - name := diff.Get("name") - return name.(string) == "" -} - -func CompareCryptoKeyVersions(_, old, new string, _ *schema.ResourceData) bool { - // The API can return cryptoKeyVersions even though it wasn't specified. - // format: projects//locations//keyRings//cryptoKeys//cryptoKeyVersions/1 - - kmsKeyWithoutVersions := strings.Split(old, "/cryptoKeyVersions")[0] - if kmsKeyWithoutVersions == new { - return true - } - - return false -} - -func CidrOrSizeDiffSuppress(k, old, new string, d *schema.ResourceData) bool { - // If the user specified a size and the API returned a full cidr block, suppress. 
- return strings.HasPrefix(new, "/") && strings.HasSuffix(old, new) -} \ No newline at end of file From 8be8eb449a88877607e511f723d75f3032c4602a Mon Sep 17 00:00:00 2001 From: Nick Elliot Date: Fri, 28 Jun 2024 08:32:19 -0700 Subject: [PATCH 246/356] go rewrite workflow diffs -- state upgraders todo (#11076) --- mmv1/api/resource.go | 12 ++++++++++++ mmv1/templates/terraform/resource.go.tmpl | 10 ++++++---- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 344e236f43c3..65dc21673cea 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -668,6 +668,10 @@ func getLabelsFieldNote(title string) string { title, title, title) } +func (r Resource) StateMigrationFile() string { + return fmt.Sprintf("templates/terraform/state_migrations/go/%s_%s.go.tmpl", google.Underscore(r.ProductMetadata.Name), google.Underscore(r.Name)) +} + // ==================== // Version-related methods // ==================== @@ -1535,3 +1539,11 @@ func (r Resource) VersionedProvider(exampleVersion string) bool { } return vp != "" && vp != "ga" } + +func (r Resource) StateUpgradersCount() []int { + var nums []int + for i := r.StateUpgradeBaseSchemaVersion; i < r.SchemaVersion; i++ { + nums = append(nums, i) + } + return nums +} diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index e5820a3091a4..601ded37661b 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -98,10 +98,10 @@ func Resource{{ $.ResourceName -}}() *schema.Resource { {{- if $.StateUpgraders }} StateUpgraders: []schema.StateUpgrader{ -{{- range $v := $.SchemaVersions }} +{{- range $v := $.StateUpgradersCount }} { - Type: resource{{$.PathType}}ResourceV{{$v}}().CoreConfigSchema().ImpliedType(), - Upgrade: Resource{{$.PathType}}UpgradeV{{$v}}, + Type: resource{{$.ResourceName}}ResourceV{{$v}}().CoreConfigSchema().ImpliedType(), + Upgrade: 
Resource{{$.ResourceName}}UpgradeV{{$v}}, Version: {{$v}}, }, {{- end }} @@ -1204,4 +1204,6 @@ func resource{{ $.ResourceName -}}PostCreateFailure(d *schema.ResourceData, meta {{- $.CustomTemplate $.CustomCode.PostCreateFailure false -}} } {{- end }} -{{/* TODO state upgraders */}} +{{- if and $.SchemaVersion $.StateUpgraders }} + {{ $.CustomTemplate $.StateMigrationFile false -}} +{{- end }} From 2a32beb359a669581cfad2af5b98d072cc577d41 Mon Sep 17 00:00:00 2001 From: courageJ Date: Fri, 28 Jun 2024 12:31:14 -0400 Subject: [PATCH 247/356] Add DCGM enum to monitoring config (#10593) --- .../services/container/resource_container_cluster.go.erb | 4 ++-- .../container/resource_container_cluster_migratev1.go.erb | 4 ++-- .../terraform/website/docs/r/container_cluster.html.markdown | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb index bf16ba801320..f8107b751894 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster.go.erb @@ -1198,9 +1198,9 @@ func ResourceContainerCluster() *schema.Resource { Optional: true, Computed: true, <% if version == "ga" -%> - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, KUBELET and CADVISOR.`, + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, KUBELET, CADVISOR and DCGM.`, <% else -%> - Description: `GKE components exposing metrics. 
Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, WORKLOADS, KUBELET and CADVISOR.`, + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, WORKLOADS, KUBELET, CADVISOR and DCGM.`, <% end -%> Elem: &schema.Schema{ Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb index 108d576fa96a..87e86456b2f5 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_cluster_migratev1.go.erb @@ -959,9 +959,9 @@ func resourceContainerClusterResourceV1() *schema.Resource { Optional: true, Computed: true, <% if version == "ga" -%> - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT and STATEFULSET.`, + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET and DCGM.`, <% else -%> - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET and WORKLOADS.`, + Description: `GKE components exposing metrics. 
Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, WORKLOADS and DCGM.`, <% end -%> Elem: &schema.Schema{ Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown index 70dfa529be9d..048ea25a4b2a 100644 --- a/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/container_cluster.html.markdown @@ -614,7 +614,7 @@ This block also contains several computed attributes, documented below. The `monitoring_config` block supports: -* `enable_components` - (Optional) The GKE components exposing metrics. Supported values include: `SYSTEM_COMPONENTS`, `APISERVER`, `SCHEDULER`, `CONTROLLER_MANAGER`, `STORAGE`, `HPA`, `POD`, `DAEMONSET`, `DEPLOYMENT`, `STATEFULSET`, `KUBELET` and `CADVISOR`. In beta provider, `WORKLOADS` is supported on top of those 12 values. (`WORKLOADS` is deprecated and removed in GKE 1.24.) `KUBELET` and `CADVISOR` are only supported in GKE 1.29.3-gke.1093000 and above. +* `enable_components` - (Optional) The GKE components exposing metrics. Supported values include: `SYSTEM_COMPONENTS`, `APISERVER`, `SCHEDULER`, `CONTROLLER_MANAGER`, `STORAGE`, `HPA`, `POD`, `DAEMONSET`, `DEPLOYMENT`, `STATEFULSET`, `KUBELET`, `CADVISOR` and `DCGM`. In beta provider, `WORKLOADS` is supported on top of those 12 values. (`WORKLOADS` is deprecated and removed in GKE 1.24.) `KUBELET` and `CADVISOR` are only supported in GKE 1.29.3-gke.1093000 and above. * `managed_prometheus` - (Optional) Configuration for Managed Service for Prometheus. Structure is [documented below](#nested_managed_prometheus). 
From f514cc2e5b57129f3136b17ce602aa724fa390c4 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Fri, 28 Jun 2024 20:05:45 +0100 Subject: [PATCH 248/356] Add plural data source `google_kms_crypto_keys` (#11053) --- .../provider/provider_mmv1_resources.go.erb | 1 + .../kms/data_source_google_kms_crypto_keys.go | 205 ++++++++++++++++++ ...data_source_google_kms_crypto_keys_test.go | 72 ++++++ .../transport/error_retry_predicates.go | 25 +++ .../docs/d/kms_crypto_keys.html.markdown | 56 +++++ 5 files changed, 359 insertions(+) create mode 100644 mmv1/third_party/terraform/services/kms/data_source_google_kms_crypto_keys.go create mode 100644 mmv1/third_party/terraform/services/kms/data_source_google_kms_crypto_keys_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/kms_crypto_keys.html.markdown diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index a301a030551d..ecf0d1135f0c 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -133,6 +133,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ <% end -%> "google_iap_client": iap.DataSourceGoogleIapClient(), "google_kms_crypto_key": kms.DataSourceGoogleKmsCryptoKey(), + "google_kms_crypto_keys": kms.DataSourceGoogleKmsCryptoKeys(), "google_kms_crypto_key_version": kms.DataSourceGoogleKmsCryptoKeyVersion(), "google_kms_key_ring": kms.DataSourceGoogleKmsKeyRing(), "google_kms_secret": kms.DataSourceGoogleKmsSecret(), diff --git a/mmv1/third_party/terraform/services/kms/data_source_google_kms_crypto_keys.go b/mmv1/third_party/terraform/services/kms/data_source_google_kms_crypto_keys.go new file mode 100644 index 000000000000..f27d5e9c53e3 --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/data_source_google_kms_crypto_keys.go @@ -0,0 +1,205 @@ 
+package kms + +import ( + "fmt" + "log" + "net/http" + "regexp" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleKmsCryptoKeys() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceKMSCryptoKey().Schema) + tpgresource.AddOptionalFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "key_ring") + + // We need to explicitly add the id field to the schema used for individual keys + // Currently the id field in the google_kms_crypto_key resource is implied/added by the SDK + dsSchema["id"] = &schema.Schema{ + Type: schema.TypeString, + Computed: true, + } + + return &schema.Resource{ + Read: dataSourceGoogleKmsCryptoKeysRead, + Schema: map[string]*schema.Schema{ + "key_ring": { + Type: schema.TypeString, + Required: true, + Description: `The key ring that the keys belongs to. Format: 'projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}'.`, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: ` + The filter argument is used to add a filter query parameter that limits which keys are retrieved by the data source: ?filter={{filter}}. + Example values: + + * "name:my-key-" will retrieve keys that contain "my-key-" anywhere in their name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}. + * "name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1" will only retrieve a key with that exact name. 
+ + [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + `, + }, + "keys": { + Type: schema.TypeList, + Computed: true, + Description: "A list of all the retrieved keys from the provided key ring", + Elem: &schema.Resource{ + Schema: dsSchema, + }, + }, + }, + } +} + +func dataSourceGoogleKmsCryptoKeysRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + keyRingId, err := parseKmsKeyRingId(d.Get("key_ring").(string), config) + if err != nil { + return err + } + + id := fmt.Sprintf("%s/cryptoKeys", keyRingId.KeyRingId()) + if filter, ok := d.GetOk("filter"); ok { + id += "/filter=" + filter.(string) + } + d.SetId(id) + + log.Printf("[DEBUG] Searching for keys in key ring %s", keyRingId.KeyRingId()) + keys, err := dataSourceKMSCryptoKeysList(d, meta, keyRingId.KeyRingId()) + if err != nil { + return err + } + + if len(keys) > 0 { + log.Printf("[DEBUG] Found %d keys in key ring %s", len(keys), keyRingId.KeyRingId()) + value, err := flattenKMSKeysList(d, config, keys, keyRingId.KeyRingId()) + if err != nil { + return fmt.Errorf("error flattening keys list: %s", err) + } + if err := d.Set("keys", value); err != nil { + return fmt.Errorf("error setting keys: %s", err) + } + } else { + log.Printf("[DEBUG] Found 0 keys in key ring %s", keyRingId.KeyRingId()) + } + + return nil +} + +// dataSourceKMSCryptoKeysList calls the list endpoint for Crypto Key resources and collects all keys in a slice. +// This function handles pagination by collecting the resources returned by multiple calls to the list endpoint. +// This function also handles server-side filtering by setting the filter query parameter on each API call. 
+func dataSourceKMSCryptoKeysList(d *schema.ResourceData, meta interface{}, keyRingId string) ([]interface{}, error) { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return nil, err + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}{{key_ring}}/cryptoKeys") + if err != nil { + return nil, err + } + + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + // Always include the filter param, and optionally include the pageToken parameter for subsequent requests + var params = make(map[string]string, 0) + if filter, ok := d.GetOk("filter"); ok { + log.Printf("[DEBUG] Search for keys in key ring %s is using filter ?filter=%s", keyRingId, filter.(string)) + params["filter"] = filter.(string) + } + + cryptoKeys := make([]interface{}, 0) + for { + // Depending on previous iterations, params might contain a pageToken param + url, err = transport_tpg.AddQueryParams(url, params) + if err != nil { + return nil, err + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Headers: headers, + // ErrorRetryPredicates used to allow retrying if rate limits are hit when requesting multiple pages in a row + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429RetryableQuotaError}, + }) + if err != nil { + return nil, transport_tpg.HandleNotFoundError(err, d, fmt.Sprintf("KMSCryptoKeys %q", d.Id())) + } + + if res == nil { + // Decoding the object has resulted in it being gone. 
It may be marked deleted + log.Printf("[DEBUG] Removing KMSCryptoKey because it no longer exists.") + d.SetId("") + return nil, nil + } + + // Store info from this page + if v, ok := res["cryptoKeys"].([]interface{}); ok { + cryptoKeys = append(cryptoKeys, v...) + } + + // Handle pagination for next loop, or break loop + v, ok := res["nextPageToken"] + if ok { + params["pageToken"] = v.(string) + } + if !ok { + break + } + } + return cryptoKeys, nil +} + +// flattenKMSKeysList flattens a list of crypto keys from a given crypto key ring +func flattenKMSKeysList(d *schema.ResourceData, config *transport_tpg.Config, keysList []interface{}, keyRingId string) ([]interface{}, error) { + var keys []interface{} + for _, k := range keysList { + key := k.(map[string]interface{}) + parsedId, err := ParseKmsCryptoKeyId(key["name"].(string), config) + if err != nil { + return nil, err + } + + data := map[string]interface{}{} + // The google_kms_crypto_key resource and dataset set + // id as the value of name (projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{name}}) + // and set name is set as just {{name}}. 
+ data["id"] = key["name"] + data["name"] = parsedId.Name + data["key_ring"] = keyRingId + + data["labels"] = flattenKMSCryptoKeyLabels(key["labels"], d, config) + data["primary"] = flattenKMSCryptoKeyPrimary(key["primary"], d, config) + data["purpose"] = flattenKMSCryptoKeyPurpose(key["purpose"], d, config) + data["rotation_period"] = flattenKMSCryptoKeyRotationPeriod(key["rotationPeriod"], d, config) + data["version_template"] = flattenKMSCryptoKeyVersionTemplate(key["versionTemplate"], d, config) + data["destroy_scheduled_duration"] = flattenKMSCryptoKeyDestroyScheduledDuration(key["destroyScheduledDuration"], d, config) + data["import_only"] = flattenKMSCryptoKeyImportOnly(key["importOnly"], d, config) + data["crypto_key_backend"] = flattenKMSCryptoKeyCryptoKeyBackend(key["cryptoKeyBackend"], d, config) + keys = append(keys, data) + } + + return keys, nil +} diff --git a/mmv1/third_party/terraform/services/kms/data_source_google_kms_crypto_keys_test.go b/mmv1/third_party/terraform/services/kms/data_source_google_kms_crypto_keys_test.go new file mode 100644 index 000000000000..bb0fcf78ab9d --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/data_source_google_kms_crypto_keys_test.go @@ -0,0 +1,72 @@ +package kms_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceGoogleKmsCryptoKeys_basic(t *testing.T) { + kms := acctest.BootstrapKMSKey(t) + + id := kms.KeyRing.Name + "/cryptoKeys" + + randomString := acctest.RandString(t, 10) + filterNameFindSharedKeys := "name:tftest-shared-" + filterNameFindsNoKeys := fmt.Sprintf("name:%s", randomString) + + findSharedKeysId := fmt.Sprintf("%s/filter=%s", id, filterNameFindSharedKeys) + findsNoKeysId := fmt.Sprintf("%s/filter=%s", id, filterNameFindsNoKeys) + + context := map[string]interface{}{ + "key_ring": kms.KeyRing.Name, + "filter": "", // Can be overridden 
using 2nd argument to config funcs + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleKmsCryptoKeys_basic(context, ""), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.google_kms_crypto_keys.all_keys_in_ring", "id", id), + resource.TestCheckResourceAttr("data.google_kms_crypto_keys.all_keys_in_ring", "key_ring", kms.KeyRing.Name), + resource.TestMatchResourceAttr("data.google_kms_crypto_keys.all_keys_in_ring", "keys.#", regexp.MustCompile("[1-9]+[0-9]*")), + ), + }, + { + Config: testAccDataSourceGoogleKmsCryptoKeys_basic(context, fmt.Sprintf("filter = \"%s\"", filterNameFindSharedKeys)), + Check: resource.ComposeTestCheckFunc( + // This filter should retrieve keys in the bootstrapped KMS key ring used by the test + resource.TestCheckResourceAttr("data.google_kms_crypto_keys.all_keys_in_ring", "id", findSharedKeysId), + resource.TestCheckResourceAttr("data.google_kms_crypto_keys.all_keys_in_ring", "key_ring", kms.KeyRing.Name), + resource.TestMatchResourceAttr("data.google_kms_crypto_keys.all_keys_in_ring", "keys.#", regexp.MustCompile("[1-9]+[0-9]*")), + ), + }, + { + Config: testAccDataSourceGoogleKmsCryptoKeys_basic(context, fmt.Sprintf("filter = \"%s\"", filterNameFindsNoKeys)), + Check: resource.ComposeTestCheckFunc( + // This filter should retrieve no keys + resource.TestCheckResourceAttr("data.google_kms_crypto_keys.all_keys_in_ring", "id", findsNoKeysId), + resource.TestCheckResourceAttr("data.google_kms_crypto_keys.all_keys_in_ring", "key_ring", kms.KeyRing.Name), + resource.TestCheckResourceAttr("data.google_kms_crypto_keys.all_keys_in_ring", "keys.#", "0"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleKmsCryptoKeys_basic(context map[string]interface{}, filter string) string { + context["filter"] = filter + + return acctest.Nprintf(` +data 
"google_kms_crypto_keys" "all_keys_in_ring" { + key_ring = "%{key_ring}" + %{filter} +} +`, context) +} diff --git a/mmv1/third_party/terraform/transport/error_retry_predicates.go b/mmv1/third_party/terraform/transport/error_retry_predicates.go index 3f1c95ca2fe2..ff42b4458c81 100644 --- a/mmv1/third_party/terraform/transport/error_retry_predicates.go +++ b/mmv1/third_party/terraform/transport/error_retry_predicates.go @@ -453,6 +453,31 @@ func Is429QuotaError(err error) (bool, string) { return false, "" } +// Do retry if operation returns a 429 and the reason is RATE_LIMIT_EXCEEDED +func Is429RetryableQuotaError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code == 429 { + // Quota error isn't necessarily retryable if it's a resource instance limit; check details + isRateLimitExceeded := false + for _, d := range gerr.Details { + data := d.(map[string]interface{}) + dType, ok := data["@type"] + // Find google.rpc.ErrorInfo in Details + if ok && strings.Contains(dType.(string), "ErrorInfo") { + if v, ok := data["reason"]; ok { + if v.(string) == "RATE_LIMIT_EXCEEDED" { + isRateLimitExceeded = true + break + } + } + } + } + return isRateLimitExceeded, "429s are retryable for this resource, but only if the reason is RATE_LIMIT_EXCEEDED" + } + } + return false, "" +} + // Retry if App Engine operation returns a 409 with a specific message for // concurrent operations, or a 404 indicating p4sa has not yet propagated. 
func IsAppEngineRetryableError(err error) (bool, string) { diff --git a/mmv1/third_party/terraform/website/docs/d/kms_crypto_keys.html.markdown b/mmv1/third_party/terraform/website/docs/d/kms_crypto_keys.html.markdown new file mode 100644 index 000000000000..b0c27895fde1 --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/kms_crypto_keys.html.markdown @@ -0,0 +1,56 @@ +--- +subcategory: "Cloud Key Management Service" +description: |- + Provides access to data about all KMS keys within a key ring with Google Cloud KMS. +--- + +# google_kms_crypto_keys + +Provides access to all Google Cloud Platform KMS CryptoKeys in a given KeyRing. For more information see +[the official documentation](https://cloud.google.com/kms/docs/object-hierarchy#key) +and +[API](https://cloud.google.com/kms/docs/reference/rest/v1/projects.locations.keyRings.cryptoKeys). + +A CryptoKey is an interface to key material which can be used to encrypt and decrypt data. A CryptoKey belongs to a +Google Cloud KMS KeyRing. + +## Example Usage + +```hcl +// Get all keys in the key ring +data "google_kms_crypto_keys" "all_crypto_keys" { + key_ring = data.google_kms_key_ring.my_key_ring.id +} + +// Get keys in the key ring that have "foobar" in their name +data "google_kms_crypto_keys" "all_crypto_keys" { + key_ring = data.google_kms_key_ring.my_key_ring.id + filter = "name:foobar" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `key_ring` - (Required) The key ring that the keys belongs to. Format: 'projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}'., + +* `filter` - (Optional) The filter argument is used to add a filter query parameter that limits which keys are retrieved by the data source: ?filter={{filter}}. When no value is provided there is no filtering. + +Example filter values if filtering on name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}/cryptoKeys/{{cryptoKey}}. 
+ +* `"name:my-key-"` will retrieve keys that contain "my-key-" anywhere in their name. +* `"name=projects/my-project/locations/global/keyRings/my-key-ring/cryptoKeys/my-key-1"` will only retrieve a key with that exact name. + +[See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + + + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `keys` - A list of all the retrieved keys from the provided key ring. This list is influenced by the provided filter argument. + +See [google_kms_crypto_key](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/kms_crypto_key) resource for details of the available attributes on each key. + From ce98fbd6729e134475d2445a81c09a9409881fb4 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Fri, 28 Jun 2024 12:29:27 -0700 Subject: [PATCH 249/356] Go rewrite convert accessapproval yaml files (#11082) --- .../accessapproval/go_FolderSettings.yaml | 151 ++++++++++++++++++ .../go_OrganizationSettings.yaml | 131 +++++++++++++++ .../accessapproval/go_ProjectSettings.yaml | 140 ++++++++++++++++ mmv1/products/accessapproval/go_product.yaml | 22 +++ mmv1/templates/terraform/resource.go.tmpl | 10 +- mmv1/templates/terraform/update_mask.go.tmpl | 4 +- 6 files changed, 450 insertions(+), 8 deletions(-) create mode 100644 mmv1/products/accessapproval/go_FolderSettings.yaml create mode 100644 mmv1/products/accessapproval/go_OrganizationSettings.yaml create mode 100644 mmv1/products/accessapproval/go_ProjectSettings.yaml create mode 100644 mmv1/products/accessapproval/go_product.yaml diff --git a/mmv1/products/accessapproval/go_FolderSettings.yaml b/mmv1/products/accessapproval/go_FolderSettings.yaml new file mode 100644 index 000000000000..a466d51e29dd --- /dev/null +++ b/mmv1/products/accessapproval/go_FolderSettings.yaml @@ -0,0 +1,151 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FolderSettings' +legacy_name: 'google_folder_access_approval_settings' +description: | + Access Approval enables you to require your explicit approval whenever Google support and engineering need to access your customer content. +references: + guides: + api: 'https://cloud.google.com/access-approval/docs/reference/rest/v1/folders' +docs: +base_url: 'folders/{{folder_id}}/accessApprovalSettings' +self_link: 'folders/{{folder_id}}/accessApprovalSettings' +create_verb: 'PATCH' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'folders/{{folder_id}}/accessApprovalSettings' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + constants: 'templates/terraform/constants/go/access_approval.go.tmpl' + pre_create: 'templates/terraform/update_mask.go.tmpl' + custom_delete: 'templates/terraform/custom_delete/go/clear_folder_access_approval_settings.go.tmpl' +examples: + - name: 'folder_access_approval_full' + primary_resource_id: 'folder_access_approval' + vars: + folder_name: 'my-folder' + test_env_vars: + org_id: 'ORG_ID' + skip_test: true + - name: 'folder_access_approval_active_key_version' + primary_resource_id: 'folder_access_approval' + vars: + folder_name: 'my-folder' + test_env_vars: + org_id: 'ORG_ID' + skip_test: true +parameters: + - name: 'folder_id' + type: String + description: | + ID of 
the folder of the access approval settings. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of the settings. Format is "folders/{folder_id}/accessApprovalSettings" + output: true + - name: 'notificationEmails' + type: Array + description: | + A list of email addresses to which notifications relating to approval requests should be sent. + Notifications relating to a resource will be sent to all emails in the settings of ancestor + resources of that resource. A maximum of 50 email addresses are allowed. + is_set: true + default_from_api: true + item_type: + type: String + max_size: 50 + - name: 'enrolledServices' + type: Array + description: | + A list of Google Cloud Services for which the given resource has Access Approval enrolled. + Access requests for the resource given by name against any of these services contained here will be required + to have explicit approval. Enrollment can only be done on an all or nothing basis. + + A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. + is_set: true + required: true + set_hash_func: accessApprovalEnrolledServicesHash + item_type: + type: NestedObject + properties: + - name: 'cloudProduct' + type: String + description: | + The product for which Access Approval will be enrolled. 
Allowed values are listed (case-sensitive): + * all + * App Engine + * BigQuery + * Cloud Bigtable + * Cloud Key Management Service + * Compute Engine + * Cloud Dataflow + * Cloud Identity and Access Management + * Cloud Pub/Sub + * Cloud Storage + * Persistent Disk + + Note: These values are supported as input, but considered a legacy format: + * all + * appengine.googleapis.com + * bigquery.googleapis.com + * bigtable.googleapis.com + * cloudkms.googleapis.com + * compute.googleapis.com + * dataflow.googleapis.com + * iam.googleapis.com + * pubsub.googleapis.com + * storage.googleapis.com + required: true + - name: 'enrollmentLevel' + type: Enum + description: | + The enrollment level of the service. + default_value: "BLOCK_ALL" + enum_values: + - 'BLOCK_ALL' + - name: 'enrolledAncestor' + type: Boolean + description: | + If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Folder. + output: true + - name: 'activeKeyVersion' + type: String + description: | + The asymmetric crypto key version to use for signing approval requests. + Empty active_key_version indicates that a Google-managed key should be used for signing. + This property will be ignored if set by an ancestor of the resource, and new non-empty values may not be set. + - name: 'ancestorHasActiveKeyVersion' + type: Boolean + description: | + If the field is true, that indicates that an ancestor of this Folder has set active_key_version. + output: true + - name: 'invalidKeyVersion' + type: Boolean + description: | + If the field is true, that indicates that there is some configuration issue with the active_key_version + configured on this Folder (e.g. it doesn't exist or the Access Approval service account doesn't have the + correct permissions on it, etc.) This key version is not necessarily the effective key version at this level, + as key versions are inherited top-down. 
+ output: true diff --git a/mmv1/products/accessapproval/go_OrganizationSettings.yaml b/mmv1/products/accessapproval/go_OrganizationSettings.yaml new file mode 100644 index 000000000000..c486995d8b83 --- /dev/null +++ b/mmv1/products/accessapproval/go_OrganizationSettings.yaml @@ -0,0 +1,131 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'OrganizationSettings' +legacy_name: 'google_organization_access_approval_settings' +description: | + Access Approval enables you to require your explicit approval whenever Google support and engineering need to access your customer content. 
+references: + guides: + api: 'https://cloud.google.com/access-approval/docs/reference/rest/v1/organizations' +docs: +base_url: 'organizations/{{organization_id}}/accessApprovalSettings' +self_link: 'organizations/{{organization_id}}/accessApprovalSettings' +create_verb: 'PATCH' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'organizations/{{organization_id}}/accessApprovalSettings' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + pre_create: 'templates/terraform/update_mask.go.tmpl' + custom_delete: 'templates/terraform/custom_delete/go/clear_organization_access_approval_settings.go.tmpl' +examples: + - name: 'organization_access_approval_full' + primary_resource_id: 'organization_access_approval' + test_env_vars: + org_id: 'ORG_ID' + skip_test: true + - name: 'organization_access_approval_active_key_version' + primary_resource_id: 'organization_access_approval' + test_env_vars: + org_id: 'ORG_ID' + skip_test: true +parameters: + - name: 'organization_id' + type: String + description: | + ID of the organization of the access approval settings. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of the settings. Format is "organizations/{organization_id}/accessApprovalSettings" + output: true + - name: 'notificationEmails' + type: Array + description: | + A list of email addresses to which notifications relating to approval requests should be sent. + Notifications relating to a resource will be sent to all emails in the settings of ancestor + resources of that resource. A maximum of 50 email addresses are allowed. + is_set: true + default_from_api: true + item_type: + type: String + max_size: 50 + - name: 'enrolledServices' + type: Array + description: | + A list of Google Cloud Services for which the given resource has Access Approval enrolled. 
+ Access requests for the resource given by name against any of these services contained here will be required + to have explicit approval. Enrollment can be done for individual services. + + A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. + is_set: true + required: true + set_hash_func: accessApprovalEnrolledServicesHash + item_type: + type: NestedObject + properties: + - name: 'cloudProduct' + type: String + description: | + The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): + all + appengine.googleapis.com + bigquery.googleapis.com + bigtable.googleapis.com + cloudkms.googleapis.com + compute.googleapis.com + dataflow.googleapis.com + iam.googleapis.com + pubsub.googleapis.com + storage.googleapis.com + required: true + - name: 'enrollmentLevel' + type: Enum + description: | + The enrollment level of the service. + default_value: "BLOCK_ALL" + enum_values: + - 'BLOCK_ALL' + - name: 'enrolledAncestor' + type: Boolean + description: | + This field will always be unset for the organization since organizations do not have ancestors. + output: true + - name: 'activeKeyVersion' + type: String + description: | + The asymmetric crypto key version to use for signing approval requests. + Empty active_key_version indicates that a Google-managed key should be used for signing. + - name: 'ancestorHasActiveKeyVersion' + type: Boolean + description: | + This field will always be unset for the organization since organizations do not have ancestors. + output: true + - name: 'invalidKeyVersion' + type: Boolean + description: | + If the field is true, that indicates that there is some configuration issue with the active_key_version + configured on this Organization (e.g. it doesn't exist or the Access Approval service account doesn't have the + correct permissions on it, etc.). 
+ output: true diff --git a/mmv1/products/accessapproval/go_ProjectSettings.yaml b/mmv1/products/accessapproval/go_ProjectSettings.yaml new file mode 100644 index 000000000000..806609b22c4f --- /dev/null +++ b/mmv1/products/accessapproval/go_ProjectSettings.yaml @@ -0,0 +1,140 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ProjectSettings' +legacy_name: 'google_project_access_approval_settings' +description: | + Access Approval enables you to require your explicit approval whenever Google support and engineering need to access your customer content. 
+references: + guides: + api: 'https://cloud.google.com/access-approval/docs/reference/rest/v1/projects' +docs: +base_url: 'projects/{{project_id}}/accessApprovalSettings' +self_link: 'projects/{{project_id}}/accessApprovalSettings' +create_verb: 'PATCH' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project_id}}/accessApprovalSettings' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + pre_create: 'templates/terraform/update_mask.go.tmpl' + custom_delete: 'templates/terraform/custom_delete/go/clear_project_access_approval_settings.go.tmpl' +examples: + - name: 'project_access_approval_full' + primary_resource_id: 'project_access_approval' + test_env_vars: + project: 'PROJECT_NAME' + org_id: 'ORG_ID' + skip_test: true + - name: 'project_access_approval_active_key_version' + primary_resource_id: 'project_access_approval' + test_env_vars: + project: 'PROJECT_NAME' + org_id: 'ORG_ID' + skip_test: true +parameters: + - name: 'project_id' + type: String + description: | + ID of the project of the access approval settings. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of the settings. Format is "projects/{project_id}/accessApprovalSettings" + output: true + - name: 'notificationEmails' + type: Array + description: | + A list of email addresses to which notifications relating to approval requests should be sent. + Notifications relating to a resource will be sent to all emails in the settings of ancestor + resources of that resource. A maximum of 50 email addresses are allowed. + is_set: true + default_from_api: true + item_type: + type: String + max_size: 50 + - name: 'enrolledServices' + type: Array + description: | + A list of Google Cloud Services for which the given resource has Access Approval enrolled. 
+ Access requests for the resource given by name against any of these services contained here will be required + to have explicit approval. Enrollment can only be done on an all or nothing basis. + + A maximum of 10 enrolled services will be enforced, to be expanded as the set of supported services is expanded. + is_set: true + required: true + set_hash_func: accessApprovalEnrolledServicesHash + item_type: + type: NestedObject + properties: + - name: 'cloudProduct' + type: String + description: | + The product for which Access Approval will be enrolled. Allowed values are listed (case-sensitive): + all + appengine.googleapis.com + bigquery.googleapis.com + bigtable.googleapis.com + cloudkms.googleapis.com + compute.googleapis.com + dataflow.googleapis.com + iam.googleapis.com + pubsub.googleapis.com + storage.googleapis.com + required: true + - name: 'enrollmentLevel' + type: Enum + description: | + The enrollment level of the service. + default_value: "BLOCK_ALL" + enum_values: + - 'BLOCK_ALL' + - name: 'enrolledAncestor' + type: Boolean + description: | + If the field is true, that indicates that at least one service is enrolled for Access Approval in one or more ancestors of the Project. + output: true + - name: 'activeKeyVersion' + type: String + description: | + The asymmetric crypto key version to use for signing approval requests. + Empty active_key_version indicates that a Google-managed key should be used for signing. + This property will be ignored if set by an ancestor of the resource, and new non-empty values may not be set. + - name: 'ancestorHasActiveKeyVersion' + type: Boolean + description: | + If the field is true, that indicates that an ancestor of this Project has set active_key_version. + output: true + - name: 'invalidKeyVersion' + type: Boolean + description: | + If the field is true, that indicates that there is some configuration issue with the active_key_version + configured on this Project (e.g. 
it doesn't exist or the Access Approval service account doesn't have the + correct permissions on it, etc.) This key version is not necessarily the effective key version at this level, + as key versions are inherited top-down. + output: true + - name: 'project' + type: String + description: | + Project id. + deprecation_message: '`project` is deprecated and will be removed in a future major release. Use `project_id` instead.' diff --git a/mmv1/products/accessapproval/go_product.yaml b/mmv1/products/accessapproval/go_product.yaml new file mode 100644 index 000000000000..d925b1742897 --- /dev/null +++ b/mmv1/products/accessapproval/go_product.yaml @@ -0,0 +1,22 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'AccessApproval' +display_name: 'Access Approval' +versions: + - name: 'ga' + base_url: 'https://accessapproval.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 601ded37661b..6f7c8bca66c3 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -279,7 +279,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ } headers := make(http.Header) -{{- if $.CustomCode.PreCreate }} +{{- if $.CustomCode.PreCreate }} {{ $.CustomTemplate $.CustomCode.PreCreate false -}} {{- end}} res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ @@ -304,10 +304,10 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{- end}} return fmt.Errorf("Error creating {{ $.Name -}}: %s", err) } -{{/* # Set resource properties from create API response (unless it returns an Operation) */}} -{{if and ($.GetAsync) (not ($.GetAsync.IsA "OpAsync")) }} +{{- /* # Set resource properties from create API response (unless it returns an Operation) */}} +{{- if not (and $.GetAsync ($.GetAsync.IsA "OpAsync")) }} {{- range $prop := $.GettableProperties }} -{{ if and ($.IsInIdentity $prop) $prop.Output }} +{{- if and ($.IsInIdentity $prop) $prop.Output }} if err := d.Set("{{ underscore $prop.Name -}}", flatten{{ if $.NestedQuery -}}Nested{{end}}{{ $.ResourceName -}}{{ camelize $prop.Name "upper" -}}(res["{{ $prop.ApiName -}}"], d, config)); err != nil { return fmt.Errorf(`Error setting computed identity field "{{ underscore $prop.Name }}": %s`, err) } @@ -756,7 +756,7 @@ func resource{{ $.ResourceName -}}Update(d *schema.ResourceData, meta interface{ log.Printf("[DEBUG] Updating {{ $.Name }} %q: %#v", d.Id(), obj) headers := make(http.Header) {{- if $.UpdateMask }} 
-{{ template "UpdateMask" $ -}} +{{ $.CustomTemplate "templates/terraform/update_mask.go.tmpl" false -}} {{ end}} {{- if $.CustomCode.PreUpdate -}}{{""}} {{ $.CustomTemplate $.CustomCode.PreUpdate true -}} diff --git a/mmv1/templates/terraform/update_mask.go.tmpl b/mmv1/templates/terraform/update_mask.go.tmpl index 3f29c3fd10ad..515db8f04da6 100644 --- a/mmv1/templates/terraform/update_mask.go.tmpl +++ b/mmv1/templates/terraform/update_mask.go.tmpl @@ -9,8 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. -*/}} -{{- define "UpdateMask" -}} +*/ -}} updateMask := []string{} {{- $maskGroups := $.GetPropertyUpdateMasksGroups $.UpdateBodyProperties "" }} {{- range $key := $.GetPropertyUpdateMasksGroupKeys $.UpdateBodyProperties }} @@ -25,4 +24,3 @@ url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": str if err != nil { return err } -{{- end }}{{/* define */}} From 86415d0d14a3c2a62bffdb7c6e8348f0fbf1c977 Mon Sep 17 00:00:00 2001 From: Demetre Pipia Date: Fri, 28 Jun 2024 21:45:00 +0200 Subject: [PATCH 250/356] Autoscaler maxreplicas fix (#11083) --- mmv1/products/compute/Autoscaler.yaml | 1 + mmv1/products/compute/RegionAutoscaler.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/mmv1/products/compute/Autoscaler.yaml b/mmv1/products/compute/Autoscaler.yaml index f62d4f9a1b99..f2db0fd9d46f 100644 --- a/mmv1/products/compute/Autoscaler.yaml +++ b/mmv1/products/compute/Autoscaler.yaml @@ -137,6 +137,7 @@ properties: to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas. 
+ send_empty_value: true required: true - !ruby/object:Api::Type::Integer name: 'cooldownPeriod' diff --git a/mmv1/products/compute/RegionAutoscaler.yaml b/mmv1/products/compute/RegionAutoscaler.yaml index fa9991ee61aa..9156b1970aa1 100644 --- a/mmv1/products/compute/RegionAutoscaler.yaml +++ b/mmv1/products/compute/RegionAutoscaler.yaml @@ -117,6 +117,7 @@ properties: to. This is required when creating or updating an autoscaler. The maximum number of replicas should not be lower than minimal number of replicas. + send_empty_value: true required: true - !ruby/object:Api::Type::Integer name: 'cooldownPeriod' From 6922a985aca13048b385a8b5fc85697519a8c999 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Fri, 28 Jun 2024 14:52:36 -0500 Subject: [PATCH 251/356] go rewrite - remaining Compute and some ACM diffs (#11079) --- mmv1/api/resource.go | 9 ++- mmv1/google/string_utils.go | 1 + mmv1/products/compute/ForwardingRule.yaml | 14 ++-- mmv1/products/compute/go_ForwardingRule.yaml | 18 ++--- mmv1/products/compute/go_HealthCheck.yaml | 25 +++++++ mmv1/products/pubsub/go_Schema.yaml | 2 +- mmv1/products/pubsub/go_Subscription.yaml | 6 +- mmv1/products/pubsub/go_Topic.yaml | 2 +- mmv1/template-converter.go | 1 + ...r_nat_validate_action_active_range.go.tmpl | 2 +- .../custom_flatten/go/default_if_empty.tmpl | 2 +- ...self_link_as_name_set_organization.go.tmpl | 26 ++++++++ ...query_reservation_assignment_basic.tf.tmpl | 13 ++++ ...gquery_reservation_assignment_full.tf.tmpl | 14 ++++ ...st_config_allowlisted_certificates.tf.tmpl | 16 +++++ ...e_health_check_http_source_regions.tf.tmpl | 12 ++++ ..._health_check_https_source_regions.tf.tmpl | 12 ++++ ...te_health_check_tcp_source_regions.tf.tmpl | 12 ++++ .../go/compute_packet_mirroring_full.tf.tmpl | 2 + .../examples/go/compute_reservation.tf.tmpl | 2 + .../go/dataplex_datascan_full_quality.tf.tmpl | 2 +- .../go/dns_managed_zone_basic.tf.tmpl | 2 + .../go/dns_managed_zone_private.tf.tmpl | 2 + 
...ns_managed_zone_private_forwarding.tf.tmpl | 2 + .../go/dns_managed_zone_private_gke.tf.tmpl | 2 + .../dns_managed_zone_private_peering.tf.tmpl | 2 + .../go/dns_managed_zone_quickstart.tf.tmpl | 2 + ...dns_managed_zone_service_directory.tf.tmpl | 2 + .../examples/go/dns_policy_basic.tf.tmpl | 2 + .../examples/go/dns_record_set_basic.tf.tmpl | 2 + .../go/dns_response_policy_basic.tf.tmpl | 2 + .../go/dns_response_policy_rule_basic.tf.tmpl | 2 + ...xternal_cdn_lb_with_backend_bucket.tf.tmpl | 20 ++++++ .../go/external_http_lb_mig_backend.tf.tmpl | 18 +++++ ..._http_lb_mig_backend_custom_header.tf.tmpl | 2 + .../external_ssl_proxy_lb_mig_backend.tf.tmpl | 2 + .../external_tcp_proxy_lb_mig_backend.tf.tmpl | 2 + .../examples/go/external_vpn_gateway.tf.tmpl | 2 + .../go/firewall_with_target_tags.tf.tmpl | 2 + .../go/flask_google_cloud_quickstart.tf.tmpl | 8 +++ ..._internal_http_lb_with_mig_backend.tf.tmpl | 2 + .../go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl | 2 + .../go/instance_custom_hostname.tf.tmpl | 6 +- .../go/instance_settings_basic.tf.tmpl | 2 + .../instance_virtual_display_enabled.tf.tmpl | 6 +- .../go/int_https_lb_https_redirect.tf.tmpl | 4 +- .../internal_http_lb_with_mig_backend.tf.tmpl | 2 + ...ternal_tcp_udp_lb_with_mig_backend.tf.tmpl | 4 +- ..._custom_firewall_enforcement_order.tf.tmpl | 2 + .../examples/go/network_custom_mtu.tf.tmpl | 2 + ...gement_connectivity_test_addresses.tf.tmpl | 2 + ...gement_connectivity_test_instances.tf.tmpl | 2 + ..._services_lb_route_extension_basic.tf.tmpl | 4 ++ ...ervices_lb_traffic_extension_basic.tf.tmpl | 3 + ...rivate_service_connect_google_apis.tf.tmpl | 6 ++ .../go/privateca_capool_all_fields.tf.tmpl | 2 + .../go/privateca_capool_basic.tf.tmpl | 2 + ...vateca_certificate_authority_basic.tf.tmpl | 2 + ...teca_certificate_authority_byo_key.tf.tmpl | 2 + ...a_certificate_authority_custom_ski.tf.tmpl | 2 + ..._certificate_authority_subordinate.tf.tmpl | 2 + .../go/privateca_certificate_config.tf.tmpl | 4 +- 
.../go/privateca_certificate_csr.tf.tmpl | 2 + .../privateca_certificate_custom_ski.tf.tmpl | 2 + ...privateca_certificate_no_authority.tf.tmpl | 2 + ...rivateca_certificate_with_template.tf.tmpl | 2 + .../examples/go/privateca_quickstart.tf.tmpl | 2 + .../go/privateca_template_basic.tf.tmpl | 2 + .../go/region_autoscaler_basic.tf.tmpl | 2 + .../go/region_target_tcp_proxy_basic.tf.tmpl | 4 +- ...region_url_map_path_template_match.tf.tmpl | 2 + ...gional_external_http_load_balancer.tf.tmpl | 28 ++++++++ ...nization_notification_config_basic.tf.tmpl | 15 +++++ ...re_source_manager_repository_basic.tf.tmpl | 10 +++ ..._manager_repository_initial_config.tf.tmpl | 18 +++++ .../examples/go/spot_instance_basic.tf.tmpl | 4 +- .../examples/go/sql_database_basic.tf.tmpl | 2 + .../go/sql_database_deletion_policy.tf.tmpl | 2 + .../go/sql_database_instance_my_sql.tf.tmpl | 4 ++ .../go/sql_database_instance_postgres.tf.tmpl | 4 ++ .../sql_database_instance_sqlserver.tf.tmpl | 4 ++ .../examples/go/sql_instance_cmek.tf.tmpl | 14 ++++ .../examples/go/sql_instance_ha.tf.tmpl | 6 ++ .../go/sql_instance_iam_condition.tf.tmpl | 2 + .../examples/go/sql_instance_labels.tf.tmpl | 6 ++ .../examples/go/sql_instance_pitr.tf.tmpl | 4 ++ .../examples/go/sql_instance_ssl_cert.tf.tmpl | 10 +++ ..._mysql_instance_authorized_network.tf.tmpl | 2 + .../go/sql_mysql_instance_backup.tf.tmpl | 2 + ...sql_mysql_instance_backup_location.tf.tmpl | 2 + ...ql_mysql_instance_backup_retention.tf.tmpl | 2 + .../go/sql_mysql_instance_clone.tf.tmpl | 4 ++ .../go/sql_mysql_instance_flags.tf.tmpl | 2 + .../go/sql_mysql_instance_public_ip.tf.tmpl | 2 + .../go/sql_mysql_instance_pvp.tf.tmpl | 2 + .../go/sql_mysql_instance_replica.tf.tmpl | 4 ++ ...stgres_instance_authorized_network.tf.tmpl | 2 + .../go/sql_postgres_instance_backup.tf.tmpl | 2 + ..._postgres_instance_backup_location.tf.tmpl | 2 + ...postgres_instance_backup_retention.tf.tmpl | 2 + .../go/sql_postgres_instance_clone.tf.tmpl | 4 ++ 
.../go/sql_postgres_instance_flags.tf.tmpl | 2 + .../sql_postgres_instance_public_ip.tf.tmpl | 2 + .../go/sql_postgres_instance_pvp.tf.tmpl | 2 + .../go/sql_postgres_instance_replica.tf.tmpl | 4 ++ ...server_instance_authorized_network.tf.tmpl | 2 + .../go/sql_sqlserver_instance_backup.tf.tmpl | 2 + ...sqlserver_instance_backup_location.tf.tmpl | 2 + ...qlserver_instance_backup_retention.tf.tmpl | 2 + .../go/sql_sqlserver_instance_clone.tf.tmpl | 4 ++ .../go/sql_sqlserver_instance_flags.tf.tmpl | 2 + .../sql_sqlserver_instance_public_ip.tf.tmpl | 2 + .../go/sql_sqlserver_instance_replica.tf.tmpl | 4 ++ .../go/sql_sqlserver_vm_instance.tf.tmpl | 4 ++ .../examples/go/storage_hmac_key.tf.tmpl | 4 +- .../go/storage_make_data_public.tf.tmpl | 2 + .../examples/go/storage_new_bucket.tf.tmpl | 8 +++ .../storage_object_lifecycle_setting.tf.tmpl | 2 + .../go/storage_pubsub_notifications.tf.tmpl | 2 + .../go/storage_static_website.tf.tmpl | 6 ++ .../go/target_grpc_proxy_basic.tf.tmpl | 2 + .../go/target_http_proxy_basic.tf.tmpl | 2 + ...http_proxy_http_keep_alive_timeout.tf.tmpl | 2 + .../target_http_proxy_https_redirect.tf.tmpl | 2 + .../go/target_https_proxy_basic.tf.tmpl | 2 + ...ttps_proxy_http_keep_alive_timeout.tf.tmpl | 2 + .../go/target_https_proxy_mtls.tf.tmpl | 2 + .../go/target_ssl_proxy_basic.tf.tmpl | 2 + .../go/target_tcp_proxy_basic.tf.tmpl | 2 + .../go/url_map_bucket_and_service.tf.tmpl | 2 + .../go/url_map_header_based_routing.tf.tmpl | 2 + .../url_map_parameter_based_routing.tf.tmpl | 2 + .../go/url_map_path_template_match.tf.tmpl | 2 + .../go/url_map_traffic_director_path.tf.tmpl | 2 + ..._map_traffic_director_path_partial.tf.tmpl | 2 + .../go/url_map_traffic_director_route.tf.tmpl | 2 + ...map_traffic_director_route_partial.tf.tmpl | 2 + .../go/workstation_config_basic.tf.tmpl | 21 ++++++ mmv1/templates/terraform/nested_query.go.tmpl | 29 ++------- .../bigquery_reservation_assignment.go.tmpl | 20 ++++++ mmv1/templates/terraform/resource.go.tmpl | 16 +++-- 
.../unordered_list_customize_diff.go.tmpl | 2 +- mmv1/templates/terraform/yaml_conversion.erb | 2 +- ...resource_compute_health_check_test.go.tmpl | 65 +++++++++++++++++++ 144 files changed, 705 insertions(+), 65 deletions(-) create mode 100644 mmv1/templates/terraform/custom_import/go/scc_v2_source_self_link_as_name_set_organization.go.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquery_reservation_assignment_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/bigquery_reservation_assignment_full.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/certificate_manager_trust_config_allowlisted_certificates.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/compute_health_check_http_source_regions.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/compute_health_check_https_source_regions.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/compute_health_check_tcp_source_regions.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/scc_v2_organization_notification_config_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/secure_source_manager_repository_basic.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/secure_source_manager_repository_initial_config.tf.tmpl create mode 100644 mmv1/templates/terraform/pre_create/go/bigquery_reservation_assignment.go.tmpl diff --git a/mmv1/api/resource.go b/mmv1/api/resource.go index 65dc21673cea..c0fb56b4be85 100644 --- a/mmv1/api/resource.go +++ b/mmv1/api/resource.go @@ -1495,7 +1495,14 @@ func (r Resource) PropertiesByCustomUpdateGroups() []UpdateGroup { } updateGroups = append(updateGroups, groupedProperty) } - sort.Slice(updateGroups, func(i, j int) bool { return updateGroups[i].UpdateId < updateGroups[i].UpdateId }) + sort.Slice(updateGroups, func(i, j int) bool { + a := updateGroups[i] + b := updateGroups[j] + if a.UpdateVerb != b.UpdateVerb { + return a.UpdateVerb > b.UpdateVerb + } + return 
a.UpdateId < b.UpdateId + }) return updateGroups } diff --git a/mmv1/google/string_utils.go b/mmv1/google/string_utils.go index daeaf56baaf6..63d8fce9c683 100644 --- a/mmv1/google/string_utils.go +++ b/mmv1/google/string_utils.go @@ -158,6 +158,7 @@ func Format2Regex(format string) string { // TODO: the trims may not be needed with more effecient regex word := strings.TrimPrefix(match, "{{") word = strings.TrimSuffix(word, "}}") + word = strings.ReplaceAll(word, "%", "") return fmt.Sprintf("(?P<%s>.+)", word) }) re = regexp.MustCompile(`\{\{([[:word:]]+)\}\}`) diff --git a/mmv1/products/compute/ForwardingRule.yaml b/mmv1/products/compute/ForwardingRule.yaml index b28d4ad67374..e3877f30bdb7 100644 --- a/mmv1/products/compute/ForwardingRule.yaml +++ b/mmv1/products/compute/ForwardingRule.yaml @@ -497,6 +497,13 @@ properties: update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setTarget' diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' custom_expand: 'templates/terraform/custom_expand/self_link_from_name.erb' + - !ruby/object:Api::Type::Fingerprint + name: 'labelFingerprint' + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels' + update_verb: :POST - !ruby/object:Api::Type::Boolean name: 'allowGlobalAccess' description: | @@ -518,13 +525,6 @@ properties: Labels to apply to this forwarding rule. A list of key->value pairs. update_verb: :POST update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels' - - !ruby/object:Api::Type::Fingerprint - name: 'labelFingerprint' - description: | - The fingerprint used for optimistic locking of this resource. Used - internally during updates. 
- update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels' - update_verb: :POST - !ruby/object:Api::Type::Boolean name: 'allPorts' description: | diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml index 2944456aca31..417db5eed9f2 100644 --- a/mmv1/products/compute/go_ForwardingRule.yaml +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -479,6 +479,15 @@ properties: update_verb: 'POST' diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' custom_expand: 'templates/terraform/custom_expand/go/self_link_from_name.tmpl' + - name: 'labelFingerprint' + type: Fingerprint + description: | + The fingerprint used for optimistic locking of this resource. Used + internally during updates. + output: true + update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels' + update_verb: 'POST' + key_expander: '' - name: 'allowGlobalAccess' type: Boolean description: | @@ -501,15 +510,6 @@ properties: immutable: false update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels' update_verb: 'POST' - - name: 'labelFingerprint' - type: Fingerprint - description: | - The fingerprint used for optimistic locking of this resource. Used - internally during updates. - output: true - update_url: 'projects/{{project}}/regions/{{region}}/forwardingRules/{{name}}/setLabels' - update_verb: 'POST' - key_expander: '' - name: 'allPorts' type: Boolean description: | diff --git a/mmv1/products/compute/go_HealthCheck.yaml b/mmv1/products/compute/go_HealthCheck.yaml index 20c697a78af4..053c86241be7 100644 --- a/mmv1/products/compute/go_HealthCheck.yaml +++ b/mmv1/products/compute/go_HealthCheck.yaml @@ -158,6 +158,31 @@ properties: The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec. 
default_value: 5 + - name: 'sourceRegions' + type: Array + description: | + The list of cloud regions from which health checks are performed. If + any regions are specified, then exactly 3 regions should be specified. + The region names must be valid names of Google Cloud regions. This can + only be set for global health check. If this list is non-empty, then + there are restrictions on what other health check fields are supported + and what other resources can use this health check: + + * SSL, HTTP2, and GRPC protocols are not supported. + + * The TCP request field is not supported. + + * The proxyHeader field for HTTP, HTTPS, and TCP is not supported. + + * The checkIntervalSec field must be at least 30. + + * The health check cannot be used with BackendService nor with managed + instance group auto-healing. + min_version: 'beta' + item_type: + type: String + min_size: 3 + max_size: 3 - name: 'unhealthyThreshold' type: Integer description: | diff --git a/mmv1/products/pubsub/go_Schema.yaml b/mmv1/products/pubsub/go_Schema.yaml index 46ee0533179b..c16b3ab2f43b 100644 --- a/mmv1/products/pubsub/go_Schema.yaml +++ b/mmv1/products/pubsub/go_Schema.yaml @@ -70,7 +70,7 @@ properties: - name: 'type' type: Enum description: The type of the schema definition - default_value: TYPE_UNSPECIFIED + default_value: "TYPE_UNSPECIFIED" enum_values: - 'TYPE_UNSPECIFIED' - 'PROTOCOL_BUFFER' diff --git a/mmv1/products/pubsub/go_Subscription.yaml b/mmv1/products/pubsub/go_Subscription.yaml index 9ec82cb69821..8665583d28f8 100644 --- a/mmv1/products/pubsub/go_Subscription.yaml +++ b/mmv1/products/pubsub/go_Subscription.yaml @@ -141,7 +141,7 @@ properties: - name: 'table' type: String description: | - The name of the table to which to write data, of the form {projectId}:{datasetId}.{tableId} + The name of the table to which to write data, of the form {projectId}.{datasetId}.{tableId} required: true - name: 'useTopicSchema' type: Boolean @@ -207,7 +207,7 @@ properties: The maximum 
duration that can elapse before a new Cloud Storage file is created. Min 1 minute, max 10 minutes, default 5 minutes. May not exceed the subscription's acknowledgement deadline. A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". - default_value: 300s + default_value: "300s" - name: 'maxBytes' type: Integer description: | @@ -353,7 +353,7 @@ properties: A duration in seconds with up to nine fractional digits, terminated by 's'. Example: `"600.5s"`. - default_value: 604800s + default_value: "604800s" - name: 'retainAckedMessages' type: Boolean description: | diff --git a/mmv1/products/pubsub/go_Topic.yaml b/mmv1/products/pubsub/go_Topic.yaml index 32c9ce54bce2..807462055e78 100644 --- a/mmv1/products/pubsub/go_Topic.yaml +++ b/mmv1/products/pubsub/go_Topic.yaml @@ -143,7 +143,7 @@ properties: - name: 'encoding' type: Enum description: The encoding of messages validated against schema. - default_value: ENCODING_UNSPECIFIED + default_value: "ENCODING_UNSPECIFIED" enum_values: - 'ENCODING_UNSPECIFIED' - 'JSON' diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index 16d06276cc19..b6809faad156 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -689,6 +689,7 @@ func checkExceptionList(filePath string) bool { "custom_flatten/bigquery_table_ref_copy_destinationtable.go", "custom_flatten/bigquery_table_ref_extract_sourcetable.go", "custom_flatten/bigquery_table_ref_query_destinationtable.go", + "constants/router_nat_validate_action_active_range.go", "unordered_list_customize_diff", "default_if_empty", diff --git a/mmv1/templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl b/mmv1/templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl index 0844522c0620..b1dbba3c8a53 100644 --- a/mmv1/templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl +++ 
b/mmv1/templates/terraform/constants/go/router_nat_validate_action_active_range.go.tmpl @@ -1,4 +1,4 @@ -{{- if ne $.TargetVersionName "ga" }} +{{- if ne $.TargetVersionName "ga" -}} natType := d.Get("type").(string) if natType == "PRIVATE" { rules := d.Get("rules").(*schema.Set) diff --git a/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl b/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl index d381d195cfe9..2c3cacc96b6a 100644 --- a/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl @@ -14,7 +14,7 @@ func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.Reso if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return {{$.GoLiteral $.DefaultValue}} } -{{ if $.IsA "Integer" }} +{{ if $.IsA "Integer" -}} // Handles the string fixed64 format if strVal, ok := v.(string); ok { if intVal, err := tpgresource.StringToFixed64(strVal); err == nil { diff --git a/mmv1/templates/terraform/custom_import/go/scc_v2_source_self_link_as_name_set_organization.go.tmpl b/mmv1/templates/terraform/custom_import/go/scc_v2_source_self_link_as_name_set_organization.go.tmpl new file mode 100644 index 000000000000..71dee0f9828a --- /dev/null +++ b/mmv1/templates/terraform/custom_import/go/scc_v2_source_self_link_as_name_set_organization.go.tmpl @@ -0,0 +1,26 @@ +config := meta.(*transport_tpg.Config) + +// current import_formats can't import fields with forward slashes in their value +if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { + return nil, err +} + +stringParts := strings.Split(d.Get("name").(string), "/") +if len(stringParts) != 6 { + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "organizations/{{"{{"}}organization{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/notificationConfigs/{{"{{"}}config_id{{"}}"}}", + ) +} + +if err := d.Set("organization", 
stringParts[1]); err != nil { + return nil, fmt.Errorf("Error setting organization: %s", err) +} +if err := d.Set("location", stringParts[3]); err != nil { + return nil, fmt.Errorf("Error setting location: %s", err) +} +if err := d.Set("config_id", stringParts[5]); err != nil { + return nil, fmt.Errorf("Error setting config_id: %s", err) +} +return []*schema.ResourceData{d}, nil diff --git a/mmv1/templates/terraform/examples/go/bigquery_reservation_assignment_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_reservation_assignment_basic.tf.tmpl new file mode 100644 index 000000000000..5961e131f26f --- /dev/null +++ b/mmv1/templates/terraform/examples/go/bigquery_reservation_assignment_basic.tf.tmpl @@ -0,0 +1,13 @@ +resource "google_bigquery_reservation" "basic" { + name = "{{index $.Vars "reservation_name"}}" + project = "{{index $.TestEnvVars "project"}}" + location = "us-central1" + slot_capacity = 0 + ignore_idle_slots = false +} + +resource "google_bigquery_reservation_assignment" "{{$.PrimaryResourceId}}" { + assignee = "projects/{{index $.TestEnvVars "project"}}" + job_type = "PIPELINE" + reservation = google_bigquery_reservation.basic.id +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/bigquery_reservation_assignment_full.tf.tmpl b/mmv1/templates/terraform/examples/go/bigquery_reservation_assignment_full.tf.tmpl new file mode 100644 index 000000000000..c13722a098b4 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/bigquery_reservation_assignment_full.tf.tmpl @@ -0,0 +1,14 @@ +resource "google_bigquery_reservation" "basic" { + name = "{{index $.Vars "reservation_name"}}" + project = "{{index $.TestEnvVars "project"}}" + location = "us-central1" + slot_capacity = 0 + ignore_idle_slots = false +} + +resource "google_bigquery_reservation_assignment" "{{$.PrimaryResourceId}}" { + assignee = "projects/{{index $.TestEnvVars "project"}}" + job_type = "QUERY" + location = "us-central1" + reservation = 
google_bigquery_reservation.basic.id +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/certificate_manager_trust_config_allowlisted_certificates.tf.tmpl b/mmv1/templates/terraform/examples/go/certificate_manager_trust_config_allowlisted_certificates.tf.tmpl new file mode 100644 index 000000000000..90dff10f6d3f --- /dev/null +++ b/mmv1/templates/terraform/examples/go/certificate_manager_trust_config_allowlisted_certificates.tf.tmpl @@ -0,0 +1,16 @@ +resource "google_certificate_manager_trust_config" "{{$.PrimaryResourceId}}" { + name = "{{index $.Vars "trust_config_name"}}" + description = "A sample trust config resource with allowlisted certificates" + location = "global" + + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert.pem") + } + allowlisted_certificates { + pem_certificate = file("test-fixtures/cert2.pem") + } + + labels = { + foo = "bar" + } +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/compute_health_check_http_source_regions.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_health_check_http_source_regions.tf.tmpl new file mode 100644 index 000000000000..14480abe2365 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/compute_health_check_http_source_regions.tf.tmpl @@ -0,0 +1,12 @@ +resource "google_compute_health_check" "{{$.PrimaryResourceId}}" { + provider = "google-beta" + name = "{{index $.Vars "health_check_name"}}" + check_interval_sec = 30 + + http_health_check { + port = 80 + port_specification = "USE_FIXED_PORT" + } + + source_regions = ["us-west1", "us-central1", "us-east5"] +} diff --git a/mmv1/templates/terraform/examples/go/compute_health_check_https_source_regions.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_health_check_https_source_regions.tf.tmpl new file mode 100644 index 000000000000..d274bf06b9cb --- /dev/null +++ b/mmv1/templates/terraform/examples/go/compute_health_check_https_source_regions.tf.tmpl @@ -0,0 +1,12 @@ 
+resource "google_compute_health_check" "{{$.PrimaryResourceId}}" { + provider = "google-beta" + name = "{{index $.Vars "health_check_name"}}" + check_interval_sec = 30 + + https_health_check { + port = 80 + port_specification = "USE_FIXED_PORT" + } + + source_regions = ["us-west1", "us-central1", "us-east5"] +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/compute_health_check_tcp_source_regions.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_health_check_tcp_source_regions.tf.tmpl new file mode 100644 index 000000000000..4bd4b7bd4465 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/compute_health_check_tcp_source_regions.tf.tmpl @@ -0,0 +1,12 @@ +resource "google_compute_health_check" "{{$.PrimaryResourceId}}" { + provider = "google-beta" + name = "{{index $.Vars "health_check_name"}}" + check_interval_sec = 30 + + tcp_health_check { + port = 80 + port_specification = "USE_FIXED_PORT" + } + + source_regions = ["us-west1", "us-central1", "us-east5"] +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl index 4a82a492c4cd..5cabd103b4cd 100644 --- a/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl @@ -54,6 +54,7 @@ resource "google_compute_forwarding_rule" "default" { network_tier = "PREMIUM" } +# [START compute_vm_packet_mirror] resource "google_compute_packet_mirroring" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mirroring_name"}}" description = "bar" @@ -75,3 +76,4 @@ resource "google_compute_packet_mirroring" "{{$.PrimaryResourceId}}" { direction = "BOTH" } } +# [END compute_vm_packet_mirror] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl 
index c98518869326..802269430c8c 100644 --- a/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl @@ -1,3 +1,4 @@ +# [START compute_reservation_create_local_reservation] resource "google_compute_reservation" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "gce_reservation_local"}}" @@ -16,3 +17,4 @@ resource "google_compute_reservation" "{{$.PrimaryResourceId}}" { } } +# [END compute_reservation_create_local_reservation] diff --git a/mmv1/templates/terraform/examples/go/dataplex_datascan_full_quality.tf.tmpl b/mmv1/templates/terraform/examples/go/dataplex_datascan_full_quality.tf.tmpl index 2a1a2f423ec5..5fa08827f4bd 100644 --- a/mmv1/templates/terraform/examples/go/dataplex_datascan_full_quality.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dataplex_datascan_full_quality.tf.tmpl @@ -94,7 +94,7 @@ resource "google_dataplex_datascan" "{{$.PrimaryResourceId}}" { sql_expression = "COUNT(*) > 0" } } - + rules { dimension = "VALIDITY" sql_assertion { diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl index 59fd73af0a75..7f7a458ed13a 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_managed_zone_basic] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "example-zone" dns_name = "example-${random_id.rnd.hex}.com." 
@@ -10,3 +11,4 @@ resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { resource "random_id" "rnd" { byte_length = 4 } +# [END dns_managed_zone_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl index 2f48709d0e95..5c3a1784d3aa 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_managed_zone_private] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "private.example.com." @@ -27,3 +28,4 @@ resource "google_compute_network" "network-2" { name = "{{index $.Vars "network_2_name"}}" auto_create_subnetworks = false } +# [END dns_managed_zone_private] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl index 0194bfb8f74f..a8667c1d827d 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_managed_zone_private_forwarding] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "private.example.com." 
@@ -36,3 +37,4 @@ resource "google_compute_network" "network-2" { name = "{{index $.Vars "network_2_name"}}" auto_create_subnetworks = false } +# [END dns_managed_zone_private_forwarding] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl index 6a4a3cc8d941..bb062e78c832 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_managed_zone_private_gke] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "private.example.com." @@ -66,3 +67,4 @@ resource "google_container_cluster" "cluster-1" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END dns_managed_zone_private_gke] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl index 891a4512eadd..f8515d19c5b8 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_managed_zone_private_peering] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "peering.example.com." 
@@ -27,3 +28,4 @@ resource "google_compute_network" "network-target" { name = "{{index $.Vars "network_target_name"}}" auto_create_subnetworks = false } +# [END dns_managed_zone_private_peering] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl index 580fe7096c3b..6d0a5f6bebc2 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_domain_tutorial] # to setup a web-server resource "google_compute_instance" "default" { name = "{{index $.Vars "dns_compute_instance"}}" @@ -52,3 +53,4 @@ resource "google_dns_record_set" "default" { google_compute_instance.default.network_interface.0.access_config.0.nat_ip ] } +# [END dns_domain_tutorial] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl index 916242554cd1..5fc030a212d7 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_managed_zone_service_directory] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { provider = google-beta @@ -27,3 +28,4 @@ resource "google_compute_network" "network" { name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = false } +# [END dns_managed_zone_service_directory] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl index 86ceb05b04d9..7129c4992bfe 100644 --- a/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_policy_basic] 
resource "google_dns_policy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "policy_name"}}" enable_inbound_forwarding = true @@ -31,3 +32,4 @@ resource "google_compute_network" "network-2" { name = "{{index $.Vars "network_2_name"}}" auto_create_subnetworks = false } +# [END dns_policy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl index 4ac1dc34847f..57e48b8efff9 100644 --- a/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_record_set_basic] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sample_zone"}}" dns_name = "{{index $.Vars "sample_zone"}}.hashicorptest.com." @@ -11,3 +12,4 @@ resource "google_dns_record_set" "default" { rrdatas = ["10.0.0.1", "10.1.0.1"] ttl = 86400 } +# [END dns_record_set_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl index ab8da5ec4b1c..ac7e4c75ef2f 100644 --- a/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_response_policy_basic] resource "google_compute_network" "network-1" { name = "{{index $.Vars "network_1_name"}}" auto_create_subnetworks = false @@ -68,3 +69,4 @@ resource "google_dns_response_policy" "{{$.PrimaryResourceId}}" { gke_cluster_name = google_container_cluster.cluster-1.id } } +# [END dns_response_policy_basic] diff --git a/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl index 7e8e38c88372..c187913e052c 100644 --- 
a/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START dns_response_policy_rule_basic] resource "google_compute_network" "network-1" { name = "{{index $.Vars "network_1_name"}}" auto_create_subnetworks = false @@ -34,3 +35,4 @@ resource "google_dns_response_policy_rule" "{{$.PrimaryResourceId}}" { } } +# [END dns_response_policy_rule_basic] diff --git a/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl b/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl index d0abe1648d10..1e8e402edba8 100644 --- a/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl @@ -1,5 +1,6 @@ # CDN load balancer with Cloud bucket as backend +# [START cloudloadbalancing_cdn_with_backend_bucket_cloud_storage_bucket] # Cloud Storage bucket resource "google_storage_bucket" "default" { name = "{{index $.Vars "my_bucket"}}" @@ -15,14 +16,18 @@ resource "google_storage_bucket" "default" { } } +# [END cloudloadbalancing_cdn_with_backend_bucket_cloud_storage_bucket] +# [START cloudloadbalancing_cdn_with_backend_bucket_make_public] # make bucket public resource "google_storage_bucket_iam_member" "default" { bucket = google_storage_bucket.default.name role = "roles/storage.objectViewer" member = "allUsers" } +# [END cloudloadbalancing_cdn_with_backend_bucket_make_public] +# [START cloudloadbalancing_cdn_with_backend_bucket_index_page] resource "google_storage_bucket_object" "index_page" { name = "{{index $.Vars "index_page"}}" bucket = google_storage_bucket.default.name @@ -32,7 +37,9 @@ resource "google_storage_bucket_object" "index_page" { EOT } +# [END cloudloadbalancing_cdn_with_backend_bucket_index_page] +# [START cloudloadbalancing_cdn_with_backend_bucket_error_page] resource 
"google_storage_bucket_object" "error_page" { name = "{{index $.Vars "404_page"}}" bucket = google_storage_bucket.default.name @@ -42,7 +49,9 @@ resource "google_storage_bucket_object" "error_page" { EOT } +# [END cloudloadbalancing_cdn_with_backend_bucket_error_page] +# [START cloudloadbalancing_cdn_with_backend_bucket_image] # image object for testing, try to access http:///test.jpg resource "google_storage_bucket_object" "test_image" { name = "{{index $.Vars "test_object"}}" @@ -56,12 +65,16 @@ resource "google_storage_bucket_object" "test_image" { bucket = google_storage_bucket.default.name } +# [END cloudloadbalancing_cdn_with_backend_bucket_image] +# [START cloudloadbalancing_cdn_with_backend_bucket_ip_address] # reserve IP address resource "google_compute_global_address" "default" { name = "{{index $.Vars "example_ip"}}" } +# [END cloudloadbalancing_cdn_with_backend_bucket_ip_address] +# [START cloudloadbalancing_cdn_with_backend_bucket_forwarding_rule] # forwarding rule resource "google_compute_global_forwarding_rule" "default" { name = "{{index $.Vars "http_lb_forwarding_rule"}}" @@ -71,19 +84,25 @@ resource "google_compute_global_forwarding_rule" "default" { target = google_compute_target_http_proxy.default.id ip_address = google_compute_global_address.default.id } +# [END cloudloadbalancing_cdn_with_backend_bucket_forwarding_rule] +# [START cloudloadbalancing_cdn_with_backend_bucket_http_proxy] # http proxy resource "google_compute_target_http_proxy" "default" { name = "{{index $.Vars "http_lb_proxy"}}" url_map = google_compute_url_map.default.id } +# [END cloudloadbalancing_cdn_with_backend_bucket_http_proxy] +# [START cloudloadbalancing_cdn_with_backend_bucket_url_map] # url map resource "google_compute_url_map" "default" { name = "{{index $.Vars "http_lb"}}" default_service = google_compute_backend_bucket.default.id } +# [END cloudloadbalancing_cdn_with_backend_bucket_url_map] +# [START cloudloadbalancing_cdn_with_backend_bucket_backend_bucket] # 
backend bucket with CDN policy with default ttl settings resource "google_compute_backend_bucket" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "cat_backend_bucket"}}" @@ -99,3 +118,4 @@ resource "google_compute_backend_bucket" "{{$.PrimaryResourceId}}" { serve_while_stale = 86400 } } +# [END cloudloadbalancing_cdn_with_backend_bucket_backend_bucket] diff --git a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl index 3e0de516a7e5..3e3c2ea58296 100644 --- a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl @@ -1,5 +1,6 @@ # External HTTP load balancer with an CDN-enabled managed instance group backend +# [START cloudloadbalancing_ext_http_gce_instance_template] resource "google_compute_instance_template" "default" { name = "{{index $.Vars "lb_backend_template"}}" disk { @@ -36,7 +37,9 @@ resource "google_compute_instance_template" "default" { } tags = ["allow-health-check"] } +# [END cloudloadbalancing_ext_http_gce_instance_template] +# [START cloudloadbalancing_ext_http_gce_instance_mig] resource "google_compute_instance_group_manager" "default" { name = "{{index $.Vars "lb_backend_example"}}" zone = "us-east1-b" @@ -51,8 +54,10 @@ resource "google_compute_instance_group_manager" "default" { base_instance_name = "vm" target_size = 2 } +# [END cloudloadbalancing_ext_http_gce_instance_mig] +# [START cloudloadbalancing_ext_http_gce_instance_firewall_rule] resource "google_compute_firewall" "default" { name = "{{index $.Vars "fw_allow_health_check"}}" direction = "INGRESS" @@ -65,12 +70,16 @@ resource "google_compute_firewall" "default" { protocol = "tcp" } } +# [END cloudloadbalancing_ext_http_gce_instance_firewall_rule] +# [START cloudloadbalancing_ext_http_gce_instance_ip_address] resource "google_compute_global_address" "default" { name = "{{index $.Vars 
"lb_ipv4_1"}}" ip_version = "IPV4" } +# [END cloudloadbalancing_ext_http_gce_instance_ip_address] +# [START cloudloadbalancing_ext_http_gce_instance_health_check] resource "google_compute_health_check" "default" { name = "{{index $.Vars "http_basic_check"}}" check_interval_sec = 5 @@ -84,7 +93,9 @@ resource "google_compute_health_check" "default" { timeout_sec = 5 unhealthy_threshold = 2 } +# [END cloudloadbalancing_ext_http_gce_instance_health_check] +# [START cloudloadbalancing_ext_http_gce_instance_backend_service] resource "google_compute_backend_service" "default" { name = "{{index $.Vars "web_backend_service"}}" connection_draining_timeout_sec = 0 @@ -100,17 +111,23 @@ resource "google_compute_backend_service" "default" { capacity_scaler = 1.0 } } +# [END cloudloadbalancing_ext_http_gce_instance_backend_service] +# [START cloudloadbalancing_ext_http_gce_instance_url_map] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "web_map_http"}}" default_service = google_compute_backend_service.default.id } +# [END cloudloadbalancing_ext_http_gce_instance_url_map] +# [START cloudloadbalancing_ext_http_gce_instance_target_http_proxy] resource "google_compute_target_http_proxy" "default" { name = "{{index $.Vars "http_lb_proxy"}}" url_map = google_compute_url_map.default.id } +# [END cloudloadbalancing_ext_http_gce_instance_target_http_proxy] +# [START cloudloadbalancing_ext_http_gce_instance_forwarding_rule] resource "google_compute_global_forwarding_rule" "default" { name = "{{index $.Vars "http_content_rule"}}" ip_protocol = "TCP" @@ -119,3 +136,4 @@ resource "google_compute_global_forwarding_rule" "default" { target = google_compute_target_http_proxy.default.id ip_address = google_compute_global_address.default.id } +# [END cloudloadbalancing_ext_http_gce_instance_forwarding_rule] diff --git a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl 
b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl index 3929838d30e8..1f81adddbce2 100644 --- a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl @@ -1,6 +1,7 @@ # External HTTP load balancer with a CDN-enabled managed instance group backend # and custom request and response headers +# [START cloudloadbalancing_ext_http_gce_custom_header] # VPC resource "google_compute_network" "default" { name = "{{index $.Vars "xlb_network_name"}}" @@ -153,3 +154,4 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } +# [END cloudloadbalancing_ext_http_gce_custom_header] diff --git a/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl index b4e551c989ca..19cd2b27e99a 100644 --- a/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl @@ -1,5 +1,6 @@ # External SSL proxy load balancer with managed instance group backend +# [START cloudloadbalancing_ext_ssl_proxy_lb] # VPC resource "google_compute_network" "default" { name = "{{index $.Vars "ssl_proxy_xlb_network"}}" @@ -183,4 +184,5 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } +# [END cloudloadbalancing_ext_ssl_proxy_lb] diff --git a/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl index dde8ee80f76b..08408558c900 100644 --- a/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl @@ -1,5 +1,6 @@ # External TCP proxy load balancer with managed 
instance group backend +# [START cloudloadbalancing_ext_tcp_proxy_lb] # VPC resource "google_compute_network" "default" { name = "{{index $.Vars "tcp_proxy_xlb_network"}}" @@ -141,3 +142,4 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } +# [END cloudloadbalancing_ext_tcp_proxy_lb] diff --git a/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl b/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl index 9eeb7bf49af5..0eae785631a7 100644 --- a/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudvpn_ha_external] resource "google_compute_ha_vpn_gateway" "ha_gateway" { region = "us-central1" name = "{{index $.Vars "ha_vpn_gateway_name"}}" @@ -99,3 +100,4 @@ resource "google_compute_router_peer" "router1_peer2" { advertised_route_priority = 100 interface = google_compute_router_interface.router1_interface2.name } +# [END cloudvpn_ha_external] diff --git a/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl b/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl index 44b17b9699ed..748888614305 100644 --- a/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl @@ -1,3 +1,4 @@ +# [START vpc_firewall_create] resource "google_compute_firewall" "{{$.PrimaryResourceId}}" { project = "{{index $.TestEnvVars "project"}}" name = "{{index $.Vars "firewall_name"}}" @@ -12,3 +13,4 @@ resource "google_compute_firewall" "{{$.PrimaryResourceId}}" { source_tags = ["foo"] target_tags = ["web"] } +# [END vpc_firewall_create] diff --git a/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl b/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl index fc23157f4b37..7419704f987f 100644 --- 
a/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl @@ -1,3 +1,4 @@ +# [START compute_flask_quickstart_vm] # Create a single Compute Engine instance resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "flask_vm"}}" @@ -25,7 +26,9 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } } +# [END compute_flask_quickstart_vm] +# [START vpc_flask_quickstart_ssh_fw] resource "google_compute_firewall" "ssh" { name = "{{index $.Vars "allow_ssh"}}" allow { @@ -38,8 +41,10 @@ resource "google_compute_firewall" "ssh" { source_ranges = ["0.0.0.0/0"] target_tags = ["ssh"] } +# [END vpc_flask_quickstart_ssh_fw] +# [START vpc_flask_quickstart_5000_fw] resource "google_compute_firewall" "flask" { name = "{{index $.Vars "flask_app_firewall"}}" network = "default" @@ -50,10 +55,12 @@ resource "google_compute_firewall" "flask" { } source_ranges = ["0.0.0.0/0"] } +# [END vpc_flask_quickstart_5000_fw] # Create new multi-region storage bucket in the US # with versioning enabled +# [START storage_bucket_tf_with_versioning] resource "google_storage_bucket" "default" { name = "{{index $.Vars "bucket_tfstate"}}" force_destroy = false @@ -63,3 +70,4 @@ resource "google_storage_bucket" "default" { enabled = true } } +# [END storage_bucket_tf_with_versioning] diff --git a/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl index 5a11a850cf9b..e02dd389ad34 100644 --- a/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl @@ -1,5 +1,6 @@ # Global Internal HTTP load balancer with a managed instance group backend +# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" 
"gilb_network" { name = "{{index $.Vars "gilb_network_name"}}" @@ -181,3 +182,4 @@ resource "google_compute_instance" "vm-test" { } } } +# [END cloudloadbalancing_int_http_gce] diff --git a/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl b/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl index b048fc49571a..cc7826b64ce9 100644 --- a/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudvpn_ha_gcp_to_gcp] resource "google_compute_ha_vpn_gateway" "{{$.PrimaryResourceId}}" { region = "us-central1" name = "{{index $.Vars "ha_vpn_gateway1_name"}}" @@ -177,3 +178,4 @@ resource "google_compute_router_peer" "router2_peer2" { advertised_route_priority = 100 interface = google_compute_router_interface.router2_interface2.name } +# [END cloudvpn_ha_gcp_to_gcp] diff --git a/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl b/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl index 297ed227261c..b67c592a6cb0 100644 --- a/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl @@ -1,12 +1,13 @@ +# [START compute_custom_hostname_instance_create] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "custom_hostname_instance_name"}}" machine_type = "f1-micro" zone = "us-central1-c" - # Set a custom hostname below + # Set a custom hostname below hostname = "hashicorptest.com" - + boot_disk { initialize_params { image = "debian-cloud/debian-11" @@ -20,3 +21,4 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } +# [END compute_custom_hostname_instance_create] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl index 
3ef1792a394a..a05e7c8ab91d 100644 --- a/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START instance_settings_basic] resource "google_compute_instance_settings" "{{$.PrimaryResourceId}}" { zone = "us-east7-b" @@ -8,3 +9,4 @@ resource "google_compute_instance_settings" "{{$.PrimaryResourceId}}" { } } +# [END instance_settings_basic] diff --git a/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl b/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl index 49560ea75cda..a1ea2d5cbccb 100644 --- a/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl @@ -1,12 +1,13 @@ +# [START compute_instance_virtual_display_enabled] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "instance_virtual_display"}}" machine_type = "f1-micro" zone = "us-central1-c" - + # Set the below to true to enable virtual display enable_display = true - + boot_disk { initialize_params { image = "debian-cloud/debian-11" @@ -20,3 +21,4 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } +# [END compute_instance_virtual_display_enabled] diff --git a/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl b/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl index 4e721cf60f77..325ae36b4a6e 100644 --- a/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl @@ -1,5 +1,6 @@ # Internal HTTPS load balancer with HTTP-to-HTTPS redirect +# [START cloudloadbalancing_int_https_with_redirect] # VPC network resource "google_compute_network" "default" { @@ -87,7 +88,7 @@ resource "google_compute_region_ssl_certificate" "default" { name_prefix = 
"my-certificate-" private_key = tls_private_key.default.private_key_pem certificate = tls_self_signed_cert.default.cert_pem - region = "europe-west1" + region = "europe-west1" lifecycle { create_before_destroy = true } @@ -282,3 +283,4 @@ resource "google_compute_region_url_map" "{{$.PrimaryResourceId}}" { } } } +# [END cloudloadbalancing_int_https_with_redirect] diff --git a/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl index d46b31a22a11..17b0e5512661 100644 --- a/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl @@ -1,5 +1,6 @@ # Internal HTTP load balancer with a managed instance group backend +# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "ilb_network" { name = "{{index $.Vars "ilb_network_name"}}" @@ -187,3 +188,4 @@ resource "google_compute_instance" "vm-test" { } } } +# [END cloudloadbalancing_int_http_gce] diff --git a/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl index 9d028444c3f9..b6fb1079cf3b 100644 --- a/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl @@ -1,5 +1,6 @@ # Internal TCP/UDP load balancer with a managed instance group backend +# [START cloudloadbalancing_int_tcp_udp_gce] # VPC resource "google_compute_network" "ilb_network" { name = "{{index $.Vars "ilb_network_name"}}" @@ -128,7 +129,7 @@ resource "google_compute_firewall" "fw_hc" { target_tags = ["allow-health-check"] } -# allow communication within the subnet +# allow communication within the subnet resource "google_compute_firewall" "fw_ilb_to_backends" { name = "{{index $.Vars 
"fw_allow_ilb_to_backends_name"}}" provider = google-beta @@ -176,3 +177,4 @@ resource "google_compute_instance" "vm_test" { } } } +# [END cloudloadbalancing_int_tcp_udp_gce] diff --git a/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl b/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl index d621d2a084bf..622d27933c18 100644 --- a/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl @@ -1,6 +1,8 @@ +# [START vpc_auto_create] resource "google_compute_network" "{{$.PrimaryResourceId}}" { project = "{{index $.TestEnvVars "project"}}" name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = true network_firewall_policy_enforcement_order = "BEFORE_CLASSIC_FIREWALL" } +# [END vpc_auto_create] diff --git a/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl b/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl index 0a075376d536..3e3dc85f6d18 100644 --- a/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl @@ -1,6 +1,8 @@ +# [START vpc_auto_create] resource "google_compute_network" "{{$.PrimaryResourceId}}" { project = "{{index $.TestEnvVars "project"}}" name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = true mtu = 1460 } +# [END vpc_auto_create] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl index aa9a5ce8ef3c..17f1a11e3825 100644 --- a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl @@ -1,3 +1,4 @@ +# [START 
networkmanagement_test_addresses] resource "google_network_management_connectivity_test" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "primary_resource_name"}}" source { @@ -42,3 +43,4 @@ resource "google_compute_address" "dest-addr" { address = "10.0.43.43" region = "us-central1" } +# [END networkmanagement_test_addresses] diff --git a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl index 1ab2b32cea92..682cbbd3ff4e 100644 --- a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl @@ -1,3 +1,4 @@ +# [START networkmanagement_test_instances] resource "google_network_management_connectivity_test" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "primary_resource_name"}}" source { @@ -56,3 +57,4 @@ data "google_compute_image" "debian_9" { family = "debian-11" project = "debian-cloud" } +# [END networkmanagement_test_instances] diff --git a/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl index 6dfe8c28adc8..b47eec932fa1 100644 --- a/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl @@ -1,4 +1,5 @@ # Internal HTTP load balancer with a managed instance group backend +# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "ilb_network" { name = "{{index $.Vars "ilb_network_name"}}" @@ -187,7 +188,9 @@ resource "google_compute_firewall" "fw_ilb_to_backends" { google_compute_firewall.fw_iap ] } +# [END cloudloadbalancing_int_http_gce] +# [START lb_route_extension] resource 
"google_network_services_lb_route_extension" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "lb_route_extension_name"}}" description = "my route extension" @@ -346,3 +349,4 @@ resource "google_compute_region_backend_service" "callouts_backend" { google_compute_region_backend_service.default ] } +# [END lb_route_extension] diff --git a/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl index 3b3238e80538..abc31633df6b 100644 --- a/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl @@ -177,7 +177,9 @@ resource "google_compute_firewall" "fw_ilb_to_backends" { google_compute_firewall.fw_iap ] } +# [END cloudloadbalancing_int_http_gce] +# [START lb_traffic_extension] resource "google_network_services_lb_traffic_extension" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "lb_traffic_extension_name"}}" description = "my traffic extension" @@ -332,3 +334,4 @@ resource "google_compute_region_backend_service" "callouts_backend" { google_compute_region_backend_service.default ] } +# [END lb_traffic_extension] diff --git a/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl b/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl index 7434666761aa..dc4e9fbca51e 100644 --- a/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl @@ -1,3 +1,4 @@ +# [START vpc_subnet_private_access] resource "google_compute_network" "network" { provider = google-beta project = "{{index $.TestEnvVars "project"}}" @@ -14,7 +15,9 @@ resource "google_compute_subnetwork" "vpc_subnetwork" { network = google_compute_network.network.id private_ip_google_access = true } +# 
[END vpc_subnet_private_access] +# [START compute_internal_ip_private_access] resource "google_compute_global_address" "default" { provider = google-beta project = google_compute_network.network.project @@ -24,7 +27,9 @@ resource "google_compute_global_address" "default" { network = google_compute_network.network.id address = "100.100.100.106" } +# [END compute_internal_ip_private_access] +# [START compute_forwarding_rule_private_access] resource "google_compute_global_forwarding_rule" "default" { provider = google-beta project = google_compute_network.network.project @@ -38,3 +43,4 @@ resource "google_compute_global_forwarding_rule" "default" { service_directory_region = "europe-west3" } } +# [END compute_forwarding_rule_private_access] diff --git a/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl index 1fce3dc083ef..f09904910df4 100644 --- a/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_ca_pool_all_fields] resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" @@ -87,3 +88,4 @@ resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { } } } +# [END privateca_create_ca_pool_all_fields] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl index c5f76a407a83..d7a4806d9119 100644 --- a/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_ca_pool] resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" @@ -10,3 +11,4 @@ resource 
"google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { foo = "bar" } } +# [END privateca_create_ca_pool] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl index 82e718a57170..d3a96e88f282 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_ca] resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { // This example assumes this pool already exists. // Pools cannot be deleted in normal test circumstances, so we depend on static pools @@ -46,3 +47,4 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { algorithm = "RSA_PKCS1_4096_SHA256" } } +# [END privateca_create_ca] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl index 3179b700af9f..2aae4fc5f1af 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_ca_byo_key] resource "google_project_service_identity" "privateca_sa" { service = "privateca.googleapis.com" } @@ -68,3 +69,4 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { google_kms_crypto_key_iam_member.privateca_sa_keyuser_viewer, ] } +# [END privateca_create_ca_byo_key] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl index 6e898ab25e80..92deecf16d0b 
100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_ca] resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { // This example assumes this pool already exists. // Pools cannot be deleted in normal test circumstances, so we depend on static pools @@ -49,3 +50,4 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { cloud_kms_key_version = "{{index $.Vars "kms_key_name"}}/cryptoKeyVersions/1" } } +# [END privateca_create_ca] diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl index f468ba2d74b3..8a6ec536f0b1 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_subordinateca] resource "google_privateca_certificate_authority" "root-ca" { pool = "{{index $.Vars "pool_name"}}" certificate_authority_id = "{{index $.Vars "certificate_authority_id"}}-root" @@ -92,3 +93,4 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { } type = "SUBORDINATE" } +# [END privateca_create_subordinateca] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl index 88e505e72f49..c7eb1742e124 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_certificate_config] resource "google_privateca_ca_pool" "default" { location = 
"us-central1" @@ -60,7 +61,7 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { locality = "mountain view" province = "california" street_address = "1600 amphitheatre parkway" - } + } subject_alt_name { email_addresses = ["email@example.com"] ip_addresses = ["127.0.0.1"] @@ -98,3 +99,4 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { } } } +# [END privateca_create_certificate_config] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl index e2357245e0b8..74e268f42ba1 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_certificate_csr] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -54,3 +55,4 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { lifetime = "860s" pem_csr = file("test-fixtures/rsa_csr.pem") } +# [END privateca_create_certificate_csr] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl index 81d37cec8816..e760da42aede 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_certificate] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -89,3 +90,4 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { // need to be explicitly connected to it depends_on = [google_privateca_certificate_authority.default] } +# [END privateca_create_certificate] diff --git 
a/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl index d09d2d1f7913..8e683242cda5 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_certificate] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -86,3 +87,4 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { // need to be explicitly connected to it depends_on = [google_privateca_certificate_authority.default] } +# [END privateca_create_certificate] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl index 9d18151adbf7..ee36989471a2 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_certificate_template] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -130,3 +131,4 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { pem_csr = file("test-fixtures/rsa_csr.pem") certificate_template = google_privateca_certificate_template.default.id } +# [END privateca_create_certificate_template] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl index 9f4328102c52..fa96c3119def 100644 --- a/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl @@ -1,3 +1,4 @@ +# 
[START privateca_quickstart] provider google{} provider tls{} @@ -93,3 +94,4 @@ resource "google_privateca_certificate" "default" { name = "{{index $.Vars "my_certificate"}}" pem_csr = tls_cert_request.example.cert_request_pem } +# [END privateca_quickstart] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl index 0661224a55fa..03909a72b673 100644 --- a/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START privateca_create_certificate_template] resource "google_privateca_certificate_template" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" @@ -70,3 +71,4 @@ resource "google_privateca_certificate_template" "{{$.PrimaryResourceId}}" { label-one = "value-one" } } +# [END privateca_create_certificate_template] diff --git a/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl index 0623a608d612..dcb2d94988e1 100644 --- a/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl @@ -14,6 +14,7 @@ resource "google_compute_region_autoscaler" "{{$.PrimaryResourceId}}" { } } +# [START compute_instance_template_basic] resource "google_compute_instance_template" "foobar" { name = "{{index $.Vars "instance_template_name"}}" machine_type = "e2-standard-4" @@ -45,6 +46,7 @@ resource "google_compute_instance_template" "foobar" { ] } } +# [END compute_instance_template_basic] resource "google_compute_target_pool" "foobar" { name = "{{index $.Vars "target_pool_name"}}" diff --git a/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl 
index d5d678b9a571..fbdd40f1e5c9 100644 --- a/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_region_target_tcp_proxy_basic] resource "google_compute_region_target_tcp_proxy" "default" { name = "{{index $.Vars "region_target_tcp_proxy_name"}}" region = "europe-west4" @@ -19,8 +20,9 @@ resource "google_compute_region_health_check" "default" { region = "europe-west4" timeout_sec = 1 check_interval_sec = 1 - + tcp_health_check { port = "80" } } +# [END cloudloadbalancing_region_target_tcp_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl b/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl index f313cfa20b62..0490b422924a 100644 --- a/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_url_map_path_template_match] resource "google_compute_region_url_map" "{{$.PrimaryResourceId}}" { region = "us-central1" @@ -86,3 +87,4 @@ resource "google_compute_region_health_check" "default" { } } +# [END cloudloadbalancing_url_map_path_template_match] diff --git a/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl b/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl index 0a3ff32857ad..7c1bd95b0e04 100644 --- a/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl @@ -1,10 +1,14 @@ +# [START cloudloadbalancing_rllxlb_example] +# [START cloudloadbalancing_vpc_network_rllxlb_example] resource "google_compute_network" "default" { name = "{{index $.Vars "lb_network"}}" auto_create_subnetworks = 
false routing_mode = "REGIONAL" } +# [END cloudloadbalancing_vpc_network_rllxlb_example] +# [START cloudloadbalancing_vpc_subnetwork_rllxlb_example] resource "google_compute_subnetwork" "default" { name = "{{index $.Vars "backend_subnet"}}" ip_cidr_range = "10.1.2.0/24" @@ -14,7 +18,9 @@ resource "google_compute_subnetwork" "default" { region = "us-west1" stack_type = "IPV4_ONLY" } +# [END cloudloadbalancing_vpc_subnetwork_rllxlb_example] +# [START cloudloadbalancing_vpc_proxy_subnetwork_rllxlb_example] resource "google_compute_subnetwork" "proxy_only" { name = "{{index $.Vars "proxy_only_subnet"}}" ip_cidr_range = "10.129.0.0/23" @@ -23,7 +29,9 @@ resource "google_compute_subnetwork" "proxy_only" { region = "us-west1" role = "ACTIVE" } +# [END cloudloadbalancing_vpc_proxy_subnetwork_rllxlb_example] +# [START cloudloadbalancing_health_firewall_rllxlb_example] resource "google_compute_firewall" "default" { name = "{{index $.Vars "fw_allow_health_check"}}" allow { @@ -35,7 +43,9 @@ resource "google_compute_firewall" "default" { source_ranges = ["130.211.0.0/22", "35.191.0.0/16"] target_tags = ["load-balanced-backend"] } +# [END cloudloadbalancing_health_firewall_rllxlb_example] +# [START cloudloadbalancing_proxy_firewall_rllxlb_example] resource "google_compute_firewall" "allow_proxy" { name = "{{index $.Vars "fw_allow_proxies"}}" allow { @@ -56,7 +66,9 @@ resource "google_compute_firewall" "allow_proxy" { source_ranges = ["10.129.0.0/23"] target_tags = ["load-balanced-backend"] } +# [END cloudloadbalancing_proxy_firewall_rllxlb_example] +# [START cloudloadbalancing_instance_template_rllxlb_example] resource "google_compute_instance_template" "default" { name = "{{index $.Vars "l7_xlb_backend_template"}}" disk { @@ -94,7 +106,9 @@ resource "google_compute_instance_template" "default" { } tags = ["load-balanced-backend"] } +# [END cloudloadbalancing_instance_template_rllxlb_example] +# [START cloudloadbalancing_instance_group_rllxlb_example] resource 
"google_compute_instance_group_manager" "default" { name = "{{index $.Vars "l7_xlb_backend_example"}}" zone = "us-west1-a" @@ -109,15 +123,19 @@ resource "google_compute_instance_group_manager" "default" { base_instance_name = "vm" target_size = 2 } +# [END cloudloadbalancing_instance_group_rllxlb_example] +# [START cloudloadbalancing_ip_address_rllxlb_example] resource "google_compute_address" "default" { name = "{{index $.Vars "address_name"}}" address_type = "EXTERNAL" network_tier = "STANDARD" region = "us-west1" } +# [END cloudloadbalancing_ip_address_rllxlb_example] +# [START cloudloadbalancing_health_check_rllxlb_example] resource "google_compute_region_health_check" "default" { name = "{{index $.Vars "l7_xlb_basic_check"}}" check_interval_sec = 5 @@ -131,7 +149,9 @@ resource "google_compute_region_health_check" "default" { timeout_sec = 5 unhealthy_threshold = 2 } +# [END cloudloadbalancing_health_check_rllxlb_example] +# [START cloudloadbalancing_backend_service_rllxlb_example] resource "google_compute_region_backend_service" "default" { name = "{{index $.Vars "l7_xlb_backend_service"}}" region = "us-west1" @@ -146,19 +166,25 @@ resource "google_compute_region_backend_service" "default" { capacity_scaler = 1.0 } } +# [END cloudloadbalancing_backend_service_rllxlb_example] +# [START cloudloadbalancing_url_map_rllxlb_example] resource "google_compute_region_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "regional_l7_xlb_map"}}" region = "us-west1" default_service = google_compute_region_backend_service.default.id } +# [END cloudloadbalancing_url_map_rllxlb_example] +# [START cloudloadbalancing_target_http_proxy_rllxlb_example] resource "google_compute_region_target_http_proxy" "default" { name = "{{index $.Vars "l7_xlb_proxy"}}" region = "us-west1" url_map = google_compute_region_url_map.default.id } +# [END cloudloadbalancing_target_http_proxy_rllxlb_example] +# [START cloudloadbalancing_forwarding_rule_rllxlb_example] resource 
"google_compute_forwarding_rule" "default" { name = "l7-xlb-forwarding-rule" provider = google-beta @@ -173,4 +199,6 @@ resource "google_compute_forwarding_rule" "default" { ip_address = google_compute_address.default.address network_tier = "STANDARD" } +# [END cloudloadbalancing_forwarding_rule_rllxlb_example] +# [END cloudloadbalancing_rllxlb_example] diff --git a/mmv1/templates/terraform/examples/go/scc_v2_organization_notification_config_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/scc_v2_organization_notification_config_basic.tf.tmpl new file mode 100644 index 000000000000..1d90cb7a579e --- /dev/null +++ b/mmv1/templates/terraform/examples/go/scc_v2_organization_notification_config_basic.tf.tmpl @@ -0,0 +1,15 @@ +resource "google_pubsub_topic" "scc_v2_organization_notification_config" { + name = "{{index $.Vars "topic_name"}}" +} + +resource "google_scc_v2_organization_notification_config" "{{$.PrimaryResourceId}}" { + config_id = "{{index $.Vars "config_id"}}" + organization = "{{index $.TestEnvVars "org_id"}}" + location = "global" + description = "My custom Cloud Security Command Center Finding Organization Notification Configuration" + pubsub_topic = google_pubsub_topic.scc_v2_organization_notification_config.id + + streaming_config { + filter = "category = \"OPEN_FIREWALL\" AND state = \"ACTIVE\"" + } +} diff --git a/mmv1/templates/terraform/examples/go/secure_source_manager_repository_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/secure_source_manager_repository_basic.tf.tmpl new file mode 100644 index 000000000000..fc410d2d0d58 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/secure_source_manager_repository_basic.tf.tmpl @@ -0,0 +1,10 @@ +resource "google_secure_source_manager_instance" "instance" { + location = "us-central1" + instance_id = "{{index $.Vars "instance_id"}}" +} + +resource "google_secure_source_manager_repository" "{{$.PrimaryResourceId}}" { + location = "us-central1" + repository_id = "{{index $.Vars 
"repository_id"}}" + instance = google_secure_source_manager_instance.instance.name +} diff --git a/mmv1/templates/terraform/examples/go/secure_source_manager_repository_initial_config.tf.tmpl b/mmv1/templates/terraform/examples/go/secure_source_manager_repository_initial_config.tf.tmpl new file mode 100644 index 000000000000..969a25833002 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/secure_source_manager_repository_initial_config.tf.tmpl @@ -0,0 +1,18 @@ +resource "google_secure_source_manager_instance" "instance" { + location = "us-central1" + instance_id = "{{index $.Vars "instance_id"}}" +} + +resource "google_secure_source_manager_repository" "{{$.PrimaryResourceId}}" { + location = "us-central1" + repository_id = "{{index $.Vars "repository_id"}}" + instance = google_secure_source_manager_instance.instance.name + + description = "This is a test repository" + initial_config { + default_branch = "main" + gitignores = ["python"] + license = "mit" + readme = "default" + } +} diff --git a/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl index 9d96f30623f6..18c695305bc0 100644 --- a/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START compute_spot_instance_create] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "spot_instance_name"}}" @@ -9,7 +10,7 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { image = "debian-cloud/debian-11" } } - + scheduling { preemptible = true automatic_restart = false @@ -25,3 +26,4 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } +# [END compute_spot_instance_create] diff --git a/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl index fa73d6564e15..9c1915799fa8 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl @@ -1,7 +1,9 @@ +# [START cloud_sql_database_create] resource "google_sql_database" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_name"}}" instance = google_sql_database_instance.instance.name } +# [END cloud_sql_database_create] # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "instance" { diff --git a/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl index 94ee726216e9..f0704eab298c 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl @@ -1,8 +1,10 @@ +# [START cloud_sql_database_create] resource "google_sql_database" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_name"}}" instance = google_sql_database_instance.instance.name deletion_policy = "ABANDON" } +# [END cloud_sql_database_create] # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "instance" { diff --git a/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl index 21a9c6082906..124e40ca8f2a 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_80_db_n1_s2] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_instance_name"}}" region = "us-central1" @@ -7,7 +8,9 @@ resource 
"google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_80_db_n1_s2] +# [START cloud_sql_mysql_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -24,3 +27,4 @@ resource "google_sql_user" "user" { enable_password_verification = true } } +# [END cloud_sql_mysql_instance_user] diff --git a/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl index ca0440f929fd..2a41ff5a4017 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_80_db_n1_s2] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_instance_name"}}" @@ -8,7 +9,9 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_80_db_n1_s2] +# [START cloud_sql_postgres_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -19,3 +22,4 @@ resource "google_sql_user" "user" { instance = google_sql_database_instance.instance.name password = random_password.pwd.result } +# [END cloud_sql_postgres_instance_user] diff --git a/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl index da41d5d0ade6..370d70d28d3b 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_sqlserver_instance_80_db_n1_s2] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars 
"database_instance_name"}}" region = "us-central1" @@ -8,7 +9,9 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_80_db_n1_s2] +# [START cloud_sql_sqlserver_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -19,3 +22,4 @@ resource "google_sql_user" "user" { instance = google_sql_database_instance.instance.name password = random_password.pwd.result } +# [END cloud_sql_sqlserver_instance_user] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl index 254ce6225bb3..fa875cd2bf18 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl @@ -1,21 +1,28 @@ +# [START cloud_sql_instance_service_identity] resource "google_project_service_identity" "gcp_sa_cloud_sql" { provider = google-beta service = "sqladmin.googleapis.com" } +# [END cloud_sql_instance_service_identity] +# [START cloud_sql_instance_keyring] resource "google_kms_key_ring" "keyring" { provider = google-beta name = "{{index $.Vars "keyring_name"}}" location = "us-central1" } +# [END cloud_sql_instance_keyring] +# [START cloud_sql_instance_key] resource "google_kms_crypto_key" "key" { provider = google-beta name = "{{index $.Vars "crypto_key_name"}}" key_ring = google_kms_key_ring.keyring.id purpose = "ENCRYPT_DECRYPT" } +# [END cloud_sql_instance_key] +# [START cloud_sql_instance_crypto_key] resource "google_kms_crypto_key_iam_member" "crypto_key" { provider = google-beta crypto_key_id = google_kms_crypto_key.key.id @@ -23,7 +30,9 @@ resource "google_kms_crypto_key_iam_member" "crypto_key" { member = "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}" } +# [END cloud_sql_instance_crypto_key] +# [START cloud_sql_mysql_instance_cmek] resource 
"google_sql_database_instance" "mysql_instance_with_cmek" { name = "{{index $.Vars "mysql_instance_cmek"}}" provider = google-beta @@ -35,7 +44,9 @@ resource "google_sql_database_instance" "mysql_instance_with_cmek" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_cmek] +# [START cloud_sql_postgres_instance_cmek] resource "google_sql_database_instance" "postgres_instance_with_cmek" { name = "{{index $.Vars "postgres_instance_cmek"}}" provider = google-beta @@ -47,7 +58,9 @@ resource "google_sql_database_instance" "postgres_instance_with_cmek" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_cmek] +# [START cloud_sql_sqlserver_instance_cmek] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_cmek"}}" provider = google-beta @@ -60,3 +73,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_cmek] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl index 4c56199b3ed4..6ac502c2ddd4 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_ha] resource "google_sql_database_instance" "mysql_instance_ha" { name = "{{index $.Vars "mysql_instance_ha"}}" region = "asia-northeast1" @@ -13,7 +14,9 @@ resource "google_sql_database_instance" "mysql_instance_ha" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_ha] +# [START cloud_sql_postgres_instance_ha] resource "google_sql_database_instance" "postgres_instance_ha" { name = "{{index $.Vars "postgres_instance_ha"}}" region = "us-central1" @@ -29,7 +32,9 @@ resource 
"google_sql_database_instance" "postgres_instance_ha" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_ha] +# [START cloud_sql_sqlserver_instance_ha] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_ha"}}" region = "us-central1" @@ -45,3 +50,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_ha] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl index b95a271bf5e6..9e80323369b8 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl @@ -6,6 +6,7 @@ resource "google_project_service_identity" "gcp_sa_cloud_sql" { service = "sqladmin.googleapis.com" } +# [START cloud_sql_instance_iam_conditions] data "google_iam_policy" "sql_iam_policy" { binding { role = "roles/cloudsql.client" @@ -24,6 +25,7 @@ resource "google_project_iam_policy" "project" { project = data.google_project.project.id policy_data = data.google_iam_policy.sql_iam_policy.policy_data } +# [END cloud_sql_instance_iam_conditions] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_iam_condition"}}" diff --git a/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl index 9fa98a22dad8..06a6bbc81a24 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_labels] resource "google_sql_database_instance" "mysql_instance_labels" { name = "{{index $.Vars "mysql_instance_labels"}}" region = 
"us-central1" @@ -11,7 +12,9 @@ resource "google_sql_database_instance" "mysql_instance_labels" { } deletion_protection = "false" } +# [END cloud_sql_mysql_instance_labels] +# [START cloud_sql_postgres_instance_labels] resource "google_sql_database_instance" "postgres_instance_labels" { name = "{{index $.Vars "postgres_instance_labels"}}" region = "us-central1" @@ -25,7 +28,9 @@ resource "google_sql_database_instance" "postgres_instance_labels" { } deletion_protection = "false" } +# [END cloud_sql_postgres_instance_labels] +# [START cloud_sql_sqlserver_instance_labels] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_labels"}}" region = "us-central1" @@ -40,3 +45,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "false" } +# [END cloud_sql_sqlserver_instance_labels] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl index 3739b999506e..a06e64956bf6 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_pitr] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_pitr"}}" region = "asia-northeast1" @@ -13,7 +14,9 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_pitr] +# [START cloud_sql_postgres_instance_pitr] resource "google_sql_database_instance" "postgres_instance_pitr" { name = "{{index $.Vars "postgres_instance__pitr"}}" region = "us-central1" @@ -29,3 +32,4 @@ resource "google_sql_database_instance" "postgres_instance_pitr" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_pitr] diff --git 
a/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl index 0ac9a24a3c81..e976fd8c142a 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_require_ssl] resource "google_sql_database_instance" "mysql_instance" { name = "{{index $.Vars "mysql_instance"}}" region = "asia-northeast1" @@ -10,12 +11,16 @@ resource "google_sql_database_instance" "mysql_instance" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_require_ssl] +# [START cloud_sql_mysql_instance_ssl_cert] resource "google_sql_ssl_cert" "mysql_client_cert" { common_name = "mysql_common_name" instance = google_sql_database_instance.mysql_instance.name } +# [END cloud_sql_mysql_instance_ssl_cert] +# [START cloud_sql_postgres_instance_require_ssl] resource "google_sql_database_instance" "postgres_instance" { name = "{{index $.Vars "postgres_instance"}}" region = "asia-northeast1" @@ -28,12 +33,16 @@ resource "google_sql_database_instance" "postgres_instance" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_require_ssl] +# [START cloud_sql_postgres_instance_ssl_cert] resource "google_sql_ssl_cert" "postgres_client_cert" { common_name = "postgres_common_name" instance = google_sql_database_instance.postgres_instance.name } +# [END cloud_sql_postgres_instance_ssl_cert] +# [START cloud_sql_sqlserver_instance_require_ssl] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance"}}" region = "asia-northeast1" @@ -47,3 +56,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_require_ssl] diff --git 
a/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl index 1cd16f0e408b..2e6490cd6fe4 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_authorized_network] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_with_authorized_network"}}" region = "us-central1" @@ -14,3 +15,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl index fab77d0d506d..67dfdc4fad50 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_backup] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_backup"}}" region = "asia-northeast1" @@ -12,3 +13,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_backup] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl index 96e138c64c2d..916d12bf09ac 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl @@ -1,3 +1,4 @@ +# [START 
cloud_sql_mysql_instance_backup_location] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_backup_location"}}" region = "asia-northeast1" @@ -11,3 +12,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl index ee7a1d41c254..57e630c34e2f 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_backup_retention] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_backup_retention"}}" region = "asia-northeast1" @@ -14,3 +15,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl index 134192af0268..5ea986116ac5 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_source] resource "google_sql_database_instance" "source" { name = "{{index $.Vars "mysql_instance_source_name"}}" region = "us-central1" @@ -7,7 +8,9 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_source] +# [START cloud_sql_mysql_instance_clone] resource 
"google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_clone_name"}}" region = "us-central1" @@ -17,3 +20,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_clone] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl index 188242db640d..a2421e78283e 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_flags] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { database_version = "MYSQL_8_0" name = "{{index $.Vars "mysql_instance"}}" @@ -20,3 +21,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_flags] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl index 93d0efe450fa..8165b86cd381 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_public_ip] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { database_version = "MYSQL_5_7" name = "{{index $.Vars "mysql_public_ip_instance_name"}}" @@ -20,3 +21,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl 
index feef53e47ab8..de0fd811c86b 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_pvp] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_pvp_instance_name"}}" region = "asia-northeast1" @@ -15,3 +16,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_pvp] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl index 9664d733332e..6b36e0c5c05a 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_mysql_instance_primary] resource "google_sql_database_instance" "primary" { name = "{{index $.Vars "mysql_primary_instance_name"}}" region = "europe-west4" @@ -11,7 +12,9 @@ resource "google_sql_database_instance" "primary" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_primary] +# [START cloud_sql_mysql_instance_replica] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_replica_instance_name"}}" master_instance_name = google_sql_database_instance.primary.name @@ -29,3 +32,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_mysql_instance_replica] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl index 1441006a0b88..77bd979fa7d1 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_authorized_network] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_with_authorized_network"}}" region = "us-central1" @@ -14,3 +15,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl index 94043bb1c485..be494aa10a38 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_backup] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_backup"}}" region = "us-central1" @@ -11,3 +12,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_backup] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl index 93476520866b..523fe2128d10 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_backup_location] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_backup_location"}}" region = 
"us-central1" @@ -11,3 +12,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl index b797a1f8e699..5851ba5db1b5 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_backup_retention] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_backup_retention"}}" region = "us-central1" @@ -14,3 +15,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl index fd519d57b23e..d0a68e4b6c9c 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_source] resource "google_sql_database_instance" "source" { name = "{{index $.Vars "postgres_instance_source_name"}}" region = "us-central1" @@ -7,7 +8,9 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_source] +# [START cloud_sql_postgres_instance_clone] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_clone_name"}}" region = "us-central1" @@ -17,3 +20,4 @@ 
resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_clone] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl index eed5805b0096..eec472895808 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_flags] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance"}}" region = "us-central1" @@ -15,3 +16,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_flags] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl index 4ab67a49c708..8fbb18db8910 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_public_ip] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { database_version = "POSTGRES_14" name = "{{index $.Vars "postgres_public_ip_instance_name"}}" @@ -18,3 +19,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl index 5181bd859d46..d9036765b51c 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_pvp] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_pvp_instance_name"}}" region = "asia-northeast1" @@ -16,3 +17,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_pvp] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl index 180fa60611e5..8f205eb48d40 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_postgres_instance_primary] resource "google_sql_database_instance" "primary" { name = "{{index $.Vars "postgres_primary_instance_name"}}" region = "europe-west4" @@ -10,7 +11,9 @@ resource "google_sql_database_instance" "primary" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_primary] +# [START cloud_sql_postgres_instance_replica] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_replica_instance_name"}}" master_instance_name = google_sql_database_instance.primary.name @@ -28,3 +31,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_postgres_instance_replica] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl index 6cb660a5dfa9..bb36f15ae286 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_sqlserver_instance_authorized_network] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_with_authorized_network"}}" region = "us-central1" @@ -15,3 +16,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl index ea04446a312c..80b315b42fe3 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_sqlserver_instance_backup] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_backup"}}" region = "us-central1" @@ -12,3 +13,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_backup] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl index ba2d19cc8eed..46cbacab1c67 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_sqlserver_instance_backup_location] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars 
"sqlserver_instance_backup_location"}}" region = "us-central1" @@ -12,3 +13,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl index bcf2751fbac2..0898f0083008 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_sqlserver_instance_backup_retention] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_backup_retention"}}" region = "us-central1" @@ -15,3 +16,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl index 9122adc7ec14..121529a7065d 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_sqlserver_instance_source] resource "google_sql_database_instance" "source" { name = "{{index $.Vars "sqlserver_instance_source_name"}}" region = "us-central1" @@ -8,7 +9,9 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_source] +# [START cloud_sql_sqlserver_instance_clone] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars 
"sqlserver_instance_clone_name"}}" region = "us-central1" @@ -19,3 +22,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_clone] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl index 31e878f1a9fc..559ea293afb1 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_sqlserver_instance_flags] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance"}}" region = "us-central1" @@ -20,3 +21,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_flags] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl index 86b8090be47a..789aba01520a 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_sqlserver_instance_public_ip] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_public_ip_instance_name"}}" region = "europe-west4" @@ -19,3 +20,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl index 
93c26ef17944..88d0acc1e8d9 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloud_sql_sqlserver_instance_primary] resource "google_sql_database_instance" "primary" { name = "{{index $.Vars "sqlserver_primary_instance_name"}}" region = "europe-west4" @@ -11,7 +12,9 @@ resource "google_sql_database_instance" "primary" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_primary] +# [START cloud_sql_sqlserver_instance_replica] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_replica_instance_name"}}" master_instance_name = google_sql_database_instance.primary.name @@ -29,3 +32,4 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } +# [END cloud_sql_sqlserver_instance_replica] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl index c1d7a1e1d5fa..24ed99f68177 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl @@ -14,6 +14,7 @@ resource "google_compute_subnetwork" "default" { network = google_compute_network.default.id } +# [START cloud_sql_sqlserver_vm_instance] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { provider = google-beta name = "{{index $.Vars "sqlserver_vm"}}" @@ -38,7 +39,9 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { subnetwork = google_compute_subnetwork.default.id } } +# [END cloud_sql_sqlserver_vm_instance] +# [START cloud_sql_sqlserver_vm_firewall_rule] resource "google_compute_firewall" "sql_server_1433" { provider = google-beta name = "{{index $.Vars 
"sql_server_1433_3"}}" @@ -52,3 +55,4 @@ resource "google_compute_firewall" "sql_server_1433" { priority = 1000 source_ranges = ["0.0.0.0/0"] } +# [END cloud_sql_sqlserver_vm_firewall_rule] diff --git a/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl index 152b8b675a03..5d248640da3c 100644 --- a/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl @@ -1,9 +1,11 @@ +# [START storage_hmac_key] # Create a new service account resource "google_service_account" "service_account" { account_id = "{{index $.Vars "account_id"}}" } -#Create the HMAC key for the associated service account +#Create the HMAC key for the associated service account resource "google_storage_hmac_key" "{{$.PrimaryResourceId}}" { service_account_email = google_service_account.service_account.email } +# [END storage_hmac_key] diff --git a/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl index edc5c07db1f5..c020cc71de31 100644 --- a/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl @@ -5,6 +5,7 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { uniform_bucket_level_access = true } +# [START storage_make_data_public] # Make bucket public resource "google_storage_bucket_iam_member" "member" { provider = google-beta @@ -12,3 +13,4 @@ resource "google_storage_bucket_iam_member" "member" { role = "roles/storage.objectViewer" member = "allUsers" } +# [END storage_make_data_public] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl index b6557318d04c..0e52b1e699a8 100644 --- a/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl 
+++ b/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl @@ -1,3 +1,4 @@ +# [START storage_create_new_bucket_tf] # Create new storage bucket in the US multi-region # with coldline storage resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { @@ -7,7 +8,9 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { uniform_bucket_level_access = true } +# [END storage_create_new_bucket_tf] +# [START storage_upload_object_tf] # Upload files # Discussion about using tf to upload a large number of objects # https://stackoverflow.com/questions/68455132/terraform-copy-multiple-files-to-bucket-at-the-same-time-bucket-creation @@ -21,7 +24,9 @@ resource "google_storage_bucket_object" "default" { content_type = "text/plain" bucket = google_storage_bucket.static.id } +# [END storage_upload_object_tf] +# [START storage_get_object_metadata_tf] # Get object metadata data "google_storage_bucket_object" "default" { name = google_storage_bucket_object.default.name @@ -31,7 +36,9 @@ data "google_storage_bucket_object" "default" { output "object_metadata" { value = data.google_storage_bucket_object.default } +# [END storage_get_object_metadata_tf] +# [START storage_get_bucket_metadata_tf] # Get bucket metadata data "google_storage_bucket" "default" { name = google_storage_bucket.static.id @@ -40,4 +47,5 @@ data "google_storage_bucket" "default" { output "bucket_metadata" { value = data.google_storage_bucket.default } +# [END storage_get_bucket_metadata_tf] diff --git a/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl index 3beadc46890e..b8fd64b062e1 100644 --- a/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl @@ -1,3 +1,4 @@ +# [START storage_create_lifecycle_setting_tf] resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { provider = 
google-beta name = "{{index $.Vars "example_bucket"}}" @@ -13,3 +14,4 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { } } } +# [END storage_create_lifecycle_setting_tf] diff --git a/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl index b73d62d3826c..e586cbbb59d6 100644 --- a/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl @@ -1,3 +1,4 @@ +# [START storage_create_pubsub_notifications_tf] // Create a Pub/Sub notification. resource "google_storage_notification" "notification" { provider = google-beta @@ -32,3 +33,4 @@ resource "google_pubsub_topic" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "your_topic_name"}}" provider = google-beta } +# [END storage_create_pubsub_notifications_tf] diff --git a/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl index 6f7fa483a2b0..826b69285cca 100644 --- a/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl @@ -1,3 +1,4 @@ +# [START storage_static_website_create_bucket_tf] # Create new storage bucket in the US multi-region # with coldline storage and settings for main_page_suffix and not_found_page resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { @@ -9,14 +10,18 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { not_found_page = "{{index $.Vars "main_page_suffix"}}" } } +# [END storage_static_website_create_bucket_tf] +# [START storage_static_website_make_bucket_public_tf] # Make bucket public by granting allUsers READER access resource "google_storage_bucket_access_control" "public_rule" { bucket = google_storage_bucket.static_website.id role = "READER" entity = "allUsers" } +# [END 
storage_static_website_make_bucket_public_tf] +# [START storage_static_website_upload_files_tf] # Upload a simple index.html page to the bucket resource "google_storage_bucket_object" "indexpage" { name = "{{index $.Vars "main_page_suffix"}}" @@ -32,3 +37,4 @@ resource "google_storage_bucket_object" "errorpage" { content_type = "text/html" bucket = google_storage_bucket.static_website.id } +# [END storage_static_website_upload_files_tf] diff --git a/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl index 900565b2a1d1..99596d6b90d3 100644 --- a/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_target_grpc_proxy_basic] resource "google_compute_target_grpc_proxy" "default" { name = "{{index $.Vars "proxy_name"}}" url_map = google_compute_url_map.urlmap.id @@ -85,3 +86,4 @@ resource "google_compute_health_check" "default" { grpc_service_name = "testservice" } } +# [END cloudloadbalancing_target_grpc_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl index dcce57816ab9..5e5b374f711c 100644 --- a/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_target_http_proxy_basic] resource "google_compute_target_http_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_http_proxy_name"}}" url_map = google_compute_url_map.default.id @@ -38,3 +39,4 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } +# [END cloudloadbalancing_target_http_proxy_basic] diff --git 
a/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl b/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl index 9927dcd35ffb..875d95a242f9 100644 --- a/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_target_http_proxy_http_keep_alive_timeout] resource "google_compute_target_http_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_http_proxy_name"}}" http_keep_alive_timeout_sec = 610 @@ -40,3 +41,4 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } +# [END cloudloadbalancing_target_http_proxy_http_keep_alive_timeout] diff --git a/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl b/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl index 9b25f866cf5d..e07e79da166d 100644 --- a/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_target_http_proxy_https_redirect] resource "google_compute_target_http_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_http_proxy_name"}}" url_map = google_compute_url_map.default.id @@ -10,3 +11,4 @@ resource "google_compute_url_map" "default" { strip_query = false } } +# [END cloudloadbalancing_target_http_proxy_https_redirect] diff --git a/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl index 087268c78f28..5cfab5833370 100644 --- a/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START 
cloudloadbalancing_target_https_proxy_basic] resource "google_compute_target_https_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_https_proxy_name"}}" url_map = google_compute_url_map.default.id @@ -47,3 +48,4 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } +# [END cloudloadbalancing_target_https_proxy_basic] diff --git a/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl b/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl index 075c573196f7..fe52d17cb089 100644 --- a/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_target_https_proxy_http_keep_alive_timeout] resource "google_compute_target_https_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_https_proxy_name"}}" http_keep_alive_timeout_sec = 610 @@ -49,3 +50,4 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } +# [END cloudloadbalancing_target_https_proxy_http_keep_alive_timeout] diff --git a/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl b/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl index 11c28dfea258..08f749b2793c 100644 --- a/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_target_https_proxy_mtls] data "google_project" "project" { provider = google-beta } @@ -89,3 +90,4 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } +# [END cloudloadbalancing_target_https_proxy_mtls] diff --git a/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl 
b/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl index 7533c8ab2c1a..4167a0c8b1bc 100644 --- a/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_target_ssl_proxy_basic] resource "google_compute_target_ssl_proxy" "default" { name = "{{index $.Vars "target_ssl_proxy_name"}}" backend_service = google_compute_backend_service.default.id @@ -24,3 +25,4 @@ resource "google_compute_health_check" "default" { port = "443" } } +# [END cloudloadbalancing_target_ssl_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl index ebd96724985b..2c7689ab33c6 100644 --- a/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_target_tcp_proxy_basic] resource "google_compute_target_tcp_proxy" "default" { name = "{{index $.Vars "target_tcp_proxy_name"}}" backend_service = google_compute_backend_service.default.id @@ -20,3 +21,4 @@ resource "google_compute_health_check" "default" { port = "443" } } +# [END cloudloadbalancing_target_tcp_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl index 42c461403bf6..f8b9d8efdb9a 100644 --- a/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_url_map_bucket_and_service] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -72,3 +73,4 @@ resource 
"google_storage_bucket" "static" { name = "{{index $.Vars "storage_bucket_name"}}" location = "US" } +# [END cloudloadbalancing_url_map_bucket_and_service] diff --git a/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl index 7d58d12765c4..432b692d479d 100644 --- a/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl @@ -1,3 +1,4 @@ +# [START trafficdirector_url_map_header_based_routing] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "header-based routing example" @@ -72,3 +73,4 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } +# [END trafficdirector_url_map_header_based_routing] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl index 5b9994af38f9..02d79ef69565 100644 --- a/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl @@ -1,3 +1,4 @@ +# [START trafficdirector_url_map_parameter_based_routing] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "parameter-based routing example" @@ -72,3 +73,4 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } +# [END trafficdirector_url_map_parameter_based_routing] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl index 74fd0795d039..17dc901c7517 100644 --- 
a/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl @@ -1,3 +1,4 @@ +# [START cloudloadbalancing_url_map_path_template_match] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -73,3 +74,4 @@ resource "google_storage_bucket" "static" { name = "{{index $.Vars "storage_bucket_name"}}" location = "US" } +# [END cloudloadbalancing_url_map_path_template_match] diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl index f401ca61a072..72e92f8fb516 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl @@ -1,3 +1,4 @@ +# [START trafficdirector_url_map_traffic_director_path] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -101,3 +102,4 @@ resource "google_compute_health_check" "default" { port = 80 } } +# [END trafficdirector_url_map_traffic_director_path] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl index b35f1cca09fd..ad2340dc3cc3 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl @@ -1,3 +1,4 @@ +# [START trafficdirector_url_map_traffic_director_path_partial] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -71,3 +72,4 @@ resource "google_compute_health_check" "default" { } } +# [END 
trafficdirector_url_map_traffic_director_path_partial] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl index 9f75e5b169de..a711552980c2 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl @@ -1,3 +1,4 @@ +# [START trafficdirector_url_map_traffic_director_route] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -81,3 +82,4 @@ resource "google_compute_health_check" "default" { port = 80 } } +# [END trafficdirector_url_map_traffic_director_route] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl index 1d1ea0c74ffd..c50a260f71ab 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl @@ -1,3 +1,4 @@ +# [START trafficdirector_url_map_traffic_director_route_partial] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -52,3 +53,4 @@ resource "google_compute_health_check" "default" { port = 80 } } +# [END trafficdirector_url_map_traffic_director_route_partial] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/workstation_config_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/workstation_config_basic.tf.tmpl index 4c293b9abb1e..2b34032e64fa 100644 --- a/mmv1/templates/terraform/examples/go/workstation_config_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/workstation_config_basic.tf.tmpl @@ -1,3 +1,21 @@ +resource 
"google_project" "project" { + project_id = "{{index $.Vars "project_id"}}" + name = "{{index $.Vars "project_id"}}" + org_id = "{{index $.TestEnvVars "org_id"}}" +} + +resource "google_tags_tag_key" "tag_key1" { + provider = "google-beta" + parent = "organizations/{{index $.TestEnvVars "org_id"}}" + short_name = "{{index $.Vars "tag_key1"}}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = "google-beta" + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "{{index $.Vars "tag_value1"}}" +} + resource "google_compute_network" "default" { provider = google-beta name = "{{index $.Vars "workstation_cluster_name"}}" @@ -52,6 +70,9 @@ resource "google_workstations_workstation_config" "{{$.PrimaryResourceId}}" { boot_disk_size_gb = 35 disable_public_ip_addresses = true disable_ssh = false + vm_tags = { + "tagKeys/${google_tags_tag_key.tag_key1.short_name}" = "tagValues/${google_tags_tag_value.tag_value1.short_name}" + } } } } diff --git a/mmv1/templates/terraform/nested_query.go.tmpl b/mmv1/templates/terraform/nested_query.go.tmpl index b1525ad40cef..3c268d6eda82 100644 --- a/mmv1/templates/terraform/nested_query.go.tmpl +++ b/mmv1/templates/terraform/nested_query.go.tmpl @@ -102,31 +102,13 @@ func resource{{ $.ResourceName }}PatchCreateEncoder(d *schema.ResourceData, meta // Return list with the resource to create appended {{- if $.NestedQuery.IsListOfIds }} res := map[string]interface{}{ - "{{ $.LastNestedQueryKey }}": append(currItems, obj["{{ $.FirstIdentity.ApiName }}"]), + "{{ $.LastNestedQueryKey }}": append(currItems, obj["{{ $.FirstIdentityProp.ApiName }}"]), } {{- else }} res := map[string]interface{}{ "{{ $.LastNestedQueryKey }}": append(currItems, obj), } {{- end }} - {{/* - Reconstruct the full nested object. For example, if nested_query.keys is: - - nested_item - - more_nested_item - Then the code above will build: - { - "more_nested_item": [...] 
- } - Add back the other keys so we get: - { - "nested_item": { - "more_nested_item": [...] - } - } - Note that this assumes that we can safely have "more_nested_item" be the only element - in the "nested_item" map, which only works if the patch request takes an update mask - (or if the rest of the map would have been empty anyway). - */}} {{- range $i, $k := $.NestedQuery.Keys }} {{- if ne $i 0 }} wrapped := map[string]interface{}{ @@ -203,9 +185,8 @@ func resource{{ $.ResourceName }}PatchDeleteEncoder(d *schema.ResourceData, meta res := map[string]interface{}{ "{{ $.LastNestedQueryKey }}": updatedItems, } - {{/* see comments in PatchCreateEncoder for details */}} {{- range $i, $k := $.NestedQuery.Keys }} - {{- if ne $i 0 }} + {{- if ne $i (sub (len $.NestedQuery.Keys) 1) }} wrapped := map[string]interface{}{ "{{ $k }}": res, } @@ -227,7 +208,7 @@ func resource{{ $.ResourceName }}PatchDeleteEncoder(d *schema.ResourceData, meta */}} func resource{{ $.ResourceName }}ListForPatch(d *schema.ResourceData, meta interface{}) ([]interface{}, error) { config := meta.(*transport_tpg.Config) - url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}{{"}}$.SelfLinkUri{{"}}"}}") + url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}{{$.ProductMetadata.Name}}BasePath{{"}}"}}{{$.SelfLinkUri}}") if err != nil { return nil, err } @@ -264,12 +245,14 @@ func resource{{ $.ResourceName }}ListForPatch(d *schema.ResourceData, meta inter var v interface{} var ok bool -{{- range $k := $.NestedQuery.Keys }} +{{- range $i, $k := $.NestedQuery.Keys }} + {{- if not (eq $i (sub (len $.NestedQuery.Keys) 1)) }} if v, ok = res["{{ $k }}"]; ok && v != nil { res = v.(map[string]interface{}) } else { return nil, nil } + {{- end }} {{- end }} v, ok = res["{{ $.LastNestedQueryKey }}"] diff --git a/mmv1/templates/terraform/pre_create/go/bigquery_reservation_assignment.go.tmpl b/mmv1/templates/terraform/pre_create/go/bigquery_reservation_assignment.go.tmpl new 
file mode 100644 index 000000000000..c4a0cdfda0f2 --- /dev/null +++ b/mmv1/templates/terraform/pre_create/go/bigquery_reservation_assignment.go.tmpl @@ -0,0 +1,20 @@ + if _, ok := d.GetOkExists("location"); !ok { + // Extract location from parent reservation. + reservation := d.Get("reservation").(string) + + tableRef := regexp.MustCompile("projects/(.+)/locations/(.+)/reservations/(.+)") + if parts := tableRef.FindStringSubmatch(reservation); parts != nil { + err := d.Set("location", parts[2]) + if err != nil { + return err + } + } + + if strings.Contains(url, "locations//") { + // re-compute url now that location must be set + url = strings.ReplaceAll(url, "/locations//", "/locations/"+d.Get("location").(string)+"/") + if err != nil { + return err + } + } + } diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 6f7c8bca66c3..4f59fc841d69 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -245,7 +245,7 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ if err != nil { return err } -{{- if $.UpdateMask -}} +{{- if $.UpdateMask }} url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "{{ join $.NestedQuery.Keys "." 
-}}"}) if err != nil { return err @@ -488,7 +488,10 @@ func resource{{ $.ResourceName -}}PollRead(d *schema.ResourceData, meta interfac if err != nil { return res, err } -{{if $.NestedQuery -}} +{{- if or $.NestedQuery $.CustomCode.Decoder }} +{{""}} +{{- end }} +{{- if $.NestedQuery }} res, err = flattenNested{{ $.ResourceName -}}(d, meta, res) if err != nil { return nil, err @@ -507,7 +510,7 @@ func resource{{ $.ResourceName -}}PollRead(d *schema.ResourceData, meta interfac if res == nil { return nil, tpgresource.Fake404("decoded", "{{ $.ResourceName }}") } -{{- end -}} +{{- end }} return res, nil {{ end -}} } @@ -817,7 +820,10 @@ if len(updateMask) > 0 { if err != nil { return err } -{{ else if $.GetAsync.IsA "PollAsync" -}} +{{- if not $.FieldSpecificUpdateMethods }} +{{""}} +{{- end}} +{{- else if $.GetAsync.IsA "PollAsync" -}} err = transport_tpg.PollingWaitTime(resource{{ $.ResourceName -}}PollRead(d, meta), {{ $.GetAsync.CheckResponseFuncExistence -}}, "Updating {{ $.Name -}}", d.Timeout(schema.TimeoutUpdate), {{ $.GetAsync.TargetOccurrences -}}) if err != nil { {{ if $.GetAsync.SuppressError -}} @@ -1058,7 +1064,7 @@ func resource{{ $.ResourceName }}Delete(d *schema.ResourceData, meta interface{} return transport_tpg.HandleNotFoundError(err, d, "{{ $.Name }}") } {{- if $.UpdateMask }} - url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "{{- join $.NestedQuery.Keys "," -}}"}) + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": "{{- join $.NestedQuery.Keys "." 
-}}"}) if err != nil { return err } diff --git a/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl b/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl index 98e788c85227..e80c7d5d1b10 100644 --- a/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl +++ b/mmv1/templates/terraform/unordered_list_customize_diff.go.tmpl @@ -1,4 +1,4 @@ -{{- define "UnorderedListCustomizeDiff" }} +{{- define "UnorderedListCustomizeDiff" -}} keys := diff.GetChangedKeysPrefix("{{ underscore $.Name }}") if len(keys) == 0 { return nil diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index e2f06c8688ff..3c4ffd3605f8 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -130,7 +130,7 @@ skip_delete: <%= object.skip_delete %> immutable: <%= object.immutable %> <% end -%> <% unless object.mutex.nil? -%> -mutex: <%= object.mutex %> +mutex: '<%= object.mutex %>' <% end -%> <% #import diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_health_check_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_health_check_test.go.tmpl index e0b18dab24a6..d762af049515 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_health_check_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_health_check_test.go.tmpl @@ -382,3 +382,68 @@ resource "google_compute_health_check" "foobar" { `, hckName) } {{- end }} + +{{ if ne $.TargetVersionName `ga` -}} + +func TestAccComputeHealthCheck_srcRegions_update(t *testing.T) { + t.Parallel() + + hckName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckComputeHealthCheckDestroyProducer(t), + Steps: []resource.TestStep{ + { + 
Config: testAccComputeHealthCheck_srcRegions(hckName), + }, + { + ResourceName: "google_compute_health_check.src_region", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeHealthCheck_srcRegions_update(hckName), + }, + { + ResourceName: "google_compute_health_check.src_region", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + + +func testAccComputeHealthCheck_srcRegions(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "src_region" { + provider = "google-beta" + name = "%s" + description = "Resource created for Terraform acceptance testing" + check_interval_sec = 30 + source_regions = ["us-central1", "us-east1", "asia-south1"] + http_health_check { + port = "80" + } +} +`, hckName) +} + +func testAccComputeHealthCheck_srcRegions_update(hckName string) string { + return fmt.Sprintf(` +resource "google_compute_health_check" "src_region" { + provider = "google-beta" + name = "%s" + description = "Resource updated for Terraform acceptance testing" + check_interval_sec = 30 + source_regions = ["us-west1", "europe-north1", "asia-south1"] + http_health_check { + port = "80" + } +} +`, hckName) +} + +{{ end }} From f7976e71cbb8f4478138d4466373b7b695165fed Mon Sep 17 00:00:00 2001 From: roop2 <161707562+roop2@users.noreply.github.com> Date: Sat, 29 Jun 2024 01:26:22 +0530 Subject: [PATCH 252/356] Adding support for regional flex zones in netapp volume beta provider (#10946) --- mmv1/products/netapp/storagePool.yaml | 32 ++- mmv1/products/netapp/volume.yaml | 12 + .../pre_update/netapp_storagepool.go.erb | 65 +++++ .../resource_netapp_storage_pool_test.go | 125 --------- .../resource_netapp_storage_pool_test.go.erb | 262 ++++++++++++++++++ 5 files changed, 369 insertions(+), 127 deletions(-) create mode 100644 mmv1/templates/terraform/pre_update/netapp_storagepool.go.erb delete mode 100644 mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go create mode 100644 
mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go.erb diff --git a/mmv1/products/netapp/storagePool.yaml b/mmv1/products/netapp/storagePool.yaml index 529d2ea5aaae..aafe3d74deb2 100644 --- a/mmv1/products/netapp/storagePool.yaml +++ b/mmv1/products/netapp/storagePool.yaml @@ -25,9 +25,22 @@ description: | The capacity of the pool can be split up and assigned to volumes within the pool. Storage pools are a billable component of NetApp Volumes. Billing is based on the location, service level, and capacity allocated to a pool independent of consumption at the volume level. + + Storage pools of service level Flex are available as zonal (single zone) or regional (two zones in same region) pools. + Zonal and regional pools are high-available within the zone. On top of that, regional pools have `replica_zone` as + hot standby zone. All volume access is served from the `zone`. If `zone` fails, `replica_zone` + automatically becomes the active zone. This will cause state drift in your configuration. + If a zone switch (manual or automatic) is triggered outside of Terraform, you need to adjust the `zone` + and `replica_zone` values to reflect the current state, or Terraform will initiate a zone switch when running + the next apply. You can trigger a manual + [zone switch](https://cloud.google.com/netapp/volumes/docs/configure-and-use/storage-pools/edit-or-delete-storage-pool#switch_active_and_replica_zones) + via Terraform by swapping the value of the `zone` and `replica_zone` parameters in your HCL code. + Note : Regional FLEX storage pool are supported in beta provider currently. 
+ references: !ruby/object:Api::Resource::ReferenceLinks guides: 'Quickstart documentation': 'https://cloud.google.com/netapp/volumes/docs/get-started/quickstarts/create-storage-pool' + 'Regional Flex zone switch': 'https://cloud.google.com/netapp/volumes/docs/configure-and-use/storage-pools/edit-or-delete-storage-pool#switch_active_and_replica_zones' api: 'https://cloud.google.com/netapp/volumes/docs/reference/rest/v1/projects.locations.storagePools' base_url: projects/{{project}}/locations/{{location}}/storagePools self_link: projects/{{project}}/locations/{{location}}/storagePools/{{name}} @@ -49,11 +62,11 @@ parameters: immutable: true url_param_only: true description: | - Name of the location. Usually a region name, expect for some FLEX service level pools which require a zone name. + Name of the location. For zonal Flex pools specify a zone name, in all other cases a region name. - !ruby/object:Api::Type::String name: 'name' description: - The resource name of the storage pool. Needs to be unique per location. + The resource name of the storage pool. Needs to be unique per location/region. required: true immutable: true url_param_only: true @@ -133,3 +146,18 @@ properties: - :SERVICE_MANAGED - :CLOUD_KMS output: true + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Specifies the active zone for regional Flex pools. `zone` and `replica_zone` values can be swapped to initiate a + [zone switch](https://cloud.google.com/netapp/volumes/docs/configure-and-use/storage-pools/edit-or-delete-storage-pool#switch_active_and_replica_zones). + If you want to create a zonal Flex pool, specify a zone name for `location` and omit `zone`. + min_version: beta + - !ruby/object:Api::Type::String + name: 'replicaZone' + description: | + Specifies the replica zone for regional Flex pools. 
`zone` and `replica_zone` values can be swapped to initiate a + [zone switch](https://cloud.google.com/netapp/volumes/docs/configure-and-use/storage-pools/edit-or-delete-storage-pool#switch_active_and_replica_zones). + min_version: beta +custom_code: !ruby/object:Provider::Terraform::CustomCode + pre_update: templates/terraform/pre_update/netapp_storagepool.go.erb diff --git a/mmv1/products/netapp/volume.yaml b/mmv1/products/netapp/volume.yaml index 1073e7525c17..3ac6a167d60b 100644 --- a/mmv1/products/netapp/volume.yaml +++ b/mmv1/products/netapp/volume.yaml @@ -475,6 +475,18 @@ properties: name: 'scheduledBackupEnabled' description: |- When set to true, scheduled backup is enabled on the volume. Omit if no backup_policy is specified. + - !ruby/object:Api::Type::String + name: 'zone' + description: | + Specifies the active zone for regional volume. + output: true + min_version: beta + - !ruby/object:Api::Type::String + name: 'replicaZone' + description: | + Specifies the replica zone for regional volume. + output: true + min_version: beta virtual_fields: - !ruby/object:Api::Type::Enum name: 'deletion_policy' diff --git a/mmv1/templates/terraform/pre_update/netapp_storagepool.go.erb b/mmv1/templates/terraform/pre_update/netapp_storagepool.go.erb new file mode 100644 index 000000000000..86e47f1349e2 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/netapp_storagepool.go.erb @@ -0,0 +1,65 @@ +// detect manual zone switches for service level FLEX + +if d.Get("service_level").(string) == "FLEX" { + // Check if this is zonal or regional Flex. Only continue for regional pool + _, hasZone := d.GetOk("zone") + _, hasReplicaZone := d.GetOk("replica_zone") + if hasZone && hasReplicaZone { + // For a zone switch, user needs to swap zone and replica_zone. 
Other changes are not allowed + if d.HasChange("zone") && d.HasChange("replica_zone") { + oldZone, newZone := d.GetChange("zone") + oldReplicaZone, newReplicaZone := d.GetChange("replica_zone") + if newZone == oldReplicaZone && newReplicaZone == oldZone { + rawurl, err := tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/storagePools/{{name}}:switch") + if err != nil { + return err + } + + reso, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: rawurl, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error switching active zone for pool: %s, %v", d.Id(), err) + } + + err = NetappOperationWaitTime( + config, reso, project, "Switching active pool zone", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + //remove zone and replicaZone from updateMask + n := 0 + for _, v := range updateMask { + if v != "zone" && v != "replicaZone" { + updateMask[n] = v + n++ + } + } + updateMask = updateMask[:n] + + // delete from payload too + delete(obj, "zone") + delete(obj, "replicaZone") + + // PATCH URL was already build prior to this code. We need to rebuild it to catch our changes + url, err = tpgresource.ReplaceVars(d, config, "{{NetappBasePath}}projects/{{project}}/locations/{{location}}/storagePools/{{name}}") + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + } else { + return fmt.Errorf("Incorrect zone change for pool: %s. 
Supported zone, replica_zone are : %s, %s", d.Id(), oldZone, oldReplicaZone) + } + } + } +} diff --git a/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go b/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go deleted file mode 100644 index c47b113fd70d..000000000000 --- a/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package netapp_test - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-provider-google/google/acctest" -) - -func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { - t.Parallel() - - context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), - } - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - Steps: []resource.TestStep{ - { - Config: testAccNetappstoragePool_storagePoolCreateExample_full(context), - }, - { - ResourceName: "google_netapp_storage_pool.test_pool", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, - }, - { - Config: testAccNetappstoragePool_storagePoolCreateExample_update(context), - }, - { - ResourceName: "google_netapp_storage_pool.test_pool", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, - }, - }, - }) -} - -func testAccNetappstoragePool_storagePoolCreateExample_full(context map[string]interface{}) string { - return acctest.Nprintf(` - -resource "google_compute_network" "peering_network" { - name = "tf-test-network%{random_suffix}" -} - -# Create an IP address -resource "google_compute_global_address" "private_ip_alloc" { - name = "tf-test-address%{random_suffix}" - purpose = "VPC_PEERING" - address_type = "INTERNAL" 
- prefix_length = 16 - network = google_compute_network.peering_network.id -} - -# Create a private connection -resource "google_service_networking_connection" "default" { - network = google_compute_network.peering_network.id - service = "netapp.servicenetworking.goog" - reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] -} - -resource "google_netapp_storage_pool" "test_pool" { - name = "tf-test-pool%{random_suffix}" - location = "us-central1" - service_level = "PREMIUM" - capacity_gib = "2048" - network = google_compute_network.peering_network.id - active_directory = "" - description = "this is a test description" - kms_config = "" - labels = { - key= "test" - value= "pool" - } - ldap_enabled = false - -} -`, context) -} - -func testAccNetappstoragePool_storagePoolCreateExample_update(context map[string]interface{}) string { - return acctest.Nprintf(` - -resource "google_compute_network" "peering_network" { - name = "tf-test-network%{random_suffix}" -} - -# Create an IP address -resource "google_compute_global_address" "private_ip_alloc" { - name = "tf-test-address%{random_suffix}" - purpose = "VPC_PEERING" - address_type = "INTERNAL" - prefix_length = 16 - network = google_compute_network.peering_network.id -} - -# Create a private connection -resource "google_service_networking_connection" "default" { - network = google_compute_network.peering_network.id - service = "netapp.servicenetworking.goog" - reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] -} - -resource "google_netapp_storage_pool" "test_pool" { - name = "tf-test-pool%{random_suffix}" - location = "us-central1" - service_level = "PREMIUM" - capacity_gib = "4096" - network = google_compute_network.peering_network.id - active_directory = "" - description = "this is test" - kms_config = "" - labels = { - key= "test" - value= "pool" - } - ldap_enabled = false - -} -`, context) -} diff --git 
a/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go.erb b/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go.erb new file mode 100644 index 000000000000..4a6a5b18218e --- /dev/null +++ b/mmv1/third_party/terraform/services/netapp/resource_netapp_storage_pool_test.go.erb @@ -0,0 +1,262 @@ +<% autogen_exception -%> +package netapp_test + +import ( + "testing" +<% unless version == 'ga' -%> + "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +<% end -%> + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccNetappstoragePool_storagePoolCreateExample_full(context), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + { + Config: testAccNetappstoragePool_storagePoolCreateExample_update(context), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetappstoragePool_storagePoolCreateExample_full(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_compute_network" "peering_network" { + name = "tf-test-network%{random_suffix}" +} + +# Create an IP address +resource "google_compute_global_address" "private_ip_alloc" { + name = "tf-test-address%{random_suffix}" + purpose = 
"VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.peering_network.id +} + +# Create a private connection +resource "google_service_networking_connection" "default" { + network = google_compute_network.peering_network.id + service = "netapp.servicenetworking.goog" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] +} + +resource "google_netapp_storage_pool" "test_pool" { + name = "tf-test-pool%{random_suffix}" + location = "us-central1" + service_level = "PREMIUM" + capacity_gib = "2048" + network = google_compute_network.peering_network.id + active_directory = "" + description = "this is a test description" + kms_config = "" + labels = { + key= "test" + value= "pool" + } + ldap_enabled = false + +} +`, context) +} + +func testAccNetappstoragePool_storagePoolCreateExample_update(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_compute_network" "peering_network" { + name = "tf-test-network%{random_suffix}" +} + +# Create an IP address +resource "google_compute_global_address" "private_ip_alloc" { + name = "tf-test-address%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.peering_network.id +} + +# Create a private connection +resource "google_service_networking_connection" "default" { + network = google_compute_network.peering_network.id + service = "netapp.servicenetworking.goog" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] +} + +resource "google_netapp_storage_pool" "test_pool" { + name = "tf-test-pool%{random_suffix}" + location = "us-central1" + service_level = "PREMIUM" + capacity_gib = "4096" + network = google_compute_network.peering_network.id + active_directory = "" + description = "this is test" + kms_config = "" + labels = { + key= "test" + value= "pool" + } + ldap_enabled = false + +} +`, context) +} + +<% unless version == 
'ga' -%> +func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *testing.T) { + context := map[string]interface{}{ + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog")), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckNetappstoragePoolDestroyProducer(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_full(context), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + { + Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context), + Check: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins(), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + { + Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_netapp_storage_pool" "test_pool" { + provider = google-beta + name = "tf-test-pool%{random_suffix}" + location = "us-east1" + service_level 
= "FLEX" + capacity_gib = "2048" + network = data.google_compute_network.default.id + zone = "us-east1-c" + replica_zone = "us-east1-b" +} + +resource "time_sleep" "wait_5_minutes" { + depends_on = [google_netapp_storage_pool.test_pool] + destroy_duration = "5m" +} + +data "google_compute_network" "default" { + provider = google-beta + name = "%{network_name}" +} +`, context) +} + +func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_netapp_storage_pool" "test_pool" { + provider = google-beta + name = "tf-test-pool%{random_suffix}" + location = "us-east1" + service_level = "FLEX" + capacity_gib = "2048" + network = data.google_compute_network.default.id + zone = "us-east1-b" + replica_zone = "us-east1-c" +} + +resource "time_sleep" "wait_5_minutes" { + depends_on = [google_netapp_storage_pool.test_pool] + destroy_duration = "5m" +} + +data "google_compute_network" "default" { + provider = google-beta + name = "%{network_name}" +} +`, context) +} + +func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins() resource.TestCheckFunc { + return func(s *terraform.State) error { + // wait 5 minutes before executing the switchback due to api zone switch issues + time.Sleep(5 * time.Minute) + return nil + } +} + +func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_netapp_storage_pool" "test_pool" { + provider = google-beta + name = "tf-test-pool%{random_suffix}" + location = "us-east1" + service_level = "FLEX" + capacity_gib = "2048" + network = data.google_compute_network.default.id + zone = "us-east1-c" + replica_zone = "us-east1-b" +} + +resource "time_sleep" "wait_5_minutes" { + depends_on = [google_netapp_storage_pool.test_pool] + destroy_duration = "5m" +} + +data "google_compute_network" "default" { + provider = 
google-beta + name = "%{network_name}" +} +`, context) +} + +<% end -%> From bc0935d7905ab9d07908c6768d3cfcdc7b57fbfa Mon Sep 17 00:00:00 2001 From: Ryan Oaks Date: Fri, 28 Jun 2024 16:36:32 -0400 Subject: [PATCH 253/356] Use sweepable name for vpc access connector (#11084) --- .../services/vpcaccess/data_source_vpc_access_connector_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/vpcaccess/data_source_vpc_access_connector_test.go b/mmv1/third_party/terraform/services/vpcaccess/data_source_vpc_access_connector_test.go index c5defbe76372..b64da1c8fd1f 100644 --- a/mmv1/third_party/terraform/services/vpcaccess/data_source_vpc_access_connector_test.go +++ b/mmv1/third_party/terraform/services/vpcaccess/data_source_vpc_access_connector_test.go @@ -36,7 +36,7 @@ func TestAccVPCAccessConnectorDatasource_basic(t *testing.T) { func testAccVPCAccessConnectorDatasourceConfig(suffix string) string { return fmt.Sprintf(` resource "google_vpc_access_connector" "connector" { - name = "vpc-con-test-%s" + name = "tf-test-%s" ip_cidr_range = "10.8.0.32/28" network = "default" region = "us-central1" From b03e72f5f5f54fcfed067ed3e4fc8afa51a8f9e1 Mon Sep 17 00:00:00 2001 From: Mauricio Alvarez Leon <65101411+BBBmau@users.noreply.github.com> Date: Fri, 28 Jun 2024 14:53:31 -0700 Subject: [PATCH 254/356] `KMS`: add plural data source for `resource_google_kms_key_ring` (#11061) Co-authored-by: Sarah French <15078782+SarahFrench@users.noreply.github.com> --- .../provider/provider_mmv1_resources.go.erb | 1 + .../kms/data_source_google_kms_key_rings.go | 173 ++++++++++++++++++ .../data_source_google_kms_key_rings_test.go | 70 +++++++ 3 files changed, 244 insertions(+) create mode 100644 mmv1/third_party/terraform/services/kms/data_source_google_kms_key_rings.go create mode 100644 mmv1/third_party/terraform/services/kms/data_source_google_kms_key_rings_test.go diff --git 
a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index ecf0d1135f0c..73fa0d83fe75 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -136,6 +136,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_kms_crypto_keys": kms.DataSourceGoogleKmsCryptoKeys(), "google_kms_crypto_key_version": kms.DataSourceGoogleKmsCryptoKeyVersion(), "google_kms_key_ring": kms.DataSourceGoogleKmsKeyRing(), + "google_kms_key_rings": kms.DataSourceGoogleKmsKeyRings(), "google_kms_secret": kms.DataSourceGoogleKmsSecret(), "google_kms_secret_ciphertext": kms.DataSourceGoogleKmsSecretCiphertext(), <% unless version == 'ga' -%> diff --git a/mmv1/third_party/terraform/services/kms/data_source_google_kms_key_rings.go b/mmv1/third_party/terraform/services/kms/data_source_google_kms_key_rings.go new file mode 100644 index 000000000000..fdc9be311b34 --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/data_source_google_kms_key_rings.go @@ -0,0 +1,173 @@ +package kms + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleKmsKeyRings() *schema.Resource { + return &schema.Resource{ + Read: dataSourceGoogleKmsKeyRingsRead, + Schema: map[string]*schema.Schema{ + "project": { + Type: schema.TypeString, + Optional: true, + Description: `Project ID of the project.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + Description: `The canonical id for the location. 
For example: "us-east1".`, + }, + "filter": { + Type: schema.TypeString, + Optional: true, + Description: ` + The filter argument is used to add a filter query parameter that limits which keys are retrieved by the data source: ?filter={{filter}}. + Example values: + + * "name:my-key-" will retrieve key rings that contain "my-key-" anywhere in their name. Note: names take the form projects/{{project}}/locations/{{location}}/keyRings/{{keyRing}}. + * "name=projects/my-project/locations/global/keyRings/my-key-ring" will only retrieve a key ring with that exact name. + + [See the documentation about using filters](https://cloud.google.com/kms/docs/sorting-and-filtering) + `, + }, + "key_rings": { + Type: schema.TypeList, + Computed: true, + Description: "A list of all the retrieved key rings", + Elem: &schema.Resource{ + // schema isn't used from resource_kms_key_ring due to having project and location fields which are empty when grabbed in a list. + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + } +} + +func dataSourceGoogleKmsKeyRingsRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/keyRings") + if err != nil { + return err + } + if filter, ok := d.GetOk("filter"); ok { + id += "/filter=" + filter.(string) + } + d.SetId(id) + + log.Printf("[DEBUG] Searching for keyrings") + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := tpgresource.GetProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for keyRings: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); 
err == nil { + billingProject = bp + } + + var keyRings []interface{} + + params := make(map[string]string) + if filter, ok := d.GetOk("filter"); ok { + log.Printf("[DEBUG] Search for key rings using filter ?filter=%s", filter.(string)) + params["filter"] = filter.(string) + if err != nil { + return err + } + } + + url, err := tpgresource.ReplaceVars(d, config, "{{KMSBasePath}}projects/{{project}}/locations/{{location}}/keyRings") + if err != nil { + return err + } + + for { + url, err = transport_tpg.AddQueryParams(url, params) + if err != nil { + return err + } + + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.Is429RetryableQuotaError}, + }) + if err != nil { + return fmt.Errorf("Error retrieving buckets: %s", err) + } + + if res["keyRings"] == nil { + break + } + pageKeyRings, err := flattenKMSKeyRingsList(config, res["keyRings"]) + if err != nil { + return fmt.Errorf("error flattening key rings list: %s", err) + } + keyRings = append(keyRings, pageKeyRings...) 
+ + pToken, ok := res["nextPageToken"] + if ok && pToken != nil && pToken.(string) != "" { + params["pageToken"] = pToken.(string) + } else { + break + } + } + + log.Printf("[DEBUG] Found %d key rings", len(keyRings)) + if err := d.Set("key_rings", keyRings); err != nil { + return fmt.Errorf("error setting key rings: %s", err) + } + + return nil +} + +// flattenKMSKeyRingsList flattens a list of key rings +func flattenKMSKeyRingsList(config *transport_tpg.Config, keyRingsList interface{}) ([]interface{}, error) { + var keyRings []interface{} + for _, k := range keyRingsList.([]interface{}) { + keyRing := k.(map[string]interface{}) + + parsedId, err := parseKmsKeyRingId(keyRing["name"].(string), config) + if err != nil { + return nil, err + } + + data := map[string]interface{}{} + // The google_kms_key_rings resource and dataset set + // id as the value of name (projects/{{project}}/locations/{{location}}/keyRings/{{name}}) + // and set name is set as just {{name}}. + data["id"] = keyRing["name"] + data["name"] = parsedId.Name + + keyRings = append(keyRings, data) + } + + return keyRings, nil +} diff --git a/mmv1/third_party/terraform/services/kms/data_source_google_kms_key_rings_test.go b/mmv1/third_party/terraform/services/kms/data_source_google_kms_key_rings_test.go new file mode 100644 index 000000000000..779cd0cc076a --- /dev/null +++ b/mmv1/third_party/terraform/services/kms/data_source_google_kms_key_rings_test.go @@ -0,0 +1,70 @@ +package kms_test + +import ( + "fmt" + "regexp" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccDataSourceGoogleKmsKeyRings_basic(t *testing.T) { + kms := acctest.BootstrapKMSKey(t) + idPath := strings.Split(kms.KeyRing.Name, "/") + location := idPath[3] + randomString := acctest.RandString(t, 10) + filterNameFindSharedKeys := "name:tftest-shared-" + filterNameFindsNoKeys := fmt.Sprintf("name:%s", 
randomString) + + keyRingsID := fmt.Sprintf("projects/%s/locations/%s/keyRings", idPath[1], location) + findSharedKeysId := fmt.Sprintf("%s/filter=%s", keyRingsID, filterNameFindSharedKeys) + findsNoKeysId := fmt.Sprintf("%s/filter=%s", keyRingsID, filterNameFindsNoKeys) + + context := map[string]interface{}{ + "filter": "", // Can be overridden using 2nd argument to config funcs + "location": location, + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleKmsKeyRings_basic(context, ""), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.google_kms_key_rings.all_key_rings", "id", keyRingsID), + resource.TestMatchResourceAttr("data.google_kms_key_rings.all_key_rings", "key_rings.#", regexp.MustCompile("[1-9]+[0-9]*")), + ), + }, + { + Config: testAccDataSourceGoogleKmsKeyRings_basic(context, fmt.Sprintf("filter = \"%s\"", filterNameFindSharedKeys)), + Check: resource.ComposeTestCheckFunc( + // This filter should retrieve the bootstrapped KMS key rings used by the test + resource.TestCheckResourceAttr("data.google_kms_key_rings.all_key_rings", "id", findSharedKeysId), + resource.TestMatchResourceAttr("data.google_kms_key_rings.all_key_rings", "key_rings.#", regexp.MustCompile("[1-9]+[0-9]*")), + ), + }, + { + Config: testAccDataSourceGoogleKmsKeyRings_basic(context, fmt.Sprintf("filter = \"%s\"", filterNameFindsNoKeys)), + Check: resource.ComposeTestCheckFunc( + // This filter should retrieve no keys + resource.TestCheckResourceAttr("data.google_kms_key_rings.all_key_rings", "id", findsNoKeysId), + resource.TestCheckResourceAttr("data.google_kms_key_rings.all_key_rings", "key_rings.#", "0"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleKmsKeyRings_basic(context map[string]interface{}, filter string) string { + context["filter"] = filter + + return 
acctest.Nprintf(` +data "google_kms_key_rings" "all_key_rings" { + location = "%{location}" + %{filter} +} +`, context) +} From ab71b9d86d2ee259aec7d584984bb1f828541e27 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Mon, 1 Jul 2024 16:20:44 +0100 Subject: [PATCH 255/356] Fix acceptance tests for `google_workstations_workstation_config` following `vm_tags` field being added (#11078) --- .../workstations/WorkstationConfig.yaml | 9 +++- .../examples/workstation_config_basic.tf.erb | 20 +++---- ...rkstations_workstation_config_test.go.tmpl | 52 +++++++++++++------ ...orkstations_workstation_config_test.go.erb | 51 ++++++++++++------ 4 files changed, 84 insertions(+), 48 deletions(-) diff --git a/mmv1/products/workstations/WorkstationConfig.yaml b/mmv1/products/workstations/WorkstationConfig.yaml index 83086c194d3b..da235ea6df81 100644 --- a/mmv1/products/workstations/WorkstationConfig.yaml +++ b/mmv1/products/workstations/WorkstationConfig.yaml @@ -73,8 +73,13 @@ examples: vars: workstation_cluster_name: 'workstation-cluster' workstation_config_name: 'workstation-config' - tag_key1: 'tag_key1' - tag_value1: 'tag_value1' + key_short_name: 'keyname' + value_short_name: 'valuename' + org_id: '123456789' + test_vars_overrides: + key_short_name: '"tf-test-key-" + acctest.RandString(t, 10)' + value_short_name: '"tf-test-value-" + acctest.RandString(t, 10)' + org_id: 'envvar.GetTestOrgFromEnv(t)' - !ruby/object:Provider::Terraform::Examples name: 'workstation_config_container' min_version: beta diff --git a/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb b/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb index 692461e52dab..a70cc2c0a054 100644 --- a/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb +++ b/mmv1/templates/terraform/examples/workstation_config_basic.tf.erb @@ -1,19 +1,13 @@ -resource "google_project" "project" { - project_id = "<%= ctx[:vars]['project_id'] %>" - 
name = "<%= ctx[:vars]['project_id'] %>" - org_id = "<%= ctx[:test_env_vars]['org_id'] %>" -} - resource "google_tags_tag_key" "tag_key1" { - provider = "google-beta" - parent = "organizations/<%= ctx[:test_env_vars]['org_id'] %>" - short_name = "<%= ctx[:vars]['tag_key1'] %>" + provider = google-beta + parent = "organizations/<%= ctx[:vars]['org_id'] %>" + short_name = "<%= ctx[:vars]['key_short_name'] %>" } resource "google_tags_tag_value" "tag_value1" { - provider = "google-beta" - parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" - short_name = "<%= ctx[:vars]['tag_value1'] %>" + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "<%= ctx[:vars]['value_short_name'] %>" } resource "google_compute_network" "default" { @@ -71,7 +65,7 @@ resource "google_workstations_workstation_config" "<%= ctx[:primary_resource_id] disable_public_ip_addresses = true disable_ssh = false vm_tags = { - "tagKeys/${google_tags_tag_key.tag_key1.short_name}" = "tagValues/${google_tags_tag_value.tag_value1.short_name}" + "tagKeys/${google_tags_tag_key.tag_key1.name}" = "tagValues/${google_tags_tag_value.tag_value1.name}" } } } diff --git a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl index ddced48c78b2..2cf6f91f2d2f 100644 --- a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl +++ b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl @@ -2,8 +2,10 @@ package workstations_test {{- if ne $.TargetVersionName "ga" }} import ( + "fmt" "testing" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -707,8 +709,13 @@ func 
testAccWorkstationsWorkstationConfig_readinessChecks(context map[string]int func TestAccWorkstationsWorkstationConfig_update(t *testing.T) { t.Parallel() + randString := acctest.RandString(t, 10) context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), + "random_suffix": randString, + "project_id": fmt.Sprintf("tf-test-proj-%s", randString), + "key_short_name": fmt.Sprintf("tf-test-key-%s", randString), + "value_short_name": fmt.Sprintf("tf-test-value-%s", randString), + "org_id": envvar.GetTestOrgFromEnv(t), } acctest.VcrTest(t, resource.TestCase{ @@ -749,6 +756,17 @@ func TestAccWorkstationsWorkstationConfig_update(t *testing.T) { func testAccWorkstationsWorkstationConfig_update(context map[string]interface{}) string { return acctest.Nprintf(` +resource "google_tags_tag_key" "tag_key1" { + provider = google-beta + parent = "organizations/%{org_id}" + short_name = "%{key_short_name}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "%{value_short_name}" +} resource "google_compute_network" "default" { provider = google-beta name = "tf-test-workstation-cluster%{random_suffix}" @@ -1319,22 +1337,22 @@ func TestAccWorkstationsWorkstationConfig_vmTags(t *testing.T) { } func testAccWorkstationsWorkstationConfig_vmTags(context map[string]interface{}) string { - return acctest.Nprintf(` - data "google_project" "project" { - provider = "google-beta" - } - - resource "google_tags_tag_key" "tag_key1" { - provider = google-beta - parent = "projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key1%{random_suffix}" - } - - resource "google_tags_tag_value" "tag_value1" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" - short_name = "tf_test_tag_value1%{random_suffix}" - } +return acctest.Nprintf(` +data "google_project" "project" { + provider = google-beta +} + +resource "google_tags_tag_key" 
"tag_key1" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key1%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "tf_test_tag_value1%{random_suffix}" +} resource "google_compute_network" "default" { provider = google-beta diff --git a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb index 7fed741b8039..08169d2fe496 100644 --- a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb +++ b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb @@ -3,8 +3,10 @@ package workstations_test <% unless version == "ga" -%> import ( + "fmt" "testing" "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) @@ -708,8 +710,13 @@ func testAccWorkstationsWorkstationConfig_readinessChecks(context map[string]int func TestAccWorkstationsWorkstationConfig_update(t *testing.T) { t.Parallel() + randString := acctest.RandString(t, 10) context := map[string]interface{}{ - "random_suffix": acctest.RandString(t, 10), + "random_suffix": randString, + "project_id": fmt.Sprintf("tf-test-proj-%s", randString), + "key_short_name": fmt.Sprintf("tf-test-key-%s", randString), + "value_short_name": fmt.Sprintf("tf-test-value-%s", randString), + "org_id": envvar.GetTestOrgFromEnv(t), } acctest.VcrTest(t, resource.TestCase{ @@ -750,6 +757,18 @@ func TestAccWorkstationsWorkstationConfig_update(t *testing.T) { func testAccWorkstationsWorkstationConfig_update(context map[string]interface{}) string { return acctest.Nprintf(` +resource 
"google_tags_tag_key" "tag_key1" { + provider = google-beta + parent = "organizations/%{org_id}" + short_name = "%{key_short_name}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "%{value_short_name}" +} + resource "google_compute_network" "default" { provider = google-beta name = "tf-test-workstation-cluster%{random_suffix}" @@ -1321,21 +1340,21 @@ func TestAccWorkstationsWorkstationConfig_vmTags(t *testing.T) { func testAccWorkstationsWorkstationConfig_vmTags(context map[string]interface{}) string { return acctest.Nprintf(` - data "google_project" "project" { - provider = "google-beta" - } - - resource "google_tags_tag_key" "tag_key1" { - provider = google-beta - parent = "projects/${data.google_project.project.number}" - short_name = "tf_test_tag_key1%{random_suffix}" - } - - resource "google_tags_tag_value" "tag_value1" { - provider = google-beta - parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" - short_name = "tf_test_tag_value1%{random_suffix}" - } +data "google_project" "project" { + provider = google-beta +} + +resource "google_tags_tag_key" "tag_key1" { + provider = google-beta + parent = "projects/${data.google_project.project.number}" + short_name = "tf_test_tag_key1%{random_suffix}" +} + +resource "google_tags_tag_value" "tag_value1" { + provider = google-beta + parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" + short_name = "tf_test_tag_value1%{random_suffix}" +} resource "google_compute_network" "default" { provider = google-beta From 4910932404bce12332623c5460be355fa082955b Mon Sep 17 00:00:00 2001 From: Ryan Oaks Date: Mon, 1 Jul 2024 11:36:33 -0400 Subject: [PATCH 256/356] Fix DataprocCluster sweeper to use clusters field from API response (#11085) --- .../services/dataproc/resource_dataproc_cluster_sweeper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_sweeper.go b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_sweeper.go index ad4927e28a19..e66dfb7fafba 100644 --- a/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_sweeper.go +++ b/mmv1/third_party/terraform/services/dataproc/resource_dataproc_cluster_sweeper.go @@ -66,7 +66,7 @@ func testSweepDataprocCluster(region string) error { return nil } - resourceList, ok := res["policies"] + resourceList, ok := res["clusters"] if !ok { log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") return nil From 6f92b8948626096583db037a1aa81c738f83b450 Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 1 Jul 2024 12:24:24 -0500 Subject: [PATCH 257/356] go rewrite - acm and apigee (#11087) --- .../accesscontextmanager/go_AccessLevel.yaml | 311 +++++++ .../go_AccessLevelCondition.yaml | 243 ++++++ .../accesscontextmanager/go_AccessLevels.yaml | 315 +++++++ .../accesscontextmanager/go_AccessPolicy.yaml | 117 +++ .../go_AuthorizedOrgsDesc.yaml | 145 ++++ .../accesscontextmanager/go_EgressPolicy.yaml | 78 ++ .../go_GcpUserAccessBinding.yaml | 90 ++ .../go_IngressPolicy.yaml | 78 ++ .../go_ServicePerimeter.yaml | 768 ++++++++++++++++++ .../go_ServicePerimeterDryRunResource.yaml | 105 +++ .../go_ServicePerimeterEgressPolicy.yaml | 184 +++++ .../go_ServicePerimeterIngressPolicy.yaml | 192 +++++ .../go_ServicePerimeterResource.yaml | 102 +++ .../go_ServicePerimeters.yaml | 765 +++++++++++++++++ .../accesscontextmanager/go_product.yaml | 34 + mmv1/products/apigee/go_AddonsConfig.yaml | 131 +++ .../apigee/go_EndpointAttachment.yaml | 105 +++ mmv1/products/apigee/go_EnvKeystore.yaml | 68 ++ mmv1/products/apigee/go_EnvReferences.yaml | 79 ++ mmv1/products/apigee/go_Envgroup.yaml | 86 ++ .../apigee/go_EnvgroupAttachment.yaml | 85 ++ mmv1/products/apigee/go_Environment.yaml | 187 +++++ mmv1/products/apigee/go_Instance.yaml | 188 +++++ 
.../apigee/go_InstanceAttachment.yaml | 86 ++ .../go_KeystoresAliasesSelfSignedCert.yaml | 218 +++++ mmv1/products/apigee/go_NatAddress.yaml | 89 ++ mmv1/products/apigee/go_Organization.yaml | 245 ++++++ .../products/apigee/go_SyncAuthorization.yaml | 76 ++ mmv1/products/apigee/go_TargetServer.yaml | 157 ++++ mmv1/products/apigee/go_product.yaml | 22 + mmv1/products/compute/go_Autoscaler.yaml | 1 + .../compute/go_BackendBucketSignedUrlKey.yaml | 2 +- .../go_BackendServiceSignedUrlKey.yaml | 2 +- .../compute/go_GlobalNetworkEndpoint.yaml | 2 +- .../compute/go_InstanceGroupMembership.yaml | 2 +- .../compute/go_InstanceGroupNamedPort.yaml | 2 +- mmv1/products/compute/go_NetworkEndpoint.yaml | 2 +- .../products/compute/go_NetworkEndpoints.yaml | 2 +- .../go_NetworkPeeringRoutesConfig.yaml | 2 +- .../compute/go_PerInstanceConfig.yaml | 2 +- .../products/compute/go_RegionAutoscaler.yaml | 1 + .../compute/go_RegionNetworkEndpoint.yaml | 2 +- .../compute/go_RegionPerInstanceConfig.yaml | 2 +- mmv1/products/compute/go_Route.yaml | 2 +- mmv1/products/compute/go_Router.yaml | 2 +- mmv1/products/compute/go_RouterNat.yaml | 2 +- .../custom_flatten/go/default_if_empty.tmpl | 2 +- .../apigee_endpoint_attachment.go.erb | 29 +- .../go/apigee_endpoint_attachment.go.tmpl | 29 +- .../base_configs/iam_test_file.go.tmpl | 10 +- .../pre_update/go/netapp_storagepool.go.tmpl | 65 ++ mmv1/templates/terraform/resource.go.tmpl | 14 +- .../terraform/resource_iam.html.markdown.tmpl | 2 +- mmv1/templates/terraform/yaml_conversion.erb | 2 +- .../go/resource_container_cluster.go.tmpl | 4 +- ...source_container_cluster_migratev1.go.tmpl | 4 +- .../resource_netapp_storage_pool_test.go.tmpl | 261 ++++++ 57 files changed, 5740 insertions(+), 61 deletions(-) create mode 100644 mmv1/products/accesscontextmanager/go_AccessLevel.yaml create mode 100644 mmv1/products/accesscontextmanager/go_AccessLevelCondition.yaml create mode 100644 mmv1/products/accesscontextmanager/go_AccessLevels.yaml create mode 
100644 mmv1/products/accesscontextmanager/go_AccessPolicy.yaml create mode 100644 mmv1/products/accesscontextmanager/go_AuthorizedOrgsDesc.yaml create mode 100644 mmv1/products/accesscontextmanager/go_EgressPolicy.yaml create mode 100644 mmv1/products/accesscontextmanager/go_GcpUserAccessBinding.yaml create mode 100644 mmv1/products/accesscontextmanager/go_IngressPolicy.yaml create mode 100644 mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml create mode 100644 mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml create mode 100644 mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml create mode 100644 mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml create mode 100644 mmv1/products/accesscontextmanager/go_ServicePerimeterResource.yaml create mode 100644 mmv1/products/accesscontextmanager/go_ServicePerimeters.yaml create mode 100644 mmv1/products/accesscontextmanager/go_product.yaml create mode 100644 mmv1/products/apigee/go_AddonsConfig.yaml create mode 100644 mmv1/products/apigee/go_EndpointAttachment.yaml create mode 100644 mmv1/products/apigee/go_EnvKeystore.yaml create mode 100644 mmv1/products/apigee/go_EnvReferences.yaml create mode 100644 mmv1/products/apigee/go_Envgroup.yaml create mode 100644 mmv1/products/apigee/go_EnvgroupAttachment.yaml create mode 100644 mmv1/products/apigee/go_Environment.yaml create mode 100644 mmv1/products/apigee/go_Instance.yaml create mode 100644 mmv1/products/apigee/go_InstanceAttachment.yaml create mode 100644 mmv1/products/apigee/go_KeystoresAliasesSelfSignedCert.yaml create mode 100644 mmv1/products/apigee/go_NatAddress.yaml create mode 100644 mmv1/products/apigee/go_Organization.yaml create mode 100644 mmv1/products/apigee/go_SyncAuthorization.yaml create mode 100644 mmv1/products/apigee/go_TargetServer.yaml create mode 100644 mmv1/products/apigee/go_product.yaml create mode 100644 mmv1/templates/terraform/pre_update/go/netapp_storagepool.go.tmpl 
create mode 100644 mmv1/third_party/terraform/services/netapp/go/resource_netapp_storage_pool_test.go.tmpl diff --git a/mmv1/products/accesscontextmanager/go_AccessLevel.yaml b/mmv1/products/accesscontextmanager/go_AccessLevel.yaml new file mode 100644 index 000000000000..7fcbe6d33c1a --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_AccessLevel.yaml @@ -0,0 +1,311 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'AccessLevel' +description: | + An AccessLevel is a label that can be applied to requests to GCP services, + along with a list of requirements necessary for the label to be applied. +references: + guides: + 'Access Policy Quickstart': 'https://cloud.google.com/access-context-manager/docs/quickstart' + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.accessLevels' +docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource, + you must specify a `billing_project` and set `user_project_override` to true + in the provider configuration. Otherwise the ACM API will return a 403 error. + Your account must have the `serviceusage.services.use` permission on the + `billing_project` you defined. 
+id_format: '{{name}}' +base_url: '' +self_link: '{{name}}' +create_url: '{{parent}}/accessLevels' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + encoder: 'templates/terraform/encoders/go/access_level_never_send_parent.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/set_access_policy_parent_from_self_link.go.tmpl' +skip_sweeper: true +examples: + - name: 'access_context_manager_access_level_basic' + primary_resource_id: 'access-level' + vars: + access_level_name: 'chromeos_no_lock' + skip_test: true +parameters: + - name: 'parent' + type: String + description: | + The AccessPolicy this AccessLevel lives in. + Format: accessPolicies/{policy_id} + required: true + immutable: true + ignore_read: true + - name: 'name' + type: String + description: | + Resource name for the Access Level. The short_name component must begin + with a letter and only include alphanumeric and '_'. + Format: accessPolicies/{policy_id}/accessLevels/{short_name} + required: true + immutable: true +properties: + - name: 'title' + type: String + description: | + Human readable title. Must be unique within the Policy. + required: true + - name: 'description' + type: String + description: | + Description of the AccessLevel and its use. Does not affect behavior. + - name: 'basic' + type: NestedObject + description: | + A set of predefined conditions for the access level and a combining function. + conflicts: + - custom + properties: + - name: 'combiningFunction' + type: Enum + description: | + How the conditions list should be combined to determine if a request + is granted this AccessLevel. 
If AND is used, each Condition in + conditions must be satisfied for the AccessLevel to be applied. If + OR is used, at least one Condition in conditions must be satisfied + for the AccessLevel to be applied. + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: "AND" + enum_values: + - 'AND' + - 'OR' + - name: 'conditions' + type: Array + description: | + A set of requirements for the AccessLevel to be granted. + required: true + item_type: + type: NestedObject + properties: + - name: 'ipSubnetworks' + type: Array + description: | + A list of CIDR block IP subnetwork specification. May be IPv4 + or IPv6. + Note that for a CIDR IP address block, the specified IP address + portion must be properly truncated (i.e. all the host bits must + be zero) or the input is considered malformed. For example, + "192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, + for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" + is not. The originating IP of a request must be in one of the + listed subnets in order for this Condition to be true. + If empty, all IP addresses are allowed. + item_type: + type: String + - name: 'requiredAccessLevels' + type: Array + description: | + A list of other access levels defined in the same Policy, + referenced by resource name. Referencing an AccessLevel which + does not exist is an error. All access levels listed must be + granted for the Condition to be true. + Format: accessPolicies/{policy_id}/accessLevels/{short_name} + item_type: + type: String + - name: 'members' + type: Array + description: | + An allowed list of members (users, service accounts). + Using groups is not supported yet. + + The signed-in user originating the request must be a part of one + of the provided members. If not specified, a request may come + from any user (logged in/not logged in, not present in any + groups, etc.). 
+ Formats: `user:{emailid}`, `serviceAccount:{emailid}` + item_type: + type: String + - name: 'negate' + type: Boolean + description: | + Whether to negate the Condition. If true, the Condition becomes + a NAND over its non-empty fields, each field must be false for + the Condition overall to be satisfied. Defaults to false. + - name: 'devicePolicy' + type: NestedObject + description: | + Device specific restrictions, all restrictions must hold for + the Condition to be true. If not specified, all devices are + allowed. + properties: + - name: 'requireScreenLock' + type: Boolean + description: | + Whether or not screenlock is required for the DevicePolicy + to be true. Defaults to false. + api_name: requireScreenlock + - name: 'allowedEncryptionStatuses' + type: Array + description: | + A list of allowed encryptions statuses. + An empty list allows all statuses. + item_type: + type: Enum + description: | + This field only has a name and description because of MM + limitations. It should not appear in downstreams. + enum_values: + - 'ENCRYPTION_UNSPECIFIED' + - 'ENCRYPTION_UNSUPPORTED' + - 'UNENCRYPTED' + - 'ENCRYPTED' + - name: 'allowedDeviceManagementLevels' + type: Array + description: | + A list of allowed device management levels. + An empty list allows all management levels. + item_type: + type: Enum + description: | + This field only has a name and description because of MM + limitations. It should not appear in downstreams. + enum_values: + - 'MANAGEMENT_UNSPECIFIED' + - 'NONE' + - 'BASIC' + - 'COMPLETE' + - name: 'osConstraints' + type: Array + description: | + A list of allowed OS versions. + An empty list allows all types and all versions. + item_type: + type: NestedObject + properties: + - name: 'minimumVersion' + type: String + description: | + The minimum allowed OS version. If not set, any version + of this OS satisfies the constraint. + Format: "major.minor.patch" such as "10.5.301", "9.2.1". 
+ - name: 'requireVerifiedChromeOs' + type: Boolean + description: + If you specify DESKTOP_CHROME_OS for osType, you can + optionally include requireVerifiedChromeOs to require + Chrome Verified Access. + - name: 'osType' + type: Enum + description: | + The operating system type of the device. + required: true + enum_values: + - 'OS_UNSPECIFIED' + - 'DESKTOP_MAC' + - 'DESKTOP_WINDOWS' + - 'DESKTOP_LINUX' + - 'DESKTOP_CHROME_OS' + - 'ANDROID' + - 'IOS' + - name: 'requireAdminApproval' + type: Boolean + description: | + Whether the device needs to be approved by the customer admin. + - name: 'requireCorpOwned' + type: Boolean + description: | + Whether the device needs to be corp owned. + - name: 'regions' + type: Array + description: | + The request must originate from one of the provided + countries/regions. + Format: A valid ISO 3166-1 alpha-2 code. + item_type: + type: String + - name: 'vpcNetworkSources' + type: Array + description: 'The request must originate from one of the provided VPC networks in Google Cloud. Cannot specify this field together with `ip_subnetworks`.' + item_type: + type: NestedObject + properties: + - name: 'vpcSubnetwork' + type: NestedObject + description: 'Sub networks within a VPC network.' + properties: + - name: 'network' + type: String + description: 'Required. Network name to be allowed by this Access Level. Networks of foreign organizations requires `compute.network.get` permission to be granted to caller.' + required: true + - name: 'vpcIpSubnetworks' + type: Array + description: 'CIDR block IP subnetwork specification. Must be IPv4.' + item_type: + type: String + min_size: 1 + - name: 'custom' + type: NestedObject + description: | + Custom access level conditions are set using the Cloud Common Expression Language to represent the necessary conditions for the level to apply to a request. + See CEL spec at: https://github.com/google/cel-spec. 
+ conflicts: + - basic + properties: + - name: 'expr' + type: NestedObject + description: | + Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. + This page details the objects and attributes that are used to the build the CEL expressions for + custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec. + required: true + properties: + - name: 'expression' + type: String + description: + Textual representation of an expression in Common Expression + Language syntax. + required: true + - name: 'title' + type: String + description: + Title for the expression, i.e. a short string describing its + purpose. + - name: 'description' + type: String + description: Description of the expression + - name: 'location' + type: String + description: + String indicating the location of the expression for error + reporting, e.g. a file name and a position in the file diff --git a/mmv1/products/accesscontextmanager/go_AccessLevelCondition.yaml b/mmv1/products/accesscontextmanager/go_AccessLevelCondition.yaml new file mode 100644 index 000000000000..87288da3a1a0 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_AccessLevelCondition.yaml @@ -0,0 +1,243 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'AccessLevelCondition' +description: | + Allows configuring a single access level condition to be appended to an access level's conditions. + This resource is intended to be used in cases where it is not possible to compile a full list + of conditions to include in a `google_access_context_manager_access_level` resource, + to enable them to be added separately. + + ~> **Note:** If this resource is used alongside a `google_access_context_manager_access_level` resource, + the access level resource must have a `lifecycle` block with `ignore_changes = [basic[0].conditions]` so + they don't fight over which service accounts should be included. +references: + guides: + 'Access Policy Quickstart': 'https://cloud.google.com/access-context-manager/docs/quickstart' + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.accessLevels' +docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource, + you must specify a `billing_project` and set `user_project_override` to true + in the provider configuration. Otherwise the ACM API will return a 403 error. + Your account must have the `serviceusage.services.use` permission on the + `billing_project` you defined. 
+id_format: '{{access_level}}' +base_url: '' +self_link: '{{access_level}}' +create_url: '{{access_level}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +mutex: '{{access_level}}' +import_format: + - '{{access_level}}' +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'PollAsync' + check_response_func_existence: 'transport_tpg.PollCheckForExistence' + check_response_func_absence: 'transport_tpg.PollCheckForAbsence' + suppress_error: false + target_occurrences: 1 + actions: ['create'] +identity: + - ipSubnetworks + - requiredAccessLevels + - members + - negate + - devicePolicy + - regions +nested_query: + keys: + - basic + - conditions + is_list_of_ids: false + modify_by_patch: true +custom_code: +exclude_tgc: true +skip_sweeper: true +examples: + - name: 'access_context_manager_access_level_condition_basic' + primary_resource_id: 'access-level-condition' + vars: + access_level_name: 'chromeos_no_lock' + account_id: 'my-account-id' + skip_test: true +parameters: + - name: 'accessLevel' + type: ResourceRef + description: | + The name of the Access Level to add this condition to. + url_param_only: true + required: true + immutable: true + resource: 'AccessLevel' + imports: 'name' +properties: + - name: 'ipSubnetworks' + type: Array + description: | + A list of CIDR block IP subnetwork specification. May be IPv4 + or IPv6. + Note that for a CIDR IP address block, the specified IP address + portion must be properly truncated (i.e. all the host bits must + be zero) or the input is considered malformed. For example, + "192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, + for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" + is not. The originating IP of a request must be in one of the + listed subnets in order for this Condition to be true. + If empty, all IP addresses are allowed. 
+ item_type: + type: String + - name: 'requiredAccessLevels' + type: Array + description: | + A list of other access levels defined in the same Policy, + referenced by resource name. Referencing an AccessLevel which + does not exist is an error. All access levels listed must be + granted for the Condition to be true. + Format: accessPolicies/{policy_id}/accessLevels/{short_name} + item_type: + type: String + - name: 'members' + type: Array + description: | + An allowed list of members (users, service accounts). + Using groups is not supported yet. + + The signed-in user originating the request must be a part of one + of the provided members. If not specified, a request may come + from any user (logged in/not logged in, not present in any + groups, etc.). + Formats: `user:{emailid}`, `serviceAccount:{emailid}` + item_type: + type: String + - name: 'negate' + type: Boolean + description: | + Whether to negate the Condition. If true, the Condition becomes + a NAND over its non-empty fields, each field must be false for + the Condition overall to be satisfied. Defaults to false. + - name: 'devicePolicy' + type: NestedObject + description: | + Device specific restrictions, all restrictions must hold for + the Condition to be true. If not specified, all devices are + allowed. + properties: + - name: 'requireScreenLock' + type: Boolean + description: | + Whether or not screenlock is required for the DevicePolicy + to be true. Defaults to false. + api_name: requireScreenlock + - name: 'allowedEncryptionStatuses' + type: Array + description: | + A list of allowed encryptions statuses. + An empty list allows all statuses. + item_type: + type: Enum + description: | + This field only has a name and description because of MM + limitations. It should not appear in downstreams. 
+ enum_values: + - 'ENCRYPTION_UNSPECIFIED' + - 'ENCRYPTION_UNSUPPORTED' + - 'UNENCRYPTED' + - 'ENCRYPTED' + - name: 'allowedDeviceManagementLevels' + type: Array + description: | + A list of allowed device management levels. + An empty list allows all management levels. + item_type: + type: Enum + description: | + This field only has a name and description because of MM + limitations. It should not appear in downstreams. + enum_values: + - 'MANAGEMENT_UNSPECIFIED' + - 'NONE' + - 'BASIC' + - 'COMPLETE' + - name: 'osConstraints' + type: Array + description: | + A list of allowed OS versions. + An empty list allows all types and all versions. + item_type: + type: NestedObject + properties: + - name: 'minimumVersion' + type: String + description: | + The minimum allowed OS version. If not set, any version + of this OS satisfies the constraint. + Format: "major.minor.patch" such as "10.5.301", "9.2.1". + - name: 'osType' + type: Enum + description: | + The operating system type of the device. + required: true + enum_values: + - 'OS_UNSPECIFIED' + - 'DESKTOP_MAC' + - 'DESKTOP_WINDOWS' + - 'DESKTOP_LINUX' + - 'DESKTOP_CHROME_OS' + - 'ANDROID' + - 'IOS' + - name: 'requireAdminApproval' + type: Boolean + description: | + Whether the device needs to be approved by the customer admin. + - name: 'requireCorpOwned' + type: Boolean + description: | + Whether the device needs to be corp owned. + - name: 'regions' + type: Array + description: | + The request must originate from one of the provided + countries/regions. + Format: A valid ISO 3166-1 alpha-2 code. + item_type: + type: String + - name: 'vpcNetworkSources' + type: Array + description: 'The request must originate from one of the provided VPC networks in Google Cloud. Cannot specify this field together with `ip_subnetworks`.' + item_type: + type: NestedObject + properties: + - name: 'vpcSubnetwork' + type: NestedObject + description: 'Sub networks within a VPC network.' 
+ properties: + - name: 'network' + type: String + description: 'Required. Network name to be allowed by this Access Level. Networks of foreign organizations requires `compute.network.get` permission to be granted to caller.' + required: true + - name: 'vpcIpSubnetworks' + type: Array + description: 'CIDR block IP subnetwork specification. Must be IPv4.' + item_type: + type: String diff --git a/mmv1/products/accesscontextmanager/go_AccessLevels.yaml b/mmv1/products/accesscontextmanager/go_AccessLevels.yaml new file mode 100644 index 000000000000..d82488e54e1c --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_AccessLevels.yaml @@ -0,0 +1,315 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'AccessLevels' +description: | + Replace all existing Access Levels in an Access Policy with the Access Levels provided. This is done atomically. + This is a bulk edit of all Access Levels and may override existing Access Levels created by `google_access_context_manager_access_level`, + thus causing a permadiff if used alongside `google_access_context_manager_access_level` on the same parent. 
+references: + guides: + 'Access Policy Quickstart': 'https://cloud.google.com/access-context-manager/docs/quickstart' + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.accessLevels' +docs: + warning: | + This resource is authoritative over the access levels under an access policy. Due to a limitation in Terraform, + it will overwrite all preexisting access levels during a create operation without displaying the old values on + the left side of plan. To prevent this, we recommend importing the resource before applying it if overwriting + preexisting rules, as the plan will correctly display the complete changes to your access policy if the + resource is present in state. +id_format: '{{parent}}/accessLevels' +base_url: '{{parent}}/accessLevels:replaceAll' +self_link: '{{parent}}/accessLevels' +update_url: '{{parent}}/accessLevels:replaceAll' +update_verb: 'POST' +import_format: + - '{{parent}}/accessLevels' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + custom_delete: 'templates/terraform/custom_delete/go/replace_all_access_levels_empty_list.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/set_access_policy_parent_from_access_policy.go.tmpl' +skip_sweeper: true +examples: + - name: 'access_context_manager_access_levels_basic' + primary_resource_id: 'access-levels' + vars: + access_level_name1: 'chromeos_no_lock' + access_level_name2: 'mac_no_lock' + skip_test: true +parameters: + - name: 'parent' + type: String + description: | + The AccessPolicy this AccessLevel lives in. 
+ Format: accessPolicies/{policy_id} + url_param_only: true + required: true + immutable: true + ignore_read: true +properties: + - name: 'accessLevels' + type: Array + description: | + The desired Access Levels that should replace all existing Access Levels in the Access Policy. + is_set: true + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + Resource name for the Access Level. The short_name component must begin + with a letter and only include alphanumeric and '_'. + Format: accessPolicies/{policy_id}/accessLevels/{short_name} + required: true + immutable: true + - name: 'title' + type: String + description: | + Human readable title. Must be unique within the Policy. + required: true + - name: 'description' + type: String + description: | + Description of the AccessLevel and its use. Does not affect behavior. + - name: 'basic' + type: NestedObject + description: | + A set of predefined conditions for the access level and a combining function. + # conflicts: + # - custom + properties: + - name: 'combiningFunction' + type: Enum + description: | + How the conditions list should be combined to determine if a request + is granted this AccessLevel. If AND is used, each Condition in + conditions must be satisfied for the AccessLevel to be applied. If + OR is used, at least one Condition in conditions must be satisfied + for the AccessLevel to be applied. + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: "AND" + enum_values: + - 'AND' + - 'OR' + - name: 'conditions' + type: Array + description: | + A set of requirements for the AccessLevel to be granted. + required: true + item_type: + type: NestedObject + properties: + - name: 'ipSubnetworks' + type: Array + description: | + A list of CIDR block IP subnetwork specification. May be IPv4 + or IPv6. + Note that for a CIDR IP address block, the specified IP address + portion must be properly truncated (i.e. 
all the host bits must + be zero) or the input is considered malformed. For example, + "192.0.2.0/24" is accepted but "192.0.2.1/24" is not. Similarly, + for IPv6, "2001:db8::/32" is accepted whereas "2001:db8::1/32" + is not. The originating IP of a request must be in one of the + listed subnets in order for this Condition to be true. + If empty, all IP addresses are allowed. + item_type: + type: String + - name: 'requiredAccessLevels' + type: Array + description: | + A list of other access levels defined in the same Policy, + referenced by resource name. Referencing an AccessLevel which + does not exist is an error. All access levels listed must be + granted for the Condition to be true. + Format: accessPolicies/{policy_id}/accessLevels/{short_name} + item_type: + type: String + - name: 'members' + type: Array + description: | + An allowed list of members (users, service accounts). + Using groups is not supported yet. + + The signed-in user originating the request must be a part of one + of the provided members. If not specified, a request may come + from any user (logged in/not logged in, not present in any + groups, etc.). + Formats: `user:{emailid}`, `serviceAccount:{emailid}` + item_type: + type: String + - name: 'negate' + type: Boolean + description: | + Whether to negate the Condition. If true, the Condition becomes + a NAND over its non-empty fields, each field must be false for + the Condition overall to be satisfied. Defaults to false. + - name: 'devicePolicy' + type: NestedObject + description: | + Device specific restrictions, all restrictions must hold for + the Condition to be true. If not specified, all devices are + allowed. + properties: + - name: 'requireScreenLock' + type: Boolean + description: | + Whether or not screenlock is required for the DevicePolicy + to be true. Defaults to false. + api_name: requireScreenlock + - name: 'allowedEncryptionStatuses' + type: Array + description: | + A list of allowed encryptions statuses. 
+ An empty list allows all statuses. + item_type: + type: Enum + description: | + This field only has a name and description because of MM + limitations. It should not appear in downstreams. + enum_values: + - 'ENCRYPTION_UNSPECIFIED' + - 'ENCRYPTION_UNSUPPORTED' + - 'UNENCRYPTED' + - 'ENCRYPTED' + - name: 'allowedDeviceManagementLevels' + type: Array + description: | + A list of allowed device management levels. + An empty list allows all management levels. + item_type: + type: Enum + description: | + This field only has a name and description because of MM + limitations. It should not appear in downstreams. + enum_values: + - 'MANAGEMENT_UNSPECIFIED' + - 'NONE' + - 'BASIC' + - 'COMPLETE' + - name: 'osConstraints' + type: Array + description: | + A list of allowed OS versions. + An empty list allows all types and all versions. + item_type: + type: NestedObject + properties: + - name: 'minimumVersion' + type: String + description: | + The minimum allowed OS version. If not set, any version + of this OS satisfies the constraint. + Format: "major.minor.patch" such as "10.5.301", "9.2.1". + - name: 'osType' + type: Enum + description: | + The operating system type of the device. + required: true + enum_values: + - 'OS_UNSPECIFIED' + - 'DESKTOP_MAC' + - 'DESKTOP_WINDOWS' + - 'DESKTOP_LINUX' + - 'DESKTOP_CHROME_OS' + - 'ANDROID' + - 'IOS' + - name: 'requireAdminApproval' + type: Boolean + description: | + Whether the device needs to be approved by the customer admin. + - name: 'requireCorpOwned' + type: Boolean + description: | + Whether the device needs to be corp owned. + - name: 'regions' + type: Array + description: | + The request must originate from one of the provided + countries/regions. + Format: A valid ISO 3166-1 alpha-2 code. + item_type: + type: String + - name: 'vpcNetworkSources' + type: Array + description: 'The request must originate from one of the provided VPC networks in Google Cloud. Cannot specify this field together with `ip_subnetworks`.' 
+ item_type: + type: NestedObject + properties: + - name: 'vpcSubnetwork' + type: NestedObject + description: 'Sub networks within a VPC network.' + properties: + - name: 'network' + type: String + description: 'Required. Network name to be allowed by this Access Level. Networks of foreign organizations requires `compute.network.get` permission to be granted to caller.' + required: true + - name: 'vpcIpSubnetworks' + type: Array + description: 'CIDR block IP subnetwork specification. Must be IPv4.' + item_type: + type: String + min_size: 1 + - name: 'custom' + type: NestedObject + description: | + Custom access level conditions are set using the Cloud Common Expression Language to represent the necessary conditions for the level to apply to a request. + See CEL spec at: https://github.com/google/cel-spec. + # conflicts: + # - basic + properties: + - name: 'expr' + type: NestedObject + description: | + Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. + This page details the objects and attributes that are used to the build the CEL expressions for + custom access levels - https://cloud.google.com/access-context-manager/docs/custom-access-level-spec. + required: true + properties: + - name: 'expression' + type: String + description: + Textual representation of an expression in Common Expression + Language syntax. + required: true + - name: 'title' + type: String + description: + Title for the expression, i.e. a short string describing its + purpose. + - name: 'description' + type: String + description: Description of the expression + - name: 'location' + type: String + description: + String indicating the location of the expression for error + reporting, e.g. 
a file name and a position in the file diff --git a/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml b/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml new file mode 100644 index 000000000000..87b0638bebc7 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_AccessPolicy.yaml @@ -0,0 +1,117 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'AccessPolicy' +description: | + AccessPolicy is a container for AccessLevels (which define the necessary + attributes to use GCP services) and ServicePerimeters (which define + regions of services able to freely pass data within a perimeter). An + access policy is globally visible within an organization, and the + restrictions it specifies apply to all projects within an organization. +references: + guides: + 'Access Policy Quickstart': 'https://cloud.google.com/access-context-manager/docs/quickstart' + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies' +docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource, + you must specify a `billing_project` and set `user_project_override` to true + in the provider configuration. Otherwise the ACM API will return a 403 error. + Your account must have the `serviceusage.services.use` permission on the + `billing_project` you defined. 
+id_format: '{{name}}' +base_url: 'accessPolicies' +self_link: 'accessPolicies/{{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + fetch_iam_policy_verb: 'POST' + allowed_iam_role: 'roles/accesscontextmanager.policyAdmin' + parent_resource_attribute: 'name' + import_format: + - 'accessPolicies/{{name}}' + - '{{name}}' +custom_code: + post_create: 'templates/terraform/post_create/go/accesspolicy.tmpl' +skip_sweeper: true +examples: + - name: 'access_context_manager_access_policy_basic' + primary_resource_id: 'access-policy' + skip_test: true + - name: 'access_context_manager_access_policy_scoped' + primary_resource_id: 'access-policy' + test_env_vars: + org_id: 'ORG_ID' + project: 'PROJECT_NAME' + skip_test: true + skip_import_test: true +parameters: + - name: 'parent' + type: String + description: | + The parent of this AccessPolicy in the Cloud Resource Hierarchy. + Format: organizations/{organization_id} + required: true + immutable: true + - name: 'title' + type: String + description: | + Human readable title. Does not affect behavior. + required: true + - name: 'scopes' + type: Array + description: | + Folder or project on which this policy is applicable. + Format: folders/{{folder_id}} or projects/{{project_id}} + item_type: + type: String + max_size: 1 +properties: + - name: 'name' + type: String + description: | + Resource name of the AccessPolicy. Format: {policy_id} + output: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'createTime' + type: Time + description: | + Time the AccessPolicy was created in UTC. 
+ output: true + - name: 'updateTime' + type: Time + description: | + Time the AccessPolicy was updated in UTC. + output: true diff --git a/mmv1/products/accesscontextmanager/go_AuthorizedOrgsDesc.yaml b/mmv1/products/accesscontextmanager/go_AuthorizedOrgsDesc.yaml new file mode 100644 index 000000000000..abb66161283c --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_AuthorizedOrgsDesc.yaml @@ -0,0 +1,145 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'AuthorizedOrgsDesc' +description: | + An authorized organizations description describes a list of organizations + (1) that have been authorized to use certain asset (for example, device) data + owned by different organizations at the enforcement points, or (2) with certain + asset (for example, device) have been authorized to access the resources in + another organization at the enforcement points. +references: + guides: + 'gcloud docs': 'https://cloud.google.com/beyondcorp-enterprise/docs/cross-org-authorization' + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.authorizedOrgsDescs' +docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource, + you must specify a `billing_project` and set `user_project_override` to true + in the provider configuration. Otherwise the ACM API will return a 403 error. 
+ Your account must have the `serviceusage.services.use` permission on the + `billing_project` you defined. +id_format: '{{name}}' +base_url: '' +self_link: '{{name}}' +create_url: '{{parent}}/authorizedOrgsDescs' +update_verb: 'PATCH' +import_format: + - '{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + encoder: 'templates/terraform/encoders/go/access_level_never_send_parent.go.tmpl' + post_create: 'templates/terraform/post_create/go/sleep_2_min.go.tmpl' + pre_update: 'templates/terraform/update_mask.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/set_access_policy_parent_from_self_link.go.tmpl' +skip_sweeper: true +examples: + - name: 'access_context_manager_authorized_orgs_desc_basic' + primary_resource_id: 'authorized-orgs-desc' + skip_test: true +parameters: + - name: 'parent' + type: String + description: | + Required. Resource name for the access policy which owns this `AuthorizedOrgsDesc`. + required: true + immutable: true + ignore_read: true + - name: 'name' + type: String + description: | + Resource name for the `AuthorizedOrgsDesc`. Format: + `accessPolicies/{access_policy}/authorizedOrgsDescs/{authorized_orgs_desc}`. + The `authorized_orgs_desc` component must begin with a letter, followed by + alphanumeric characters or `_`. + After you create an `AuthorizedOrgsDesc`, you cannot change its `name`. + required: true + immutable: true + - name: 'orgs' + type: Array + description: | + The list of organization ids in this AuthorizedOrgsDesc. 
+ Format: `organizations/` + Example: `organizations/123456` + item_type: + type: String + - name: 'assetType' + type: Enum + description: | + The type of entities that need to use the authorization relationship during + evaluation, such as a device. Valid values are "ASSET_TYPE_DEVICE" and + "ASSET_TYPE_CREDENTIAL_STRENGTH". + immutable: true + enum_values: + - 'ASSET_TYPE_DEVICE' + - 'ASSET_TYPE_CREDENTIAL_STRENGTH' + - name: 'authorizationDirection' + type: Enum + description: | + The direction of the authorization relationship between this organization + and the organizations listed in the "orgs" field. The valid values for this + field include the following: + + AUTHORIZATION_DIRECTION_FROM: Allows this organization to evaluate traffic + in the organizations listed in the `orgs` field. + + AUTHORIZATION_DIRECTION_TO: Allows the organizations listed in the `orgs` + field to evaluate the traffic in this organization. + + For the authorization relationship to take effect, all of the organizations + must authorize and specify the appropriate relationship direction. For + example, if organization A authorized organization B and C to evaluate its + traffic, by specifying "AUTHORIZATION_DIRECTION_TO" as the authorization + direction, organizations B and C must specify + "AUTHORIZATION_DIRECTION_FROM" as the authorization direction in their + "AuthorizedOrgsDesc" resource. + immutable: true + enum_values: + - 'AUTHORIZATION_DIRECTION_TO' + - 'AUTHORIZATION_DIRECTION_FROM' + - name: 'authorizationType' + type: Enum + description: | + A granular control type for authorization levels. Valid value is "AUTHORIZATION_TYPE_TRUST". + immutable: true + enum_values: + - 'AUTHORIZATION_TYPE_TRUST' +properties: + - name: 'createTime' + type: Time + description: | + Time the AuthorizedOrgsDesc was created in UTC. + output: true + - name: 'updateTime' + type: Time + description: | + Time the AuthorizedOrgsDesc was updated in UTC. 
+ output: true diff --git a/mmv1/products/accesscontextmanager/go_EgressPolicy.yaml b/mmv1/products/accesscontextmanager/go_EgressPolicy.yaml new file mode 100644 index 000000000000..91f2abd49b74 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_EgressPolicy.yaml @@ -0,0 +1,78 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'EgressPolicy' +description: | + This resource has been deprecated, please refer to ServicePerimeterEgressPolicy. 
+references: + guides: + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#egresspolicy' +docs: +id_format: '{{egress_policy_name}}/{{resource}}' +base_url: '' +self_link: '{{egress_policy_name}}' +create_url: '{{egress_policy_name}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +import_format: + - '{{egress_policy_name}}/{{resource}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - resource +nested_query: + keys: + - status + - resources + is_list_of_ids: true + modify_by_patch: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_egress_policy.go.tmpl' +exclude_tgc: true +skip_sweeper: true +parameters: + - name: 'egressPolicyName' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. + url_param_only: true + required: true + immutable: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'resource' + type: String + description: | + A GCP resource that is inside of the service perimeter. + required: true + immutable: true diff --git a/mmv1/products/accesscontextmanager/go_GcpUserAccessBinding.yaml b/mmv1/products/accesscontextmanager/go_GcpUserAccessBinding.yaml new file mode 100644 index 000000000000..be828126326a --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_GcpUserAccessBinding.yaml @@ -0,0 +1,90 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'GcpUserAccessBinding' +description: | + Restricts access to Cloud Console and Google Cloud APIs for a set of users using Context-Aware Access. +references: + guides: + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/organizations.gcpUserAccessBindings' +docs: +id_format: '{{name}}' +base_url: 'organizations/{{organization_id}}/gcpUserAccessBindings' +self_link: '{{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + custom_import: 'templates/terraform/custom_import/go/set_id_name_with_slashes.go.tmpl' +exclude_tgc: true +examples: + - name: 'access_context_manager_gcp_user_access_binding_basic' + primary_resource_id: 'gcp_user_access_binding' + vars: + group_id: 'my-identity-group' + access_level_id: 'access_level_id_for_user_access_binding' + access_level_name: 'chromeos_no_lock' + test_env_vars: + org_id: 'ORG_ID' + org_domain: 'ORG_DOMAIN' + cust_id: 'CUST_ID' + skip_test: true +parameters: + - name: 'organizationId' + type: String + description: | + Required. ID of the parent organization. 
+ url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Immutable. Assigned by the server during creation. The last segment has an arbitrary length and has only URI unreserved characters (as defined by RFC 3986 Section 2.3). Should not be specified by the client during creation. Example: "organizations/256/gcpUserAccessBindings/b3-BhcX_Ud5N" + output: true + - name: 'groupKey' + type: String + description: | + Required. Immutable. Google Group id whose members are subject to this binding's restrictions. See "id" in the G Suite Directory API's Groups resource. If a group's email address/alias is changed, this resource will continue to point at the changed group. This field does not accept group email addresses or aliases. Example: "01d520gv4vjcrht" + required: true + immutable: true + - name: 'accessLevels' + type: Array + description: | + Required. Access level that a user must have to be granted access. Only one access level is supported, not multiple. This repeated field must have exactly one element. Example: "accessPolicies/9522/accessLevels/device_trusted" + required: true + item_type: + type: String + min_size: 1 + max_size: 1 diff --git a/mmv1/products/accesscontextmanager/go_IngressPolicy.yaml b/mmv1/products/accesscontextmanager/go_IngressPolicy.yaml new file mode 100644 index 000000000000..83fe4955ad85 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_IngressPolicy.yaml @@ -0,0 +1,78 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'IngressPolicy' +description: | + This resource has been deprecated, please refer to ServicePerimeterIngressPolicy. +references: + guides: + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#ingresspolicy' +docs: +id_format: '{{ingress_policy_name}}/{{resource}}' +base_url: '' +self_link: '{{ingress_policy_name}}' +create_url: '{{ingress_policy_name}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +import_format: + - '{{ingress_policy_name}}/{{resource}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - resource +nested_query: + keys: + - status + - resources + is_list_of_ids: true + modify_by_patch: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl' +exclude_tgc: true +skip_sweeper: true +parameters: + - name: 'ingressPolicyName' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. + url_param_only: true + required: true + immutable: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'resource' + type: String + description: | + A GCP resource that is inside of the service perimeter. 
+ required: true + immutable: true diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml new file mode 100644 index 000000000000..b6fef8a42c86 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeter.yaml @@ -0,0 +1,768 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeter' +description: | + ServicePerimeter describes a set of GCP resources which can freely import + and export data amongst themselves, but not export outside of the + ServicePerimeter. If a request with a source within this ServicePerimeter + has a target outside of the ServicePerimeter, the request will be blocked. + Otherwise the request is allowed. There are two types of Service Perimeter + - Regular and Bridge. Regular Service Perimeters cannot overlap, a single + GCP project can only belong to a single regular Service Perimeter. Service + Perimeter Bridges can contain only GCP projects as members, a single GCP + project may belong to multiple Service Perimeter Bridges. 
+references: + guides: + 'Service Perimeter Quickstart': 'https://cloud.google.com/vpc-service-controls/docs/quickstart' + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters' +docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource, + you must specify a `billing_project` and set `user_project_override` to true + in the provider configuration. Otherwise the ACM API will return a 403 error. + Your account must have the `serviceusage.services.use` permission on the + `billing_project` you defined. +id_format: '{{name}}' +base_url: '' +self_link: '{{name}}' +create_url: '{{parent}}/servicePerimeters' +update_verb: 'PATCH' +update_mask: true +mutex: '{{name}}' +import_format: + - '{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + encoder: 'templates/terraform/encoders/go/access_level_never_send_parent.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/set_access_policy_parent_from_self_link.go.tmpl' +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeter_basic' + primary_resource_id: 'service-perimeter' + vars: + access_level_name: 'chromeos_no_lock' + service_perimeter_name: 'restrict_storage' + skip_test: true + - name: 'access_context_manager_service_perimeter_secure_data_exchange' + primary_resource_id: 'secure-data-exchange' + vars: + access_level_name: 'secure_data_exchange' + skip_test: true + - name: 'access_context_manager_service_perimeter_dry-run' + primary_resource_id: 'service-perimeter' + vars: + service_perimeter_name: 'restrict_bigquery_dryrun_storage' + skip_test: true +parameters: + - name: 'parent' + type: String + description: | + The 
AccessPolicy this ServicePerimeter lives in.
+      Format: accessPolicies/{policy_id}
+    required: true
+    immutable: true
+    ignore_read: true
+  - name: 'name'
+    type: String
+    description: |
+      Resource name for the ServicePerimeter. The short_name component must
+      begin with a letter and only include alphanumeric and '_'.
+      Format: accessPolicies/{policy_id}/servicePerimeters/{short_name}
+    required: true
+    immutable: true
+properties:
+  - name: 'title'
+    type: String
+    description: |
+      Human readable title. Must be unique within the Policy.
+    required: true
+  - name: 'description'
+    type: String
+    description: |
+      Description of the ServicePerimeter and its use. Does not affect
+      behavior.
+  - name: 'createTime'
+    type: Time
+    description: |
+      Time the AccessPolicy was created in UTC.
+    output: true
+  - name: 'updateTime'
+    type: Time
+    description: |
+      Time the AccessPolicy was updated in UTC.
+    output: true
+  - name: 'perimeterType'
+    type: Enum
+    description: |
+      Specifies the type of the Perimeter. There are two types: regular and
+      bridge. Regular Service Perimeter contains resources, access levels,
+      and restricted services. Every resource can be in at most
+      ONE regular Service Perimeter.
+
+      In addition to being in a regular service perimeter, a resource can also
+      be in zero or more perimeter bridges. A perimeter bridge only contains
+      resources. Cross project operations are permitted if all affected
+      resources share some perimeter (whether bridge or regular). Perimeter
+      Bridge does not contain access levels or services: those are governed
+      entirely by the regular perimeter that resource is in.
+
+      Perimeter Bridges are typically useful when building more complex
+      topologies with many independent perimeters that need to share some data
+      with a common perimeter, but should not be able to share data among
+      themselves.
+ immutable: true + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: "PERIMETER_TYPE_REGULAR" + enum_values: + - 'PERIMETER_TYPE_REGULAR' + - 'PERIMETER_TYPE_BRIDGE' + - name: 'status' + type: NestedObject + description: | + ServicePerimeter configuration. Specifies sets of resources, + restricted services and access levels that determine + perimeter content and boundaries. + properties: + - name: 'resources' + type: Array + description: | + A list of GCP resources that are inside of the service perimeter. + Currently only projects are allowed. + Format: projects/{project_number} + is_set: true + at_least_one_of: + - 'status.0.resources' + - 'status.0.access_levels' + - 'status.0.restricted_services' + item_type: + type: String + - name: 'accessLevels' + type: Array + description: | + A list of AccessLevel resource names that allow resources within + the ServicePerimeter to be accessed from the internet. + AccessLevels listed must be in the same policy as this + ServicePerimeter. Referencing a nonexistent AccessLevel is a + syntax error. If no AccessLevel names are listed, resources within + the perimeter can only be accessed via GCP calls with request + origins within the perimeter. For Service Perimeter Bridge, must + be empty. + + Format: accessPolicies/{policy_id}/accessLevels/{access_level_name} + is_set: true + at_least_one_of: + - 'status.0.resources' + - 'status.0.access_levels' + - 'status.0.restricted_services' + item_type: + type: String + - name: 'restrictedServices' + type: Array + description: | + GCP services that are subject to the Service Perimeter + restrictions. Must contain a list of services. For example, if + `storage.googleapis.com` is specified, access to the storage + buckets inside the perimeter must meet the perimeter's access + restrictions. 
+ is_set: true + at_least_one_of: + - 'status.0.resources' + - 'status.0.access_levels' + - 'status.0.restricted_services' + item_type: + type: String + - name: 'vpcAccessibleServices' + type: NestedObject + description: | + Specifies how APIs are allowed to communicate within the Service + Perimeter. + properties: + - name: 'enableRestriction' + type: Boolean + description: | + Whether to restrict API calls within the Service Perimeter to the + list of APIs specified in 'allowedServices'. + - name: 'allowedServices' + type: Array + description: | + The list of APIs usable within the Service Perimeter. + Must be empty unless `enableRestriction` is True. + is_set: true + item_type: + type: String + - name: 'ingressPolicies' + type: Array + description: | + List of `IngressPolicies` to apply to the perimeter. A perimeter may + have multiple `IngressPolicies`, each of which is evaluated + separately. Access is granted if any `Ingress Policy` grants it. + Must be empty for a perimeter bridge. + item_type: + type: NestedObject + properties: + - name: 'ingressFrom' + type: NestedObject + description: | + Defines the conditions on the source of a request causing this `IngressPolicy` + to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access from outside the + perimeter. If left unspecified, then members of `identities` field will be + allowed access. + enum_values: + - 'IDENTITY_TYPE_UNSPECIFIED' + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. + is_set: true + item_type: + type: String + - name: 'sources' + type: Array + description: | + Sources that this `IngressPolicy` authorizes access from. 
+ item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: | + An `AccessLevel` resource name that allow resources within the + `ServicePerimeters` to be accessed from the internet. `AccessLevels` listed + must be in the same policy as this `ServicePerimeter`. Referencing a nonexistent + `AccessLevel` will cause an error. If no `AccessLevel` names are listed, + resources within the perimeter can only be accessed via Google Cloud calls + with request origins within the perimeter. + Example `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.` + If * is specified, then all IngressSources will be allowed. + - name: 'resource' + type: String + description: | + A Google Cloud resource that is allowed to ingress the perimeter. + Requests from these resources will be allowed to access perimeter data. + Currently only projects are allowed. Format `projects/{project_number}` + The project may be in any Google Cloud organization, not just the + organization that the perimeter is defined in. `*` is not allowed, the case + of allowing all Google Cloud resources only is not supported. + - name: 'ingressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and request destination that cause + this `IngressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, protected by this `ServicePerimeter` + that are allowed to be accessed by sources defined in the + corresponding `IngressFrom`. A request matches if it contains + a resource in this list. If `*` is specified for resources, + then this `IngressTo` rule will authorize access to all + resources inside the perimeter, provided that the request + also matches the `operations` field. 
+ is_set: true + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` the sources specified in corresponding `IngressFrom` + are allowed to perform in this `ServicePerimeter`. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with `serviceName` + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong to + the service specified by serviceName field. A single `MethodSelector` entry + with `*` specified for the method field will allow all methods AND + permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for method should be a valid method name for the corresponding + serviceName in `ApiOperation`. If `*` used as value for `method`, then + ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. + - name: 'egressPolicies' + type: Array + description: | + List of EgressPolicies to apply to the perimeter. A perimeter may + have multiple EgressPolicies, each of which is evaluated separately. + Access is granted if any EgressPolicy grants it. Must be empty for + a perimeter bridge. + item_type: + type: NestedObject + properties: + - name: 'egressFrom' + type: NestedObject + description: | + Defines conditions on the source of a request causing this `EgressPolicy` to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access to outside the + perimeter. 
If left unspecified, then members of `identities` field will + be allowed access. + enum_values: + - 'IDENTITY_TYPE_UNSPECIFIED' + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'sources' + type: Array + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - name: 'sourceRestriction' + type: Enum + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + enum_values: + - 'SOURCE_RESTRICTION_UNSPECIFIED' + - 'SOURCE_RESTRICTION_ENABLED' + - 'SOURCE_RESTRICTION_DISABLED' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. The email address should + represent individual user or service account only. + is_set: true + item_type: + type: String + - name: 'egressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and destination resources that + cause this `EgressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, that match this to stanza. A request matches + if it contains a resource in this list. If * is specified for resources, + then this `EgressTo` rule will authorize access to all resources outside + the perimeter. + is_set: true + item_type: + type: String + - name: 'externalResources' + type: Array + description: | + A list of external resources that are allowed to be accessed. A request + matches if it contains an external resource in this list (Example: + s3://bucket/path). Currently '*' is not allowed. 
+ is_set: true + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` that this egress rule applies to. A request matches + if it contains an operation/service in this list. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with serviceName + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong + to the service specified by `serviceName` field. A single MethodSelector + entry with `*` specified for the `method` field will allow all methods + AND permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for `method` should be a valid method name for the corresponding + `serviceName` in `ApiOperation`. If `*` used as value for method, + then ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. + - name: 'spec' + type: NestedObject + description: | + Proposed (or dry run) ServicePerimeter configuration. + This configuration allows to specify and test ServicePerimeter configuration + without enforcing actual access restrictions. Only allowed to be set when + the `useExplicitDryRunSpec` flag is set. + properties: + - name: 'resources' + type: Array + description: | + A list of GCP resources that are inside of the service perimeter. + Currently only projects are allowed. 
+ Format: projects/{project_number} + is_set: true + at_least_one_of: + - 'spec.0.resources' + - 'spec.0.access_levels' + - 'spec.0.restricted_services' + item_type: + type: String + - name: 'accessLevels' + type: Array + description: | + A list of AccessLevel resource names that allow resources within + the ServicePerimeter to be accessed from the internet. + AccessLevels listed must be in the same policy as this + ServicePerimeter. Referencing a nonexistent AccessLevel is a + syntax error. If no AccessLevel names are listed, resources within + the perimeter can only be accessed via GCP calls with request + origins within the perimeter. For Service Perimeter Bridge, must + be empty. + + Format: accessPolicies/{policy_id}/accessLevels/{access_level_name} + is_set: true + at_least_one_of: + - 'spec.0.resources' + - 'spec.0.access_levels' + - 'spec.0.restricted_services' + item_type: + type: String + - name: 'restrictedServices' + type: Array + description: | + GCP services that are subject to the Service Perimeter + restrictions. Must contain a list of services. For example, if + `storage.googleapis.com` is specified, access to the storage + buckets inside the perimeter must meet the perimeter's access + restrictions. + is_set: true + at_least_one_of: + - 'spec.0.resources' + - 'spec.0.access_levels' + - 'spec.0.restricted_services' + item_type: + type: String + - name: 'vpcAccessibleServices' + type: NestedObject + description: | + Specifies how APIs are allowed to communicate within the Service + Perimeter. + properties: + - name: 'enableRestriction' + type: Boolean + description: | + Whether to restrict API calls within the Service Perimeter to the + list of APIs specified in 'allowedServices'. + - name: 'allowedServices' + type: Array + description: | + The list of APIs usable within the Service Perimeter. + Must be empty unless `enableRestriction` is True. 
+ is_set: true + item_type: + type: String + - name: 'ingressPolicies' + type: Array + description: | + List of `IngressPolicies` to apply to the perimeter. A perimeter may + have multiple `IngressPolicies`, each of which is evaluated + separately. Access is granted if any `Ingress Policy` grants it. + Must be empty for a perimeter bridge. + item_type: + type: NestedObject + properties: + - name: 'ingressFrom' + type: NestedObject + description: | + Defines the conditions on the source of a request causing this `IngressPolicy` + to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access from outside the + perimeter. If left unspecified, then members of `identities` field will be + allowed access. + enum_values: + - 'IDENTITY_TYPE_UNSPECIFIED' + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. + is_set: true + item_type: + type: String + - name: 'sources' + type: Array + description: | + Sources that this `IngressPolicy` authorizes access from. + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: | + An `AccessLevel` resource name that allow resources within the + `ServicePerimeters` to be accessed from the internet. `AccessLevels` listed + must be in the same policy as this `ServicePerimeter`. Referencing a nonexistent + `AccessLevel` will cause an error. If no `AccessLevel` names are listed, + resources within the perimeter can only be accessed via Google Cloud calls + with request origins within the perimeter. + Example `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.` + If * is specified, then all IngressSources will be allowed. 
+ - name: 'resource' + type: String + description: | + A Google Cloud resource that is allowed to ingress the perimeter. + Requests from these resources will be allowed to access perimeter data. + Currently only projects are allowed. Format `projects/{project_number}` + The project may be in any Google Cloud organization, not just the + organization that the perimeter is defined in. `*` is not allowed, the case + of allowing all Google Cloud resources only is not supported. + - name: 'ingressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and request destination that cause + this `IngressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, protected by this `ServicePerimeter` + that are allowed to be accessed by sources defined in the + corresponding `IngressFrom`. A request matches if it contains + a resource in this list. If `*` is specified for resources, + then this `IngressTo` rule will authorize access to all + resources inside the perimeter, provided that the request + also matches the `operations` field. + is_set: true + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` the sources specified in corresponding `IngressFrom` + are allowed to perform in this `ServicePerimeter`. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with `serviceName` + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong to + the service specified by serviceName field. 
A single `MethodSelector` entry + with `*` specified for the method field will allow all methods AND + permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for method should be a valid method name for the corresponding + serviceName in `ApiOperation`. If `*` used as value for `method`, then + ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. + - name: 'egressPolicies' + type: Array + description: | + List of EgressPolicies to apply to the perimeter. A perimeter may + have multiple EgressPolicies, each of which is evaluated separately. + Access is granted if any EgressPolicy grants it. Must be empty for + a perimeter bridge. + item_type: + type: NestedObject + properties: + - name: 'egressFrom' + type: NestedObject + description: | + Defines conditions on the source of a request causing this `EgressPolicy` to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access to outside the + perimeter. If left unspecified, then members of `identities` field will + be allowed access. + enum_values: + - 'IDENTITY_TYPE_UNSPECIFIED' + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'sources' + type: Array + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - name: 'sourceRestriction' + type: Enum + description: 'Whether to enforce traffic restrictions based on `sources` field. 
If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + enum_values: + - 'SOURCE_RESTRICTION_UNSPECIFIED' + - 'SOURCE_RESTRICTION_ENABLED' + - 'SOURCE_RESTRICTION_DISABLED' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. The email address should + represent individual user or service account only. + is_set: true + item_type: + type: String + - name: 'egressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and destination resources that + cause this `EgressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, that match this to stanza. A request matches + if it contains a resource in this list. If * is specified for resources, + then this `EgressTo` rule will authorize access to all resources outside + the perimeter. + is_set: true + item_type: + type: String + - name: 'externalResources' + type: Array + description: | + A list of external resources that are allowed to be accessed. A request + matches if it contains an external resource in this list (Example: + s3://bucket/path). Currently '*' is not allowed. + is_set: true + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` that this egress rule applies to. A request matches + if it contains an operation/service in this list. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with serviceName + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. 
Method or permission must belong
+                          to the service specified by `serviceName` field. A single MethodSelector
+                          entry with `*` specified for the `method` field will allow all methods
+                          AND permissions for the service specified in `serviceName`.
+                        item_type:
+                          type: NestedObject
+                          properties:
+                            - name: 'method'
+                              type: String
+                              description: |
+                                Value for `method` should be a valid method name for the corresponding
+                                `serviceName` in `ApiOperation`. If `*` used as value for method,
+                                then ALL methods and permissions are allowed.
+                            - name: 'permission'
+                              type: String
+                              description: |
+                                Value for permission should be a valid Cloud IAM permission for the
+                                corresponding `serviceName` in `ApiOperation`.
+  - name: 'useExplicitDryRunSpec'
+    type: Boolean
+    description: |
+      Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists
+      for all Service Perimeters, and that spec is identical to the status for those
+      Service Perimeters. When this flag is set, it inhibits the generation of the
+      implicit spec, thereby allowing the user to explicitly provide a
+      configuration ("spec") to use in a dry-run version of the Service Perimeter.
+      This allows the user to test changes to the enforced config ("status") without
+      actually enforcing them. This testing is done through analyzing the differences
+      between currently enforced and suggested restrictions. useExplicitDryRunSpec must
+      be set to True if any of the fields in the spec are set to non-default values.
diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml
new file mode 100644
index 000000000000..c5df3c9fc897
--- /dev/null
+++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterDryRunResource.yaml
@@ -0,0 +1,105 @@
+# Copyright 2024 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeterDryRunResource' +description: | + Allows configuring a single GCP resource that should be inside of the `spec` block of a dry run service perimeter. + This resource is intended to be used in cases where it is not possible to compile a full list + of projects to include in a `google_access_context_manager_service_perimeter` resource, + to enable them to be added separately. + If your perimeter is NOT in dry-run mode use `google_access_context_manager_service_perimeter_resource` instead. + + ~> **Note:** If this resource is used alongside a `google_access_context_manager_service_perimeter` resource, + the service perimeter resource must have a `lifecycle` block with `ignore_changes = [spec[0].resources]` so + they don't fight over which resources should be in the policy. +references: + guides: + 'Service Perimeter Quickstart': 'https://cloud.google.com/vpc-service-controls/docs/quickstart' + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters' +docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource, + you must specify a `billing_project` and set `user_project_override` to true + in the provider configuration. Otherwise the ACM API will return a 403 error. + Your account must have the `serviceusage.services.use` permission on the + `billing_project` you defined. 
+id_format: '{{perimeter_name}}/{{resource}}' +base_url: '' +self_link: '{{perimeter_name}}' +create_url: '{{perimeter_name}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +mutex: '{{perimeter_name}}' +import_format: + - '{{perimeter_name}}/{{resource}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - resource +nested_query: + keys: + - spec + - resources + is_list_of_ids: true + modify_by_patch: true +custom_code: + pre_create: 'templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/access_context_manager_service_perimeter_dry_run_resource.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_resource.go.tmpl' +exclude_tgc: true +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeter_dry_run_resource_basic' + primary_resource_id: 'service-perimeter-dry-run-resource' + vars: + service_perimeter_name: 'restrict_all' + skip_test: true +parameters: + - name: 'perimeterName' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. + url_param_only: true + required: true + immutable: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'resource' + type: String + description: | + A GCP resource that is inside of the service perimeter. + Currently only projects are allowed. 
+ Format: projects/{project_number} + required: true + immutable: true diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml new file mode 100644 index 000000000000..64f807f6f375 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterEgressPolicy.yaml @@ -0,0 +1,184 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeterEgressPolicy' +description: | + EgressPolicies match requests based on egressFrom and egressTo stanzas. + For an EgressPolicy to match, both egressFrom and egressTo stanzas must be matched. + If an EgressPolicy matches a request, the request is allowed to span the ServicePerimeter + boundary. For example, an EgressPolicy can be used to allow VMs on networks + within the ServicePerimeter to access a defined set of projects outside the + perimeter in certain contexts (e.g. to read data from a Cloud Storage bucket + or query against a BigQuery dataset). + + ~> **Note:** By default, updates to this resource will remove the EgressPolicy + from the perimeter and add it back in a non-atomic manner. To ensure that the new EgressPolicy + is added before the old one is removed, add a `lifecycle` block with `create_before_destroy = true` to this resource.
+references: + guides: + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#egresspolicy' +docs: +id_format: '{{perimeter}}' +base_url: '' +self_link: '{{perimeter}}' +create_url: '{{perimeter}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +mutex: '{{perimeter}}' +import_format: + - '{{perimeter}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - egressFrom + - egressTo +nested_query: + keys: + - status + - egressPolicies + is_list_of_ids: false + modify_by_patch: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl' +exclude_tgc: true +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeter_egress_policy' + skip_test: true +parameters: + - name: 'perimeter' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. + url_param_only: true + required: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'egressFrom' + type: NestedObject + description: | + Defines conditions on the source of a request causing this `EgressPolicy` to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access to outside the + perimeter. If left unspecified, then members of `identities` field will + be allowed access. + enum_values: + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. 
The email address should + represent individual user or service account only. + item_type: + type: String + - name: 'sources' + type: Array + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - name: 'sourceRestriction' + type: Enum + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + enum_values: + - 'SOURCE_RESTRICTION_UNSPECIFIED' + - 'SOURCE_RESTRICTION_ENABLED' + - 'SOURCE_RESTRICTION_DISABLED' + - name: 'egressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and destination resources that + cause this `EgressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, that match this to stanza. A request matches + if it contains a resource in this list. If * is specified for resources, + then this `EgressTo` rule will authorize access to all resources outside + the perimeter. + item_type: + type: String + - name: 'externalResources' + type: Array + description: | + A list of external resources that are allowed to be accessed. A request + matches if it contains an external resource in this list (Example: + s3://bucket/path). Currently '*' is not allowed. + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` that this egress rule applies to. A request matches + if it contains an operation/service in this list. 
+ item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with serviceName + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong + to the service specified by `serviceName` field. A single MethodSelector + entry with `*` specified for the `method` field will allow all methods + AND permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for `method` should be a valid method name for the corresponding + `serviceName` in `ApiOperation`. If `*` used as value for method, + then ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml new file mode 100644 index 000000000000..af1361dfa975 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterIngressPolicy.yaml @@ -0,0 +1,192 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeterIngressPolicy' +description: | + IngressPolicies match requests based on ingressFrom and ingressTo stanzas. For an ingress policy to match, + both the ingressFrom and ingressTo stanzas must be matched. If an IngressPolicy matches a request, + the request is allowed through the perimeter boundary from outside the perimeter. + For example, access from the internet can be allowed either based on an AccessLevel or, + for traffic hosted on Google Cloud, the project of the source network. + For access from private networks, using the project of the hosting network is required. + Individual ingress policies can be limited by restricting which services and/ + or actions they match using the ingressTo field. + + ~> **Note:** By default, updates to this resource will remove the IngressPolicy + from the perimeter and add it back in a non-atomic manner. To ensure that the new IngressPolicy + is added before the old one is removed, add a `lifecycle` block with `create_before_destroy = true` to this resource.
+references: + guides: + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters#ingresspolicy' +docs: +id_format: '{{perimeter}}' +base_url: '' +self_link: '{{perimeter}}' +create_url: '{{perimeter}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +mutex: '{{perimeter}}' +import_format: + - '{{perimeter}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - ingressFrom + - ingressTo +nested_query: + keys: + - status + - ingressPolicies + is_list_of_ids: false + modify_by_patch: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_ingress_policy.go.tmpl' +exclude_tgc: true +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeter_ingress_policy' + skip_test: true +parameters: + - name: 'perimeter' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. + url_param_only: true + required: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'ingressFrom' + type: NestedObject + description: | + Defines the conditions on the source of a request causing this `IngressPolicy` + to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access from outside the + perimeter. If left unspecified, then members of `identities` field will be + allowed access. + enum_values: + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. 
The email address should represent + individual user or service account only. + item_type: + type: String + - name: 'sources' + type: Array + description: | + Sources that this `IngressPolicy` authorizes access from. + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: | + An `AccessLevel` resource name that allow resources within the + `ServicePerimeters` to be accessed from the internet. `AccessLevels` listed + must be in the same policy as this `ServicePerimeter`. Referencing a nonexistent + `AccessLevel` will cause an error. If no `AccessLevel` names are listed, + resources within the perimeter can only be accessed via Google Cloud calls + with request origins within the perimeter. + Example `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.` + If * is specified, then all IngressSources will be allowed. + - name: 'resource' + type: String + description: | + A Google Cloud resource that is allowed to ingress the perimeter. + Requests from these resources will be allowed to access perimeter data. + Currently only projects are allowed. Format `projects/{project_number}` + The project may be in any Google Cloud organization, not just the + organization that the perimeter is defined in. `*` is not allowed, the case + of allowing all Google Cloud resources only is not supported. + - name: 'ingressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and request destination that cause + this `IngressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, protected by this `ServicePerimeter` + that are allowed to be accessed by sources defined in the + corresponding `IngressFrom`. A request matches if it contains + a resource in this list. 
If `*` is specified for resources, + then this `IngressTo` rule will authorize access to all + resources inside the perimeter, provided that the request + also matches the `operations` field. + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` the sources specified in corresponding `IngressFrom` + are allowed to perform in this `ServicePerimeter`. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with `serviceName` + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong to + the service specified by serviceName field. A single `MethodSelector` entry + with `*` specified for the method field will allow all methods AND + permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for method should be a valid method name for the corresponding + serviceName in `ApiOperation`. If `*` used as value for `method`, then + ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeterResource.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeterResource.yaml new file mode 100644 index 000000000000..840a238a552a --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeterResource.yaml @@ -0,0 +1,102 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeterResource' +description: | + Allows configuring a single GCP resource that should be inside the `status` block of a service perimeter. + This resource is intended to be used in cases where it is not possible to compile a full list + of projects to include in a `google_access_context_manager_service_perimeter` resource, + to enable them to be added separately. + If your perimeter is in dry-run mode use `google_access_context_manager_service_perimeter_dry_run_resource` instead. + + ~> **Note:** If this resource is used alongside a `google_access_context_manager_service_perimeter` resource, + the service perimeter resource must have a `lifecycle` block with `ignore_changes = [status[0].resources]` so + they don't fight over which resources should be in the policy. +references: + guides: + 'Service Perimeter Quickstart': 'https://cloud.google.com/vpc-service-controls/docs/quickstart' + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters' +docs: + warning: | + If you are using User ADCs (Application Default Credentials) with this resource, + you must specify a `billing_project` and set `user_project_override` to true + in the provider configuration. Otherwise the ACM API will return a 403 error. 
+ Your account must have the `serviceusage.services.use` permission on the + `billing_project` you defined. +id_format: '{{perimeter_name}}/{{resource}}' +base_url: '' +self_link: '{{perimeter_name}}' +create_url: '{{perimeter_name}}' +create_verb: 'PATCH' +update_mask: true +delete_verb: 'PATCH' +immutable: true +mutex: '{{perimeter_name}}' +import_format: + - '{{perimeter_name}}/{{resource}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - resource +nested_query: + keys: + - status + - resources + is_list_of_ids: true + modify_by_patch: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/access_context_manager_service_perimeter_resource.go.tmpl' +exclude_tgc: true +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeter_resource_basic' + primary_resource_id: 'service-perimeter-resource' + vars: + service_perimeter_name: 'restrict_all' + skip_test: true +parameters: + - name: 'perimeterName' + type: ResourceRef + description: | + The name of the Service Perimeter to add this resource to. + url_param_only: true + required: true + immutable: true + resource: 'ServicePerimeter' + imports: 'name' +properties: + - name: 'resource' + type: String + description: | + A GCP resource that is inside of the service perimeter. + Currently only projects are allowed. + Format: projects/{project_number} + required: true + immutable: true diff --git a/mmv1/products/accesscontextmanager/go_ServicePerimeters.yaml b/mmv1/products/accesscontextmanager/go_ServicePerimeters.yaml new file mode 100644 index 000000000000..90c478decf6b --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_ServicePerimeters.yaml @@ -0,0 +1,765 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServicePerimeters' +description: | + Replace all existing Service Perimeters in an Access Policy with the Service Perimeters provided. This is done atomically. + This is a bulk edit of all Service Perimeters and may override existing Service Perimeters created by `google_access_context_manager_service_perimeter`, + thus causing a permadiff if used alongside `google_access_context_manager_service_perimeter` on the same parent. 
+references: + guides: + 'Service Perimeter Quickstart': 'https://cloud.google.com/vpc-service-controls/docs/quickstart' + api: 'https://cloud.google.com/access-context-manager/docs/reference/rest/v1/accessPolicies.servicePerimeters' +docs: +id_format: '{{parent}}/servicePerimeters' +base_url: '{{parent}}/servicePerimeters:replaceAll' +self_link: '{{parent}}/servicePerimeters' +update_url: '{{parent}}/servicePerimeters:replaceAll' +update_verb: 'POST' +import_format: + - '{{parent}}/servicePerimeters' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + custom_delete: 'templates/terraform/custom_delete/go/replace_all_service_perimeters_empty_list.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/set_access_policy_parent_from_access_policy.go.tmpl' +skip_sweeper: true +examples: + - name: 'access_context_manager_service_perimeters_basic' + primary_resource_id: 'service-perimeter' + vars: + access_level_name: 'chromeos_no_lock' + service_perimeter_name: 'restrict_storage' + skip_test: true +parameters: + - name: 'parent' + type: String + description: | + The AccessPolicy this ServicePerimeter lives in. + Format: accessPolicies/{policy_id} + required: true + immutable: true + ignore_read: true +properties: + - name: 'servicePerimeters' + type: Array + description: | + The desired Service Perimeters that should replace all existing Service Perimeters in the Access Policy. + custom_flatten: 'templates/terraform/custom_flatten/go/accesscontextmanager_serviceperimeters_custom_flatten.go.tmpl' + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + Resource name for the ServicePerimeter. 
The short_name component must + begin with a letter and only include alphanumeric and '_'. + Format: accessPolicies/{policy_id}/servicePerimeters/{short_name} + required: true + immutable: true + - name: 'title' + type: String + description: | + Human readable title. Must be unique within the Policy. + required: true + - name: 'description' + type: String + description: | + Description of the ServicePerimeter and its use. Does not affect + behavior. + - name: 'createTime' + type: Time + description: | + Time the AccessPolicy was created in UTC. + output: true + - name: 'updateTime' + type: Time + description: | + Time the AccessPolicy was updated in UTC. + output: true + - name: 'perimeterType' + type: Enum + description: | + Specifies the type of the Perimeter. There are two types: regular and + bridge. Regular Service Perimeter contains resources, access levels, + and restricted services. Every resource can be in at most + ONE regular Service Perimeter. + + In addition to being in a regular service perimeter, a resource can also + be in zero or more perimeter bridges. A perimeter bridge only contains + resources. Cross project operations are permitted if all effected + resources share some perimeter (whether bridge or regular). Perimeter + Bridge does not contain access levels or services: those are governed + entirely by the regular perimeter that resource is in. + + Perimeter Bridges are typically useful when building more complex + topologies with many independent perimeters that need to share some data + with a common perimeter, but should not be able to share data among + themselves. + immutable: true + custom_flatten: 'templates/terraform/custom_flatten/go/default_if_empty.tmpl' + default_value: "PERIMETER_TYPE_REGULAR" + enum_values: + - 'PERIMETER_TYPE_REGULAR' + - 'PERIMETER_TYPE_BRIDGE' + - name: 'status' + type: NestedObject + description: | + ServicePerimeter configuration. 
Specifies sets of resources, + restricted services and access levels that determine + perimeter content and boundaries. + properties: + - name: 'resources' + type: Array + description: | + A list of GCP resources that are inside of the service perimeter. + Currently only projects are allowed. + Format: projects/{project_number} + # TODO: (mbang) won't work for arrays yet, uncomment here once they are supported. + # (github.com/hashicorp/terraform-plugin-sdk/issues/470) + # at_least_one_of: + # - status.0.resources + # - status.0.access_levels + # - status.0.restricted_services + is_set: true + item_type: + type: String + - name: 'accessLevels' + type: Array + description: | + A list of AccessLevel resource names that allow resources within + the ServicePerimeter to be accessed from the internet. + AccessLevels listed must be in the same policy as this + ServicePerimeter. Referencing a nonexistent AccessLevel is a + syntax error. If no AccessLevel names are listed, resources within + the perimeter can only be accessed via GCP calls with request + origins within the perimeter. For Service Perimeter Bridge, must + be empty. + + Format: accessPolicies/{policy_id}/accessLevels/{access_level_name} + # TODO: (mbang) won't work for arrays yet, uncomment here once they are supported. + # (github.com/hashicorp/terraform-plugin-sdk/issues/470) + # at_least_one_of: + # - status.0.resources + # - status.0.access_levels + # - status.0.restricted_services + is_set: true + item_type: + type: String + - name: 'restrictedServices' + type: Array + description: | + GCP services that are subject to the Service Perimeter + restrictions. Must contain a list of services. For example, if + `storage.googleapis.com` is specified, access to the storage + buckets inside the perimeter must meet the perimeter's access + restrictions. + # TODO: (mbang) won't work for arrays yet, uncomment here once they are supported. 
+ # (github.com/hashicorp/terraform-plugin-sdk/issues/470) + # at_least_one_of: + # - status.0.resources + # - status.0.access_levels + # - status.0.restricted_services + is_set: true + item_type: + type: String + - name: 'vpcAccessibleServices' + type: NestedObject + description: | + Specifies how APIs are allowed to communicate within the Service + Perimeter. + properties: + - name: 'enableRestriction' + type: Boolean + description: | + Whether to restrict API calls within the Service Perimeter to the + list of APIs specified in 'allowedServices'. + - name: 'allowedServices' + type: Array + description: | + The list of APIs usable within the Service Perimeter. + Must be empty unless `enableRestriction` is True. + is_set: true + item_type: + type: String + - name: 'ingressPolicies' + type: Array + description: | + List of `IngressPolicies` to apply to the perimeter. A perimeter may + have multiple `IngressPolicies`, each of which is evaluated + separately. Access is granted if any `Ingress Policy` grants it. + Must be empty for a perimeter bridge. + is_set: true + item_type: + type: NestedObject + properties: + - name: 'ingressFrom' + type: NestedObject + description: | + Defines the conditions on the source of a request causing this `IngressPolicy` + to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access from outside the + perimeter. If left unspecified, then members of `identities` field will be + allowed access. + enum_values: + - 'IDENTITY_TYPE_UNSPECIFIED' + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. 
+ is_set: true + item_type: + type: String + - name: 'sources' + type: Array + description: | + Sources that this `IngressPolicy` authorizes access from. + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: | + An `AccessLevel` resource name that allow resources within the + `ServicePerimeters` to be accessed from the internet. `AccessLevels` listed + must be in the same policy as this `ServicePerimeter`. Referencing a nonexistent + `AccessLevel` will cause an error. If no `AccessLevel` names are listed, + resources within the perimeter can only be accessed via Google Cloud calls + with request origins within the perimeter. + Example `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.` + If * is specified, then all IngressSources will be allowed. + - name: 'resource' + type: String + description: | + A Google Cloud resource that is allowed to ingress the perimeter. + Requests from these resources will be allowed to access perimeter data. + Currently only projects are allowed. Format `projects/{project_number}` + The project may be in any Google Cloud organization, not just the + organization that the perimeter is defined in. `*` is not allowed, the case + of allowing all Google Cloud resources only is not supported. + - name: 'ingressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and request destination that cause + this `IngressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, protected by this `ServicePerimeter` + that are allowed to be accessed by sources defined in the + corresponding `IngressFrom`. A request matches if it contains + a resource in this list. If `*` is specified for resources, + then this `IngressTo` rule will authorize access to all + resources inside the perimeter, provided that the request + also matches the `operations` field. 
+ is_set: true + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` the sources specified in corresponding `IngressFrom` + are allowed to perform in this `ServicePerimeter`. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with `serviceName` + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong to + the service specified by serviceName field. A single `MethodSelector` entry + with `*` specified for the method field will allow all methods AND + permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for method should be a valid method name for the corresponding + serviceName in `ApiOperation`. If `*` used as value for `method`, then + ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. + - name: 'egressPolicies' + type: Array + description: | + List of EgressPolicies to apply to the perimeter. A perimeter may + have multiple EgressPolicies, each of which is evaluated separately. + Access is granted if any EgressPolicy grants it. Must be empty for + a perimeter bridge. + item_type: + type: NestedObject + properties: + - name: 'egressFrom' + type: NestedObject + description: | + Defines conditions on the source of a request causing this `EgressPolicy` to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access to outside the + perimeter. 
If left unspecified, then members of `identities` field will + be allowed access. + enum_values: + - 'IDENTITY_TYPE_UNSPECIFIED' + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. The email address should + represent individual user or service account only. + is_set: true + item_type: + type: String + - name: 'sources' + type: Array + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - name: 'sourceRestriction' + type: Enum + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + enum_values: + - 'SOURCE_RESTRICTION_UNSPECIFIED' + - 'SOURCE_RESTRICTION_ENABLED' + - 'SOURCE_RESTRICTION_DISABLED' + - name: 'egressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and destination resources that + cause this `EgressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, that match this to stanza. A request matches + if it contains a resource in this list. If * is specified for resources, + then this `EgressTo` rule will authorize access to all resources outside + the perimeter. + is_set: true + item_type: + type: String + - name: 'externalResources' + type: Array + description: | + A list of external resources that are allowed to be accessed. A request + matches if it contains an external resource in this list (Example: + s3://bucket/path). Currently '*' is not allowed. 
+ is_set: true + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` that this egress rule applies to. A request matches + if it contains an operation/service in this list. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with serviceName + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong + to the service specified by `serviceName` field. A single MethodSelector + entry with `*` specified for the `method` field will allow all methods + AND permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for `method` should be a valid method name for the corresponding + `serviceName` in `ApiOperation`. If `*` used as value for method, + then ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. + - name: 'spec' + type: NestedObject + description: | + Proposed (or dry run) ServicePerimeter configuration. + This configuration allows to specify and test ServicePerimeter configuration + without enforcing actual access restrictions. Only allowed to be set when + the `useExplicitDryRunSpec` flag is set. + properties: + - name: 'resources' + type: Array + description: | + A list of GCP resources that are inside of the service perimeter. + Currently only projects are allowed. + Format: projects/{project_number} + # TODO: (mbang) won't work for arrays yet, uncomment here once they are supported. 
+ # (github.com/hashicorp/terraform-plugin-sdk/issues/470) + # at_least_one_of: + # - spec.0.resources + # - spec.0.access_levels + # - spec.0.restricted_services + is_set: true + item_type: + type: String + - name: 'accessLevels' + type: Array + description: | + A list of AccessLevel resource names that allow resources within + the ServicePerimeter to be accessed from the internet. + AccessLevels listed must be in the same policy as this + ServicePerimeter. Referencing a nonexistent AccessLevel is a + syntax error. If no AccessLevel names are listed, resources within + the perimeter can only be accessed via GCP calls with request + origins within the perimeter. For Service Perimeter Bridge, must + be empty. + + Format: accessPolicies/{policy_id}/accessLevels/{access_level_name} + # TODO: (mbang) won't work for arrays yet, uncomment here once they are supported. + # (github.com/hashicorp/terraform-plugin-sdk/issues/470) + # at_least_one_of: + # - spec.0.resources + # - spec.0.access_levels + # - spec.0.restricted_services + is_set: true + item_type: + type: String + - name: 'restrictedServices' + type: Array + description: | + GCP services that are subject to the Service Perimeter + restrictions. Must contain a list of services. For example, if + `storage.googleapis.com` is specified, access to the storage + buckets inside the perimeter must meet the perimeter's access + restrictions. + # TODO: (mbang) won't work for arrays yet, uncomment here once they are supported. + # (github.com/hashicorp/terraform-plugin-sdk/issues/470) + # at_least_one_of: + # - spec.0.resources + # - spec.0.access_levels + # - spec.0.restricted_services + is_set: true + item_type: + type: String + - name: 'vpcAccessibleServices' + type: NestedObject + description: | + Specifies how APIs are allowed to communicate within the Service + Perimeter. 
+ properties: + - name: 'enableRestriction' + type: Boolean + description: | + Whether to restrict API calls within the Service Perimeter to the + list of APIs specified in 'allowedServices'. + - name: 'allowedServices' + type: Array + description: | + The list of APIs usable within the Service Perimeter. + Must be empty unless `enableRestriction` is True. + is_set: true + item_type: + type: String + - name: 'ingressPolicies' + type: Array + description: | + List of `IngressPolicies` to apply to the perimeter. A perimeter may + have multiple `IngressPolicies`, each of which is evaluated + separately. Access is granted if any `Ingress Policy` grants it. + Must be empty for a perimeter bridge. + item_type: + type: NestedObject + properties: + - name: 'ingressFrom' + type: NestedObject + description: | + Defines the conditions on the source of a request causing this `IngressPolicy` + to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access from outside the + perimeter. If left unspecified, then members of `identities` field will be + allowed access. + enum_values: + - 'IDENTITY_TYPE_UNSPECIFIED' + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this ingress policy. + Should be in the format of email address. The email address should represent + individual user or service account only. + is_set: true + item_type: + type: String + - name: 'sources' + type: Array + description: | + Sources that this `IngressPolicy` authorizes access from. + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: | + An `AccessLevel` resource name that allow resources within the + `ServicePerimeters` to be accessed from the internet. `AccessLevels` listed + must be in the same policy as this `ServicePerimeter`. 
Referencing a nonexistent + `AccessLevel` will cause an error. If no `AccessLevel` names are listed, + resources within the perimeter can only be accessed via Google Cloud calls + with request origins within the perimeter. + Example `accessPolicies/MY_POLICY/accessLevels/MY_LEVEL.` + If * is specified, then all IngressSources will be allowed. + - name: 'resource' + type: String + description: | + A Google Cloud resource that is allowed to ingress the perimeter. + Requests from these resources will be allowed to access perimeter data. + Currently only projects are allowed. Format `projects/{project_number}` + The project may be in any Google Cloud organization, not just the + organization that the perimeter is defined in. `*` is not allowed, the case + of allowing all Google Cloud resources only is not supported. + - name: 'ingressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and request destination that cause + this `IngressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, protected by this `ServicePerimeter` + that are allowed to be accessed by sources defined in the + corresponding `IngressFrom`. A request matches if it contains + a resource in this list. If `*` is specified for resources, + then this `IngressTo` rule will authorize access to all + resources inside the perimeter, provided that the request + also matches the `operations` field. + is_set: true + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` the sources specified in corresponding `IngressFrom` + are allowed to perform in this `ServicePerimeter`. + item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. 
A single `ApiOperation` with `serviceName` + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong to + the service specified by serviceName field. A single `MethodSelector` entry + with `*` specified for the method field will allow all methods AND + permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for method should be a valid method name for the corresponding + serviceName in `ApiOperation`. If `*` used as value for `method`, then + ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. + - name: 'egressPolicies' + type: Array + description: | + List of EgressPolicies to apply to the perimeter. A perimeter may + have multiple EgressPolicies, each of which is evaluated separately. + Access is granted if any EgressPolicy grants it. Must be empty for + a perimeter bridge. + item_type: + type: NestedObject + properties: + - name: 'egressFrom' + type: NestedObject + description: | + Defines conditions on the source of a request causing this `EgressPolicy` to apply. + properties: + - name: 'identityType' + type: Enum + description: | + Specifies the type of identities that are allowed access to outside the + perimeter. If left unspecified, then members of `identities` field will + be allowed access. + enum_values: + - 'IDENTITY_TYPE_UNSPECIFIED' + - 'ANY_IDENTITY' + - 'ANY_USER_ACCOUNT' + - 'ANY_SERVICE_ACCOUNT' + - name: 'identities' + type: Array + description: | + A list of identities that are allowed access through this `EgressPolicy`. + Should be in the format of email address. 
The email address should + represent individual user or service account only. + is_set: true + item_type: + type: String + - name: 'sources' + type: Array + description: 'Sources that this EgressPolicy authorizes access from.' + item_type: + type: NestedObject + properties: + - name: 'accessLevel' + type: String + description: 'An AccessLevel resource name that allows resources outside the ServicePerimeter to be accessed from the inside.' + - name: 'sourceRestriction' + type: Enum + description: 'Whether to enforce traffic restrictions based on `sources` field. If the `sources` field is non-empty, then this field must be set to `SOURCE_RESTRICTION_ENABLED`.' + enum_values: + - 'SOURCE_RESTRICTION_UNSPECIFIED' + - 'SOURCE_RESTRICTION_ENABLED' + - 'SOURCE_RESTRICTION_DISABLED' + - name: 'egressTo' + type: NestedObject + description: | + Defines the conditions on the `ApiOperation` and destination resources that + cause this `EgressPolicy` to apply. + properties: + - name: 'resources' + type: Array + description: | + A list of resources, currently only projects in the form + `projects/`, that match this to stanza. A request matches + if it contains a resource in this list. If * is specified for resources, + then this `EgressTo` rule will authorize access to all resources outside + the perimeter. + is_set: true + item_type: + type: String + - name: 'externalResources' + type: Array + description: | + A list of external resources that are allowed to be accessed. A request + matches if it contains an external resource in this list (Example: + s3://bucket/path). Currently '*' is not allowed. + is_set: true + item_type: + type: String + - name: 'operations' + type: Array + description: | + A list of `ApiOperations` that this egress rule applies to. A request matches + if it contains an operation/service in this list. 
+ item_type: + type: NestedObject + properties: + - name: 'serviceName' + type: String + description: | + The name of the API whose methods or permissions the `IngressPolicy` or + `EgressPolicy` want to allow. A single `ApiOperation` with serviceName + field set to `*` will allow all methods AND permissions for all services. + - name: 'methodSelectors' + type: Array + description: | + API methods or permissions to allow. Method or permission must belong + to the service specified by `serviceName` field. A single MethodSelector + entry with `*` specified for the `method` field will allow all methods + AND permissions for the service specified in `serviceName`. + item_type: + type: NestedObject + properties: + - name: 'method' + type: String + description: | + Value for `method` should be a valid method name for the corresponding + `serviceName` in `ApiOperation`. If `*` used as value for method, + then ALL methods and permissions are allowed. + - name: 'permission' + type: String + description: | + Value for permission should be a valid Cloud IAM permission for the + corresponding `serviceName` in `ApiOperation`. + - name: 'useExplicitDryRunSpec' + type: Boolean + description: | + Use explicit dry run spec flag. Ordinarily, a dry-run spec implicitly exists + for all Service Perimeters, and that spec is identical to the status for those + Service Perimeters. When this flag is set, it inhibits the generation of the + implicit spec, thereby allowing the user to explicitly provide a + configuration ("spec") to use in a dry-run version of the Service Perimeter. + This allows the user to test changes to the enforced config ("status") without + actually enforcing them. This testing is done through analyzing the differences + between currently enforced and suggested restrictions. useExplicitDryRunSpec must + be set to True if any of the fields in the spec are set to non-default values.
diff --git a/mmv1/products/accesscontextmanager/go_product.yaml b/mmv1/products/accesscontextmanager/go_product.yaml new file mode 100644 index 000000000000..a6d7fbade4e6 --- /dev/null +++ b/mmv1/products/accesscontextmanager/go_product.yaml @@ -0,0 +1,34 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'AccessContextManager' +display_name: 'Access Context Manager (VPC Service Controls)' +versions: + - name: 'ga' + base_url: 'https://accesscontextmanager.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' +async: + type: "OpAsync" + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' diff --git a/mmv1/products/apigee/go_AddonsConfig.yaml b/mmv1/products/apigee/go_AddonsConfig.yaml new file mode 100644 index 000000000000..295fb7026857 --- /dev/null +++ b/mmv1/products/apigee/go_AddonsConfig.yaml @@ -0,0 +1,131 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'AddonsConfig' +description: | + Configures the add-ons for the Apigee organization. The existing add-on configuration will be fully replaced. +references: + guides: + 'Creating an API organization': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-org' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations#setaddons' +docs: +base_url: 'organizations' +self_link: 'organizations/{{org}}' +create_url: 'organizations/{{org}}:setAddons' +update_url: 'organizations/{{org}}:setAddons' +update_verb: 'POST' +delete_url: 'organizations/{{org}}:setAddons' +delete_verb: 'POST' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + actions: ['create', 'update', 'delete'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_addons.go.tmpl' + test_check_destroy: 'templates/terraform/custom_check_destroy/go/apigee_addons_override.go.tmpl' +examples: + - name: 'apigee_addons_basic' + skip_test: true + - name: 'apigee_addons_full' + skip_test: true + - name: 'apigee_addons_test' + primary_resource_id: 'apigee_org_addons' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true +parameters: + - name: 'org' + type: String + description: | + Name of the Apigee 
organization. + url_param_only: true + required: true + immutable: true +properties: + - name: 'addonsConfig' + type: NestedObject + description: Addon configurations of the Apigee organization. + properties: + - name: 'advancedApiOpsConfig' + type: NestedObject + description: Configuration for the Advanced API Ops add-on. + properties: + - name: 'enabled' + type: Boolean + description: + Flag that specifies whether the Advanced API Ops add-on is + enabled. + - name: 'integrationConfig' + type: NestedObject + description: Configuration for the Integration add-on. + properties: + - name: 'enabled' + type: Boolean + description: + Flag that specifies whether the Integration add-on is + enabled. + - name: 'monetizationConfig' + type: NestedObject + description: Configuration for the Monetization add-on. + properties: + - name: 'enabled' + type: Boolean + description: + Flag that specifies whether the Monetization add-on is + enabled. + - name: 'apiSecurityConfig' + type: NestedObject + description: Configuration for the API Security add-on. + properties: + - name: 'enabled' + type: Boolean + description: + Flag that specifies whether the API Security add-on is + enabled. + - name: 'expiresAt' + type: String + description: + Time at which the API Security add-on expires, in + milliseconds since epoch. + output: true + - name: 'connectorsPlatformConfig' + type: NestedObject + description: Configuration for the Connectors Platform add-on. + properties: + - name: 'enabled' + type: Boolean + description: + Flag that specifies whether the Connectors Platform add-on is + enabled. + - name: 'expiresAt' + type: String + description: + Time at which the Connectors Platform add-on expires, in + milliseconds since epoch.
+ output: true diff --git a/mmv1/products/apigee/go_EndpointAttachment.yaml b/mmv1/products/apigee/go_EndpointAttachment.yaml new file mode 100644 index 000000000000..f7cd692f1d9a --- /dev/null +++ b/mmv1/products/apigee/go_EndpointAttachment.yaml @@ -0,0 +1,105 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'EndpointAttachment' +description: | + Apigee Endpoint Attachment. +references: + guides: + 'Creating an environment': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-environment' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.endpointAttachments/create' +docs: +base_url: 'endpointAttachments' +self_link: '{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}' +create_url: '{{org_id}}/endpointAttachments?endpointAttachmentId={{endpoint_attachment_id}}' +delete_url: '{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}' +immutable: true +import_format: + - '{{org_id}}/endpointAttachments/{{endpoint_attachment_id}}' + - '{{org_id}}/{{endpoint_attachment_id}}' +timeouts: + insert_minutes: 30 + update_minutes: 20 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + 
message: 'message' +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_endpoint_attachment.go.tmpl' +skip_sweeper: true +examples: + - name: 'apigee_endpoint_attachment_basic' + skip_test: true + - name: 'apigee_endpoint_attachment_basic_test' + primary_resource_id: 'apigee_endpoint_attachment' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true +parameters: + - name: 'orgId' + type: String + description: | + The Apigee Organization associated with the Apigee instance, + in the format `organizations/{{org_name}}`. + url_param_only: true + required: true + immutable: true + - name: 'endpointAttachmentId' + type: String + description: | + ID of the endpoint attachment. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Name of the Endpoint Attachment in the following format: + organizations/{organization}/endpointAttachments/{endpointAttachment}. + output: true + - name: 'location' + type: String + description: | + Location of the endpoint attachment. + required: true + - name: 'host' + type: String + description: | + Host that can be used in either HTTP Target Endpoint directly, or as the host in Target Server. + output: true + - name: 'serviceAttachment' + type: String + description: | + Format: projects/*/regions/*/serviceAttachments/* + required: true + - name: 'connectionState' + type: String + description: | + State of the endpoint attachment connection to the service attachment. + output: true diff --git a/mmv1/products/apigee/go_EnvKeystore.yaml b/mmv1/products/apigee/go_EnvKeystore.yaml new file mode 100644 index 000000000000..d0d9405d4385 --- /dev/null +++ b/mmv1/products/apigee/go_EnvKeystore.yaml @@ -0,0 +1,68 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'EnvKeystore' +description: | + An `Environment KeyStore` in Apigee. +references: + guides: + 'Creating an environment': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-environment' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.environments.keystores/create' +docs: +base_url: '{{env_id}}/keystores' +self_link: '{{env_id}}/keystores/{{name}}' +create_url: '{{env_id}}/keystores' +delete_url: '{{env_id}}/keystores/{{name}}' +immutable: true +import_format: + - '{{env_id}}/keystores/{{name}}' + - '{{env_id}}/{{name}}' +timeouts: + insert_minutes: 1 + update_minutes: 20 + delete_minutes: 1 +autogen_async: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_environment_keystore.go.tmpl' +skip_sweeper: true +examples: + - name: 'apigee_environment_keystore_test' + primary_resource_id: 'apigee_environment_keystore' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true +parameters: + - name: 'envId' + type: String + description: | + The Apigee environment group associated with the Apigee environment, + in the format `organizations/{{org_name}}/environments/{{env_name}}`. + url_param_only: true + required: true + immutable: true + - name: 'name' + type: String + description: | + The name of the newly created keystore. + immutable: true +properties: + - name: 'aliases' + type: Array + description: | + Aliases in this keystore. 
+ output: true + item_type: + type: String diff --git a/mmv1/products/apigee/go_EnvReferences.yaml b/mmv1/products/apigee/go_EnvReferences.yaml new file mode 100644 index 000000000000..57bef5c4ef68 --- /dev/null +++ b/mmv1/products/apigee/go_EnvReferences.yaml @@ -0,0 +1,79 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'EnvReferences' +description: | + An `Environment Reference` in Apigee. 
+references: + guides: + 'Creating an environment': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-environment' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.environments.references/create' +docs: +base_url: '{{env_id}}/references' +self_link: '{{env_id}}/references/{{name}}' +create_url: '{{env_id}}/references/' +delete_url: '{{env_id}}/references/{{name}}' +immutable: true +import_format: + - '{{env_id}}/references/{{name}}' + - '{{env_id}}/{{name}}' +timeouts: + insert_minutes: 1 + update_minutes: 20 + delete_minutes: 1 +autogen_async: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_environment_reference.go.tmpl' +skip_sweeper: true +examples: + - name: 'apigee_environment_reference_test' + primary_resource_id: 'apigee_environment_reference' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true +parameters: + - name: 'envId' + type: String + description: | + The Apigee environment group associated with the Apigee environment, + in the format `organizations/{{org_name}}/environments/{{env_name}}`. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Required. The resource id of this reference. Values must match the regular expression [\w\s-.]+. + required: true + immutable: true + - name: 'description' + type: String + description: | + Optional. A human-readable description of this reference. + immutable: true + - name: 'resourceType' + type: String + description: | + The type of resource referred to by this reference. Valid values are 'KeyStore' or 'TrustStore'. + required: true + immutable: true + - name: 'refers' + type: String + description: | + Required. The id of the resource to which this reference refers. Must be the id of a resource that exists in the parent environment and is of the given resourceType. 
+ required: true + immutable: true diff --git a/mmv1/products/apigee/go_Envgroup.yaml b/mmv1/products/apigee/go_Envgroup.yaml new file mode 100644 index 000000000000..fc52152fe58f --- /dev/null +++ b/mmv1/products/apigee/go_Envgroup.yaml @@ -0,0 +1,86 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Envgroup' +description: | + An `Environment group` in Apigee. +references: + guides: + 'Creating an environment': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-environment' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.envgroups/create' +docs: +base_url: 'envgroups' +self_link: '{{org_id}}/envgroups/{{name}}' +create_url: '{{org_id}}/envgroups' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{org_id}}/envgroups/{{name}}' + - '{{org_id}}/{{name}}' +timeouts: + insert_minutes: 30 + update_minutes: 20 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_environment_group.go.tmpl' +examples: + - name: 'apigee_environment_group_basic' + vars: + envgroup_name: 'my-envgroup' + 
skip_test: true + - name: 'apigee_environment_group_basic_test' + primary_resource_id: 'apigee_environment_group' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true +parameters: + - name: 'orgId' + type: String + description: | + The Apigee Organization associated with the Apigee environment group, + in the format `organizations/{{org_name}}`. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource ID of the environment group. + required: true + immutable: true + - name: 'hostnames' + type: Array + description: | + Hostnames of the environment group. + required: false + item_type: + type: String diff --git a/mmv1/products/apigee/go_EnvgroupAttachment.yaml b/mmv1/products/apigee/go_EnvgroupAttachment.yaml new file mode 100644 index 000000000000..9f59f1128e28 --- /dev/null +++ b/mmv1/products/apigee/go_EnvgroupAttachment.yaml @@ -0,0 +1,85 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'EnvgroupAttachment' +description: | + An `Environment Group attachment` in Apigee. 
+references: + guides: + 'Creating an environment': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-environment' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.envgroups.attachments/create' +docs: +base_url: '{{envgroup_id}}/attachments' +self_link: '{{envgroup_id}}/attachments/{{name}}' +create_url: '{{envgroup_id}}/attachments' +delete_url: '{{envgroup_id}}/attachments/{{name}}' +immutable: true +import_format: + - '{{envgroup_id}}/attachments/{{name}}' + - '{{envgroup_id}}/{{name}}' +timeouts: + insert_minutes: 30 + update_minutes: 20 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_environment_group_attachment.go.tmpl' +skip_sweeper: true +examples: + - name: 'apigee_environment_group_attachment_basic' + vars: + project_id: 'my-project' + envgroup_name: 'my-envgroup' + environment_name: 'my-environment' + skip_test: true + - name: 'apigee_environment_group_attachment_basic_test' + primary_resource_id: 'apigee_environment_group_attachment' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true +parameters: + - name: 'envgroupId' + type: String + description: | + The Apigee environment group associated with the Apigee environment, + in the format `organizations/{{org_name}}/envgroups/{{envgroup_name}}`. + url_param_only: true + required: true +properties: + - name: 'environment' + type: String + description: | + The resource ID of the environment. + required: true + - name: 'name' + type: String + description: | + The name of the newly created attachment (output parameter). 
+ output: true diff --git a/mmv1/products/apigee/go_Environment.yaml b/mmv1/products/apigee/go_Environment.yaml new file mode 100644 index 000000000000..50a168e36fb0 --- /dev/null +++ b/mmv1/products/apigee/go_Environment.yaml @@ -0,0 +1,187 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Environment' +description: | + An `Environment` in Apigee. +references: + guides: + 'Creating an environment': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-environment' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.environments/create' +docs: +base_url: 'environments' +self_link: '{{org_id}}/environments/{{name}}' +create_url: '{{org_id}}/environments' +update_url: '{{org_id}}/environments/{{name}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{org_id}}/environments/{{name}}' + - '{{org_id}}/{{name}}' +timeouts: + insert_minutes: 30 + update_minutes: 20 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'env_id' + base_url: '{{org_id}}/environments/{{name}}' + self_link: 
'{{org_id}}/environments/{{name}}' + import_format: + - '{{%org_id}}/environments/{{name}}' + - '{{name}}' +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_environment.go.tmpl' +examples: + - name: 'apigee_environment_basic' + vars: + environment_name: 'my-environment' + skip_test: true + - name: 'apigee_environment_basic_test' + primary_resource_id: 'apigee_environment' + primary_resource_name: 'fmt.Sprintf("organizations/tf-test%s", context["random_suffix"]), fmt.Sprintf("tf-test%s", context["random_suffix"])' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true + - name: 'apigee_environment_basic_deployment_apiproxy_type_test' + primary_resource_id: 'apigee_environment' + primary_resource_name: 'fmt.Sprintf("organizations/tf-test%s", context["random_suffix"]), fmt.Sprintf("tf-test%s", context["random_suffix"])' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true + - name: 'apigee_environment_patch_update_test' + primary_resource_id: 'apigee_environment' + primary_resource_name: 'fmt.Sprintf("organizations/tf-test%s", context["random_suffix"]), fmt.Sprintf("tf-test%s", context["random_suffix"])' + min_version: 'beta' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true +parameters: + - name: 'orgId' + type: String + description: | + The Apigee Organization associated with the Apigee environment, + in the format `organizations/{{org_name}}`. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource ID of the environment. + required: true + immutable: true + - name: 'displayName' + type: String + description: | + Display name of the environment. + required: false + immutable: true + - name: 'description' + type: String + description: | + Description of the environment. 
+    required: false
+    immutable: true
+  - name: 'deploymentType'
+    type: Enum
+    description: |
+      Optional. Deployment type supported by the environment. The deployment type can be
+      set when creating the environment and cannot be changed. When you enable archive
+      deployment, you will be prevented from performing a subset of actions within the
+      environment, including:
+      Managing the deployment of API proxy or shared flow revisions;
+      Creating, updating, or deleting resource files;
+      Creating, updating, or deleting target servers.
+    immutable: true
+    default_from_api: true
+    enum_values:
+      - 'DEPLOYMENT_TYPE_UNSPECIFIED'
+      - 'PROXY'
+      - 'ARCHIVE'
+  - name: 'apiProxyType'
+    type: Enum
+    description: |
+      Optional. API Proxy type supported by the environment. The type can be set when creating
+      the Environment and cannot be changed.
+    immutable: true
+    default_from_api: true
+    enum_values:
+      - 'API_PROXY_TYPE_UNSPECIFIED'
+      - 'PROGRAMMABLE'
+      - 'CONFIGURABLE'
+  - name: 'nodeConfig'
+    type: NestedObject
+    description: |
+      NodeConfig for setting the min/max number of nodes associated with the environment.
+    default_from_api: true
+    properties:
+      - name: 'minNodeCount'
+        type: String
+        description: |
+          The minimum total number of gateway nodes that is reserved for all instances that
+          have the specified environment. If not specified, the default is determined by the
+          recommended minimum number of nodes for that gateway.
+      - name: 'maxNodeCount'
+        type: String
+        description: |
+          The maximum total number of gateway nodes that is reserved for all instances that
+          have the specified environment. If not specified, the default is determined by the
+          recommended maximum number of nodes for that gateway.
+      - name: 'currentAggregateNodeCount'
+        type: String
+        description: |
+          The current total number of gateway nodes that each environment currently has across
+          all instances.
+ output: true + - name: 'type' + type: Enum + description: | + Types that can be selected for an Environment. Each of the types are + limited by capability and capacity. Refer to Apigee's public documentation + to understand about each of these types in details. + An Apigee org can support heterogeneous Environments. + default_from_api: true + enum_values: + - 'ENVIRONMENT_TYPE_UNSPECIFIED' + - 'BASE' + - 'INTERMEDIATE' + - 'COMPREHENSIVE' + - name: 'forwardProxyUri' + type: String + description: | + Optional. URI of the forward proxy to be applied to the runtime instances in this environment. Must be in the format of {scheme}://{hostname}:{port}. Note that the scheme must be one of "http" or "https", and the port must be supplied. + required: false diff --git a/mmv1/products/apigee/go_Instance.yaml b/mmv1/products/apigee/go_Instance.yaml new file mode 100644 index 000000000000..9f8927bebc19 --- /dev/null +++ b/mmv1/products/apigee/go_Instance.yaml @@ -0,0 +1,188 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Instance' +description: | + An `Instance` is the runtime dataplane in Apigee. 
+references: + guides: + 'Creating a runtime instance': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-instance' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.instances/create' +docs: +base_url: 'instances' +self_link: '{{org_id}}/instances/{{name}}' +create_url: '{{org_id}}/instances' +immutable: true +mutex: '{{org_id}}/apigeeInstances' +import_format: + - '{{org_id}}/instances/{{name}}' + - '{{org_id}}/{{name}}' +timeouts: + insert_minutes: 60 + update_minutes: 20 + delete_minutes: 60 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + constants: 'templates/terraform/constants/go/apigee_instance.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/apigee_instance.go.tmpl' +error_retry_predicates: + + - 'transport_tpg.IsApigeeRetryableError' +examples: + - name: 'apigee_instance_basic' + vars: + instance_name: 'my-instance-name' + skip_test: true + - name: 'apigee_instance_basic_test' + primary_resource_id: 'apigee_instance' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true + - name: 'apigee_instance_cidr_range' + vars: + instance_name: 'my-instance-name' + skip_test: true + - name: 'apigee_instance_cidr_range_test' + primary_resource_id: 'apigee_instance' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true + - name: 'apigee_instance_ip_range' + vars: + instance_name: 'my-instance-name' + skip_test: true + - name: 'apigee_instance_ip_range_test' + primary_resource_id: 'apigee_instance' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true + - name: 'apigee_instance_full' + vars: + instance_name: 
'my-instance-name' + skip_test: true + - name: 'apigee_instance_full_test' + primary_resource_id: 'apigee_instance' + min_version: 'beta' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true + - name: 'apigee_instance_service_attachment_basic_test' + primary_resource_id: 'apigee_instance' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true +parameters: + - name: 'orgId' + type: String + description: | + The Apigee Organization associated with the Apigee instance, + in the format `organizations/{{org_name}}`. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Resource ID of the instance. + required: true + - name: 'location' + type: String + description: | + Required. Compute Engine location where the instance resides. + required: true + - name: 'peeringCidrRange' + type: String + description: | + The size of the CIDR block range that will be reserved by the instance. For valid values, + see [CidrRange](https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.instances#CidrRange) on the documentation. + default_from_api: true + - name: 'ipRange' + type: String + description: | + IP range represents the customer-provided CIDR block of length 22 that will be used for + the Apigee instance creation. This optional range, if provided, should be freely + available as part of larger named range the customer has allocated to the Service + Networking peering. If this is not provided, Apigee will automatically request for any + available /22 CIDR block from Service Networking. The customer should use this CIDR block + for configuring their firewall needs to allow traffic from Apigee. + Input format: "a.b.c.d/22" + ignore_read: true + - name: 'description' + type: String + description: | + Description of the instance. 
+ - name: 'displayName' + type: String + description: | + Display name of the instance. + - name: 'diskEncryptionKeyName' + type: String + description: | + Customer Managed Encryption Key (CMEK) used for disk and volume encryption. Required for Apigee paid subscriptions only. + Use the following format: `projects/([^/]+)/locations/([^/]+)/keyRings/([^/]+)/cryptoKeys/([^/]+)` + immutable: true + - name: 'host' + type: String + description: | + Output only. Hostname or IP address of the exposed Apigee endpoint used by clients to connect to the service. + output: true + - name: 'port' + type: String + description: | + Output only. Port number of the exposed Apigee endpoint. + output: true + - name: 'consumerAcceptList' + type: Array + description: | + Optional. Customer accept list represents the list of projects (id/number) on customer + side that can privately connect to the service attachment. It is an optional field + which the customers can provide during the instance creation. By default, the customer + project associated with the Apigee organization will be included to the list. + required: false + default_from_api: true + diff_suppress_func: 'projectListDiffSuppress' + item_type: + type: String + - name: 'serviceAttachment' + type: String + description: | + Output only. Resource name of the service attachment created for the instance in + the format: projects/*/regions/*/serviceAttachments/* Apigee customers can privately + forward traffic to this service attachment using the PSC endpoints. + output: true diff --git a/mmv1/products/apigee/go_InstanceAttachment.yaml b/mmv1/products/apigee/go_InstanceAttachment.yaml new file mode 100644 index 000000000000..bf5450c38978 --- /dev/null +++ b/mmv1/products/apigee/go_InstanceAttachment.yaml @@ -0,0 +1,86 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'InstanceAttachment' +description: | + An `Instance attachment` in Apigee. +references: + guides: + 'Creating an environment': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-environment' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.instances.attachments/create' +docs: +base_url: '{{instance_id}}/attachments' +self_link: '{{instance_id}}/attachments/{{name}}' +create_url: '{{instance_id}}/attachments' +delete_url: '{{instance_id}}/attachments/{{name}}' +immutable: true +mutex: 'apigeeInstanceAttachments' +import_format: + - '{{instance_id}}/attachments/{{name}}' + - '{{instance_id}}/{{name}}' +timeouts: + insert_minutes: 30 + update_minutes: 20 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_instance_attachment.go.tmpl' +skip_sweeper: true +examples: + - name: 'apigee_instance_attachment_basic' + vars: + project_id: 'my-project' + instance_name: 'my-instance-name' + environment_name: 'my-environment-name' + skip_test: true + - name: 'apigee_instance_attachment_basic_test' + primary_resource_id: 'apigee_instance_attachment' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + 
skip_docs: true + skip_vcr: true +parameters: + - name: 'instanceId' + type: String + description: | + The Apigee instance associated with the Apigee environment, + in the format `organizations/{{org_name}}/instances/{{instance_name}}`. + url_param_only: true + required: true +properties: + - name: 'environment' + type: String + description: | + The resource ID of the environment. + required: true + - name: 'name' + type: String + description: | + The name of the newly created attachment (output parameter). + output: true diff --git a/mmv1/products/apigee/go_KeystoresAliasesSelfSignedCert.yaml b/mmv1/products/apigee/go_KeystoresAliasesSelfSignedCert.yaml new file mode 100644 index 000000000000..442341fb4a16 --- /dev/null +++ b/mmv1/products/apigee/go_KeystoresAliasesSelfSignedCert.yaml @@ -0,0 +1,218 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'KeystoresAliasesSelfSignedCert' +description: | + An Environment Keystore Alias for Self Signed Certificate Format in Apigee +references: + guides: + 'Creating an environment': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-environment' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.environments.keystores.aliases/create' +docs: +base_url: 'organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}' +self_link: 'organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}' +create_url: 'organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases?alias={{alias}}&format=selfsignedcert' +delete_url: 'organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}' +immutable: true +import_format: + - 'organizations/{{org_id}}/environments/{{environment}}/keystores/{{keystore}}/aliases/{{alias}}' +timeouts: + insert_minutes: 30 + update_minutes: 20 + delete_minutes: 30 +autogen_async: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_env_keystore_alias_self_signed_cert.go.tmpl' +skip_sweeper: true +examples: + - name: 'apigee_env_keystore_alias_self_signed_cert' + primary_resource_id: 'apigee_environment_keystore_ss_alias' + vars: + project_id: 'my-project' + environment_name: 'env-name' + keystore_name: 'env-keystore' + keystores_alias: 'alias' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_vcr: true +parameters: + - name: 'orgId' + type: String + description: | + The Apigee Organization name associated with the Apigee environment + url_param_only: true + required: true + immutable: true + - name: 'environment' + type: String + description: | + The Apigee environment name + url_param_only: true + required: true + immutable: true + - 
name: 'keystore' + type: String + description: | + The Apigee keystore name associated in an Apigee environment + url_param_only: true + required: true + immutable: true + - name: 'alias' + type: String + description: | + Alias for the key/certificate pair. Values must match the regular expression [\w\s-.]{1,255}. + This must be provided for all formats except selfsignedcert; self-signed certs may specify the alias in either + this parameter or the JSON body. + required: true + immutable: true + - name: 'subjectAlternativeDnsNames' + type: NestedObject + description: | + List of alternative host names. Maximum length is 255 characters for each value. + immutable: true + properties: + - name: 'subjectAlternativeName' + type: String + description: Subject Alternative Name + - name: 'keySize' + type: String + description: | + Key size. Default and maximum value is 2048 bits. + ignore_read: true + - name: 'sigAlg' + type: String + description: | + Signature algorithm to generate private key. Valid values are SHA512withRSA, SHA384withRSA, and SHA256withRSA + required: true + immutable: true + ignore_read: true + - name: 'subject' + type: NestedObject + description: Subject details. + required: true + immutable: true + ignore_read: true + properties: + - name: 'countryCode' + type: String + description: + Two-letter country code. Example, IN for India, US for United States + of America. + ignore_read: true + - name: 'state' + type: String + description: State or district name. Maximum length is 128 characters. + ignore_read: true + - name: 'locality' + type: String + description: City or town name. Maximum length is 128 characters. + ignore_read: true + - name: 'org' + type: String + description: Organization name. Maximum length is 64 characters. + ignore_read: true + - name: 'orgUnit' + type: String + description: Organization team name. Maximum length is 64 characters. 
+ ignore_read: true + - name: 'commonName' + type: String + description: | + Common name of the organization. Maximum length is 64 characters. + ignore_read: true + - name: 'email' + type: String + description: Email address. Max 255 characters. + ignore_read: true + - name: 'certValidityInDays' + type: Integer + description: | + Validity duration of certificate, in days. Accepts positive non-zero value. Defaults to 365. + immutable: true + ignore_read: true +properties: + - name: 'certsInfo' + type: NestedObject + description: Chain of certificates under this alias. + output: true + properties: + - name: 'certInfo' + type: Array + description: List of all properties in the object. + output: true + item_type: + type: NestedObject + properties: + - name: 'version' + type: Integer + description: X.509 version. + output: true + - name: 'subject' + type: String + description: X.509 subject. + output: true + - name: 'issuer' + type: String + description: X.509 issuer. + output: true + - name: 'expiryDate' + type: String + description: + X.509 notAfter validity period in milliseconds since epoch. + output: true + - name: 'validFrom' + type: String + description: + X.509 notBefore validity period in milliseconds since epoch. + output: true + - name: 'isValid' + type: String + description: | + Flag that specifies whether the certificate is valid. + Flag is set to Yes if the certificate is valid, No if expired, or Not yet if not yet valid. + output: true + - name: 'subjectAlternativeNames' + type: Array + description: X.509 subject alternative names (SANs) extension. + output: true + item_type: + type: String + - name: 'sigAlgName' + type: String + description: X.509 signatureAlgorithm. + output: true + - name: 'publicKey' + type: String + description: + Public key component of the X.509 subject public key info. + output: true + - name: 'basicConstraints' + type: String + description: X.509 basic constraints extension. 
+      output: true
+  - name: 'serialNumber'
+    type: String
+    description: X.509 serial number.
+    output: true
+  - name: 'type'
+    type: Enum
+    description: |
+      Optional. Type of Alias
+    output: true
+    enum_values:
+      - 'ALIAS_TYPE_UNSPECIFIED'
+      - 'CERT'
+      - 'KEY_CERT'
diff --git a/mmv1/products/apigee/go_NatAddress.yaml b/mmv1/products/apigee/go_NatAddress.yaml
new file mode 100644
index 000000000000..e306ed2f6e5e
--- /dev/null
+++ b/mmv1/products/apigee/go_NatAddress.yaml
@@ -0,0 +1,89 @@
+# Copyright 2024 Google Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Warning: This is a temporary file, and should not be edited directly
+---
+name: 'NatAddress'
+description: |
+  Apigee NAT (network address translation) address. A NAT address is a static external IP address used for Internet egress traffic. This is not available for Apigee hybrid.
+  Apigee NAT addresses are not automatically activated because they might require explicit allow entries on the target systems first.
See https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.instances.natAddresses/activate +references: + guides: + 'Provisioning NAT IPs': 'https://cloud.google.com/apigee/docs/api-platform/security/nat-provisioning' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.instances.natAddresses' +docs: +base_url: '{{instance_id}}/natAddresses' +self_link: '{{instance_id}}/natAddresses/{{name}}' +create_url: '{{instance_id}}/natAddresses' +delete_url: '{{instance_id}}/natAddresses/{{name}}' +immutable: true +import_format: + - '{{instance_id}}/natAddresses/{{name}}' + - '{{instance_id}}/{{name}}' +timeouts: + insert_minutes: 30 + update_minutes: 20 + delete_minutes: 30 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_nat_address.go.tmpl' +skip_sweeper: true +examples: + - name: 'apigee_nat_address_basic' + vars: + nat_address_name: 'my-nat-address' + skip_test: true + - name: 'apigee_nat_address_basic_test' + primary_resource_id: 'apigee_nat_address' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true +parameters: + - name: 'instanceId' + type: String + description: | + The Apigee instance associated with the Apigee environment, + in the format `organizations/{{org_name}}/instances/{{instance_name}}`. + url_param_only: true + required: true +properties: + - name: 'name' + type: String + description: | + Resource ID of the NAT address. + required: true + - name: 'ipAddress' + type: String + description: | + The allocated NAT IP address. + output: true + - name: 'state' + type: String + description: | + State of the NAT IP address. 
+ output: true diff --git a/mmv1/products/apigee/go_Organization.yaml b/mmv1/products/apigee/go_Organization.yaml new file mode 100644 index 000000000000..69042edd4c8f --- /dev/null +++ b/mmv1/products/apigee/go_Organization.yaml @@ -0,0 +1,245 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Organization' +description: | + An `Organization` is the top-level container in Apigee. 
+references: + guides: + 'Creating an API organization': 'https://cloud.google.com/apigee/docs/api-platform/get-started/create-org' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations' +docs: +base_url: 'organizations' +self_link: 'organizations/{{name}}' +create_url: 'organizations?parent=projects/{{project_id}}' +delete_url: 'organizations/{{name}}?retention={{retention}}' +timeouts: + insert_minutes: 45 + update_minutes: 45 + delete_minutes: 45 +autogen_async: true +async: + actions: ['create', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 45 + update_minutes: 45 + delete_minutes: 45 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + encoder: 'templates/terraform/encoders/go/apigee_organization.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/apigee_organization.go.tmpl' +examples: + - name: 'apigee_organization_cloud_basic' + skip_test: true + - name: 'apigee_organization_cloud_basic_test' + primary_resource_id: 'org' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + ignore_read_extra: + - 'properties' + skip_docs: true + skip_vcr: true + - name: 'apigee_organization_cloud_basic_disable_vpc_peering' + skip_test: true + - name: 'apigee_organization_cloud_basic_disable_vpc_peering_test' + primary_resource_id: 'org' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + ignore_read_extra: + - 'properties' + skip_docs: true + skip_vcr: true + - name: 'apigee_organization_cloud_full' + skip_test: true + - name: 'apigee_organization_cloud_full_test' + primary_resource_id: 'org' + min_version: 'beta' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + ignore_read_extra: + - 'properties' + skip_docs: true + skip_vcr: true + - name: 'apigee_organization_cloud_full_disable_vpc_peering' + skip_test: true + - 
name: 'apigee_organization_cloud_full_disable_vpc_peering_test' + primary_resource_id: 'org' + min_version: 'beta' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + ignore_read_extra: + - 'properties' + skip_docs: true + skip_vcr: true + - name: 'apigee_organization_retention_test' + primary_resource_id: 'org' + min_version: 'beta' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true + - name: 'apigee_organization_drz_test' + primary_resource_id: 'org' + min_version: 'beta' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true + skip_vcr: true +parameters: + - name: 'projectId' + type: String + description: | + The project ID associated with the Apigee organization. + url_param_only: true + required: true + immutable: true + - name: 'retention' + type: Enum + description: | + Optional. This setting is applicable only for organizations that are soft-deleted (i.e., BillingType + is not EVALUATION). It controls how long Organization data will be retained after the initial delete + operation completes. During this period, the Organization may be restored to its last known state. + After this period, the Organization will no longer be able to be restored. + url_param_only: true + required: false + default_value: "DELETION_RETENTION_UNSPECIFIED" + enum_values: + - 'DELETION_RETENTION_UNSPECIFIED' + - 'MINIMUM' +properties: + - name: 'name' + type: String + description: | + Output only. Name of the Apigee organization. + output: true + - name: 'displayName' + type: String + description: | + The display name of the Apigee organization. + - name: 'description' + type: String + description: | + Description of the Apigee organization. + - name: 'analyticsRegion' + type: String + description: | + Primary GCP region for analytics data storage. For valid values, see [Create an Apigee organization](https://cloud.google.com/apigee/docs/api-platform/get-started/create-org). 
+ immutable: true + - name: 'apiConsumerDataLocation' + type: String + description: | + This field is needed only for customers using non-default data residency regions. + Apigee stores some control plane data only in single region. + This field determines which single region Apigee should use. + immutable: true + - name: 'apiConsumerDataEncryptionKeyName' + type: String + description: | + Cloud KMS key name used for encrypting API consumer data. + immutable: true + - name: 'controlPlaneEncryptionKeyName' + type: String + description: | + Cloud KMS key name used for encrypting control plane data that is stored in a multi region. + Only used for the data residency region "US" or "EU". + immutable: true + - name: 'authorizedNetwork' + type: String + description: | + Compute Engine network used for Service Networking to be peered with Apigee runtime instances. + See [Getting started with the Service Networking API](https://cloud.google.com/service-infrastructure/docs/service-networking/getting-started). + Valid only when `RuntimeType` is set to CLOUD. The value can be updated only when there are no runtime instances. For example: "default". + - name: 'disableVpcPeering' + type: Boolean + description: | + Flag that specifies whether the VPC Peering through Private Google Access should be + disabled between the consumer network and Apigee. Required if an `authorizedNetwork` + on the consumer project is not provided, in which case the flag should be set to `true`. + Valid only when `RuntimeType` is set to CLOUD. The value must be set before the creation + of any Apigee runtime instance and can be updated only when there are no runtime instances. + - name: 'runtimeType' + type: Enum + description: | + Runtime type of the Apigee organization based on the Apigee subscription purchased. + immutable: true + default_value: "CLOUD" + enum_values: + - 'CLOUD' + - 'HYBRID' + - name: 'subscriptionType' + type: String + description: | + Output only. 
Subscription type of the Apigee organization. + Valid values include trial (free, limited, and for evaluation purposes only) or paid (full subscription has been purchased). + output: true + - name: 'billingType' + type: String + description: | + Billing type of the Apigee organization. See [Apigee pricing](https://cloud.google.com/apigee/pricing). + immutable: true + default_from_api: true + - name: 'caCertificate' + type: String + description: | + Output only. Base64-encoded public certificate for the root CA of the Apigee organization. + Valid only when `RuntimeType` is CLOUD. A base64-encoded string. + output: true + - name: 'runtimeDatabaseEncryptionKeyName' + type: String + description: | + Cloud KMS key name used for encrypting the data that is stored and replicated across runtime instances. + Update is not allowed after the organization is created. + If not specified, a Google-Managed encryption key will be used. + Valid only when `RuntimeType` is CLOUD. For example: `projects/foo/locations/us/keyRings/bar/cryptoKeys/baz`. + immutable: true + - name: 'properties' + type: NestedObject + description: Properties defined in the Apigee organization profile. + default_from_api: true + properties: + - name: 'property' + type: Array + description: List of all properties in the object. + custom_flatten: 'templates/terraform/custom_flatten/go/apigee_organization_property.go.tmpl' + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: Name of the property. + - name: 'value' + type: String + description: Value of the property. + - name: 'apigeeProjectId' + type: String + description: | + Output only. Project ID of the Apigee Tenant Project. + output: true diff --git a/mmv1/products/apigee/go_SyncAuthorization.yaml b/mmv1/products/apigee/go_SyncAuthorization.yaml new file mode 100644 index 000000000000..ed84fd5269aa --- /dev/null +++ b/mmv1/products/apigee/go_SyncAuthorization.yaml @@ -0,0 +1,76 @@ +# Copyright 2024 Google Inc. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SyncAuthorization' +description: | + Authorize the Synchronizer to download environment data from the control plane. +references: + guides: + 'Enable Synchronizer access': 'https://cloud.google.com/apigee/docs/hybrid/v1.8/synchronizer-access#enable-synchronizer-access' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations#getsyncauthorization' +docs: +id_format: 'organizations/{{name}}/syncAuthorization' +base_url: '' +self_link: 'organizations/{{name}}:getSyncAuthorization' +create_url: 'organizations/{{name}}:setSyncAuthorization' +update_url: 'organizations/{{name}}:setSyncAuthorization' +update_verb: 'POST' +read_verb: 'POST' +skip_delete: true +import_format: + - 'organizations/{{name}}/syncAuthorization' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'apigee_sync_authorization_basic_test' + primary_resource_id: 'apigee_sync_authorization' + vars: + account_id: 'my-account' + project_id: 'my-project' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' +parameters: + - name: 'name' + type: String + description: | + Name of the Apigee organization. 
+ url_param_only: true + required: true + immutable: true +properties: + - name: 'identities' + type: Array + description: | + Array of service accounts to grant access to control plane resources, each specified using the following format: `serviceAccount:service-account-name`. + + The `service-account-name` is formatted like an email address. For example: my-synchronizer-manager-serviceAccount@my_project_id.iam.gserviceaccount.com + + You might specify multiple service accounts, for example, if you have multiple environments and wish to assign a unique service account to each one. + + The service accounts must have **Apigee Synchronizer Manager** role. See also [Create service accounts](https://cloud.google.com/apigee/docs/hybrid/v1.8/sa-about#create-the-service-accounts). + required: true + send_empty_value: true + item_type: + type: String + - name: 'etag' + type: Fingerprint + description: | + Entity tag (ETag) used for optimistic concurrency control as a way to help prevent simultaneous updates from overwriting each other. + Used internally during updates. + output: true diff --git a/mmv1/products/apigee/go_TargetServer.yaml b/mmv1/products/apigee/go_TargetServer.yaml new file mode 100644 index 000000000000..1eba3ab6d617 --- /dev/null +++ b/mmv1/products/apigee/go_TargetServer.yaml @@ -0,0 +1,157 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'TargetServer' +description: | + TargetServer configuration. TargetServers are used to decouple a proxy TargetEndpoint HTTPTargetConnections from concrete URLs for backend services. +references: + guides: + 'Load balancing across backend servers': 'https://cloud.google.com/apigee/docs/api-platform/deploy/load-balancing-across-backend-servers' + api: 'https://cloud.google.com/apigee/docs/reference/apis/apigee/rest/v1/organizations.environments.targetservers/create' +docs: +base_url: '{{env_id}}/targetservers' +self_link: '{{env_id}}/targetservers/{{name}}' +create_url: '{{env_id}}/targetservers' +update_url: '{{env_id}}/targetservers/{{name}}' +delete_url: '{{env_id}}/targetservers/{{name}}' +import_format: + - '{{env_id}}/targetservers/{{name}}' + - '{{env_id}}/{{name}}' +timeouts: + insert_minutes: 1 + update_minutes: 1 + delete_minutes: 1 +autogen_async: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/apigee_target_server.go.tmpl' +skip_sweeper: true +examples: + - name: 'apigee_target_server_test_basic' + primary_resource_id: 'apigee_target_server' + vars: + project_id: 'my-project' + environment_name: 'my-environment-name' + target_server: 'my-target-server' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_test: true + - name: 'apigee_target_server_test' + primary_resource_id: 'apigee_target_server' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + skip_docs: true +parameters: + - name: 'envId' + type: String + description: | + The Apigee environment group associated with the Apigee environment, + in the format `organizations/{{org_name}}/environments/{{env_name}}`. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource id of this reference. Values must match the regular expression [\w\s-.]+. 
+ required: true + immutable: true + - name: 'description' + type: String + description: | + A human-readable description of this TargetServer. + - name: 'host' + type: String + description: | + The host name this target connects to. Value must be a valid hostname as described by RFC-1123. + required: true + - name: 'port' + type: Integer + description: | + The port number this target connects to on the given host. Value must be between 1 and 65535, inclusive. + required: true + - name: 'isEnabled' + type: Boolean + description: | + Enabling/disabling a TargetServer is useful when TargetServers are used in load balancing configurations, and one or more TargetServers need to be taken out of rotation periodically. Defaults to true. + default_value: true + - name: 'sSLInfo' + type: NestedObject + description: Specifies TLS configuration info for this TargetServer. The JSON name is sSLInfo for legacy/backwards compatibility reasons -- Edge originally supported SSL, and the name is still used for TLS configuration. + properties: + - name: 'enabled' + type: Boolean + description: | + Enables TLS. If false, neither one-way nor two-way TLS will be enabled. + required: true + - name: 'clientAuthEnabled' + type: Boolean + description: | + Enables two-way TLS. + - name: 'keyStore' + type: String + description: | + Required if clientAuthEnabled is true. The resource ID of the keystore. + - name: 'keyAlias' + type: String + description: | + Required if clientAuthEnabled is true. The resource ID for the alias containing the private key and cert. + - name: 'trustStore' + type: String + description: | + The resource ID of the truststore. + - name: 'ignoreValidationErrors' + type: Boolean + description: | + If true, Edge ignores TLS certificate errors. Valid when configuring TLS for target servers and target endpoints, and when configuring virtual hosts that use 2-way TLS. 
When used with a target endpoint/target server, if the backend system uses SNI and returns a cert with a subject Distinguished Name (DN) that does not match the hostname, there is no way to ignore the error and the connection fails. + - name: 'protocols' + type: Array + description: | + The TLS versions to be used. + item_type: + type: String + - name: 'ciphers' + type: Array + description: | + The SSL/TLS cipher suites to be used. For programmable proxies, it must be one of the cipher suite names listed in: http://docs.oracle.com/javase/8/docs/technotes/guides/security/StandardNames.html#ciphersuites. For configurable proxies, it must follow the configuration specified in: https://commondatastorage.googleapis.com/chromium-boringssl-docs/ssl.h.html#Cipher-suite-configuration. This setting has no effect for configurable proxies when negotiating TLS 1.3. + item_type: + type: String + - name: 'commonName' + type: NestedObject + description: The TLS Common Name of the certificate. + properties: + - name: 'value' + type: String + description: | + The TLS Common Name string of the certificate. + - name: 'wildcardMatch' + type: Boolean + description: | + Indicates whether the cert should be matched against as a wildcard cert. + + - name: 'protocol' + type: Enum + description: | + Immutable. The protocol used by this TargetServer. + immutable: true + default_from_api: true + enum_values: + - 'HTTP' + - 'HTTP2' + - 'GRPC_TARGET' + - 'GRPC' + - 'EXTERNAL_CALLOUT' diff --git a/mmv1/products/apigee/go_product.yaml b/mmv1/products/apigee/go_product.yaml new file mode 100644 index 000000000000..944e02301a90 --- /dev/null +++ b/mmv1/products/apigee/go_product.yaml @@ -0,0 +1,22 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Apigee' +display_name: 'Apigee' +versions: + - name: 'ga' + base_url: 'https://apigee.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/compute/go_Autoscaler.yaml b/mmv1/products/compute/go_Autoscaler.yaml index 2625bed10ec8..86eec2f2de5c 100644 --- a/mmv1/products/compute/go_Autoscaler.yaml +++ b/mmv1/products/compute/go_Autoscaler.yaml @@ -132,6 +132,7 @@ properties: of replicas. api_name: maxNumReplicas required: true + send_empty_value: true - name: 'cooldownPeriod' type: Integer description: | diff --git a/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml index cb6e3777442c..77f911edccf0 100644 --- a/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml +++ b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml @@ -28,7 +28,7 @@ create_url: 'projects/{{project}}/global/backendBuckets/{{backend_bucket}}/addSi delete_url: 'projects/{{project}}/global/backendBuckets/{{backend_bucket}}/deleteSignedUrlKey?keyName={{name}}' delete_verb: 'POST' immutable: true -mutex: signedUrlKey/{{project}}/backendBuckets/{{backend_bucket}}/ +mutex: 'signedUrlKey/{{project}}/backendBuckets/{{backend_bucket}}/' exclude_import: true timeouts: insert_minutes: 20 diff --git a/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml index b12dcdc74bb0..b03ba69ac476 100644 --- 
a/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml +++ b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml @@ -28,7 +28,7 @@ create_url: 'projects/{{project}}/global/backendServices/{{backend_service}}/add delete_url: 'projects/{{project}}/global/backendServices/{{backend_service}}/deleteSignedUrlKey?keyName={{name}}' delete_verb: 'POST' immutable: true -mutex: signedUrlKey/{{project}}/backendServices/{{backend_service}}/ +mutex: 'signedUrlKey/{{project}}/backendServices/{{backend_service}}/' exclude_import: true timeouts: insert_minutes: 20 diff --git a/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml index 81d152748bad..fe29d45d0ce4 100644 --- a/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml +++ b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml @@ -32,7 +32,7 @@ read_verb: 'POST' delete_url: 'projects/{{project}}/global/networkEndpointGroups/{{global_network_endpoint_group}}/detachNetworkEndpoints' delete_verb: 'POST' immutable: true -mutex: networkEndpoint/{{project}}/{{global_network_endpoint_group}} +mutex: 'networkEndpoint/{{project}}/{{global_network_endpoint_group}}' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/compute/go_InstanceGroupMembership.yaml b/mmv1/products/compute/go_InstanceGroupMembership.yaml index d5592ed914cb..80b4bd6e974f 100644 --- a/mmv1/products/compute/go_InstanceGroupMembership.yaml +++ b/mmv1/products/compute/go_InstanceGroupMembership.yaml @@ -41,7 +41,7 @@ read_verb: 'POST' delete_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{instance_group}}/removeInstances' delete_verb: 'POST' immutable: true -mutex: instanceGroups/{{project}}/zones/{{zone}}/{{instance_group}} +mutex: 'instanceGroups/{{project}}/zones/{{zone}}/{{instance_group}}' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/compute/go_InstanceGroupNamedPort.yaml b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml index 
2a896ad3aad0..0e7e8cc5dde3 100644 --- a/mmv1/products/compute/go_InstanceGroupNamedPort.yaml +++ b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml @@ -31,7 +31,7 @@ create_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNam delete_url: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/setNamedPorts' delete_verb: 'POST' immutable: true -mutex: projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}} +mutex: 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}' import_format: - 'projects/{{project}}/zones/{{zone}}/instanceGroups/{{group}}/{{port}}/{{name}}' timeouts: diff --git a/mmv1/products/compute/go_NetworkEndpoint.yaml b/mmv1/products/compute/go_NetworkEndpoint.yaml index 4ed2e1b8ed11..626d38e5138d 100644 --- a/mmv1/products/compute/go_NetworkEndpoint.yaml +++ b/mmv1/products/compute/go_NetworkEndpoint.yaml @@ -38,7 +38,7 @@ read_verb: 'POST' delete_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/detachNetworkEndpoints' delete_verb: 'POST' immutable: true -mutex: networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}} +mutex: 'networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/compute/go_NetworkEndpoints.yaml b/mmv1/products/compute/go_NetworkEndpoints.yaml index d8248ee4c88c..89fc3591b842 100644 --- a/mmv1/products/compute/go_NetworkEndpoints.yaml +++ b/mmv1/products/compute/go_NetworkEndpoints.yaml @@ -42,7 +42,7 @@ update_verb: 'POST' read_verb: 'POST' delete_url: 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}/detachNetworkEndpoints' delete_verb: 'POST' -mutex: networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}} +mutex: 'networkEndpoint/{{project}}/{{zone}}/{{network_endpoint_group}}' import_format: - 'projects/{{project}}/zones/{{zone}}/networkEndpointGroups/{{network_endpoint_group}}' timeouts: diff --git 
a/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml index b1e00931aeb0..ff78cb57b720 100644 --- a/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml +++ b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml @@ -32,7 +32,7 @@ create_verb: 'PATCH' update_url: 'projects/{{project}}/global/networks/{{network}}/updatePeering' update_verb: 'PATCH' skip_delete: true -mutex: projects/{{project}}/global/networks/{{network}}/peerings +mutex: 'projects/{{project}}/global/networks/{{network}}/peerings' import_format: - 'projects/{{project}}/global/networks/{{network}}/networkPeerings/{{peering}}' timeouts: diff --git a/mmv1/products/compute/go_PerInstanceConfig.yaml b/mmv1/products/compute/go_PerInstanceConfig.yaml index 3ca02f701b76..7134e2e74f1c 100644 --- a/mmv1/products/compute/go_PerInstanceConfig.yaml +++ b/mmv1/products/compute/go_PerInstanceConfig.yaml @@ -31,7 +31,7 @@ update_verb: 'POST' read_verb: 'POST' delete_url: 'projects/{{project}}/zones/{{zone}}/instanceGroupManagers/{{instance_group_manager}}/deletePerInstanceConfigs' delete_verb: 'POST' -mutex: instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}} +mutex: 'instanceGroupManager/{{project}}/{{zone}}/{{instance_group_manager}}' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/compute/go_RegionAutoscaler.yaml b/mmv1/products/compute/go_RegionAutoscaler.yaml index 3284e5faa1bd..43c28318b020 100644 --- a/mmv1/products/compute/go_RegionAutoscaler.yaml +++ b/mmv1/products/compute/go_RegionAutoscaler.yaml @@ -119,6 +119,7 @@ properties: of replicas. 
api_name: maxNumReplicas required: true + send_empty_value: true - name: 'cooldownPeriod' type: Integer description: | diff --git a/mmv1/products/compute/go_RegionNetworkEndpoint.yaml b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml index a98401a6ddf5..b74ab976f8f6 100644 --- a/mmv1/products/compute/go_RegionNetworkEndpoint.yaml +++ b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml @@ -34,7 +34,7 @@ read_verb: 'POST' delete_url: 'projects/{{project}}/regions/{{region}}/networkEndpointGroups/{{region_network_endpoint_group}}/detachNetworkEndpoints' delete_verb: 'POST' immutable: true -mutex: networkEndpoint/{{project}}/{{region}}/{{region_network_endpoint_group}} +mutex: 'networkEndpoint/{{project}}/{{region}}/{{region_network_endpoint_group}}' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml index 492502f2190f..c8c275451f3f 100644 --- a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml +++ b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml @@ -32,7 +32,7 @@ update_verb: 'POST' read_verb: 'POST' delete_url: 'projects/{{project}}/regions/{{region}}/instanceGroupManagers/{{region_instance_group_manager}}/deletePerInstanceConfigs' delete_verb: 'POST' -mutex: instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}} +mutex: 'instanceGroupManager/{{project}}/{{region}}/{{region_instance_group_manager}}' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/compute/go_Route.yaml b/mmv1/products/compute/go_Route.yaml index b1bfaa4647d2..5d6cd4498bf3 100644 --- a/mmv1/products/compute/go_Route.yaml +++ b/mmv1/products/compute/go_Route.yaml @@ -52,7 +52,7 @@ docs: base_url: 'projects/{{project}}/global/routes' has_self_link: true immutable: true -mutex: projects/{{project}}/global/networks/{{network}}/peerings +mutex: 'projects/{{project}}/global/networks/{{network}}/peerings' timeouts: 
insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/compute/go_Router.yaml b/mmv1/products/compute/go_Router.yaml index 63e95ff1f3d0..f0ab6e85ebef 100644 --- a/mmv1/products/compute/go_Router.yaml +++ b/mmv1/products/compute/go_Router.yaml @@ -25,7 +25,7 @@ docs: base_url: 'projects/{{project}}/regions/{{region}}/routers' has_self_link: true update_verb: 'PATCH' -mutex: router/{{region}}/{{name}} +mutex: 'router/{{region}}/{{name}}' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/compute/go_RouterNat.yaml b/mmv1/products/compute/go_RouterNat.yaml index 820cf11b61f6..d53602cf5d13 100644 --- a/mmv1/products/compute/go_RouterNat.yaml +++ b/mmv1/products/compute/go_RouterNat.yaml @@ -30,7 +30,7 @@ update_url: 'projects/{{project}}/regions/{{region}}/routers/{{router}}' update_verb: 'PATCH' delete_url: 'projects/{{project}}/regions/{{region}}/routers/{{router}}' delete_verb: 'PATCH' -mutex: router/{{region}}/{{router}} +mutex: 'router/{{region}}/{{router}}' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl b/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl index 2c3cacc96b6a..f68ee25c1e23 100644 --- a/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl +++ b/mmv1/templates/terraform/custom_flatten/go/default_if_empty.tmpl @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/}} +*/ -}} func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { if v == nil || tpgresource.IsEmptyValue(reflect.ValueOf(v)) { return {{$.GoLiteral $.DefaultValue}} diff --git a/mmv1/templates/terraform/custom_import/apigee_endpoint_attachment.go.erb b/mmv1/templates/terraform/custom_import/apigee_endpoint_attachment.go.erb index f9cc71e0283e..9bfc3aeab667 100644 --- a/mmv1/templates/terraform/custom_import/apigee_endpoint_attachment.go.erb +++ b/mmv1/templates/terraform/custom_import/apigee_endpoint_attachment.go.erb @@ -2,32 +2,31 @@ config := meta.(*transport_tpg.Config) // current import_formats cannot import fields with forward slashes in their value if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err + return nil, err } nameParts := strings.Split(d.Get("name").(string), "/") if len(nameParts) == 4 { - // `organizations/{{org_name}}/endpointAttachment/{{endpoint_attachment_id}}` - orgId := fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("endpoint_attachment_id", nameParts[3]); err != nil { - return nil, fmt.Errorf("Error setting endpoint_attachment_id: %s", err) - } + // `organizations/{{org_name}}/endpointAttachment/{{endpoint_attachment_id}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("endpoint_attachment_id", nameParts[3]); err != nil { + return nil, fmt.Errorf("Error setting endpoint_attachment_id: %s", err) + } } else { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have shape %s", - d.Get("name"), - "organizations/{{org_name}}/environments/{{name}}") + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + 
d.Get("name"), + "organizations/{{org_name}}/environments/{{name}}") } // Replace import id for the resource id id, err := tpgresource.ReplaceVars(d, config, "{{name}}") if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) + return nil, fmt.Errorf("Error constructing id: %s", err) } d.SetId(id) return []*schema.ResourceData{d}, nil - diff --git a/mmv1/templates/terraform/custom_import/go/apigee_endpoint_attachment.go.tmpl b/mmv1/templates/terraform/custom_import/go/apigee_endpoint_attachment.go.tmpl index 2e0afd971f9d..6685ea3bd72b 100644 --- a/mmv1/templates/terraform/custom_import/go/apigee_endpoint_attachment.go.tmpl +++ b/mmv1/templates/terraform/custom_import/go/apigee_endpoint_attachment.go.tmpl @@ -2,32 +2,31 @@ config := meta.(*transport_tpg.Config) // current import_formats cannot import fields with forward slashes in their value if err := tpgresource.ParseImportId([]string{"(?P.+)"}, d, config); err != nil { - return nil, err + return nil, err } nameParts := strings.Split(d.Get("name").(string), "/") if len(nameParts) == 4 { - // `organizations/{{"{{"}}org_name{{"}}"}}/endpointAttachment/{{"{{"}}endpoint_attachment_id{{"}}"}}` - orgId := fmt.Sprintf("organizations/%s", nameParts[1]) - if err := d.Set("org_id", orgId); err != nil { - return nil, fmt.Errorf("Error setting org_id: %s", err) - } - if err := d.Set("endpoint_attachment_id", nameParts[3]); err != nil { - return nil, fmt.Errorf("Error setting endpoint_attachment_id: %s", err) - } + // `organizations/{{"{{"}}org_name{{"}}"}}/endpointAttachment/{{"{{"}}endpoint_attachment_id{{"}}"}}` + orgId := fmt.Sprintf("organizations/%s", nameParts[1]) + if err := d.Set("org_id", orgId); err != nil { + return nil, fmt.Errorf("Error setting org_id: %s", err) + } + if err := d.Set("endpoint_attachment_id", nameParts[3]); err != nil { + return nil, fmt.Errorf("Error setting endpoint_attachment_id: %s", err) + } } else { - return nil, fmt.Errorf( - "Saw %s when the name is expected to have 
shape %s", - d.Get("name"), - "organizations/{{"{{"}}org_name{{"}}"}}/environments/{{"{{"}}name{{"}}"}}") + return nil, fmt.Errorf( + "Saw %s when the name is expected to have shape %s", + d.Get("name"), + "organizations/{{"{{"}}org_name{{"}}"}}/environments/{{"{{"}}name{{"}}"}}") } // Replace import id for the resource id id, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}name{{"}}"}}") if err != nil { - return nil, fmt.Errorf("Error constructing id: %s", err) + return nil, fmt.Errorf("Error constructing id: %s", err) } d.SetId(id) return []*schema.ResourceData{d}, nil - diff --git a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl index 2af462ec3716..64cbf701b89f 100644 --- a/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl +++ b/mmv1/templates/terraform/examples/base_configs/iam_test_file.go.tmpl @@ -55,7 +55,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated(t *testing.T) { {{- if not $.IamPolicy.SkipImportTest }} { ResourceName: "{{ $.IamTerraformName }}_binding.foo", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ if $.IamImportQualifiersForTest }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), ImportState: true, ImportStateVerify: true, }, @@ -67,7 +67,7 @@ func TestAcc{{ $.ResourceName }}IamBindingGenerated(t *testing.T) { {{- if not $.IamPolicy.SkipImportTest }} { ResourceName: "{{ $.IamTerraformName }}_binding.foo", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }}", {{ if 
$.IamImportQualifiersForTest }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), ImportState: true, ImportStateVerify: true, }, @@ -102,7 +102,7 @@ func TestAcc{{ $.ResourceName }}IamMemberGenerated(t *testing.T) { {{- if not $.IamPolicy.SkipImportTest }} { ResourceName: "{{ $.IamTerraformName }}_member.foo", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }} {{ $.IamPolicy.AllowedIamRole }} user:admin@hashicorptest.com", {{ if $.IamImportQualifiersForTest }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), ImportState: true, ImportStateVerify: true, }, @@ -145,7 +145,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated(t *testing.T) { {{- if not $.IamPolicy.SkipImportTest }} { ResourceName: "{{ $.IamTerraformName }}_policy.foo", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }}", {{ if $.IamImportQualifiersForTest }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), ImportState: true, ImportStateVerify: true, }, @@ -156,7 +156,7 @@ func TestAcc{{ $.ResourceName }}IamPolicyGenerated(t *testing.T) { {{- if not $.IamPolicy.SkipImportTest }} { ResourceName: "{{ $.IamTerraformName }}_policy.foo", - ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }}", {{ $.IamImportQualifiersForTest }}, {{ $example.PrimaryResourceName }}), + ImportStateId: fmt.Sprintf("{{ $.IamResourceUriFormat }}", {{ if $.IamImportQualifiersForTest }}{{ $.IamImportQualifiersForTest }}, {{ end }}{{ $example.PrimaryResourceName }}), ImportState: true, ImportStateVerify: true, }, diff --git a/mmv1/templates/terraform/pre_update/go/netapp_storagepool.go.tmpl 
b/mmv1/templates/terraform/pre_update/go/netapp_storagepool.go.tmpl new file mode 100644 index 000000000000..f2a44072f39d --- /dev/null +++ b/mmv1/templates/terraform/pre_update/go/netapp_storagepool.go.tmpl @@ -0,0 +1,65 @@ +// detect manual zone switches for service level FLEX + +if d.Get("service_level").(string) == "FLEX" { + // Check if this is zonal or regional Flex. Only continue for regional pool + _, hasZone := d.GetOk("zone") + _, hasReplicaZone := d.GetOk("replica_zone") + if hasZone && hasReplicaZone { + // For a zone switch, user needs to swap zone and replica_zone. Other changes are not allowed + if d.HasChange("zone") && d.HasChange("replica_zone") { + oldZone, newZone := d.GetChange("zone") + oldReplicaZone, newReplicaZone := d.GetChange("replica_zone") + if newZone == oldReplicaZone && newReplicaZone == oldZone { + rawurl, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}NetappBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/storagePools/{{"{{"}}name{{"}}"}}:switch") + if err != nil { + return err + } + + reso, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: rawurl, + UserAgent: userAgent, + Timeout: d.Timeout(schema.TimeoutUpdate), + }) + if err != nil { + return fmt.Errorf("Error switching active zone for pool: %s, %v", d.Id(), err) + } + + err = NetappOperationWaitTime( + config, reso, project, "Switching active pool zone", userAgent, + d.Timeout(schema.TimeoutUpdate)) + if err != nil { + return err + } + + //remove zone and replicaZone from updateMask + n := 0 + for _, v := range updateMask { + if v != "zone" && v != "replicaZone" { + updateMask[n] = v + n++ + } + } + updateMask = updateMask[:n] + + // delete from payload too + delete(obj, "zone") + delete(obj, "replicaZone") + + // PATCH URL was already build prior to this code. 
We need to rebuild it to catch our changes + url, err = tpgresource.ReplaceVars(d, config, "{{"{{"}}NetappBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}location{{"}}"}}/storagePools/{{"{{"}}name{{"}}"}}") + if err != nil { + return err + } + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + } else { + return fmt.Errorf("Incorrect zone change for pool: %s. Supported zone, replica_zone are : %s, %s", d.Id(), oldZone, oldReplicaZone) + } + } + } +} diff --git a/mmv1/templates/terraform/resource.go.tmpl b/mmv1/templates/terraform/resource.go.tmpl index 4f59fc841d69..c63c3336f388 100644 --- a/mmv1/templates/terraform/resource.go.tmpl +++ b/mmv1/templates/terraform/resource.go.tmpl @@ -408,10 +408,12 @@ func resource{{ $.ResourceName -}}Create(d *schema.ResourceData, meta interface{ {{if $.GetAsync.IsA "PollAsync" -}} err = transport_tpg.PollingWaitTime(resource{{ $.ResourceName -}}PollRead(d, meta), {{ $.GetAsync.CheckResponseFuncExistence -}}, "Creating {{ $.Name -}}", d.Timeout(schema.TimeoutCreate), {{ $.GetAsync.TargetOccurrences -}}) if err != nil { -{{if $.GetAsync.SuppressError -}} +{{- if $.GetAsync.SuppressError -}} + log.Printf("[ERROR] Unable to confirm eventually consistent {{ $.Name }} %q finished updating: %q", d.Id(), err) -{{- else -}} -{{if $.CustomCode.PostCreateFailure -}} + +{{- else }} +{{- if $.CustomCode.PostCreateFailure -}} resource{{ $.ResourceName -}}PostCreateFailure(d, meta) {{- end}} return fmt.Errorf("Error waiting to create {{ $.Name -}}: %s", err) @@ -488,7 +490,7 @@ func resource{{ $.ResourceName -}}PollRead(d *schema.ResourceData, meta interfac if err != nil { return res, err } -{{- if or $.NestedQuery $.CustomCode.Decoder }} +{{- if $.CustomCode.Decoder }} {{""}} {{- end }} {{- if $.NestedQuery }} @@ -500,7 +502,9 @@ func resource{{ $.ResourceName -}}PollRead(d *schema.ResourceData, meta interfac if res == nil { return 
nil, tpgresource.Fake404("nested", "{{ $.ResourceName }}") } - +{{- if not $.CustomCode.Decoder }} +{{""}} +{{- end }} {{- end -}} {{- if $.CustomCode.Decoder -}} res, err = resource{{ $.ResourceName -}}Decoder(d, meta, res) diff --git a/mmv1/templates/terraform/resource_iam.html.markdown.tmpl b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl index 5ff2c6cc9dd0..312e02f6b37a 100644 --- a/mmv1/templates/terraform/resource_iam.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl @@ -298,8 +298,8 @@ $ terraform import {{ $.IamTerraformName }}_policy.editor {{ $.FirstIamImportIdF -> **Custom Roles**: If you're importing a IAM resource with a custom role, make sure to use the full name of the custom role, e.g. `[projects/my-project|organizations/my-org]/roles/my-custom-role`. +{{- if contains $.BaseUrl "{{project}}" }} -{{ if contains $.BaseUrl "{{project}}" -}} ## User Project Overrides This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 3c4ffd3605f8..4a944e2a281c 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -184,8 +184,8 @@ async: <% unless object.async.target_occurrences.nil? -%> target_occurrences: <%= object.async.target_occurrences %> <% end -%> - actions: ['<%= object.async.actions.join('\',\'') %>'] <% end -%> + actions: ['<%= object.async.actions.join('\', \'') %>'] <% if object.async.is_a? 
Api::OpAsync -%> type: 'OpAsync' <% #async.operation %> diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl index b6279ad6f280..79480fe95545 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster.go.tmpl @@ -1197,9 +1197,9 @@ func ResourceContainerCluster() *schema.Resource { Optional: true, Computed: true, {{- if eq $.TargetVersionName "ga" }} - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, KUBELET and CADVISOR.`, + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, KUBELET, CADVISOR and DCGM.`, {{- else }} - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, WORKLOADS, KUBELET and CADVISOR.`, + Description: `GKE components exposing metrics. 
Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, WORKLOADS, KUBELET, CADVISOR and DCGM.`, {{- end }} Elem: &schema.Schema{ Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl index 9c2a03316054..e823197d5234 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_cluster_migratev1.go.tmpl @@ -958,9 +958,9 @@ func resourceContainerClusterResourceV1() *schema.Resource { Optional: true, Computed: true, {{- if eq $.TargetVersionName "ga" }} - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT and STATEFULSET.`, + Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET and DCGM.`, {{- else }} - Description: `GKE components exposing metrics. Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET and WORKLOADS.`, + Description: `GKE components exposing metrics. 
Valid values include SYSTEM_COMPONENTS, APISERVER, SCHEDULER, CONTROLLER_MANAGER, STORAGE, HPA, POD, DAEMONSET, DEPLOYMENT, STATEFULSET, WORKLOADS and DCGM.`, {{- end }} Elem: &schema.Schema{ Type: schema.TypeString, diff --git a/mmv1/third_party/terraform/services/netapp/go/resource_netapp_storage_pool_test.go.tmpl b/mmv1/third_party/terraform/services/netapp/go/resource_netapp_storage_pool_test.go.tmpl new file mode 100644 index 000000000000..6ad76f77b0e3 --- /dev/null +++ b/mmv1/third_party/terraform/services/netapp/go/resource_netapp_storage_pool_test.go.tmpl @@ -0,0 +1,261 @@ +package netapp_test + +import ( + "testing" +{{- if ne $.TargetVersionName "ga" }} + "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +{{- end }} + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccNetappstoragePool_storagePoolCreateExample_update(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccNetappstoragePool_storagePoolCreateExample_full(context), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + { + Config: testAccNetappstoragePool_storagePoolCreateExample_update(context), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + }, + }) +} + +func testAccNetappstoragePool_storagePoolCreateExample_full(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource 
"google_compute_network" "peering_network" { + name = "tf-test-network%{random_suffix}" +} + +# Create an IP address +resource "google_compute_global_address" "private_ip_alloc" { + name = "tf-test-address%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.peering_network.id +} + +# Create a private connection +resource "google_service_networking_connection" "default" { + network = google_compute_network.peering_network.id + service = "netapp.servicenetworking.goog" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] +} + +resource "google_netapp_storage_pool" "test_pool" { + name = "tf-test-pool%{random_suffix}" + location = "us-central1" + service_level = "PREMIUM" + capacity_gib = "2048" + network = google_compute_network.peering_network.id + active_directory = "" + description = "this is a test description" + kms_config = "" + labels = { + key= "test" + value= "pool" + } + ldap_enabled = false + +} +`, context) +} + +func testAccNetappstoragePool_storagePoolCreateExample_update(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_compute_network" "peering_network" { + name = "tf-test-network%{random_suffix}" +} + +# Create an IP address +resource "google_compute_global_address" "private_ip_alloc" { + name = "tf-test-address%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 16 + network = google_compute_network.peering_network.id +} + +# Create a private connection +resource "google_service_networking_connection" "default" { + network = google_compute_network.peering_network.id + service = "netapp.servicenetworking.goog" + reserved_peering_ranges = [google_compute_global_address.private_ip_alloc.name] +} + +resource "google_netapp_storage_pool" "test_pool" { + name = "tf-test-pool%{random_suffix}" + location = "us-central1" + service_level = "PREMIUM" + capacity_gib = "4096" + network 
= google_compute_network.peering_network.id + active_directory = "" + description = "this is test" + kms_config = "" + labels = { + key= "test" + value= "pool" + } + ldap_enabled = false + +} +`, context) +} + +{{ if ne $.TargetVersionName `ga` -}} +func TestAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_update(t *testing.T) { + context := map[string]interface{}{ + "network_name": acctest.BootstrapSharedServiceNetworkingConnection(t, "gcnv-network-config-1", acctest.ServiceNetworkWithParentService("netapp.servicenetworking.goog")), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderBetaFactories(t), + CheckDestroy: testAccCheckNetappstoragePoolDestroyProducer(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_full(context), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + { + Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context), + Check: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins(), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + { + Config: testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context), + }, + { + ResourceName: "google_netapp_storage_pool.test_pool", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location", "name", "labels", "terraform_labels"}, + }, + }, + }) +} + +func 
testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_full(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_netapp_storage_pool" "test_pool" { + provider = google-beta + name = "tf-test-pool%{random_suffix}" + location = "us-east1" + service_level = "FLEX" + capacity_gib = "2048" + network = data.google_compute_network.default.id + zone = "us-east1-c" + replica_zone = "us-east1-b" +} + +resource "time_sleep" "wait_5_minutes" { + depends_on = [google_netapp_storage_pool.test_pool] + destroy_duration = "5m" +} + +data "google_compute_network" "default" { + provider = google-beta + name = "%{network_name}" +} +`, context) +} + +func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchZone(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_netapp_storage_pool" "test_pool" { + provider = google-beta + name = "tf-test-pool%{random_suffix}" + location = "us-east1" + service_level = "FLEX" + capacity_gib = "2048" + network = data.google_compute_network.default.id + zone = "us-east1-b" + replica_zone = "us-east1-c" +} + +resource "time_sleep" "wait_5_minutes" { + depends_on = [google_netapp_storage_pool.test_pool] + destroy_duration = "5m" +} + +data "google_compute_network" "default" { + provider = google-beta + name = "%{network_name}" +} +`, context) +} + +func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_sleep_5_mins() resource.TestCheckFunc { + return func(s *terraform.State) error { + // wait 5 minutes before executing the switchback due to api zone switch issues + time.Sleep(5 * time.Minute) + return nil + } +} + +func testAccNetappstoragePool_FlexRegionalStoragePoolCreateExample_switchBackZone(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_netapp_storage_pool" "test_pool" { + provider = google-beta + name = "tf-test-pool%{random_suffix}" + location = "us-east1" + service_level = "FLEX" + capacity_gib = "2048" + 
network = data.google_compute_network.default.id + zone = "us-east1-c" + replica_zone = "us-east1-b" +} + +resource "time_sleep" "wait_5_minutes" { + depends_on = [google_netapp_storage_pool.test_pool] + destroy_duration = "5m" +} + +data "google_compute_network" "default" { + provider = google-beta + name = "%{network_name}" +} +`, context) +} + +{{ end }} From 2d56f5903766dd2825c13b8c1b82cd3a7252c36a Mon Sep 17 00:00:00 2001 From: wafrelka Date: Tue, 2 Jul 2024 05:47:03 +0900 Subject: [PATCH 258/356] Fix `content` in the `google_storage_bucket_object_content` data source which is incorrectly marked as a non-computed attribute (#10778) --- ...ta_source_storage_bucket_object_content.go | 1 - ...urce_storage_bucket_object_content_test.go | 27 +++++++++++++++++++ ...torage_bucket_object_content.html.markdown | 2 +- 3 files changed, 28 insertions(+), 2 deletions(-) diff --git a/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content.go b/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content.go index af56c3c89295..fb1e36827c4d 100644 --- a/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content.go +++ b/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content.go @@ -18,7 +18,6 @@ func DataSourceGoogleStorageBucketObjectContent() *schema.Resource { tpgresource.AddRequiredFieldsToSchema(dsSchema, "bucket") tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") - tpgresource.AddOptionalFieldsToSchema(dsSchema, "content") return &schema.Resource{ Read: dataSourceGoogleStorageBucketObjectContentRead, diff --git a/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content_test.go b/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content_test.go index 5b40c4ee1089..e6e6e4fa3301 100644 --- a/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content_test.go +++ 
b/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content_test.go @@ -47,3 +47,30 @@ resource "google_storage_bucket" "contenttest" { force_destroy = true }`, content, bucket) } + +func TestAccDataSourceStorageBucketObjectContent_Issue15717(t *testing.T) { + + bucket := "tf-bucket-object-content-" + acctest.RandString(t, 10) + content := "qwertyuioasdfghjk1234567!!@#$*" + + config := fmt.Sprintf(` +%s + +output "output" { + value = replace(data.google_storage_bucket_object_content.default.content, "q", "Q") +}`, testAccDataSourceStorageBucketObjectContent_Basic(content, bucket)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.google_storage_bucket_object_content.default", "content"), + resource.TestCheckResourceAttr("data.google_storage_bucket_object_content.default", "content", content), + ), + }, + }, + }) +} diff --git a/mmv1/third_party/terraform/website/docs/d/storage_bucket_object_content.html.markdown b/mmv1/third_party/terraform/website/docs/d/storage_bucket_object_content.html.markdown index 551f22540bbd..203c6d42fdd6 100644 --- a/mmv1/third_party/terraform/website/docs/d/storage_bucket_object_content.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/storage_bucket_object_content.html.markdown @@ -41,4 +41,4 @@ The following arguments are supported: The following attributes are exported: -* `content` - (Computed) [Content-Language](https://tools.ietf.org/html/rfc7231#section-3.1.3.2) of the object content. +* `content` - (Computed) The content of the object. From 6dd65b1f3ff4cd3a814b2bdfcb3d76ba2d23b39b Mon Sep 17 00:00:00 2001 From: esu Date: Mon, 1 Jul 2024 13:53:49 -0700 Subject: [PATCH 259/356] Fix clashing fields in workstation_config ephemeral directories test. 
(#11049) --- .../resource_workstations_workstation_config_test.go.erb | 1 - 1 file changed, 1 deletion(-) diff --git a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb index 08169d2fe496..733e5fad9ccc 100644 --- a/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb +++ b/mmv1/third_party/terraform/services/workstations/resource_workstations_workstation_config_test.go.erb @@ -393,7 +393,6 @@ resource "google_workstations_workstation_config" "default" { gce_pd { disk_type = "pd-standard" source_image = google_compute_image.test_source_image.id - read_only = true } } From 3d74fe743bc1232f81b8b32e34fa49f9e1709e3a Mon Sep 17 00:00:00 2001 From: patrickmoy <53500820+patrickmoy@users.noreply.github.com> Date: Mon, 1 Jul 2024 14:05:18 -0700 Subject: [PATCH 260/356] Add example for using TimespanConfig successfully without row_limit_percent or row_limit set (#11035) --- mmv1/products/dlp/JobTrigger.yaml | 8 ++ ...b_trigger_timespan_config_big_query.tf.erb | 43 +++++++++++ ...e_data_loss_prevention_job_trigger_test.go | 75 +++++++++++++++++++ 3 files changed, 126 insertions(+) create mode 100644 mmv1/templates/terraform/examples/dlp_job_trigger_timespan_config_big_query.tf.erb diff --git a/mmv1/products/dlp/JobTrigger.yaml b/mmv1/products/dlp/JobTrigger.yaml index e788d5672c7a..a40ceaebdeca 100644 --- a/mmv1/products/dlp/JobTrigger.yaml +++ b/mmv1/products/dlp/JobTrigger.yaml @@ -117,6 +117,13 @@ examples: trigger: 'trigger' test_env_vars: project: :PROJECT_NAME + - !ruby/object:Provider::Terraform::Examples + name: 'dlp_job_trigger_timespan_config_big_query' + primary_resource_id: 'timespan_config_big_query' + vars: + trigger: 'trigger' + test_env_vars: + project: :PROJECT_NAME custom_code: !ruby/object:Provider::Terraform::CustomCode encoder: 
templates/terraform/encoders/dlp_job_trigger.go.erb decoder: templates/terraform/decoders/dlp_job_trigger.go.erb @@ -939,6 +946,7 @@ properties: description: | How to sample rows if not all rows are scanned. Meaningful only when used in conjunction with either rowsLimit or rowsLimitPercent. If not specified, rows are scanned in the order BigQuery reads them. + If TimespanConfig is set, set this to an empty string to avoid using the default value. values: - :TOP - :RANDOM_START diff --git a/mmv1/templates/terraform/examples/dlp_job_trigger_timespan_config_big_query.tf.erb b/mmv1/templates/terraform/examples/dlp_job_trigger_timespan_config_big_query.tf.erb new file mode 100644 index 000000000000..2c708f41813b --- /dev/null +++ b/mmv1/templates/terraform/examples/dlp_job_trigger_timespan_config_big_query.tf.erb @@ -0,0 +1,43 @@ +resource "google_data_loss_prevention_job_trigger" "<%= ctx[:primary_resource_id] %>" { + parent = "projects/<%= ctx[:test_env_vars]['project'] %>" + description = "BigQuery DLP Job Trigger with timespan config and row limit" + display_name = "bigquery-dlp-job-trigger-limit-timespan" + + triggers { + schedule { + recurrence_period_duration ="86400s" + } + } + + inspect_job { + inspect_template_name = "projects/test/locations/global/inspectTemplates/6425492983381733900" + storage_config { + big_query_options { + table_reference { + project_id = "project" + dataset_id = "dataset" + table_id = "table" + } + sample_method = "" + } + + timespan_config { + start_time = "2023-01-01T00:00:23Z" + timestamp_field { + name = "timestamp" + } + } + } + + actions { + save_findings { + output_config { + table { + project_id = "project" + dataset_id = "output" + } + } + } + } +} +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_job_trigger_test.go b/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_job_trigger_test.go index 
9360817b7e23..b9133a1de2bd 100644 --- a/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_job_trigger_test.go +++ b/mmv1/third_party/terraform/services/datalossprevention/resource_data_loss_prevention_job_trigger_test.go @@ -487,6 +487,32 @@ func TestAccDataLossPreventionJobTrigger_dlpJobTrigger_withSensitivityScore(t *t }) } +func TestAccDataLossPreventionJobTrigger_dlpJobTriggerCreateWithTimespanConfigBigQuery(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckDataLossPreventionJobTriggerDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccDataLossPreventionJobTrigger_dlpJobTrigger_rowLimit_timespanConfig(context), + }, + { + ResourceName: "google_data_loss_prevention_job_trigger.bigquery_row_limit_timespan", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"parent", "inspect_job.0.storage_config.0.big_query_options.0.sample_method"}, + }, + }, + }) +} + func testAccDataLossPreventionJobTrigger_dlpJobTriggerBasic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_data_loss_prevention_job_trigger" "basic" { @@ -2752,3 +2778,52 @@ resource "google_data_loss_prevention_job_trigger" "basic" { } `, context) } + +func testAccDataLossPreventionJobTrigger_dlpJobTrigger_rowLimit_timespanConfig(context map[string]interface{}) string { + return acctest.Nprintf(` + +resource "google_data_loss_prevention_job_trigger" "bigquery_row_limit_timespan" { + parent = "projects/%{project}" + description = "BigQuery DLP Job Trigger with timespan config and row limit" + display_name = "bigquery-dlp-job-trigger-limit-timespan" + + triggers { + schedule { + 
recurrence_period_duration ="86400s" + } + } + + inspect_job { + inspect_template_name = "projects/test/locations/global/inspectTemplates/6425492983381733900" + storage_config { + big_query_options { + table_reference { + project_id = "project" + dataset_id = "dataset" + table_id = "table" + } + sample_method = "" + } + + timespan_config { + start_time = "2023-01-01T00:00:23Z" + timestamp_field { + name = "timestamp" + } + } + } + + actions { + save_findings { + output_config { + table { + project_id = "project" + dataset_id = "output" + } + } + } + } + } +} +`, context) +} From 0370a3f963a3d697066673ec95d927fb0a6cc45e Mon Sep 17 00:00:00 2001 From: Iris Chen <10179943+iyabchen@users.noreply.github.com> Date: Mon, 1 Jul 2024 14:19:34 -0700 Subject: [PATCH 261/356] remove comment like #[START #[END in .tf.erb files (#11050) --- .../compute_packet_mirroring_full.tf.erb | 2 -- .../examples/compute_reservation.tf.erb | 2 -- .../examples/dns_managed_zone_basic.tf.erb | 2 -- .../examples/dns_managed_zone_private.tf.erb | 2 -- ...dns_managed_zone_private_forwarding.tf.erb | 2 -- .../dns_managed_zone_private_gke.tf.erb | 2 -- .../dns_managed_zone_private_peering.tf.erb | 2 -- .../dns_managed_zone_quickstart.tf.erb | 2 -- .../dns_managed_zone_service_directory.tf.erb | 2 -- .../examples/dns_policy_basic.tf.erb | 2 -- .../examples/dns_record_set_basic.tf.erb | 2 -- .../examples/dns_response_policy_basic.tf.erb | 2 -- .../dns_response_policy_rule_basic.tf.erb | 2 -- ...external_cdn_lb_with_backend_bucket.tf.erb | 20 ------------- .../external_http_lb_mig_backend.tf.erb | 18 ------------ ...l_http_lb_mig_backend_custom_header.tf.erb | 2 -- .../external_ssl_proxy_lb_mig_backend.tf.erb | 2 -- .../external_tcp_proxy_lb_mig_backend.tf.erb | 2 -- .../examples/external_vpn_gateway.tf.erb | 2 -- .../examples/firewall_with_target_tags.tf.erb | 2 -- .../flask_google_cloud_quickstart.tf.erb | 8 ------ ...l_internal_http_lb_with_mig_backend.tf.erb | 2 -- 
.../examples/ha_vpn_gateway_gcp_to_gcp.tf.erb | 2 -- .../examples/instance_custom_hostname.tf.erb | 3 -- .../examples/instance_settings_basic.tf.erb | 2 -- .../instance_virtual_display_enabled.tf.erb | 4 --- .../int_https_lb_https_redirect.tf.erb | 2 -- .../internal_http_lb_with_mig_backend.tf.erb | 2 -- ...nternal_tcp_udp_lb_with_mig_backend.tf.erb | 2 -- ...k_custom_firewall_enforcement_order.tf.erb | 2 -- .../examples/network_custom_mtu.tf.erb | 2 -- ...agement_connectivity_test_addresses.tf.erb | 2 -- ...agement_connectivity_test_instances.tf.erb | 2 -- ...k_services_lb_route_extension_basic.tf.erb | 4 --- ...services_lb_traffic_extension_basic.tf.erb | 3 -- ...private_service_connect_google_apis.tf.erb | 6 ---- .../privateca_capool_all_fields.tf.erb | 2 -- .../examples/privateca_capool_basic.tf.erb | 2 -- ...ivateca_certificate_authority_basic.tf.erb | 2 -- ...ateca_certificate_authority_byo_key.tf.erb | 2 -- ...ca_certificate_authority_custom_ski.tf.erb | 2 -- ...a_certificate_authority_subordinate.tf.erb | 2 -- .../privateca_certificate_config.tf.erb | 2 -- .../examples/privateca_certificate_csr.tf.erb | 2 -- .../privateca_certificate_custom_ski.tf.erb | 2 -- .../privateca_certificate_no_authority.tf.erb | 2 -- ...privateca_certificate_with_template.tf.erb | 2 -- .../examples/privateca_quickstart.tf.erb | 2 -- .../examples/privateca_template_basic.tf.erb | 2 -- .../examples/region_autoscaler_basic.tf.erb | 2 -- .../region_target_tcp_proxy_basic.tf.erb | 3 -- .../region_url_map_path_template_match.tf.erb | 2 -- ...egional_external_http_load_balancer.tf.erb | 28 ------------------- .../examples/spot_instance_basic.tf.erb | 3 -- .../examples/sql_database_basic.tf.erb | 2 -- .../sql_database_deletion_policy.tf.erb | 2 -- .../sql_database_instance_my_sql.tf.erb | 4 --- .../sql_database_instance_postgres.tf.erb | 4 --- .../sql_database_instance_sqlserver.tf.erb | 4 --- .../examples/sql_instance_cmek.tf.erb | 14 ---------- 
.../terraform/examples/sql_instance_ha.tf.erb | 6 ---- .../sql_instance_iam_condition.tf.erb | 2 -- .../examples/sql_instance_labels.tf.erb | 6 ---- .../examples/sql_instance_pitr.tf.erb | 4 --- .../examples/sql_instance_ssl_cert.tf.erb | 10 ------- ...l_mysql_instance_authorized_network.tf.erb | 2 -- .../examples/sql_mysql_instance_backup.tf.erb | 2 -- .../sql_mysql_instance_backup_location.tf.erb | 2 -- ...sql_mysql_instance_backup_retention.tf.erb | 2 -- .../examples/sql_mysql_instance_clone.tf.erb | 4 --- .../examples/sql_mysql_instance_flags.tf.erb | 2 -- .../sql_mysql_instance_public_ip.tf.erb | 2 -- .../examples/sql_mysql_instance_pvp.tf.erb | 2 -- .../sql_mysql_instance_replica.tf.erb | 4 --- ...ostgres_instance_authorized_network.tf.erb | 2 -- .../sql_postgres_instance_backup.tf.erb | 2 -- ...l_postgres_instance_backup_location.tf.erb | 2 -- ..._postgres_instance_backup_retention.tf.erb | 2 -- .../sql_postgres_instance_clone.tf.erb | 4 --- .../sql_postgres_instance_flags.tf.erb | 2 -- .../sql_postgres_instance_public_ip.tf.erb | 2 -- .../examples/sql_postgres_instance_pvp.tf.erb | 2 -- .../sql_postgres_instance_replica.tf.erb | 4 --- ...lserver_instance_authorized_network.tf.erb | 2 -- .../sql_sqlserver_instance_backup.tf.erb | 2 -- ..._sqlserver_instance_backup_location.tf.erb | 2 -- ...sqlserver_instance_backup_retention.tf.erb | 2 -- .../sql_sqlserver_instance_clone.tf.erb | 4 --- .../sql_sqlserver_instance_flags.tf.erb | 2 -- .../sql_sqlserver_instance_public_ip.tf.erb | 2 -- .../sql_sqlserver_instance_replica.tf.erb | 4 --- .../examples/sql_sqlserver_vm_instance.tf.erb | 4 --- .../examples/storage_hmac_key.tf.erb | 2 -- .../examples/storage_make_data_public.tf.erb | 2 -- .../examples/storage_new_bucket.tf.erb | 8 ------ .../storage_object_lifecycle_setting.tf.erb | 2 -- .../storage_pubsub_notifications.tf.erb | 2 -- .../examples/storage_static_website.tf.erb | 6 ---- .../examples/target_grpc_proxy_basic.tf.erb | 2 -- 
.../examples/target_http_proxy_basic.tf.erb | 2 -- ..._http_proxy_http_keep_alive_timeout.tf.erb | 2 -- .../target_http_proxy_https_redirect.tf.erb | 2 -- .../examples/target_https_proxy_basic.tf.erb | 2 -- ...https_proxy_http_keep_alive_timeout.tf.erb | 2 -- .../examples/target_https_proxy_mtls.tf.erb | 2 -- .../examples/target_ssl_proxy_basic.tf.erb | 2 -- .../examples/target_tcp_proxy_basic.tf.erb | 2 -- .../url_map_bucket_and_service.tf.erb | 2 -- .../url_map_header_based_routing.tf.erb | 2 -- .../url_map_parameter_based_routing.tf.erb | 2 -- .../url_map_path_template_match.tf.erb | 2 -- .../url_map_traffic_director_path.tf.erb | 2 -- ...l_map_traffic_director_path_partial.tf.erb | 2 -- .../url_map_traffic_director_route.tf.erb | 2 -- ..._map_traffic_director_route_partial.tf.erb | 2 -- 115 files changed, 368 deletions(-) diff --git a/mmv1/templates/terraform/examples/compute_packet_mirroring_full.tf.erb b/mmv1/templates/terraform/examples/compute_packet_mirroring_full.tf.erb index 1e74c3e7ff2e..0c7e388a4109 100644 --- a/mmv1/templates/terraform/examples/compute_packet_mirroring_full.tf.erb +++ b/mmv1/templates/terraform/examples/compute_packet_mirroring_full.tf.erb @@ -54,7 +54,6 @@ resource "google_compute_forwarding_rule" "default" { network_tier = "PREMIUM" } -# [START compute_vm_packet_mirror] resource "google_compute_packet_mirroring" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mirroring_name'] %>" description = "bar" @@ -76,4 +75,3 @@ resource "google_compute_packet_mirroring" "<%= ctx[:primary_resource_id] %>" { direction = "BOTH" } } -# [END compute_vm_packet_mirror] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/compute_reservation.tf.erb b/mmv1/templates/terraform/examples/compute_reservation.tf.erb index c2f3ba6443a6..b50b19a1c8ca 100644 --- a/mmv1/templates/terraform/examples/compute_reservation.tf.erb +++ b/mmv1/templates/terraform/examples/compute_reservation.tf.erb @@ -1,4 +1,3 @@ -# [START 
compute_reservation_create_local_reservation] resource "google_compute_reservation" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['gce_reservation_local'] %>" @@ -17,4 +16,3 @@ resource "google_compute_reservation" "<%= ctx[:primary_resource_id] %>" { } } -# [END compute_reservation_create_local_reservation] diff --git a/mmv1/templates/terraform/examples/dns_managed_zone_basic.tf.erb b/mmv1/templates/terraform/examples/dns_managed_zone_basic.tf.erb index baab783622d7..db5721b24113 100644 --- a/mmv1/templates/terraform/examples/dns_managed_zone_basic.tf.erb +++ b/mmv1/templates/terraform/examples/dns_managed_zone_basic.tf.erb @@ -1,4 +1,3 @@ -# [START dns_managed_zone_basic] resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { name = "example-zone" dns_name = "example-${random_id.rnd.hex}.com." @@ -11,4 +10,3 @@ resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { resource "random_id" "rnd" { byte_length = 4 } -# [END dns_managed_zone_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/dns_managed_zone_private.tf.erb b/mmv1/templates/terraform/examples/dns_managed_zone_private.tf.erb index 14831795c698..2b6ca85e12ab 100644 --- a/mmv1/templates/terraform/examples/dns_managed_zone_private.tf.erb +++ b/mmv1/templates/terraform/examples/dns_managed_zone_private.tf.erb @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private] resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['zone_name'] %>" dns_name = "private.example.com." 
@@ -28,4 +27,3 @@ resource "google_compute_network" "network-2" { name = "<%= ctx[:vars]['network_2_name'] %>" auto_create_subnetworks = false } -# [END dns_managed_zone_private] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/dns_managed_zone_private_forwarding.tf.erb b/mmv1/templates/terraform/examples/dns_managed_zone_private_forwarding.tf.erb index 7492fc14cfea..e5f49dd70e80 100644 --- a/mmv1/templates/terraform/examples/dns_managed_zone_private_forwarding.tf.erb +++ b/mmv1/templates/terraform/examples/dns_managed_zone_private_forwarding.tf.erb @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private_forwarding] resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['zone_name'] %>" dns_name = "private.example.com." @@ -37,4 +36,3 @@ resource "google_compute_network" "network-2" { name = "<%= ctx[:vars]['network_2_name'] %>" auto_create_subnetworks = false } -# [END dns_managed_zone_private_forwarding] diff --git a/mmv1/templates/terraform/examples/dns_managed_zone_private_gke.tf.erb b/mmv1/templates/terraform/examples/dns_managed_zone_private_gke.tf.erb index 5c58fb5e6b71..064b99b32724 100644 --- a/mmv1/templates/terraform/examples/dns_managed_zone_private_gke.tf.erb +++ b/mmv1/templates/terraform/examples/dns_managed_zone_private_gke.tf.erb @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private_gke] resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['zone_name'] %>" dns_name = "private.example.com." 
@@ -67,4 +66,3 @@ resource "google_container_cluster" "cluster-1" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END dns_managed_zone_private_gke] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/dns_managed_zone_private_peering.tf.erb b/mmv1/templates/terraform/examples/dns_managed_zone_private_peering.tf.erb index a7d7d22ff3b9..0914f25979d3 100644 --- a/mmv1/templates/terraform/examples/dns_managed_zone_private_peering.tf.erb +++ b/mmv1/templates/terraform/examples/dns_managed_zone_private_peering.tf.erb @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private_peering] resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['zone_name'] %>" dns_name = "peering.example.com." @@ -28,4 +27,3 @@ resource "google_compute_network" "network-target" { name = "<%= ctx[:vars]['network_target_name'] %>" auto_create_subnetworks = false } -# [END dns_managed_zone_private_peering] diff --git a/mmv1/templates/terraform/examples/dns_managed_zone_quickstart.tf.erb b/mmv1/templates/terraform/examples/dns_managed_zone_quickstart.tf.erb index 51d2a67d7bde..850897284e7c 100644 --- a/mmv1/templates/terraform/examples/dns_managed_zone_quickstart.tf.erb +++ b/mmv1/templates/terraform/examples/dns_managed_zone_quickstart.tf.erb @@ -1,4 +1,3 @@ -# [START dns_domain_tutorial] # to setup a web-server resource "google_compute_instance" "default" { name = "<%= ctx[:vars]['dns_compute_instance'] %>" @@ -53,4 +52,3 @@ resource "google_dns_record_set" "default" { google_compute_instance.default.network_interface.0.access_config.0.nat_ip ] } -# [END dns_domain_tutorial] diff --git a/mmv1/templates/terraform/examples/dns_managed_zone_service_directory.tf.erb b/mmv1/templates/terraform/examples/dns_managed_zone_service_directory.tf.erb index 527e332c289a..011eb2d5d540 100644 --- a/mmv1/templates/terraform/examples/dns_managed_zone_service_directory.tf.erb +++ 
b/mmv1/templates/terraform/examples/dns_managed_zone_service_directory.tf.erb @@ -1,4 +1,3 @@ -# [START dns_managed_zone_service_directory] resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { provider = google-beta @@ -28,4 +27,3 @@ resource "google_compute_network" "network" { name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false } -# [END dns_managed_zone_service_directory] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/dns_policy_basic.tf.erb b/mmv1/templates/terraform/examples/dns_policy_basic.tf.erb index 217945cc9ebb..9de748173253 100644 --- a/mmv1/templates/terraform/examples/dns_policy_basic.tf.erb +++ b/mmv1/templates/terraform/examples/dns_policy_basic.tf.erb @@ -1,4 +1,3 @@ -# [START dns_policy_basic] resource "google_dns_policy" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['policy_name'] %>" enable_inbound_forwarding = true @@ -32,4 +31,3 @@ resource "google_compute_network" "network-2" { name = "<%= ctx[:vars]['network_2_name'] %>" auto_create_subnetworks = false } -# [END dns_policy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/dns_record_set_basic.tf.erb b/mmv1/templates/terraform/examples/dns_record_set_basic.tf.erb index 6c45d7f39ac1..915f9fd33c58 100644 --- a/mmv1/templates/terraform/examples/dns_record_set_basic.tf.erb +++ b/mmv1/templates/terraform/examples/dns_record_set_basic.tf.erb @@ -1,4 +1,3 @@ -# [START dns_record_set_basic] resource "google_dns_managed_zone" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sample_zone'] %>" dns_name = "<%= ctx[:vars]['sample_zone'] %>.hashicorptest.com." 
@@ -12,4 +11,3 @@ resource "google_dns_record_set" "default" { rrdatas = ["10.0.0.1", "10.1.0.1"] ttl = 86400 } -# [END dns_record_set_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/dns_response_policy_basic.tf.erb b/mmv1/templates/terraform/examples/dns_response_policy_basic.tf.erb index 767f8706638c..e08a71461935 100644 --- a/mmv1/templates/terraform/examples/dns_response_policy_basic.tf.erb +++ b/mmv1/templates/terraform/examples/dns_response_policy_basic.tf.erb @@ -1,4 +1,3 @@ -# [START dns_response_policy_basic] resource "google_compute_network" "network-1" { name = "<%= ctx[:vars]['network_1_name'] %>" auto_create_subnetworks = false @@ -69,4 +68,3 @@ resource "google_dns_response_policy" "<%= ctx[:primary_resource_id] %>" { gke_cluster_name = google_container_cluster.cluster-1.id } } -# [END dns_response_policy_basic] diff --git a/mmv1/templates/terraform/examples/dns_response_policy_rule_basic.tf.erb b/mmv1/templates/terraform/examples/dns_response_policy_rule_basic.tf.erb index 1500979ee66e..8564efc32e8f 100644 --- a/mmv1/templates/terraform/examples/dns_response_policy_rule_basic.tf.erb +++ b/mmv1/templates/terraform/examples/dns_response_policy_rule_basic.tf.erb @@ -1,4 +1,3 @@ -# [START dns_response_policy_rule_basic] resource "google_compute_network" "network-1" { name = "<%= ctx[:vars]['network_1_name'] %>" auto_create_subnetworks = false @@ -35,4 +34,3 @@ resource "google_dns_response_policy_rule" "<%= ctx[:primary_resource_id] %>" { } } -# [END dns_response_policy_rule_basic] diff --git a/mmv1/templates/terraform/examples/external_cdn_lb_with_backend_bucket.tf.erb b/mmv1/templates/terraform/examples/external_cdn_lb_with_backend_bucket.tf.erb index f4f9eb9be413..2d9f24a89eef 100644 --- a/mmv1/templates/terraform/examples/external_cdn_lb_with_backend_bucket.tf.erb +++ b/mmv1/templates/terraform/examples/external_cdn_lb_with_backend_bucket.tf.erb @@ -1,6 +1,5 @@ # CDN load balancer with Cloud bucket as backend -# 
[START cloudloadbalancing_cdn_with_backend_bucket_cloud_storage_bucket] # Cloud Storage bucket resource "google_storage_bucket" "default" { name = "<%= ctx[:vars]['my_bucket'] %>" @@ -16,18 +15,14 @@ resource "google_storage_bucket" "default" { } } -# [END cloudloadbalancing_cdn_with_backend_bucket_cloud_storage_bucket] -# [START cloudloadbalancing_cdn_with_backend_bucket_make_public] # make bucket public resource "google_storage_bucket_iam_member" "default" { bucket = google_storage_bucket.default.name role = "roles/storage.objectViewer" member = "allUsers" } -# [END cloudloadbalancing_cdn_with_backend_bucket_make_public] -# [START cloudloadbalancing_cdn_with_backend_bucket_index_page] resource "google_storage_bucket_object" "index_page" { name = "<%= ctx[:vars]['index_page'] %>" bucket = google_storage_bucket.default.name @@ -37,9 +32,7 @@ resource "google_storage_bucket_object" "index_page" { EOT } -# [END cloudloadbalancing_cdn_with_backend_bucket_index_page] -# [START cloudloadbalancing_cdn_with_backend_bucket_error_page] resource "google_storage_bucket_object" "error_page" { name = "<%= ctx[:vars]['404_page'] %>" bucket = google_storage_bucket.default.name @@ -49,9 +42,7 @@ resource "google_storage_bucket_object" "error_page" { EOT } -# [END cloudloadbalancing_cdn_with_backend_bucket_error_page] -# [START cloudloadbalancing_cdn_with_backend_bucket_image] # image object for testing, try to access http:///test.jpg resource "google_storage_bucket_object" "test_image" { name = "<%= ctx[:vars]['test_object'] %>" @@ -65,16 +56,12 @@ resource "google_storage_bucket_object" "test_image" { bucket = google_storage_bucket.default.name } -# [END cloudloadbalancing_cdn_with_backend_bucket_image] -# [START cloudloadbalancing_cdn_with_backend_bucket_ip_address] # reserve IP address resource "google_compute_global_address" "default" { name = "<%= ctx[:vars]['example_ip'] %>" } -# [END cloudloadbalancing_cdn_with_backend_bucket_ip_address] -# [START 
cloudloadbalancing_cdn_with_backend_bucket_forwarding_rule] # forwarding rule resource "google_compute_global_forwarding_rule" "default" { name = "<%= ctx[:vars]['http_lb_forwarding_rule'] %>" @@ -84,25 +71,19 @@ resource "google_compute_global_forwarding_rule" "default" { target = google_compute_target_http_proxy.default.id ip_address = google_compute_global_address.default.id } -# [END cloudloadbalancing_cdn_with_backend_bucket_forwarding_rule] -# [START cloudloadbalancing_cdn_with_backend_bucket_http_proxy] # http proxy resource "google_compute_target_http_proxy" "default" { name = "<%= ctx[:vars]['http_lb_proxy'] %>" url_map = google_compute_url_map.default.id } -# [END cloudloadbalancing_cdn_with_backend_bucket_http_proxy] -# [START cloudloadbalancing_cdn_with_backend_bucket_url_map] # url map resource "google_compute_url_map" "default" { name = "<%= ctx[:vars]['http_lb'] %>" default_service = google_compute_backend_bucket.default.id } -# [END cloudloadbalancing_cdn_with_backend_bucket_url_map] -# [START cloudloadbalancing_cdn_with_backend_bucket_backend_bucket] # backend bucket with CDN policy with default ttl settings resource "google_compute_backend_bucket" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['cat_backend_bucket'] %>" @@ -118,4 +99,3 @@ resource "google_compute_backend_bucket" "<%= ctx[:primary_resource_id] %>" { serve_while_stale = 86400 } } -# [END cloudloadbalancing_cdn_with_backend_bucket_backend_bucket] diff --git a/mmv1/templates/terraform/examples/external_http_lb_mig_backend.tf.erb b/mmv1/templates/terraform/examples/external_http_lb_mig_backend.tf.erb index b4f58d138f81..e7fccf9493ca 100644 --- a/mmv1/templates/terraform/examples/external_http_lb_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/external_http_lb_mig_backend.tf.erb @@ -1,6 +1,5 @@ # External HTTP load balancer with an CDN-enabled managed instance group backend -# [START cloudloadbalancing_ext_http_gce_instance_template] resource 
"google_compute_instance_template" "default" { name = "<%= ctx[:vars]['lb_backend_template'] %>" disk { @@ -37,9 +36,7 @@ resource "google_compute_instance_template" "default" { } tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_http_gce_instance_template] -# [START cloudloadbalancing_ext_http_gce_instance_mig] resource "google_compute_instance_group_manager" "default" { name = "<%= ctx[:vars]['lb_backend_example'] %>" zone = "us-east1-b" @@ -54,10 +51,8 @@ resource "google_compute_instance_group_manager" "default" { base_instance_name = "vm" target_size = 2 } -# [END cloudloadbalancing_ext_http_gce_instance_mig] -# [START cloudloadbalancing_ext_http_gce_instance_firewall_rule] resource "google_compute_firewall" "default" { name = "<%= ctx[:vars]['fw_allow_health_check'] %>" direction = "INGRESS" @@ -70,16 +65,12 @@ resource "google_compute_firewall" "default" { protocol = "tcp" } } -# [END cloudloadbalancing_ext_http_gce_instance_firewall_rule] -# [START cloudloadbalancing_ext_http_gce_instance_ip_address] resource "google_compute_global_address" "default" { name = "<%= ctx[:vars]['lb_ipv4_1'] %>" ip_version = "IPV4" } -# [END cloudloadbalancing_ext_http_gce_instance_ip_address] -# [START cloudloadbalancing_ext_http_gce_instance_health_check] resource "google_compute_health_check" "default" { name = "<%= ctx[:vars]['http_basic_check'] %>" check_interval_sec = 5 @@ -93,9 +84,7 @@ resource "google_compute_health_check" "default" { timeout_sec = 5 unhealthy_threshold = 2 } -# [END cloudloadbalancing_ext_http_gce_instance_health_check] -# [START cloudloadbalancing_ext_http_gce_instance_backend_service] resource "google_compute_backend_service" "default" { name = "<%= ctx[:vars]['web_backend_service'] %>" connection_draining_timeout_sec = 0 @@ -111,23 +100,17 @@ resource "google_compute_backend_service" "default" { capacity_scaler = 1.0 } } -# [END cloudloadbalancing_ext_http_gce_instance_backend_service] -# [START 
cloudloadbalancing_ext_http_gce_instance_url_map] resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['web_map_http'] %>" default_service = google_compute_backend_service.default.id } -# [END cloudloadbalancing_ext_http_gce_instance_url_map] -# [START cloudloadbalancing_ext_http_gce_instance_target_http_proxy] resource "google_compute_target_http_proxy" "default" { name = "<%= ctx[:vars]['http_lb_proxy'] %>" url_map = google_compute_url_map.default.id } -# [END cloudloadbalancing_ext_http_gce_instance_target_http_proxy] -# [START cloudloadbalancing_ext_http_gce_instance_forwarding_rule] resource "google_compute_global_forwarding_rule" "default" { name = "<%= ctx[:vars]['http_content_rule'] %>" ip_protocol = "TCP" @@ -136,4 +119,3 @@ resource "google_compute_global_forwarding_rule" "default" { target = google_compute_target_http_proxy.default.id ip_address = google_compute_global_address.default.id } -# [END cloudloadbalancing_ext_http_gce_instance_forwarding_rule] diff --git a/mmv1/templates/terraform/examples/external_http_lb_mig_backend_custom_header.tf.erb b/mmv1/templates/terraform/examples/external_http_lb_mig_backend_custom_header.tf.erb index 3bdd1a01ce4b..64ef8ef62a79 100644 --- a/mmv1/templates/terraform/examples/external_http_lb_mig_backend_custom_header.tf.erb +++ b/mmv1/templates/terraform/examples/external_http_lb_mig_backend_custom_header.tf.erb @@ -1,7 +1,6 @@ # External HTTP load balancer with a CDN-enabled managed instance group backend # and custom request and response headers -# [START cloudloadbalancing_ext_http_gce_custom_header] # VPC resource "google_compute_network" "default" { name = "<%= ctx[:vars]['xlb_network_name'] %>" @@ -154,4 +153,3 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_http_gce_custom_header] diff --git a/mmv1/templates/terraform/examples/external_ssl_proxy_lb_mig_backend.tf.erb 
b/mmv1/templates/terraform/examples/external_ssl_proxy_lb_mig_backend.tf.erb index fe55e09c5348..56a6d98288a8 100644 --- a/mmv1/templates/terraform/examples/external_ssl_proxy_lb_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/external_ssl_proxy_lb_mig_backend.tf.erb @@ -1,6 +1,5 @@ # External SSL proxy load balancer with managed instance group backend -# [START cloudloadbalancing_ext_ssl_proxy_lb] # VPC resource "google_compute_network" "default" { name = "<%= ctx[:vars]['ssl_proxy_xlb_network'] %>" @@ -184,5 +183,4 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_ssl_proxy_lb] diff --git a/mmv1/templates/terraform/examples/external_tcp_proxy_lb_mig_backend.tf.erb b/mmv1/templates/terraform/examples/external_tcp_proxy_lb_mig_backend.tf.erb index d811f2eeffb4..98b971e9fd46 100644 --- a/mmv1/templates/terraform/examples/external_tcp_proxy_lb_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/external_tcp_proxy_lb_mig_backend.tf.erb @@ -1,6 +1,5 @@ # External TCP proxy load balancer with managed instance group backend -# [START cloudloadbalancing_ext_tcp_proxy_lb] # VPC resource "google_compute_network" "default" { name = "<%= ctx[:vars]['tcp_proxy_xlb_network'] %>" @@ -142,4 +141,3 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_tcp_proxy_lb] diff --git a/mmv1/templates/terraform/examples/external_vpn_gateway.tf.erb b/mmv1/templates/terraform/examples/external_vpn_gateway.tf.erb index f13b9f55d30d..d81ab0df846c 100644 --- a/mmv1/templates/terraform/examples/external_vpn_gateway.tf.erb +++ b/mmv1/templates/terraform/examples/external_vpn_gateway.tf.erb @@ -1,4 +1,3 @@ -# [START cloudvpn_ha_external] resource "google_compute_ha_vpn_gateway" "ha_gateway" { region = "us-central1" name = "<%= ctx[:vars]['ha_vpn_gateway_name'] %>" @@ -100,4 +99,3 @@ resource "google_compute_router_peer" "router1_peer2" { 
advertised_route_priority = 100 interface = google_compute_router_interface.router1_interface2.name } -# [END cloudvpn_ha_external] diff --git a/mmv1/templates/terraform/examples/firewall_with_target_tags.tf.erb b/mmv1/templates/terraform/examples/firewall_with_target_tags.tf.erb index 31c30a5bbc03..48f9bee58579 100644 --- a/mmv1/templates/terraform/examples/firewall_with_target_tags.tf.erb +++ b/mmv1/templates/terraform/examples/firewall_with_target_tags.tf.erb @@ -1,4 +1,3 @@ -# [START vpc_firewall_create] resource "google_compute_firewall" "<%= ctx[:primary_resource_id] %>" { project = "<%= ctx[:test_env_vars]["project"] %>" name = "<%= ctx[:vars]['firewall_name'] %>" @@ -13,4 +12,3 @@ resource "google_compute_firewall" "<%= ctx[:primary_resource_id] %>" { source_tags = ["foo"] target_tags = ["web"] } -# [END vpc_firewall_create] diff --git a/mmv1/templates/terraform/examples/flask_google_cloud_quickstart.tf.erb b/mmv1/templates/terraform/examples/flask_google_cloud_quickstart.tf.erb index db33131755cb..65accdc7ebd9 100644 --- a/mmv1/templates/terraform/examples/flask_google_cloud_quickstart.tf.erb +++ b/mmv1/templates/terraform/examples/flask_google_cloud_quickstart.tf.erb @@ -1,4 +1,3 @@ -# [START compute_flask_quickstart_vm] # Create a single Compute Engine instance resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['flask_vm'] %>" @@ -26,9 +25,7 @@ resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { } } } -# [END compute_flask_quickstart_vm] -# [START vpc_flask_quickstart_ssh_fw] resource "google_compute_firewall" "ssh" { name = "<%= ctx[:vars]['allow_ssh'] %>" allow { @@ -41,10 +38,8 @@ resource "google_compute_firewall" "ssh" { source_ranges = ["0.0.0.0/0"] target_tags = ["ssh"] } -# [END vpc_flask_quickstart_ssh_fw] -# [START vpc_flask_quickstart_5000_fw] resource "google_compute_firewall" "flask" { name = "<%= ctx[:vars]['flask_app_firewall'] %>" network = "default" @@ -55,12 +50,10 @@ 
resource "google_compute_firewall" "flask" { } source_ranges = ["0.0.0.0/0"] } -# [END vpc_flask_quickstart_5000_fw] # Create new multi-region storage bucket in the US # with versioning enabled -# [START storage_bucket_tf_with_versioning] resource "google_storage_bucket" "default" { name = "<%= ctx[:vars]['bucket_tfstate'] %>" force_destroy = false @@ -70,4 +63,3 @@ resource "google_storage_bucket" "default" { enabled = true } } -# [END storage_bucket_tf_with_versioning] diff --git a/mmv1/templates/terraform/examples/global_internal_http_lb_with_mig_backend.tf.erb b/mmv1/templates/terraform/examples/global_internal_http_lb_with_mig_backend.tf.erb index 25a069dd2ea3..52c06538d796 100644 --- a/mmv1/templates/terraform/examples/global_internal_http_lb_with_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/global_internal_http_lb_with_mig_backend.tf.erb @@ -1,6 +1,5 @@ # Global Internal HTTP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "gilb_network" { name = "<%= ctx[:vars]['gilb_network_name'] %>" @@ -182,4 +181,3 @@ resource "google_compute_instance" "vm-test" { } } } -# [END cloudloadbalancing_int_http_gce] diff --git a/mmv1/templates/terraform/examples/ha_vpn_gateway_gcp_to_gcp.tf.erb b/mmv1/templates/terraform/examples/ha_vpn_gateway_gcp_to_gcp.tf.erb index fc48c1388f5b..6d47ba2abd50 100644 --- a/mmv1/templates/terraform/examples/ha_vpn_gateway_gcp_to_gcp.tf.erb +++ b/mmv1/templates/terraform/examples/ha_vpn_gateway_gcp_to_gcp.tf.erb @@ -1,4 +1,3 @@ -# [START cloudvpn_ha_gcp_to_gcp] resource "google_compute_ha_vpn_gateway" "<%= ctx[:primary_resource_id] %>" { region = "us-central1" name = "<%= ctx[:vars]['ha_vpn_gateway1_name'] %>" @@ -178,4 +177,3 @@ resource "google_compute_router_peer" "router2_peer2" { advertised_route_priority = 100 interface = google_compute_router_interface.router2_interface2.name } -# [END cloudvpn_ha_gcp_to_gcp] diff --git 
a/mmv1/templates/terraform/examples/instance_custom_hostname.tf.erb b/mmv1/templates/terraform/examples/instance_custom_hostname.tf.erb index d487410d10f8..2e6147d44e46 100644 --- a/mmv1/templates/terraform/examples/instance_custom_hostname.tf.erb +++ b/mmv1/templates/terraform/examples/instance_custom_hostname.tf.erb @@ -1,4 +1,3 @@ -# [START compute_custom_hostname_instance_create] resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['custom_hostname_instance_name'] %>" @@ -7,7 +6,6 @@ resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { # Set a custom hostname below hostname = "hashicorptest.com" - boot_disk { initialize_params { image = "debian-cloud/debian-11" @@ -21,4 +19,3 @@ resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { } } -# [END compute_custom_hostname_instance_create] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/instance_settings_basic.tf.erb b/mmv1/templates/terraform/examples/instance_settings_basic.tf.erb index 7dcc9218e779..c664f01a49f6 100644 --- a/mmv1/templates/terraform/examples/instance_settings_basic.tf.erb +++ b/mmv1/templates/terraform/examples/instance_settings_basic.tf.erb @@ -1,4 +1,3 @@ -# [START instance_settings_basic] resource "google_compute_instance_settings" "<%= ctx[:primary_resource_id] %>" { zone = "us-east7-b" @@ -9,4 +8,3 @@ resource "google_compute_instance_settings" "<%= ctx[:primary_resource_id] %>" { } } -# [END instance_settings_basic] diff --git a/mmv1/templates/terraform/examples/instance_virtual_display_enabled.tf.erb b/mmv1/templates/terraform/examples/instance_virtual_display_enabled.tf.erb index 6c74d280868d..6efb91a2e298 100644 --- a/mmv1/templates/terraform/examples/instance_virtual_display_enabled.tf.erb +++ b/mmv1/templates/terraform/examples/instance_virtual_display_enabled.tf.erb @@ -1,13 +1,10 @@ -# [START compute_instance_virtual_display_enabled] resource "google_compute_instance" "<%= 
ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['instance_virtual_display'] %>" machine_type = "f1-micro" zone = "us-central1-c" - # Set the below to true to enable virtual display enable_display = true - boot_disk { initialize_params { image = "debian-cloud/debian-11" @@ -21,4 +18,3 @@ resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { } } -# [END compute_instance_virtual_display_enabled] diff --git a/mmv1/templates/terraform/examples/int_https_lb_https_redirect.tf.erb b/mmv1/templates/terraform/examples/int_https_lb_https_redirect.tf.erb index b938426cb79c..ceb616c816b2 100644 --- a/mmv1/templates/terraform/examples/int_https_lb_https_redirect.tf.erb +++ b/mmv1/templates/terraform/examples/int_https_lb_https_redirect.tf.erb @@ -1,6 +1,5 @@ # Internal HTTPS load balancer with HTTP-to-HTTPS redirect -# [START cloudloadbalancing_int_https_with_redirect] # VPC network resource "google_compute_network" "default" { @@ -283,4 +282,3 @@ resource "google_compute_region_url_map" "<%= ctx[:primary_resource_id] %>" { } } } -# [END cloudloadbalancing_int_https_with_redirect] diff --git a/mmv1/templates/terraform/examples/internal_http_lb_with_mig_backend.tf.erb b/mmv1/templates/terraform/examples/internal_http_lb_with_mig_backend.tf.erb index b315f3be309f..67090495776b 100644 --- a/mmv1/templates/terraform/examples/internal_http_lb_with_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/internal_http_lb_with_mig_backend.tf.erb @@ -1,6 +1,5 @@ # Internal HTTP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "ilb_network" { name = "<%= ctx[:vars]['ilb_network_name'] %>" @@ -188,4 +187,3 @@ resource "google_compute_instance" "vm-test" { } } } -# [END cloudloadbalancing_int_http_gce] diff --git a/mmv1/templates/terraform/examples/internal_tcp_udp_lb_with_mig_backend.tf.erb b/mmv1/templates/terraform/examples/internal_tcp_udp_lb_with_mig_backend.tf.erb 
index 88584b4632ef..ef673e831077 100644 --- a/mmv1/templates/terraform/examples/internal_tcp_udp_lb_with_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/internal_tcp_udp_lb_with_mig_backend.tf.erb @@ -1,6 +1,5 @@ # Internal TCP/UDP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_tcp_udp_gce] # VPC resource "google_compute_network" "ilb_network" { name = "<%= ctx[:vars]['ilb_network_name'] %>" @@ -177,4 +176,3 @@ resource "google_compute_instance" "vm_test" { } } } -# [END cloudloadbalancing_int_tcp_udp_gce] diff --git a/mmv1/templates/terraform/examples/network_custom_firewall_enforcement_order.tf.erb b/mmv1/templates/terraform/examples/network_custom_firewall_enforcement_order.tf.erb index b4dfd75a6d46..f6525d486d24 100644 --- a/mmv1/templates/terraform/examples/network_custom_firewall_enforcement_order.tf.erb +++ b/mmv1/templates/terraform/examples/network_custom_firewall_enforcement_order.tf.erb @@ -1,8 +1,6 @@ -# [START vpc_auto_create] resource "google_compute_network" "<%= ctx[:primary_resource_id] %>" { project = "<%= ctx[:test_env_vars]["project"] %>" name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = true network_firewall_policy_enforcement_order = "BEFORE_CLASSIC_FIREWALL" } -# [END vpc_auto_create] diff --git a/mmv1/templates/terraform/examples/network_custom_mtu.tf.erb b/mmv1/templates/terraform/examples/network_custom_mtu.tf.erb index a9250f3e645b..ffcf20bd6e33 100644 --- a/mmv1/templates/terraform/examples/network_custom_mtu.tf.erb +++ b/mmv1/templates/terraform/examples/network_custom_mtu.tf.erb @@ -1,8 +1,6 @@ -# [START vpc_auto_create] resource "google_compute_network" "<%= ctx[:primary_resource_id] %>" { project = "<%= ctx[:test_env_vars]["project"] %>" name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = true mtu = 1460 } -# [END vpc_auto_create] \ No newline at end of file diff --git 
a/mmv1/templates/terraform/examples/network_management_connectivity_test_addresses.tf.erb b/mmv1/templates/terraform/examples/network_management_connectivity_test_addresses.tf.erb index 7bd05ca1f941..f67260dd4f94 100644 --- a/mmv1/templates/terraform/examples/network_management_connectivity_test_addresses.tf.erb +++ b/mmv1/templates/terraform/examples/network_management_connectivity_test_addresses.tf.erb @@ -1,4 +1,3 @@ -# [START networkmanagement_test_addresses] resource "google_network_management_connectivity_test" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['primary_resource_name'] %>" source { @@ -43,4 +42,3 @@ resource "google_compute_address" "dest-addr" { address = "10.0.43.43" region = "us-central1" } -# [END networkmanagement_test_addresses] diff --git a/mmv1/templates/terraform/examples/network_management_connectivity_test_instances.tf.erb b/mmv1/templates/terraform/examples/network_management_connectivity_test_instances.tf.erb index 190c4fc40b1e..e3a712045aac 100644 --- a/mmv1/templates/terraform/examples/network_management_connectivity_test_instances.tf.erb +++ b/mmv1/templates/terraform/examples/network_management_connectivity_test_instances.tf.erb @@ -1,4 +1,3 @@ -# [START networkmanagement_test_instances] resource "google_network_management_connectivity_test" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['primary_resource_name'] %>" source { @@ -57,4 +56,3 @@ data "google_compute_image" "debian_9" { family = "debian-11" project = "debian-cloud" } -# [END networkmanagement_test_instances] diff --git a/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb b/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb index 2fc0dff39194..9eb60a35f993 100644 --- a/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb +++ b/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb @@ -1,5 +1,4 @@ # Internal HTTP load 
balancer with a managed instance group backend -# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "ilb_network" { name = "<%= ctx[:vars]['ilb_network_name'] %>" @@ -188,9 +187,7 @@ resource "google_compute_firewall" "fw_ilb_to_backends" { google_compute_firewall.fw_iap ] } -# [END cloudloadbalancing_int_http_gce] -# [START lb_route_extension] resource "google_network_services_lb_route_extension" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['lb_route_extension_name'] %>" description = "my route extension" @@ -349,4 +346,3 @@ resource "google_compute_region_backend_service" "callouts_backend" { google_compute_region_backend_service.default ] } -# [END lb_route_extension] diff --git a/mmv1/templates/terraform/examples/network_services_lb_traffic_extension_basic.tf.erb b/mmv1/templates/terraform/examples/network_services_lb_traffic_extension_basic.tf.erb index a2ff13074a89..899eaab56e66 100644 --- a/mmv1/templates/terraform/examples/network_services_lb_traffic_extension_basic.tf.erb +++ b/mmv1/templates/terraform/examples/network_services_lb_traffic_extension_basic.tf.erb @@ -177,9 +177,7 @@ resource "google_compute_firewall" "fw_ilb_to_backends" { google_compute_firewall.fw_iap ] } -# [END cloudloadbalancing_int_http_gce] -# [START lb_traffic_extension] resource "google_network_services_lb_traffic_extension" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['lb_traffic_extension_name'] %>" description = "my traffic extension" @@ -334,4 +332,3 @@ resource "google_compute_region_backend_service" "callouts_backend" { google_compute_region_backend_service.default ] } -# [END lb_traffic_extension] diff --git a/mmv1/templates/terraform/examples/private_service_connect_google_apis.tf.erb b/mmv1/templates/terraform/examples/private_service_connect_google_apis.tf.erb index a106bf40d440..33a0bfc10783 100644 --- a/mmv1/templates/terraform/examples/private_service_connect_google_apis.tf.erb +++ 
b/mmv1/templates/terraform/examples/private_service_connect_google_apis.tf.erb @@ -1,4 +1,3 @@ -# [START vpc_subnet_private_access] resource "google_compute_network" "network" { provider = google-beta project = "<%= ctx[:test_env_vars]['project'] %>" @@ -15,9 +14,7 @@ resource "google_compute_subnetwork" "vpc_subnetwork" { network = google_compute_network.network.id private_ip_google_access = true } -# [END vpc_subnet_private_access] -# [START compute_internal_ip_private_access] resource "google_compute_global_address" "default" { provider = google-beta project = google_compute_network.network.project @@ -27,9 +24,7 @@ resource "google_compute_global_address" "default" { network = google_compute_network.network.id address = "100.100.100.106" } -# [END compute_internal_ip_private_access] -# [START compute_forwarding_rule_private_access] resource "google_compute_global_forwarding_rule" "default" { provider = google-beta project = google_compute_network.network.project @@ -43,4 +38,3 @@ resource "google_compute_global_forwarding_rule" "default" { service_directory_region = "europe-west3" } } -# [END compute_forwarding_rule_private_access] diff --git a/mmv1/templates/terraform/examples/privateca_capool_all_fields.tf.erb b/mmv1/templates/terraform/examples/privateca_capool_all_fields.tf.erb index d1a82fdc3506..bbc6dd4ccb45 100644 --- a/mmv1/templates/terraform/examples/privateca_capool_all_fields.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_capool_all_fields.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_ca_pool_all_fields] resource "google_privateca_ca_pool" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]["name"] %>" location = "us-central1" @@ -88,4 +87,3 @@ resource "google_privateca_ca_pool" "<%= ctx[:primary_resource_id] %>" { } } } -# [END privateca_create_ca_pool_all_fields] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_capool_basic.tf.erb 
b/mmv1/templates/terraform/examples/privateca_capool_basic.tf.erb index 6bda46ae48cf..5c64b66dc306 100644 --- a/mmv1/templates/terraform/examples/privateca_capool_basic.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_capool_basic.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_ca_pool] resource "google_privateca_ca_pool" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]["name"] %>" location = "us-central1" @@ -11,4 +10,3 @@ resource "google_privateca_ca_pool" "<%= ctx[:primary_resource_id] %>" { foo = "bar" } } -# [END privateca_create_ca_pool] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_certificate_authority_basic.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_authority_basic.tf.erb index dc458d6dfebf..8a0bc4b6e59a 100644 --- a/mmv1/templates/terraform/examples/privateca_certificate_authority_basic.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_authority_basic.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_ca] resource "google_privateca_certificate_authority" "<%= ctx[:primary_resource_id] %>" { // This example assumes this pool already exists. 
// Pools cannot be deleted in normal test circumstances, so we depend on static pools @@ -47,4 +46,3 @@ resource "google_privateca_certificate_authority" "<%= ctx[:primary_resource_id] algorithm = "RSA_PKCS1_4096_SHA256" } } -# [END privateca_create_ca] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_certificate_authority_byo_key.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_authority_byo_key.tf.erb index 392e848712f3..c91014c663ed 100644 --- a/mmv1/templates/terraform/examples/privateca_certificate_authority_byo_key.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_authority_byo_key.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_ca_byo_key] resource "google_project_service_identity" "privateca_sa" { service = "privateca.googleapis.com" } @@ -69,4 +68,3 @@ resource "google_privateca_certificate_authority" "<%= ctx[:primary_resource_id] google_kms_crypto_key_iam_member.privateca_sa_keyuser_viewer, ] } -# [END privateca_create_ca_byo_key] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_certificate_authority_custom_ski.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_authority_custom_ski.tf.erb index 63e4c26f231f..b734f094c82e 100644 --- a/mmv1/templates/terraform/examples/privateca_certificate_authority_custom_ski.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_authority_custom_ski.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_ca] resource "google_privateca_certificate_authority" "<%= ctx[:primary_resource_id] %>" { // This example assumes this pool already exists. 
// Pools cannot be deleted in normal test circumstances, so we depend on static pools @@ -50,4 +49,3 @@ resource "google_privateca_certificate_authority" "<%= ctx[:primary_resource_id] cloud_kms_key_version = "<%= ctx[:vars]["kms_key_name"] %>/cryptoKeyVersions/1" } } -# [END privateca_create_ca] diff --git a/mmv1/templates/terraform/examples/privateca_certificate_authority_subordinate.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_authority_subordinate.tf.erb index ba88a4dd8958..e664f695ec17 100644 --- a/mmv1/templates/terraform/examples/privateca_certificate_authority_subordinate.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_authority_subordinate.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_subordinateca] resource "google_privateca_certificate_authority" "root-ca" { pool = "<%= ctx[:vars]["pool_name"] %>" certificate_authority_id = "<%= ctx[:vars]["certificate_authority_id"] %>-root" @@ -93,4 +92,3 @@ resource "google_privateca_certificate_authority" "<%= ctx[:primary_resource_id] } type = "SUBORDINATE" } -# [END privateca_create_subordinateca] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_certificate_config.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_config.tf.erb index 2b25835dc3a5..a1195e873264 100644 --- a/mmv1/templates/terraform/examples/privateca_certificate_config.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_config.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_config] resource "google_privateca_ca_pool" "default" { location = "us-central1" @@ -99,4 +98,3 @@ resource "google_privateca_certificate" "<%= ctx[:primary_resource_id] %>" { } } } -# [END privateca_create_certificate_config] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_certificate_csr.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_csr.tf.erb index 6daf05c78929..49968b90cf73 100644 --- 
a/mmv1/templates/terraform/examples/privateca_certificate_csr.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_csr.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_csr] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "<%= ctx[:vars]["ca_pool_id"] %>" @@ -55,4 +54,3 @@ resource "google_privateca_certificate" "<%= ctx[:primary_resource_id] %>" { lifetime = "860s" pem_csr = file("test-fixtures/rsa_csr.pem") } -# [END privateca_create_certificate_csr] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_certificate_custom_ski.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_custom_ski.tf.erb index ccbf9a7f741c..df01c2b40205 100644 --- a/mmv1/templates/terraform/examples/privateca_certificate_custom_ski.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_custom_ski.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_certificate] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "<%= ctx[:vars]["ca_pool_id"] %>" @@ -90,4 +89,3 @@ resource "google_privateca_certificate" "<%= ctx[:primary_resource_id] %>" { // need to be explicitly connected to it depends_on = [google_privateca_certificate_authority.default] } -# [END privateca_create_certificate] diff --git a/mmv1/templates/terraform/examples/privateca_certificate_no_authority.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_no_authority.tf.erb index dee0edf68bb5..73b985a7c551 100644 --- a/mmv1/templates/terraform/examples/privateca_certificate_no_authority.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_no_authority.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_certificate] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "<%= ctx[:vars]["ca_pool_id"] %>" @@ -87,4 +86,3 @@ resource "google_privateca_certificate" "<%= ctx[:primary_resource_id] %>" { // need to be explicitly connected to it 
depends_on = [google_privateca_certificate_authority.default] } -# [END privateca_create_certificate] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_certificate_with_template.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_with_template.tf.erb index 7f675c722980..34f46e4deb0b 100644 --- a/mmv1/templates/terraform/examples/privateca_certificate_with_template.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_with_template.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_template] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "<%= ctx[:vars]["ca_pool_id"] %>" @@ -131,4 +130,3 @@ resource "google_privateca_certificate" "<%= ctx[:primary_resource_id] %>" { pem_csr = file("test-fixtures/rsa_csr.pem") certificate_template = google_privateca_certificate_template.default.id } -# [END privateca_create_certificate_template] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_quickstart.tf.erb b/mmv1/templates/terraform/examples/privateca_quickstart.tf.erb index 6d88934175ea..4fdd1504e837 100644 --- a/mmv1/templates/terraform/examples/privateca_quickstart.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_quickstart.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_quickstart] provider google{} provider tls{} @@ -94,4 +93,3 @@ resource "google_privateca_certificate" "default" { name = "<%= ctx[:vars]['my_certificate'] %>" pem_csr = tls_cert_request.example.cert_request_pem } -# [END privateca_quickstart] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/privateca_template_basic.tf.erb b/mmv1/templates/terraform/examples/privateca_template_basic.tf.erb index 7af7c93673b9..d9e29702a91c 100644 --- a/mmv1/templates/terraform/examples/privateca_template_basic.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_template_basic.tf.erb @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_template] 
resource "google_privateca_certificate_template" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]["name"] %>" location = "us-central1" @@ -71,4 +70,3 @@ resource "google_privateca_certificate_template" "<%= ctx[:primary_resource_id] label-one = "value-one" } } -# [END privateca_create_certificate_template] diff --git a/mmv1/templates/terraform/examples/region_autoscaler_basic.tf.erb b/mmv1/templates/terraform/examples/region_autoscaler_basic.tf.erb index e64ea1c12fb0..8550d9cfcb78 100644 --- a/mmv1/templates/terraform/examples/region_autoscaler_basic.tf.erb +++ b/mmv1/templates/terraform/examples/region_autoscaler_basic.tf.erb @@ -14,7 +14,6 @@ resource "google_compute_region_autoscaler" "<%= ctx[:primary_resource_id] %>" { } } -# [START compute_instance_template_basic] resource "google_compute_instance_template" "foobar" { name = "<%= ctx[:vars]['instance_template_name'] %>" machine_type = "e2-standard-4" @@ -46,7 +45,6 @@ resource "google_compute_instance_template" "foobar" { ] } } -# [END compute_instance_template_basic] resource "google_compute_target_pool" "foobar" { name = "<%= ctx[:vars]['target_pool_name'] %>" diff --git a/mmv1/templates/terraform/examples/region_target_tcp_proxy_basic.tf.erb b/mmv1/templates/terraform/examples/region_target_tcp_proxy_basic.tf.erb index a6cbec32ec73..0237febc1db7 100644 --- a/mmv1/templates/terraform/examples/region_target_tcp_proxy_basic.tf.erb +++ b/mmv1/templates/terraform/examples/region_target_tcp_proxy_basic.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_region_target_tcp_proxy_basic] resource "google_compute_region_target_tcp_proxy" "default" { name = "<%= ctx[:vars]['region_target_tcp_proxy_name'] %>" region = "europe-west4" @@ -20,9 +19,7 @@ resource "google_compute_region_health_check" "default" { region = "europe-west4" timeout_sec = 1 check_interval_sec = 1 - tcp_health_check { port = "80" } } -# [END cloudloadbalancing_region_target_tcp_proxy_basic] \ No newline at end of file diff --git 
a/mmv1/templates/terraform/examples/region_url_map_path_template_match.tf.erb b/mmv1/templates/terraform/examples/region_url_map_path_template_match.tf.erb index 5a7008e1b80b..6c3218949f96 100644 --- a/mmv1/templates/terraform/examples/region_url_map_path_template_match.tf.erb +++ b/mmv1/templates/terraform/examples/region_url_map_path_template_match.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_url_map_path_template_match] resource "google_compute_region_url_map" "<%= ctx[:primary_resource_id] %>" { region = "us-central1" @@ -87,4 +86,3 @@ resource "google_compute_region_health_check" "default" { } } -# [END cloudloadbalancing_url_map_path_template_match] diff --git a/mmv1/templates/terraform/examples/regional_external_http_load_balancer.tf.erb b/mmv1/templates/terraform/examples/regional_external_http_load_balancer.tf.erb index fc409598a4ae..59fb83b35491 100644 --- a/mmv1/templates/terraform/examples/regional_external_http_load_balancer.tf.erb +++ b/mmv1/templates/terraform/examples/regional_external_http_load_balancer.tf.erb @@ -1,14 +1,10 @@ -# [START cloudloadbalancing_rllxlb_example] -# [START cloudloadbalancing_vpc_network_rllxlb_example] resource "google_compute_network" "default" { name = "<%= ctx[:vars]['lb_network'] %>" auto_create_subnetworks = false routing_mode = "REGIONAL" } -# [END cloudloadbalancing_vpc_network_rllxlb_example] -# [START cloudloadbalancing_vpc_subnetwork_rllxlb_example] resource "google_compute_subnetwork" "default" { name = "<%= ctx[:vars]['backend_subnet'] %>" ip_cidr_range = "10.1.2.0/24" @@ -18,9 +14,7 @@ resource "google_compute_subnetwork" "default" { region = "us-west1" stack_type = "IPV4_ONLY" } -# [END cloudloadbalancing_vpc_subnetwork_rllxlb_example] -# [START cloudloadbalancing_vpc_proxy_subnetwork_rllxlb_example] resource "google_compute_subnetwork" "proxy_only" { name = "<%= ctx[:vars]['proxy_only_subnet'] %>" ip_cidr_range = "10.129.0.0/23" @@ -29,9 +23,7 @@ resource "google_compute_subnetwork" "proxy_only" { 
region = "us-west1" role = "ACTIVE" } -# [END cloudloadbalancing_vpc_proxy_subnetwork_rllxlb_example] -# [START cloudloadbalancing_health_firewall_rllxlb_example] resource "google_compute_firewall" "default" { name = "<%= ctx[:vars]['fw_allow_health_check'] %>" allow { @@ -43,9 +35,7 @@ resource "google_compute_firewall" "default" { source_ranges = ["130.211.0.0/22", "35.191.0.0/16"] target_tags = ["load-balanced-backend"] } -# [END cloudloadbalancing_health_firewall_rllxlb_example] -# [START cloudloadbalancing_proxy_firewall_rllxlb_example] resource "google_compute_firewall" "allow_proxy" { name = "<%= ctx[:vars]['fw_allow_proxies'] %>" allow { @@ -66,9 +56,7 @@ resource "google_compute_firewall" "allow_proxy" { source_ranges = ["10.129.0.0/23"] target_tags = ["load-balanced-backend"] } -# [END cloudloadbalancing_proxy_firewall_rllxlb_example] -# [START cloudloadbalancing_instance_template_rllxlb_example] resource "google_compute_instance_template" "default" { name = "<%= ctx[:vars]['l7_xlb_backend_template'] %>" disk { @@ -106,9 +94,7 @@ resource "google_compute_instance_template" "default" { } tags = ["load-balanced-backend"] } -# [END cloudloadbalancing_instance_template_rllxlb_example] -# [START cloudloadbalancing_instance_group_rllxlb_example] resource "google_compute_instance_group_manager" "default" { name = "<%= ctx[:vars]['l7_xlb_backend_example'] %>" zone = "us-west1-a" @@ -123,19 +109,15 @@ resource "google_compute_instance_group_manager" "default" { base_instance_name = "vm" target_size = 2 } -# [END cloudloadbalancing_instance_group_rllxlb_example] -# [START cloudloadbalancing_ip_address_rllxlb_example] resource "google_compute_address" "default" { name = "<%= ctx[:vars]['address_name'] %>" address_type = "EXTERNAL" network_tier = "STANDARD" region = "us-west1" } -# [END cloudloadbalancing_ip_address_rllxlb_example] -# [START cloudloadbalancing_health_check_rllxlb_example] resource "google_compute_region_health_check" "default" { name = "<%= 
ctx[:vars]['l7_xlb_basic_check'] %>" check_interval_sec = 5 @@ -149,9 +131,7 @@ resource "google_compute_region_health_check" "default" { timeout_sec = 5 unhealthy_threshold = 2 } -# [END cloudloadbalancing_health_check_rllxlb_example] -# [START cloudloadbalancing_backend_service_rllxlb_example] resource "google_compute_region_backend_service" "default" { name = "<%= ctx[:vars]['l7_xlb_backend_service'] %>" region = "us-west1" @@ -166,25 +146,19 @@ resource "google_compute_region_backend_service" "default" { capacity_scaler = 1.0 } } -# [END cloudloadbalancing_backend_service_rllxlb_example] -# [START cloudloadbalancing_url_map_rllxlb_example] resource "google_compute_region_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['regional_l7_xlb_map'] %>" region = "us-west1" default_service = google_compute_region_backend_service.default.id } -# [END cloudloadbalancing_url_map_rllxlb_example] -# [START cloudloadbalancing_target_http_proxy_rllxlb_example] resource "google_compute_region_target_http_proxy" "default" { name = "<%= ctx[:vars]['l7_xlb_proxy'] %>" region = "us-west1" url_map = google_compute_region_url_map.default.id } -# [END cloudloadbalancing_target_http_proxy_rllxlb_example] -# [START cloudloadbalancing_forwarding_rule_rllxlb_example] resource "google_compute_forwarding_rule" "default" { name = "l7-xlb-forwarding-rule" provider = google-beta @@ -199,6 +173,4 @@ resource "google_compute_forwarding_rule" "default" { ip_address = google_compute_address.default.address network_tier = "STANDARD" } -# [END cloudloadbalancing_forwarding_rule_rllxlb_example] -# [END cloudloadbalancing_rllxlb_example] diff --git a/mmv1/templates/terraform/examples/spot_instance_basic.tf.erb b/mmv1/templates/terraform/examples/spot_instance_basic.tf.erb index 3fb2cbf1fef3..5a8f04d91aad 100644 --- a/mmv1/templates/terraform/examples/spot_instance_basic.tf.erb +++ b/mmv1/templates/terraform/examples/spot_instance_basic.tf.erb @@ -1,4 +1,3 @@ -# [START 
compute_spot_instance_create] resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['spot_instance_name'] %>" @@ -10,7 +9,6 @@ resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { image = "debian-cloud/debian-11" } } - scheduling { preemptible = true automatic_restart = false @@ -26,4 +24,3 @@ resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { } } -# [END compute_spot_instance_create] diff --git a/mmv1/templates/terraform/examples/sql_database_basic.tf.erb b/mmv1/templates/terraform/examples/sql_database_basic.tf.erb index 05d6fee2afef..020d7b834b23 100644 --- a/mmv1/templates/terraform/examples/sql_database_basic.tf.erb +++ b/mmv1/templates/terraform/examples/sql_database_basic.tf.erb @@ -1,9 +1,7 @@ -# [START cloud_sql_database_create] resource "google_sql_database" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['database_name'] %>" instance = google_sql_database_instance.instance.name } -# [END cloud_sql_database_create] # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "instance" { diff --git a/mmv1/templates/terraform/examples/sql_database_deletion_policy.tf.erb b/mmv1/templates/terraform/examples/sql_database_deletion_policy.tf.erb index 891977d08291..4609d0a29934 100644 --- a/mmv1/templates/terraform/examples/sql_database_deletion_policy.tf.erb +++ b/mmv1/templates/terraform/examples/sql_database_deletion_policy.tf.erb @@ -1,10 +1,8 @@ -# [START cloud_sql_database_create] resource "google_sql_database" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['database_name'] %>" instance = google_sql_database_instance.instance.name deletion_policy = "ABANDON" } -# [END cloud_sql_database_create] # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource 
"google_sql_database_instance" "instance" { diff --git a/mmv1/templates/terraform/examples/sql_database_instance_my_sql.tf.erb b/mmv1/templates/terraform/examples/sql_database_instance_my_sql.tf.erb index be0da23b96b9..79c06a034774 100644 --- a/mmv1/templates/terraform/examples/sql_database_instance_my_sql.tf.erb +++ b/mmv1/templates/terraform/examples/sql_database_instance_my_sql.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_80_db_n1_s2] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['database_instance_name'] %>" region = "us-central1" @@ -8,9 +7,7 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_80_db_n1_s2] -# [START cloud_sql_mysql_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -27,4 +24,3 @@ resource "google_sql_user" "user" { enable_password_verification = true } } -# [END cloud_sql_mysql_instance_user] diff --git a/mmv1/templates/terraform/examples/sql_database_instance_postgres.tf.erb b/mmv1/templates/terraform/examples/sql_database_instance_postgres.tf.erb index 376bda2eacec..82aa94afd586 100644 --- a/mmv1/templates/terraform/examples/sql_database_instance_postgres.tf.erb +++ b/mmv1/templates/terraform/examples/sql_database_instance_postgres.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_80_db_n1_s2] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['database_instance_name'] %>" @@ -9,9 +8,7 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_80_db_n1_s2] -# [START cloud_sql_postgres_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -22,4 +19,3 @@ resource "google_sql_user" "user" { instance = 
google_sql_database_instance.instance.name password = random_password.pwd.result } -# [END cloud_sql_postgres_instance_user] diff --git a/mmv1/templates/terraform/examples/sql_database_instance_sqlserver.tf.erb b/mmv1/templates/terraform/examples/sql_database_instance_sqlserver.tf.erb index 7c38e07d6643..0be842147a82 100644 --- a/mmv1/templates/terraform/examples/sql_database_instance_sqlserver.tf.erb +++ b/mmv1/templates/terraform/examples/sql_database_instance_sqlserver.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_80_db_n1_s2] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['database_instance_name'] %>" region = "us-central1" @@ -9,9 +8,7 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_80_db_n1_s2] -# [START cloud_sql_sqlserver_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -22,4 +19,3 @@ resource "google_sql_user" "user" { instance = google_sql_database_instance.instance.name password = random_password.pwd.result } -# [END cloud_sql_sqlserver_instance_user] diff --git a/mmv1/templates/terraform/examples/sql_instance_cmek.tf.erb b/mmv1/templates/terraform/examples/sql_instance_cmek.tf.erb index e0134a2caa28..83236aa87fea 100644 --- a/mmv1/templates/terraform/examples/sql_instance_cmek.tf.erb +++ b/mmv1/templates/terraform/examples/sql_instance_cmek.tf.erb @@ -1,28 +1,21 @@ -# [START cloud_sql_instance_service_identity] resource "google_project_service_identity" "gcp_sa_cloud_sql" { provider = google-beta service = "sqladmin.googleapis.com" } -# [END cloud_sql_instance_service_identity] -# [START cloud_sql_instance_keyring] resource "google_kms_key_ring" "keyring" { provider = google-beta name = "<%= ctx[:vars]['keyring_name'] %>" location = "us-central1" } -# [END cloud_sql_instance_keyring] -# [START cloud_sql_instance_key] 
resource "google_kms_crypto_key" "key" { provider = google-beta name = "<%= ctx[:vars]['crypto_key_name'] %>" key_ring = google_kms_key_ring.keyring.id purpose = "ENCRYPT_DECRYPT" } -# [END cloud_sql_instance_key] -# [START cloud_sql_instance_crypto_key] resource "google_kms_crypto_key_iam_member" "crypto_key" { provider = google-beta crypto_key_id = google_kms_crypto_key.key.id @@ -30,9 +23,7 @@ resource "google_kms_crypto_key_iam_member" "crypto_key" { member = "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}" } -# [END cloud_sql_instance_crypto_key] -# [START cloud_sql_mysql_instance_cmek] resource "google_sql_database_instance" "mysql_instance_with_cmek" { name = "<%= ctx[:vars]['mysql_instance_cmek'] %>" provider = google-beta @@ -44,9 +35,7 @@ resource "google_sql_database_instance" "mysql_instance_with_cmek" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_cmek] -# [START cloud_sql_postgres_instance_cmek] resource "google_sql_database_instance" "postgres_instance_with_cmek" { name = "<%= ctx[:vars]['postgres_instance_cmek'] %>" provider = google-beta @@ -58,9 +47,7 @@ resource "google_sql_database_instance" "postgres_instance_with_cmek" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_cmek] -# [START cloud_sql_sqlserver_instance_cmek] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_instance_cmek'] %>" provider = google-beta @@ -73,4 +60,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_cmek] diff --git a/mmv1/templates/terraform/examples/sql_instance_ha.tf.erb b/mmv1/templates/terraform/examples/sql_instance_ha.tf.erb index d73d43336199..9c068f2e207a 100644 --- a/mmv1/templates/terraform/examples/sql_instance_ha.tf.erb +++ 
b/mmv1/templates/terraform/examples/sql_instance_ha.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_ha] resource "google_sql_database_instance" "mysql_instance_ha" { name = "<%= ctx[:vars]['mysql_instance_ha'] %>" region = "asia-northeast1" @@ -14,9 +13,7 @@ resource "google_sql_database_instance" "mysql_instance_ha" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_ha] -# [START cloud_sql_postgres_instance_ha] resource "google_sql_database_instance" "postgres_instance_ha" { name = "<%= ctx[:vars]['postgres_instance_ha'] %>" region = "us-central1" @@ -32,9 +29,7 @@ resource "google_sql_database_instance" "postgres_instance_ha" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_ha] -# [START cloud_sql_sqlserver_instance_ha] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_instance_ha'] %>" region = "us-central1" @@ -50,4 +45,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_ha] diff --git a/mmv1/templates/terraform/examples/sql_instance_iam_condition.tf.erb b/mmv1/templates/terraform/examples/sql_instance_iam_condition.tf.erb index 038ab74a6abe..dba3989e5014 100644 --- a/mmv1/templates/terraform/examples/sql_instance_iam_condition.tf.erb +++ b/mmv1/templates/terraform/examples/sql_instance_iam_condition.tf.erb @@ -6,7 +6,6 @@ resource "google_project_service_identity" "gcp_sa_cloud_sql" { service = "sqladmin.googleapis.com" } -# [START cloud_sql_instance_iam_conditions] data "google_iam_policy" "sql_iam_policy" { binding { role = "roles/cloudsql.client" @@ -25,7 +24,6 @@ resource "google_project_iam_policy" "project" { project = data.google_project.project.id policy_data = data.google_iam_policy.sql_iam_policy.policy_data } -# [END 
cloud_sql_instance_iam_conditions] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mysql_instance_iam_condition'] %>" diff --git a/mmv1/templates/terraform/examples/sql_instance_labels.tf.erb b/mmv1/templates/terraform/examples/sql_instance_labels.tf.erb index 9acfab3c11f0..ac5e13f7904b 100644 --- a/mmv1/templates/terraform/examples/sql_instance_labels.tf.erb +++ b/mmv1/templates/terraform/examples/sql_instance_labels.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_labels] resource "google_sql_database_instance" "mysql_instance_labels" { name = "<%= ctx[:vars]['mysql_instance_labels'] %>" region = "us-central1" @@ -12,9 +11,7 @@ resource "google_sql_database_instance" "mysql_instance_labels" { } deletion_protection = "false" } -# [END cloud_sql_mysql_instance_labels] -# [START cloud_sql_postgres_instance_labels] resource "google_sql_database_instance" "postgres_instance_labels" { name = "<%= ctx[:vars]['postgres_instance_labels'] %>" region = "us-central1" @@ -28,9 +25,7 @@ resource "google_sql_database_instance" "postgres_instance_labels" { } deletion_protection = "false" } -# [END cloud_sql_postgres_instance_labels] -# [START cloud_sql_sqlserver_instance_labels] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_instance_labels'] %>" region = "us-central1" @@ -45,4 +40,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "false" } -# [END cloud_sql_sqlserver_instance_labels] diff --git a/mmv1/templates/terraform/examples/sql_instance_pitr.tf.erb b/mmv1/templates/terraform/examples/sql_instance_pitr.tf.erb index 63be88d4a883..2c22326bc3a3 100644 --- a/mmv1/templates/terraform/examples/sql_instance_pitr.tf.erb +++ b/mmv1/templates/terraform/examples/sql_instance_pitr.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_pitr] resource "google_sql_database_instance" "<%= 
ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mysql_instance_pitr'] %>" region = "asia-northeast1" @@ -14,9 +13,7 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_pitr] -# [START cloud_sql_postgres_instance_pitr] resource "google_sql_database_instance" "postgres_instance_pitr" { name = "<%= ctx[:vars]['postgres_instance__pitr'] %>" region = "us-central1" @@ -32,4 +29,3 @@ resource "google_sql_database_instance" "postgres_instance_pitr" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_pitr] diff --git a/mmv1/templates/terraform/examples/sql_instance_ssl_cert.tf.erb b/mmv1/templates/terraform/examples/sql_instance_ssl_cert.tf.erb index e79b8aa20b7f..0baba74f59ef 100644 --- a/mmv1/templates/terraform/examples/sql_instance_ssl_cert.tf.erb +++ b/mmv1/templates/terraform/examples/sql_instance_ssl_cert.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_require_ssl] resource "google_sql_database_instance" "mysql_instance" { name = "<%= ctx[:vars]['mysql_instance'] %>" region = "asia-northeast1" @@ -11,16 +10,12 @@ resource "google_sql_database_instance" "mysql_instance" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_require_ssl] -# [START cloud_sql_mysql_instance_ssl_cert] resource "google_sql_ssl_cert" "mysql_client_cert" { common_name = "mysql_common_name" instance = google_sql_database_instance.mysql_instance.name } -# [END cloud_sql_mysql_instance_ssl_cert] -# [START cloud_sql_postgres_instance_require_ssl] resource "google_sql_database_instance" "postgres_instance" { name = "<%= ctx[:vars]['postgres_instance'] %>" region = "asia-northeast1" @@ -33,16 +28,12 @@ resource "google_sql_database_instance" "postgres_instance" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END 
cloud_sql_postgres_instance_require_ssl] -# [START cloud_sql_postgres_instance_ssl_cert] resource "google_sql_ssl_cert" "postgres_client_cert" { common_name = "postgres_common_name" instance = google_sql_database_instance.postgres_instance.name } -# [END cloud_sql_postgres_instance_ssl_cert] -# [START cloud_sql_sqlserver_instance_require_ssl] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_instance'] %>" region = "asia-northeast1" @@ -56,4 +47,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_require_ssl] diff --git a/mmv1/templates/terraform/examples/sql_mysql_instance_authorized_network.tf.erb b/mmv1/templates/terraform/examples/sql_mysql_instance_authorized_network.tf.erb index 651225195b4f..f4c14135fc8c 100644 --- a/mmv1/templates/terraform/examples/sql_mysql_instance_authorized_network.tf.erb +++ b/mmv1/templates/terraform/examples/sql_mysql_instance_authorized_network.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_authorized_network] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mysql_instance_with_authorized_network'] %>" region = "us-central1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/sql_mysql_instance_backup.tf.erb b/mmv1/templates/terraform/examples/sql_mysql_instance_backup.tf.erb index 2097850aea05..64e86572e7de 100644 --- a/mmv1/templates/terraform/examples/sql_mysql_instance_backup.tf.erb +++ b/mmv1/templates/terraform/examples/sql_mysql_instance_backup.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_backup] resource "google_sql_database_instance" "<%= 
ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mysql_instance_backup'] %>" region = "asia-northeast1" @@ -13,4 +12,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_backup] diff --git a/mmv1/templates/terraform/examples/sql_mysql_instance_backup_location.tf.erb b/mmv1/templates/terraform/examples/sql_mysql_instance_backup_location.tf.erb index 5af702ec7c5f..fdb5e6140199 100644 --- a/mmv1/templates/terraform/examples/sql_mysql_instance_backup_location.tf.erb +++ b/mmv1/templates/terraform/examples/sql_mysql_instance_backup_location.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_backup_location] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mysql_instance_backup_location'] %>" region = "asia-northeast1" @@ -12,4 +11,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/sql_mysql_instance_backup_retention.tf.erb b/mmv1/templates/terraform/examples/sql_mysql_instance_backup_retention.tf.erb index 7624c8951a6c..0dd93f7b6904 100644 --- a/mmv1/templates/terraform/examples/sql_mysql_instance_backup_retention.tf.erb +++ b/mmv1/templates/terraform/examples/sql_mysql_instance_backup_retention.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_backup_retention] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mysql_instance_backup_retention'] %>" region = "asia-northeast1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_backup_retention] diff --git 
a/mmv1/templates/terraform/examples/sql_mysql_instance_clone.tf.erb b/mmv1/templates/terraform/examples/sql_mysql_instance_clone.tf.erb index 42c716b5d470..0bc24a1121b4 100644 --- a/mmv1/templates/terraform/examples/sql_mysql_instance_clone.tf.erb +++ b/mmv1/templates/terraform/examples/sql_mysql_instance_clone.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_source] resource "google_sql_database_instance" "source" { name = "<%= ctx[:vars]['mysql_instance_source_name'] %>" region = "us-central1" @@ -8,9 +7,7 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_source] -# [START cloud_sql_mysql_instance_clone] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mysql_instance_clone_name'] %>" region = "us-central1" @@ -20,4 +17,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_clone] diff --git a/mmv1/templates/terraform/examples/sql_mysql_instance_flags.tf.erb b/mmv1/templates/terraform/examples/sql_mysql_instance_flags.tf.erb index 85171709f9ba..8dc76ac0b216 100644 --- a/mmv1/templates/terraform/examples/sql_mysql_instance_flags.tf.erb +++ b/mmv1/templates/terraform/examples/sql_mysql_instance_flags.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_flags] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { database_version = "MYSQL_8_0" name = "<%= ctx[:vars]['mysql_instance'] %>" @@ -21,4 +20,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_flags] diff --git a/mmv1/templates/terraform/examples/sql_mysql_instance_public_ip.tf.erb b/mmv1/templates/terraform/examples/sql_mysql_instance_public_ip.tf.erb index 
c7ba81c1554e..a3088daa01be 100644 --- a/mmv1/templates/terraform/examples/sql_mysql_instance_public_ip.tf.erb +++ b/mmv1/templates/terraform/examples/sql_mysql_instance_public_ip.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_public_ip] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { database_version = "MYSQL_5_7" name = "<%= ctx[:vars]['mysql_public_ip_instance_name'] %>" @@ -21,4 +20,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/sql_mysql_instance_pvp.tf.erb b/mmv1/templates/terraform/examples/sql_mysql_instance_pvp.tf.erb index 48d224ff918a..43cf7746672c 100644 --- a/mmv1/templates/terraform/examples/sql_mysql_instance_pvp.tf.erb +++ b/mmv1/templates/terraform/examples/sql_mysql_instance_pvp.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_pvp] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mysql_pvp_instance_name'] %>" region = "asia-northeast1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_pvp] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/sql_mysql_instance_replica.tf.erb b/mmv1/templates/terraform/examples/sql_mysql_instance_replica.tf.erb index 8f91dafddf5a..f872d84a9670 100644 --- a/mmv1/templates/terraform/examples/sql_mysql_instance_replica.tf.erb +++ b/mmv1/templates/terraform/examples/sql_mysql_instance_replica.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_primary] resource "google_sql_database_instance" "primary" { name = "<%= ctx[:vars]['mysql_primary_instance_name'] %>" region = "europe-west4" @@ -12,9 +11,7 @@ resource "google_sql_database_instance" "primary" { } 
deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_primary] -# [START cloud_sql_mysql_instance_replica] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['mysql_replica_instance_name'] %>" master_instance_name = google_sql_database_instance.primary.name @@ -32,4 +29,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_mysql_instance_replica] diff --git a/mmv1/templates/terraform/examples/sql_postgres_instance_authorized_network.tf.erb b/mmv1/templates/terraform/examples/sql_postgres_instance_authorized_network.tf.erb index da8a79773149..a470906e9409 100644 --- a/mmv1/templates/terraform/examples/sql_postgres_instance_authorized_network.tf.erb +++ b/mmv1/templates/terraform/examples/sql_postgres_instance_authorized_network.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_authorized_network] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['postgres_instance_with_authorized_network'] %>" region = "us-central1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/sql_postgres_instance_backup.tf.erb b/mmv1/templates/terraform/examples/sql_postgres_instance_backup.tf.erb index 75da7ac182af..e96215a2983d 100644 --- a/mmv1/templates/terraform/examples/sql_postgres_instance_backup.tf.erb +++ b/mmv1/templates/terraform/examples/sql_postgres_instance_backup.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_backup] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['postgres_instance_backup'] %>" region = "us-central1" @@ -12,4 +11,3 
@@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_backup] diff --git a/mmv1/templates/terraform/examples/sql_postgres_instance_backup_location.tf.erb b/mmv1/templates/terraform/examples/sql_postgres_instance_backup_location.tf.erb index 30dbb951d4ea..4e8002c5aaee 100644 --- a/mmv1/templates/terraform/examples/sql_postgres_instance_backup_location.tf.erb +++ b/mmv1/templates/terraform/examples/sql_postgres_instance_backup_location.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_backup_location] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['postgres_instance_backup_location'] %>" region = "us-central1" @@ -12,4 +11,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/sql_postgres_instance_backup_retention.tf.erb b/mmv1/templates/terraform/examples/sql_postgres_instance_backup_retention.tf.erb index d9ba9f7fd261..c8779aeca19d 100644 --- a/mmv1/templates/terraform/examples/sql_postgres_instance_backup_retention.tf.erb +++ b/mmv1/templates/terraform/examples/sql_postgres_instance_backup_retention.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_backup_retention] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['postgres_instance_backup_retention'] %>" region = "us-central1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/sql_postgres_instance_clone.tf.erb 
b/mmv1/templates/terraform/examples/sql_postgres_instance_clone.tf.erb index 2e70ac4c8205..b0c2db262099 100644 --- a/mmv1/templates/terraform/examples/sql_postgres_instance_clone.tf.erb +++ b/mmv1/templates/terraform/examples/sql_postgres_instance_clone.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_source] resource "google_sql_database_instance" "source" { name = "<%= ctx[:vars]['postgres_instance_source_name'] %>" region = "us-central1" @@ -8,9 +7,7 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_source] -# [START cloud_sql_postgres_instance_clone] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['postgres_instance_clone_name'] %>" region = "us-central1" @@ -20,4 +17,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_clone] diff --git a/mmv1/templates/terraform/examples/sql_postgres_instance_flags.tf.erb b/mmv1/templates/terraform/examples/sql_postgres_instance_flags.tf.erb index 3e46b25f1361..094ee609fe86 100644 --- a/mmv1/templates/terraform/examples/sql_postgres_instance_flags.tf.erb +++ b/mmv1/templates/terraform/examples/sql_postgres_instance_flags.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_flags] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['postgres_instance'] %>" region = "us-central1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_flags] diff --git a/mmv1/templates/terraform/examples/sql_postgres_instance_public_ip.tf.erb b/mmv1/templates/terraform/examples/sql_postgres_instance_public_ip.tf.erb index 
5662cce8d476..2c69e6fecb35 100644 --- a/mmv1/templates/terraform/examples/sql_postgres_instance_public_ip.tf.erb +++ b/mmv1/templates/terraform/examples/sql_postgres_instance_public_ip.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_public_ip] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { database_version = "POSTGRES_14" name = "<%= ctx[:vars]['postgres_public_ip_instance_name'] %>" @@ -19,4 +18,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/sql_postgres_instance_pvp.tf.erb b/mmv1/templates/terraform/examples/sql_postgres_instance_pvp.tf.erb index da0f0282b169..5676c1aab0dc 100644 --- a/mmv1/templates/terraform/examples/sql_postgres_instance_pvp.tf.erb +++ b/mmv1/templates/terraform/examples/sql_postgres_instance_pvp.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_pvp] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['postgres_pvp_instance_name'] %>" region = "asia-northeast1" @@ -17,4 +16,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_pvp] diff --git a/mmv1/templates/terraform/examples/sql_postgres_instance_replica.tf.erb b/mmv1/templates/terraform/examples/sql_postgres_instance_replica.tf.erb index 78a8eef896c5..0ec218dd5b3d 100644 --- a/mmv1/templates/terraform/examples/sql_postgres_instance_replica.tf.erb +++ b/mmv1/templates/terraform/examples/sql_postgres_instance_replica.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_primary] resource "google_sql_database_instance" "primary" { name = "<%= ctx[:vars]['postgres_primary_instance_name'] %>" region = "europe-west4" @@ -11,9 +10,7 @@ resource 
"google_sql_database_instance" "primary" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_primary] -# [START cloud_sql_postgres_instance_replica] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['postgres_replica_instance_name'] %>" master_instance_name = google_sql_database_instance.primary.name @@ -31,4 +28,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_postgres_instance_replica] diff --git a/mmv1/templates/terraform/examples/sql_sqlserver_instance_authorized_network.tf.erb b/mmv1/templates/terraform/examples/sql_sqlserver_instance_authorized_network.tf.erb index c73b08128029..3b94e9f543fb 100644 --- a/mmv1/templates/terraform/examples/sql_sqlserver_instance_authorized_network.tf.erb +++ b/mmv1/templates/terraform/examples/sql_sqlserver_instance_authorized_network.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_authorized_network] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_instance_with_authorized_network'] %>" region = "us-central1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup.tf.erb b/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup.tf.erb index 1a840e20e729..dde6337eabb1 100644 --- a/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup.tf.erb +++ b/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_backup] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= 
ctx[:vars]['sqlserver_instance_backup'] %>" region = "us-central1" @@ -13,4 +12,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_backup] diff --git a/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup_location.tf.erb b/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup_location.tf.erb index 803fea5ac908..711aeb9ec41c 100644 --- a/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup_location.tf.erb +++ b/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup_location.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_backup_location] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_instance_backup_location'] %>" region = "us-central1" @@ -13,4 +12,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup_retention.tf.erb b/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup_retention.tf.erb index 887f3f2bdf41..26ab7ac728fb 100644 --- a/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup_retention.tf.erb +++ b/mmv1/templates/terraform/examples/sql_sqlserver_instance_backup_retention.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_backup_retention] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_instance_backup_retention'] %>" region = "us-central1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_backup_retention] diff --git 
a/mmv1/templates/terraform/examples/sql_sqlserver_instance_clone.tf.erb b/mmv1/templates/terraform/examples/sql_sqlserver_instance_clone.tf.erb index 74cd4076064a..a233977ddfad 100644 --- a/mmv1/templates/terraform/examples/sql_sqlserver_instance_clone.tf.erb +++ b/mmv1/templates/terraform/examples/sql_sqlserver_instance_clone.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_source] resource "google_sql_database_instance" "source" { name = "<%= ctx[:vars]['sqlserver_instance_source_name'] %>" region = "us-central1" @@ -9,9 +8,7 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_source] -# [START cloud_sql_sqlserver_instance_clone] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_instance_clone_name'] %>" region = "us-central1" @@ -22,4 +19,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_clone] diff --git a/mmv1/templates/terraform/examples/sql_sqlserver_instance_flags.tf.erb b/mmv1/templates/terraform/examples/sql_sqlserver_instance_flags.tf.erb index af661c675d6a..b3c455736e07 100644 --- a/mmv1/templates/terraform/examples/sql_sqlserver_instance_flags.tf.erb +++ b/mmv1/templates/terraform/examples/sql_sqlserver_instance_flags.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_flags] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_instance'] %>" region = "us-central1" @@ -21,4 +20,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_flags] diff --git a/mmv1/templates/terraform/examples/sql_sqlserver_instance_public_ip.tf.erb 
b/mmv1/templates/terraform/examples/sql_sqlserver_instance_public_ip.tf.erb index 5c79db76c210..1404f6245e54 100644 --- a/mmv1/templates/terraform/examples/sql_sqlserver_instance_public_ip.tf.erb +++ b/mmv1/templates/terraform/examples/sql_sqlserver_instance_public_ip.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_public_ip] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_public_ip_instance_name'] %>" region = "europe-west4" @@ -20,4 +19,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/sql_sqlserver_instance_replica.tf.erb b/mmv1/templates/terraform/examples/sql_sqlserver_instance_replica.tf.erb index e4a115a629d0..623bbac50c39 100644 --- a/mmv1/templates/terraform/examples/sql_sqlserver_instance_replica.tf.erb +++ b/mmv1/templates/terraform/examples/sql_sqlserver_instance_replica.tf.erb @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_primary] resource "google_sql_database_instance" "primary" { name = "<%= ctx[:vars]['sqlserver_primary_instance_name'] %>" region = "europe-west4" @@ -12,9 +11,7 @@ resource "google_sql_database_instance" "primary" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_primary] -# [START cloud_sql_sqlserver_instance_replica] resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['sqlserver_replica_instance_name'] %>" master_instance_name = google_sql_database_instance.primary.name @@ -32,4 +29,3 @@ resource "google_sql_database_instance" "<%= ctx[:primary_resource_id] %>" { } deletion_protection = "<%= ctx[:vars]['deletion_protection'] %>" } -# [END cloud_sql_sqlserver_instance_replica] diff --git 
a/mmv1/templates/terraform/examples/sql_sqlserver_vm_instance.tf.erb b/mmv1/templates/terraform/examples/sql_sqlserver_vm_instance.tf.erb index 8f5f7dcccfd5..2b5d7e1a8a04 100644 --- a/mmv1/templates/terraform/examples/sql_sqlserver_vm_instance.tf.erb +++ b/mmv1/templates/terraform/examples/sql_sqlserver_vm_instance.tf.erb @@ -14,7 +14,6 @@ resource "google_compute_subnetwork" "default" { network = google_compute_network.default.id } -# [START cloud_sql_sqlserver_vm_instance] resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { provider = google-beta name = "<%= ctx[:vars]['sqlserver_vm'] %>" @@ -39,9 +38,7 @@ resource "google_compute_instance" "<%= ctx[:primary_resource_id] %>" { subnetwork = google_compute_subnetwork.default.id } } -# [END cloud_sql_sqlserver_vm_instance] -# [START cloud_sql_sqlserver_vm_firewall_rule] resource "google_compute_firewall" "sql_server_1433" { provider = google-beta name = "<%= ctx[:vars]['sql_server_1433_3'] %>" @@ -55,4 +52,3 @@ resource "google_compute_firewall" "sql_server_1433" { priority = 1000 source_ranges = ["0.0.0.0/0"] } -# [END cloud_sql_sqlserver_vm_firewall_rule] diff --git a/mmv1/templates/terraform/examples/storage_hmac_key.tf.erb b/mmv1/templates/terraform/examples/storage_hmac_key.tf.erb index fd307ef06b5a..22c86624cb86 100644 --- a/mmv1/templates/terraform/examples/storage_hmac_key.tf.erb +++ b/mmv1/templates/terraform/examples/storage_hmac_key.tf.erb @@ -1,4 +1,3 @@ -# [START storage_hmac_key] # Create a new service account resource "google_service_account" "service_account" { account_id = "<%= ctx[:vars]['account_id'] %>" @@ -8,4 +7,3 @@ resource "google_service_account" "service_account" { resource "google_storage_hmac_key" "<%= ctx[:primary_resource_id] %>" { service_account_email = google_service_account.service_account.email } -# [END storage_hmac_key] diff --git a/mmv1/templates/terraform/examples/storage_make_data_public.tf.erb 
b/mmv1/templates/terraform/examples/storage_make_data_public.tf.erb index 42553e4ad582..04a34e657955 100644 --- a/mmv1/templates/terraform/examples/storage_make_data_public.tf.erb +++ b/mmv1/templates/terraform/examples/storage_make_data_public.tf.erb @@ -5,7 +5,6 @@ resource "google_storage_bucket" "<%= ctx[:primary_resource_id] %>" { uniform_bucket_level_access = true } -# [START storage_make_data_public] # Make bucket public resource "google_storage_bucket_iam_member" "member" { provider = google-beta @@ -13,4 +12,3 @@ resource "google_storage_bucket_iam_member" "member" { role = "roles/storage.objectViewer" member = "allUsers" } -# [END storage_make_data_public] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/storage_new_bucket.tf.erb b/mmv1/templates/terraform/examples/storage_new_bucket.tf.erb index b789db57a4e6..9b8312e8c769 100644 --- a/mmv1/templates/terraform/examples/storage_new_bucket.tf.erb +++ b/mmv1/templates/terraform/examples/storage_new_bucket.tf.erb @@ -1,4 +1,3 @@ -# [START storage_create_new_bucket_tf] # Create new storage bucket in the US multi-region # with coldline storage resource "google_storage_bucket" "<%= ctx[:primary_resource_id] %>" { @@ -8,9 +7,7 @@ resource "google_storage_bucket" "<%= ctx[:primary_resource_id] %>" { uniform_bucket_level_access = true } -# [END storage_create_new_bucket_tf] -# [START storage_upload_object_tf] # Upload files # Discussion about using tf to upload a large number of objects # https://stackoverflow.com/questions/68455132/terraform-copy-multiple-files-to-bucket-at-the-same-time-bucket-creation @@ -24,9 +21,7 @@ resource "google_storage_bucket_object" "default" { content_type = "text/plain" bucket = google_storage_bucket.static.id } -# [END storage_upload_object_tf] -# [START storage_get_object_metadata_tf] # Get object metadata data "google_storage_bucket_object" "default" { name = google_storage_bucket_object.default.name @@ -36,9 +31,7 @@ data "google_storage_bucket_object" 
"default" { output "object_metadata" { value = data.google_storage_bucket_object.default } -# [END storage_get_object_metadata_tf] -# [START storage_get_bucket_metadata_tf] # Get bucket metadata data "google_storage_bucket" "default" { name = google_storage_bucket.static.id @@ -47,5 +40,4 @@ data "google_storage_bucket" "default" { output "bucket_metadata" { value = data.google_storage_bucket.default } -# [END storage_get_bucket_metadata_tf] diff --git a/mmv1/templates/terraform/examples/storage_object_lifecycle_setting.tf.erb b/mmv1/templates/terraform/examples/storage_object_lifecycle_setting.tf.erb index 908b65b93e4c..82cbed0a9167 100644 --- a/mmv1/templates/terraform/examples/storage_object_lifecycle_setting.tf.erb +++ b/mmv1/templates/terraform/examples/storage_object_lifecycle_setting.tf.erb @@ -1,4 +1,3 @@ -# [START storage_create_lifecycle_setting_tf] resource "google_storage_bucket" "<%= ctx[:primary_resource_id] %>" { provider = google-beta name = "<%= ctx[:vars]['example_bucket'] %>" @@ -14,4 +13,3 @@ resource "google_storage_bucket" "<%= ctx[:primary_resource_id] %>" { } } } -# [END storage_create_lifecycle_setting_tf] diff --git a/mmv1/templates/terraform/examples/storage_pubsub_notifications.tf.erb b/mmv1/templates/terraform/examples/storage_pubsub_notifications.tf.erb index fce786c6f763..f1a5943a9de2 100644 --- a/mmv1/templates/terraform/examples/storage_pubsub_notifications.tf.erb +++ b/mmv1/templates/terraform/examples/storage_pubsub_notifications.tf.erb @@ -1,4 +1,3 @@ -# [START storage_create_pubsub_notifications_tf] // Create a Pub/Sub notification. 
resource "google_storage_notification" "notification" { provider = google-beta @@ -33,4 +32,3 @@ resource "google_pubsub_topic" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['your_topic_name'] %>" provider = google-beta } -# [END storage_create_pubsub_notifications_tf] diff --git a/mmv1/templates/terraform/examples/storage_static_website.tf.erb b/mmv1/templates/terraform/examples/storage_static_website.tf.erb index 1150c4865d2b..d8e3dfa10007 100644 --- a/mmv1/templates/terraform/examples/storage_static_website.tf.erb +++ b/mmv1/templates/terraform/examples/storage_static_website.tf.erb @@ -1,4 +1,3 @@ -# [START storage_static_website_create_bucket_tf] # Create new storage bucket in the US multi-region # with coldline storage and settings for main_page_suffix and not_found_page resource "google_storage_bucket" "<%= ctx[:primary_resource_id] %>" { @@ -10,18 +9,14 @@ resource "google_storage_bucket" "<%= ctx[:primary_resource_id] %>" { not_found_page = "<%= ctx[:vars]['main_page_suffix'] %>" } } -# [END storage_static_website_create_bucket_tf] -# [START storage_static_website_make_bucket_public_tf] # Make bucket public by granting allUsers READER access resource "google_storage_bucket_access_control" "public_rule" { bucket = google_storage_bucket.static_website.id role = "READER" entity = "allUsers" } -# [END storage_static_website_make_bucket_public_tf] -# [START storage_static_website_upload_files_tf] # Upload a simple index.html page to the bucket resource "google_storage_bucket_object" "indexpage" { name = "<%= ctx[:vars]['main_page_suffix'] %>" @@ -37,4 +32,3 @@ resource "google_storage_bucket_object" "errorpage" { content_type = "text/html" bucket = google_storage_bucket.static_website.id } -# [END storage_static_website_upload_files_tf] diff --git a/mmv1/templates/terraform/examples/target_grpc_proxy_basic.tf.erb b/mmv1/templates/terraform/examples/target_grpc_proxy_basic.tf.erb index 228988c6e871..4a1bc8552af3 100644 --- 
a/mmv1/templates/terraform/examples/target_grpc_proxy_basic.tf.erb +++ b/mmv1/templates/terraform/examples/target_grpc_proxy_basic.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_grpc_proxy_basic] resource "google_compute_target_grpc_proxy" "default" { name = "<%= ctx[:vars]['proxy_name'] %>" url_map = google_compute_url_map.urlmap.id @@ -86,4 +85,3 @@ resource "google_compute_health_check" "default" { grpc_service_name = "testservice" } } -# [END cloudloadbalancing_target_grpc_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/target_http_proxy_basic.tf.erb b/mmv1/templates/terraform/examples/target_http_proxy_basic.tf.erb index 8e947a9477f1..a61871f01920 100644 --- a/mmv1/templates/terraform/examples/target_http_proxy_basic.tf.erb +++ b/mmv1/templates/terraform/examples/target_http_proxy_basic.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_http_proxy_basic] resource "google_compute_target_http_proxy" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['target_http_proxy_name'] %>" url_map = google_compute_url_map.default.id @@ -39,4 +38,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_http_proxy_basic] diff --git a/mmv1/templates/terraform/examples/target_http_proxy_http_keep_alive_timeout.tf.erb b/mmv1/templates/terraform/examples/target_http_proxy_http_keep_alive_timeout.tf.erb index de469704cf0d..f4d1ea5cfb08 100644 --- a/mmv1/templates/terraform/examples/target_http_proxy_http_keep_alive_timeout.tf.erb +++ b/mmv1/templates/terraform/examples/target_http_proxy_http_keep_alive_timeout.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_http_proxy_http_keep_alive_timeout] resource "google_compute_target_http_proxy" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['target_http_proxy_name'] %>" http_keep_alive_timeout_sec = 610 @@ -41,4 +40,3 @@ resource "google_compute_http_health_check" 
"default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_http_proxy_http_keep_alive_timeout] diff --git a/mmv1/templates/terraform/examples/target_http_proxy_https_redirect.tf.erb b/mmv1/templates/terraform/examples/target_http_proxy_https_redirect.tf.erb index fe8c150c8767..a23beb01b55a 100644 --- a/mmv1/templates/terraform/examples/target_http_proxy_https_redirect.tf.erb +++ b/mmv1/templates/terraform/examples/target_http_proxy_https_redirect.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_http_proxy_https_redirect] resource "google_compute_target_http_proxy" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['target_http_proxy_name'] %>" url_map = google_compute_url_map.default.id @@ -11,4 +10,3 @@ resource "google_compute_url_map" "default" { strip_query = false } } -# [END cloudloadbalancing_target_http_proxy_https_redirect] diff --git a/mmv1/templates/terraform/examples/target_https_proxy_basic.tf.erb b/mmv1/templates/terraform/examples/target_https_proxy_basic.tf.erb index ed26be823ec9..e6481bb07bff 100644 --- a/mmv1/templates/terraform/examples/target_https_proxy_basic.tf.erb +++ b/mmv1/templates/terraform/examples/target_https_proxy_basic.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_https_proxy_basic] resource "google_compute_target_https_proxy" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['target_https_proxy_name'] %>" url_map = google_compute_url_map.default.id @@ -48,4 +47,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_https_proxy_basic] diff --git a/mmv1/templates/terraform/examples/target_https_proxy_http_keep_alive_timeout.tf.erb b/mmv1/templates/terraform/examples/target_https_proxy_http_keep_alive_timeout.tf.erb index 12a281e6e3af..f4afae77e3b9 100644 --- a/mmv1/templates/terraform/examples/target_https_proxy_http_keep_alive_timeout.tf.erb +++ 
b/mmv1/templates/terraform/examples/target_https_proxy_http_keep_alive_timeout.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_https_proxy_http_keep_alive_timeout] resource "google_compute_target_https_proxy" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['target_https_proxy_name'] %>" http_keep_alive_timeout_sec = 610 @@ -50,4 +49,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_https_proxy_http_keep_alive_timeout] diff --git a/mmv1/templates/terraform/examples/target_https_proxy_mtls.tf.erb b/mmv1/templates/terraform/examples/target_https_proxy_mtls.tf.erb index c77f12732a4d..62ec99bc0832 100644 --- a/mmv1/templates/terraform/examples/target_https_proxy_mtls.tf.erb +++ b/mmv1/templates/terraform/examples/target_https_proxy_mtls.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_https_proxy_mtls] data "google_project" "project" { provider = google-beta } @@ -90,4 +89,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_https_proxy_mtls] diff --git a/mmv1/templates/terraform/examples/target_ssl_proxy_basic.tf.erb b/mmv1/templates/terraform/examples/target_ssl_proxy_basic.tf.erb index 4a38bad37ac3..8bbfcf259643 100644 --- a/mmv1/templates/terraform/examples/target_ssl_proxy_basic.tf.erb +++ b/mmv1/templates/terraform/examples/target_ssl_proxy_basic.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_ssl_proxy_basic] resource "google_compute_target_ssl_proxy" "default" { name = "<%= ctx[:vars]['target_ssl_proxy_name'] %>" backend_service = google_compute_backend_service.default.id @@ -25,4 +24,3 @@ resource "google_compute_health_check" "default" { port = "443" } } -# [END cloudloadbalancing_target_ssl_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/target_tcp_proxy_basic.tf.erb 
b/mmv1/templates/terraform/examples/target_tcp_proxy_basic.tf.erb index 5d1bd8b4eb8b..69ad592fdeb0 100644 --- a/mmv1/templates/terraform/examples/target_tcp_proxy_basic.tf.erb +++ b/mmv1/templates/terraform/examples/target_tcp_proxy_basic.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_tcp_proxy_basic] resource "google_compute_target_tcp_proxy" "default" { name = "<%= ctx[:vars]['target_tcp_proxy_name'] %>" backend_service = google_compute_backend_service.default.id @@ -21,4 +20,3 @@ resource "google_compute_health_check" "default" { port = "443" } } -# [END cloudloadbalancing_target_tcp_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/url_map_bucket_and_service.tf.erb b/mmv1/templates/terraform/examples/url_map_bucket_and_service.tf.erb index 3d1bad085931..d6aeb563f47b 100644 --- a/mmv1/templates/terraform/examples/url_map_bucket_and_service.tf.erb +++ b/mmv1/templates/terraform/examples/url_map_bucket_and_service.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_url_map_bucket_and_service] resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" @@ -73,4 +72,3 @@ resource "google_storage_bucket" "static" { name = "<%= ctx[:vars]['storage_bucket_name'] %>" location = "US" } -# [END cloudloadbalancing_url_map_bucket_and_service] diff --git a/mmv1/templates/terraform/examples/url_map_header_based_routing.tf.erb b/mmv1/templates/terraform/examples/url_map_header_based_routing.tf.erb index 791507a5d5b4..eea9d8b7e2f6 100644 --- a/mmv1/templates/terraform/examples/url_map_header_based_routing.tf.erb +++ b/mmv1/templates/terraform/examples/url_map_header_based_routing.tf.erb @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_header_based_routing] resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "header-based routing example" @@ -73,4 +72,3 @@ resource 
"google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END trafficdirector_url_map_header_based_routing] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/url_map_parameter_based_routing.tf.erb b/mmv1/templates/terraform/examples/url_map_parameter_based_routing.tf.erb index 812a488c320c..ee88140a1561 100644 --- a/mmv1/templates/terraform/examples/url_map_parameter_based_routing.tf.erb +++ b/mmv1/templates/terraform/examples/url_map_parameter_based_routing.tf.erb @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_parameter_based_routing] resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "parameter-based routing example" @@ -73,4 +72,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END trafficdirector_url_map_parameter_based_routing] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/url_map_path_template_match.tf.erb b/mmv1/templates/terraform/examples/url_map_path_template_match.tf.erb index 136d3ed5cc4c..612f669aaae4 100644 --- a/mmv1/templates/terraform/examples/url_map_path_template_match.tf.erb +++ b/mmv1/templates/terraform/examples/url_map_path_template_match.tf.erb @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_url_map_path_template_match] resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" @@ -74,4 +73,3 @@ resource "google_storage_bucket" "static" { name = "<%= ctx[:vars]['storage_bucket_name'] %>" location = "US" } -# [END cloudloadbalancing_url_map_path_template_match] diff --git a/mmv1/templates/terraform/examples/url_map_traffic_director_path.tf.erb b/mmv1/templates/terraform/examples/url_map_traffic_director_path.tf.erb index 810aa8a04398..71a326b5614b 100644 --- a/mmv1/templates/terraform/examples/url_map_traffic_director_path.tf.erb +++ 
b/mmv1/templates/terraform/examples/url_map_traffic_director_path.tf.erb @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_path] resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" @@ -102,4 +101,3 @@ resource "google_compute_health_check" "default" { port = 80 } } -# [END trafficdirector_url_map_traffic_director_path] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/url_map_traffic_director_path_partial.tf.erb b/mmv1/templates/terraform/examples/url_map_traffic_director_path_partial.tf.erb index 7891954cfafc..aea9bc2f8333 100644 --- a/mmv1/templates/terraform/examples/url_map_traffic_director_path_partial.tf.erb +++ b/mmv1/templates/terraform/examples/url_map_traffic_director_path_partial.tf.erb @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_path_partial] resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" @@ -72,4 +71,3 @@ resource "google_compute_health_check" "default" { } } -# [END trafficdirector_url_map_traffic_director_path_partial] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/url_map_traffic_director_route.tf.erb b/mmv1/templates/terraform/examples/url_map_traffic_director_route.tf.erb index 0c371c226dd2..e959621a45e1 100644 --- a/mmv1/templates/terraform/examples/url_map_traffic_director_route.tf.erb +++ b/mmv1/templates/terraform/examples/url_map_traffic_director_route.tf.erb @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_route] resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" @@ -82,4 +81,3 @@ resource "google_compute_health_check" "default" { port = 80 } } -# [END trafficdirector_url_map_traffic_director_route] \ No newline at end of file diff --git 
a/mmv1/templates/terraform/examples/url_map_traffic_director_route_partial.tf.erb b/mmv1/templates/terraform/examples/url_map_traffic_director_route_partial.tf.erb index 5daf5a006078..c0c8777ee9f6 100644 --- a/mmv1/templates/terraform/examples/url_map_traffic_director_route_partial.tf.erb +++ b/mmv1/templates/terraform/examples/url_map_traffic_director_route_partial.tf.erb @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_route_partial] resource "google_compute_url_map" "<%= ctx[:primary_resource_id] %>" { name = "<%= ctx[:vars]['url_map_name'] %>" description = "a description" @@ -53,4 +52,3 @@ resource "google_compute_health_check" "default" { port = 80 } } -# [END trafficdirector_url_map_traffic_director_route_partial] \ No newline at end of file From cfd3149c30968c2139d18252f2089921d75fcede Mon Sep 17 00:00:00 2001 From: arungantasala <139814434+arungantasala@users.noreply.github.com> Date: Mon, 1 Jul 2024 14:41:18 -0700 Subject: [PATCH 262/356] Promote max_run_duration, on_instance_stop_action fields on instance, instance template resources to GA. 
(#11041) --- .../compute/compute_instance_helpers.go.erb | 28 +++++++------------ .../compute/resource_compute_instance.go.erb | 10 ++----- .../resource_compute_instance_template.go.erb | 6 ++-- ...urce_compute_instance_template_test.go.erb | 8 ------ .../resource_compute_instance_test.go.erb | 8 ------ ...ce_compute_region_instance_template.go.erb | 2 +- ...mpute_region_instance_template_test.go.erb | 9 ------ .../docs/r/compute_instance.html.markdown | 6 ++-- .../r/compute_instance_template.html.markdown | 6 ++-- 9 files changed, 23 insertions(+), 60 deletions(-) diff --git a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb index b0a01710a046..3a658fb6e76a 100644 --- a/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb +++ b/mmv1/third_party/terraform/services/compute/compute_instance_helpers.go.erb @@ -134,7 +134,6 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { scheduling.InstanceTerminationAction = v.(string) scheduling.ForceSendFields = append(scheduling.ForceSendFields, "InstanceTerminationAction") } -<% unless version == 'ga' -%> if v, ok := original["max_run_duration"]; ok { transformedMaxRunDuration, err := expandComputeMaxRunDuration(v) if err != nil { @@ -143,9 +142,6 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { scheduling.MaxRunDuration = transformedMaxRunDuration scheduling.ForceSendFields = append(scheduling.ForceSendFields, "MaxRunDuration") } - if v, ok := original["maintenance_interval"]; ok { - scheduling.MaintenanceInterval = v.(string) - } if v, ok := original["on_instance_stop_action"]; ok { transformedOnInstanceStopAction, err := expandComputeOnInstanceStopAction(v) @@ -155,6 +151,10 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { scheduling.OnInstanceStopAction = transformedOnInstanceStopAction scheduling.ForceSendFields = 
append(scheduling.ForceSendFields, "OnInstanceStopAction") } +<% unless version == 'ga' -%> + if v, ok := original["maintenance_interval"]; ok { + scheduling.MaintenanceInterval = v.(string) + } <% end -%> if v, ok := original["local_ssd_recovery_timeout"]; ok { transformedLocalSsdRecoveryTimeout, err := expandComputeLocalSsdRecoveryTimeout(v) @@ -167,7 +167,6 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { return scheduling, nil } -<% unless version == 'ga' -%> func expandComputeMaxRunDuration(v interface{}) (*compute.Duration, error) { l := v.([]interface{}) duration := compute.Duration{} @@ -219,7 +218,6 @@ func expandComputeOnInstanceStopAction(v interface{}) (*compute.SchedulingOnInst return &onInstanceStopAction, nil } -<% end -%> func expandComputeLocalSsdRecoveryTimeout(v interface{}) (*compute.Duration, error) { l := v.([]interface{}) @@ -267,16 +265,18 @@ func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} { schedulingMap["automatic_restart"] = *resp.AutomaticRestart } -<% unless version == 'ga' -%> if resp.MaxRunDuration != nil { schedulingMap["max_run_duration"] = flattenComputeMaxRunDuration(resp.MaxRunDuration) } - if resp.MaintenanceInterval != "" { - schedulingMap["maintenance_interval"] = resp.MaintenanceInterval - } + if resp.OnInstanceStopAction != nil { schedulingMap["on_instance_stop_action"] = flattenOnInstanceStopAction(resp.OnInstanceStopAction) } + +<% unless version == 'ga' -%> + if resp.MaintenanceInterval != "" { + schedulingMap["maintenance_interval"] = resp.MaintenanceInterval + } <% end -%> if resp.LocalSsdRecoveryTimeout != nil { @@ -296,7 +296,6 @@ func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} { return []map[string]interface{}{schedulingMap} } -<% unless version == 'ga' -%> func flattenComputeMaxRunDuration(v *compute.Duration) []interface{} { if v == nil { return nil @@ -315,7 +314,6 @@ func flattenOnInstanceStopAction(v 
*compute.SchedulingOnInstanceStopAction) []in transformed["discard_local_ssd"] = v.DiscardLocalSsd return []interface{}{transformed} } -<% end -%> func flattenComputeLocalSsdRecoveryTimeout(v *compute.Duration) []interface{} { if v == nil { @@ -688,11 +686,7 @@ func schedulingHasChangeRequiringReboot(d *schema.ResourceData) bool { oScheduling := o.([]interface{})[0].(map[string]interface{}) newScheduling := n.([]interface{})[0].(map[string]interface{}) -<% unless version == 'ga' -%> return hasNodeAffinitiesChanged(oScheduling, newScheduling) || hasMaxRunDurationChanged(oScheduling, newScheduling) -<% else -%> - return hasNodeAffinitiesChanged(oScheduling, newScheduling) -<% end -%> } // Terraform doesn't correctly calculate changes on schema.Set, so we do it manually @@ -734,7 +728,6 @@ func schedulingHasChangeWithoutReboot(d *schema.ResourceData) bool { return false } -<% unless version == 'ga' -%> func hasMaxRunDurationChanged(oScheduling, nScheduling map[string]interface{}) bool { oMrd := oScheduling["max_run_duration"].([]interface{}) nMrd := nScheduling["max_run_duration"].([]interface{}) @@ -758,7 +751,6 @@ func hasMaxRunDurationChanged(oScheduling, nScheduling map[string]interface{}) b return false } -<% end -%> func hasNodeAffinitiesChanged(oScheduling, newScheduling map[string]interface{}) bool { oldNAs := oScheduling["node_affinities"].(*schema.Set).List() diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb index 68f26d8eaa4c..3f97ff24517c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance.go.erb @@ -83,10 +83,10 @@ var ( "scheduling.0.min_node_cpus", "scheduling.0.provisioning_model", "scheduling.0.instance_termination_action", -<% unless version == 'ga' -%> "scheduling.0.max_run_duration", - 
"scheduling.0.maintenance_interval", "scheduling.0.on_instance_stop_action", +<% unless version == 'ga' -%> + "scheduling.0.maintenance_interval", <% end -%> "scheduling.0.local_ssd_recovery_timeout", } @@ -826,7 +826,6 @@ func ResourceComputeInstance() *schema.Resource { AtLeastOneOf: schedulingKeys, Description: `Specifies the action GCE should take when SPOT VM is preempted.`, }, -<% unless version == 'ga' -%> "max_run_duration" : { Type: schema.TypeList, Optional: true, @@ -872,6 +871,7 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, +<% unless version == 'ga' -%> "maintenance_interval": { Type: schema.TypeString, Optional: true, @@ -2433,11 +2433,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err desiredStatus := d.Get("desired_status").(string) if statusBeforeUpdate == "RUNNING" && desiredStatus != "TERMINATED" && !d.Get("allow_stopping_for_update").(bool) { -<% unless version == 'ga' -%> return fmt.Errorf("Changing the machine_type, min_cpu_platform, service_account, enable_display, shielded_instance_config, scheduling.node_affinities, scheduling.max_run_duration " + -<% else -%> - return fmt.Errorf("Changing the machine_type, min_cpu_platform, service_account, enable_display, shielded_instance_config, scheduling.node_affinities " + -<% end -%> "or network_interface.[#d].(network/subnetwork/subnetwork_project) or advanced_machine_features on a started instance requires stopping it. " + "To acknowledge this, please set allow_stopping_for_update = true in your config. 
" + "You can also stop it by setting desired_status = \"TERMINATED\", but the instance will not be restarted after the update.") diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb index 29496540192c..bda56492c283 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template.go.erb @@ -35,10 +35,10 @@ var ( "scheduling.0.min_node_cpus", "scheduling.0.provisioning_model", "scheduling.0.instance_termination_action", -<% unless version == 'ga' -%> "scheduling.0.max_run_duration", - "scheduling.0.maintenance_interval", "scheduling.0.on_instance_stop_action", +<% unless version == 'ga' -%> + "scheduling.0.maintenance_interval", <% end -%> "scheduling.0.local_ssd_recovery_timeout", } @@ -715,7 +715,6 @@ Google Cloud KMS.`, AtLeastOneOf: schedulingInstTemplateKeys, Description: `Specifies the action GCE should take when SPOT VM is preempted.`, }, -<% unless version == 'ga' -%> "max_run_duration" : { Type: schema.TypeList, Optional: true, @@ -761,6 +760,7 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, +<% unless version == 'ga' -%> "maintenance_interval" : { Type: schema.TypeString, Optional: true, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb index f1a511161174..f66aee66a6a9 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb @@ -1076,7 +1076,6 @@ func TestAccComputeInstanceTemplate_spot(t *testing.T) { }) } -<% unless version == 'ga' -%> func TestAccComputeInstanceTemplate_spot_maxRunDuration_deleteTerminationAction(t 
*testing.T) { t.Parallel() @@ -1219,7 +1218,6 @@ func TestAccComputeInstanceTemplate_spot_maxRunDuration(t *testing.T) { }, }) } -<% end -%> func TestAccComputeInstanceTemplate_localSsdRecoveryTimeout(t *testing.T) { t.Parallel() @@ -1770,7 +1768,6 @@ func testAccCheckComputeInstanceTemplateInstanceTerminationAction(instanceTempla } -<% unless version == 'ga' -%> func testAccCheckComputeInstanceTemplateMaxRunDuration(instanceTemplate *compute.InstanceTemplate, instance_max_run_duration_want compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { if !reflect.DeepEqual(*instanceTemplate.Properties.Scheduling.MaxRunDuration, instance_max_run_duration_want) { @@ -1780,7 +1777,6 @@ func testAccCheckComputeInstanceTemplateMaxRunDuration(instanceTemplate *compute return nil } } -<% end -%> func testAccCheckComputeInstanceTemplateLocalSsdRecoveryTimeout(instanceTemplate *compute.InstanceTemplate, instance_local_ssd_recovery_timeout_want compute.Duration) resource.TestCheckFunc { @@ -3846,12 +3842,10 @@ resource "google_compute_instance_template" "foobar" { automatic_restart = false provisioning_model = "SPOT" instance_termination_action = "%s" -<% unless version == 'ga' -%> max_run_duration { nanos = 123 seconds = 60 } -<% end -%> } @@ -3866,7 +3860,6 @@ resource "google_compute_instance_template" "foobar" { `, suffix, instanceTerminationAction) } -<% unless version == 'ga' -%> func testAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3914,7 +3907,6 @@ resource "google_compute_instance_template" "foobar" { } `, suffix) } -<% end -%> func testAccComputeInstanceTemplate_localSsdRecoveryTimeout(suffix string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index 682c7c6e8ace..2f8eadb0c39d 
100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -2636,7 +2636,6 @@ func TestAccComputeInstance_spotVM_update(t *testing.T) { }) } -<% unless version == 'ga' -%> func TestAccComputeInstance_maxRunDuration_update(t *testing.T) { t.Parallel() @@ -2824,7 +2823,6 @@ func TestAccComputeInstance_spotVM_maxRunDuration_update(t *testing.T) { }, }) } -<% end -%> func TestAccComputeInstance_localSsdRecoveryTimeout(t *testing.T) { t.Parallel() @@ -3780,7 +3778,6 @@ func testAccCheckComputeResourcePolicy(instance *compute.Instance, scheduleName } } -<% unless version == 'ga' -%> func testAccCheckComputeInstanceMaxRunDuration(instance *compute.Instance, instanceMaxRunDurationWant compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { if instance == nil { @@ -3797,7 +3794,6 @@ func testAccCheckComputeInstanceMaxRunDuration(instance *compute.Instance, insta return nil } } -<% end -%> func testAccCheckComputeInstanceLocalSsdRecoveryTimeout(instance *compute.Instance, instanceLocalSsdRecoveryTiemoutWant compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -8345,7 +8341,6 @@ resource "google_compute_instance" "foobar" { `, instance) } -<% unless version == 'ga' -%> func testAccComputeInstance_standardVM_maxRunDuration(instance string, instanceTerminationAction string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -8456,7 +8451,6 @@ resource "google_compute_instance" "foobar" { } `, instance, instanceTerminationAction) } -<% end -%> func testAccComputeInstance_spotVM_maxRunDuration(instance string, instanceTerminationAction string) string { @@ -8486,12 +8480,10 @@ resource "google_compute_instance" "foobar" { automatic_restart = false preemptible = true instance_termination_action = "%s" -<% unless version == 'ga' -%> max_run_duration { nanos = 123 seconds = 60 
} -<% end -%> } } `, instance, instanceTerminationAction) diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb index 99de89c3dd39..00639a1e6ba8 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template.go.erb @@ -675,7 +675,6 @@ Google Cloud KMS.`, AtLeastOneOf: schedulingInstTemplateKeys, Description: `Specifies the action GCE should take when SPOT VM is preempted.`, }, -<% unless version == 'ga' -%> "max_run_duration" : { Type: schema.TypeList, Optional: true, @@ -721,6 +720,7 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, +<% unless version == 'ga' -%> "maintenance_interval" : { Type: schema.TypeString, Optional: true, diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb index 2947f3ccb095..4d79d38f47db 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb @@ -1025,7 +1025,6 @@ func TestAccComputeRegionInstanceTemplate_spot(t *testing.T) { }) } -<% unless version == 'ga' -%> func TestAccComputeRegionInstanceTemplate_spot_maxRunDuration(t *testing.T) { t.Parallel() @@ -1093,7 +1092,6 @@ func TestAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(t }, }) } -<% end -%> func TestAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(t *testing.T) { t.Parallel() @@ -1485,7 +1483,6 @@ func testAccCheckComputeRegionInstanceTemplateInstanceTerminationAction(instance } } -<% unless version == 'ga' -%> func 
testAccCheckComputeRegionInstanceTemplateMaxRunDuration(instanceTemplate *compute.InstanceTemplate, instance_max_run_duration_want compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { if !reflect.DeepEqual(*instanceTemplate.Properties.Scheduling.MaxRunDuration, instance_max_run_duration_want) { @@ -1495,7 +1492,6 @@ func testAccCheckComputeRegionInstanceTemplateMaxRunDuration(instanceTemplate *c return nil } } -<% end -%> func testAccCheckComputeRegionInstanceTemplateLocalSsdRecoveryTimeout(instanceTemplate *compute.InstanceTemplate, instance_local_ssd_recovery_timeout_want compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -3388,13 +3384,10 @@ resource "google_compute_region_instance_template" "foobar" { automatic_restart = false provisioning_model = "SPOT" instance_termination_action = "DELETE" -<% unless version == 'ga' -%> max_run_duration { nanos = 123 seconds = 60 } -<% end -%> - } metadata = { @@ -3408,7 +3401,6 @@ resource "google_compute_region_instance_template" "foobar" { `, suffix) } -<% unless version == 'ga' -%> func testAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3457,7 +3449,6 @@ resource "google_compute_region_instance_template" "foobar" { } `, suffix) } -<% end -%> func testAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(suffix string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown index 25baeb72ef4c..311501bfe04d 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance.html.markdown @@ -430,10 +430,10 @@ specified, then this instance will have no external IPv6 Internet access. 
Struct * `instance_termination_action` - (Optional) Describe the type of termination action for VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) -* `max_run_duration` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Structure is [documented below](#nested_max_run_duration). +* `max_run_duration` - (Optional) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Structure is [documented below](#nested_max_run_duration). -* `on_instance_stop_action` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the action to be performed when the instance is terminated using `max_run_duration` and `STOP` `instance_termination_action`. Only support `true` `discard_local_ssd` at this point. Structure is [documented below](#nested_on_instance_stop_action). +* `on_instance_stop_action` - (Optional) Specifies the action to be performed when the instance is terminated using `max_run_duration` and `STOP` `instance_termination_action`. Only support `true` `discard_local_ssd` at this point. Structure is [documented below](#nested_on_instance_stop_action). * `maintenance_interval` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the frequency of planned maintenance events. The accepted values are: `PERIODIC`. @@ -463,7 +463,7 @@ specified, then this instance will have no external IPv6 Internet access. 
Struct The `on_instance_stop_action` block supports: -* `discard_local_ssd` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Whether to discard local SSDs attached to the VM while terminating using `max_run_duration`. Only supports `true` at this point. +* `discard_local_ssd` - (Optional) Whether to discard local SSDs attached to the VM while terminating using `max_run_duration`. Only supports `true` at this point. The `guest_accelerator` block supports: diff --git a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown index e984cd86b4ee..3a04965be160 100644 --- a/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/compute_instance_template.html.markdown @@ -589,9 +589,9 @@ specified, then this instance will have no external IPv6 Internet access. Struct * `instance_termination_action` - (Optional) Describe the type of termination action for `SPOT` VM. Can be `STOP` or `DELETE`. Read more on [here](https://cloud.google.com/compute/docs/instances/create-use-spot) -* `max_run_duration` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Structure is [documented below](#nested_max_run_duration). +* `max_run_duration` - (Optional) The duration of the instance. Instance will run and be terminated after then, the termination action could be defined in `instance_termination_action`. Structure is [documented below](#nested_max_run_duration). 
-* `on_instance_stop_action` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the action to be performed when the instance is terminated using `max_run_duration` and `STOP` `instance_termination_action`. Only support `true` `discard_local_ssd` at this point. Structure is [documented below](#nested_on_instance_stop_action). +* `on_instance_stop_action` - (Optional) Specifies the action to be performed when the instance is terminated using `max_run_duration` and `STOP` `instance_termination_action`. Only support `true` `discard_local_ssd` at this point. Structure is [documented below](#nested_on_instance_stop_action). * `maintenance_interval` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Specifies the frequency of planned maintenance events. The accepted values are: `PERIODIC`. @@ -620,7 +620,7 @@ specified, then this instance will have no external IPv6 Internet access. Struct The `on_instance_stop_action` block supports: -* `discard_local_ssd` - (Optional) [Beta](https://terraform.io/docs/providers/google/guides/provider_versions.html) Whether to discard local SSDs attached to the VM while terminating using `max_run_duration`. Only supports `true` at this point. +* `discard_local_ssd` - (Optional) Whether to discard local SSDs attached to the VM while terminating using `max_run_duration`. Only supports `true` at this point. 
The `guest_accelerator` block supports: From b164fac9db42f672e4b0debb4200669c1dc042df Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Tue, 2 Jul 2024 08:58:14 -0700 Subject: [PATCH 263/356] Go rewrite alloydb yaml files (#11089) --- mmv1/api/type.go | 5 +- mmv1/products/alloydb/Cluster.yaml | 22 +- mmv1/products/alloydb/go_Backup.yaml | 246 ++++++++ mmv1/products/alloydb/go_Cluster.yaml | 562 ++++++++++++++++++ mmv1/products/alloydb/go_Instance.yaml | 337 +++++++++++ mmv1/products/alloydb/go_User.yaml | 128 ++++ mmv1/products/alloydb/go_product.yaml | 24 + .../terraform/schema_property.go.tmpl | 13 + mmv1/templates/terraform/yaml_conversion.erb | 3 + .../terraform/yaml_conversion_field.erb | 9 + 10 files changed, 1337 insertions(+), 12 deletions(-) create mode 100644 mmv1/products/alloydb/go_Backup.yaml create mode 100644 mmv1/products/alloydb/go_Cluster.yaml create mode 100644 mmv1/products/alloydb/go_Instance.yaml create mode 100644 mmv1/products/alloydb/go_User.yaml create mode 100644 mmv1/products/alloydb/go_product.yaml diff --git a/mmv1/api/type.go b/mmv1/api/type.go index b3c00d744c7a..7170a76262a8 100644 --- a/mmv1/api/type.go +++ b/mmv1/api/type.go @@ -141,6 +141,9 @@ type Type struct { ItemType *Type `yaml:"item_type"` MinSize string `yaml:"min_size"` MaxSize string `yaml:"max_size"` + // Adds a ValidateFunc to the item schema + ItemValidation resource.Validation `yaml:"item_validation"` + // __name ParentName string @@ -585,7 +588,7 @@ func (t Type) ExactlyOneOfList() []string { // Returns list of properties that needs required with their fields set. 
// func (t *Type) required_with_list() { func (t Type) RequiredWithList() []string { - if t.ResourceMetadata == nil || t.Parent() != nil { + if t.ResourceMetadata == nil { return []string{} } diff --git a/mmv1/products/alloydb/Cluster.yaml b/mmv1/products/alloydb/Cluster.yaml index 502547cd6df8..f323441301b3 100644 --- a/mmv1/products/alloydb/Cluster.yaml +++ b/mmv1/products/alloydb/Cluster.yaml @@ -106,6 +106,17 @@ custom_code: !ruby/object:Provider::Terraform::CustomCode pre_create: templates/terraform/pre_create/alloydb_cluster.go.erb pre_update: templates/terraform/pre_update/alloydb_cluster.go.erb pre_delete: templates/terraform/pre_delete/alloydb_cluster.go.erb +virtual_fields: + - !ruby/object:Api::Type::Enum + name: 'deletion_policy' + description: | + Policy to determine if the cluster should be deleted forcefully. + Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. + Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + values: + - :DEFAULT + - :FORCE + default_value: :DEFAULT parameters: - !ruby/object:Api::Type::String name: 'clusterId' @@ -556,14 +567,3 @@ properties: name: nanos description: | Fractions of seconds in nanoseconds. Currently, only the value 0 is supported. -virtual_fields: - - !ruby/object:Api::Type::Enum - name: 'deletion_policy' - description: | - Policy to determine if the cluster should be deleted forcefully. - Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. - Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. 
This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. - values: - - :DEFAULT - - :FORCE - default_value: :DEFAULT diff --git a/mmv1/products/alloydb/go_Backup.yaml b/mmv1/products/alloydb/go_Backup.yaml new file mode 100644 index 000000000000..301727973cb8 --- /dev/null +++ b/mmv1/products/alloydb/go_Backup.yaml @@ -0,0 +1,246 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Backup' +description: 'An AlloyDB Backup.' 
+references: + guides: + 'AlloyDB': 'https://cloud.google.com/alloydb/docs/' + api: 'https://cloud.google.com/alloydb/docs/reference/rest/v1/projects.locations.backups/create' +docs: +base_url: 'projects/{{project}}/locations/{{location}}/backups' +self_link: 'projects/{{project}}/locations/{{location}}/backups/{{backup_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/backups?backupId={{backup_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/backups/{{backup_id}}' +timeouts: + insert_minutes: 10 + update_minutes: 10 + delete_minutes: 10 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + result: + resource_inside_response: false +custom_code: + encoder: 'templates/terraform/encoders/go/alloydb_backup.tmpl' +examples: + - name: 'alloydb_backup_basic' + primary_resource_id: 'default' + vars: + alloydb_backup_id: 'alloydb-backup' + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true + - name: 'alloydb_backup_basic_test' + primary_resource_id: 'default' + vars: + alloydb_backup_id: 'alloydb-backup' + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_docs: true + - name: 'alloydb_backup_full' + primary_resource_id: 'default' + vars: + alloydb_backup_id: 'alloydb-backup' + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true + - name: 'alloydb_backup_full_test' + primary_resource_id: 'default' + vars: + alloydb_backup_id: 
'alloydb-backup' + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_docs: true + skip_vcr: true +parameters: + - name: 'backupId' + type: String + description: | + The ID of the alloydb backup. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The location where the alloydb backup should reside. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Output only. The name of the backup resource with the format: * projects/{project}/locations/{region}/backups/{backupId} + output: true + - name: 'displayName' + type: String + description: | + User-settable and human-readable display name for the Backup. + - name: 'uid' + type: String + description: | + Output only. The system-generated UID of the resource. The UID is assigned when the resource is created, and it is retained until it is deleted. + output: true + - name: 'createTime' + type: Time + description: | + Output only. Create time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'updateTime' + type: Time + description: | + Output only. Update time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'deleteTime' + type: Time + description: | + Output only. Delete time stamp. A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
+ Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'labels' + type: KeyValueLabels + description: | + User-defined labels for the alloydb backup. An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + immutable: false + - name: 'state' + type: String + description: Output only. The current state of the backup. + output: true + - name: 'type' + type: Enum + description: 'The backup type, which suggests the trigger for the backup.' + default_from_api: true + enum_values: + - 'TYPE_UNSPECIFIED' + - 'ON_DEMAND' + - 'AUTOMATED' + - 'CONTINUOUS' + - name: 'description' + type: String + description: 'User-provided description of the backup.' + - name: 'clusterUid' + type: String + description: 'Output only. The system-generated UID of the cluster which was used to create this resource.' + output: true + - name: 'clusterName' + type: String + description: + 'The full resource name of the backup source cluster (e.g., + projects/{project}/locations/{location}/clusters/{clusterId}).' + required: true + immutable: true + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - name: 'reconciling' + type: Boolean + description: | + Output only. Reconciling (https://google.aip.dev/128#reconciliation), if true, indicates that the service is actively updating the resource. + This can happen due to user-triggered updates or system actions like failover or maintenance. + output: true + - name: 'encryptionConfig' + type: NestedObject + description: | + EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). + properties: + - name: 'kmsKeyName' + type: String + description: | + The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. 
+ immutable: true + - name: 'encryptionInfo' + type: NestedObject + description: | + EncryptionInfo describes the encryption information of a cluster or a backup. + output: true + properties: + - name: 'encryptionType' + type: Enum + description: 'Output only. Type of encryption.' + output: true + enum_values: + - 'TYPE_UNSPECIFIED' + - 'GOOGLE_DEFAULT_ENCRYPTION' + - 'CUSTOMER_MANAGED_ENCRYPTION' + - name: 'kmsKeyVersions' + type: Array + description: | + Output only. Cloud KMS key versions that are being used to protect the database or the backup. + output: true + item_type: + type: String + - name: 'etag' + type: String + description: 'For Resource freshness validation (https://google.aip.dev/154)' + output: true + - name: 'annotations' + type: KeyValueAnnotations + description: | + Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels. https://google.aip.dev/128 + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + - name: 'sizeBytes' + type: String + description: 'Output only. The size of the backup in bytes.' + output: true + - name: 'expiryTime' + type: String + description: | + Output only. The time at which after the backup is eligible to be garbage collected. + It is the duration specified by the backup's retention policy, added to the backup's createTime. + output: true + - name: 'expiryQuantity' + type: NestedObject + description: | + Output only. The QuantityBasedExpiry of the backup, specified by the backup's retention policy. + Once the expiry quantity is over retention, the backup is eligible to be garbage collected. + output: true + properties: + - name: 'retentionCount' + type: Integer + description: | + Output only. The backup's position among its backups with the same source cluster and type, by descending chronological order create time (i.e. newest first). 
+ output: true + - name: 'totalRetentionCount' + type: Integer + description: | + Output only. The length of the quantity-based queue, specified by the backup's retention policy. + output: true diff --git a/mmv1/products/alloydb/go_Cluster.yaml b/mmv1/products/alloydb/go_Cluster.yaml new file mode 100644 index 000000000000..e88025f5671b --- /dev/null +++ b/mmv1/products/alloydb/go_Cluster.yaml @@ -0,0 +1,562 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Cluster' +description: 'A managed alloydb cluster.' +references: + guides: + 'AlloyDB': 'https://cloud.google.com/alloydb/docs/' + api: 'https://cloud.google.com/alloydb/docs/reference/rest/v1/projects.locations.clusters/create' +docs: + note: | + Users can promote a secondary cluster to a primary cluster with the help of `cluster_type`. + To promote, users have to set the `cluster_type` property as `PRIMARY` and remove the `secondary_config` field from cluster configuration. + [See Example](https://github.com/hashicorp/terraform-provider-google/pull/16413). 
+base_url: 'projects/{{project}}/locations/{{location}}/clusters' +self_link: 'projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}' +create_url: 'projects/{{project}}/locations/{{location}}/clusters?clusterId={{cluster_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}' + - '{{cluster_id}}' +timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 30 + update_minutes: 30 + delete_minutes: 30 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' +custom_code: + pre_create: 'templates/terraform/pre_create/go/alloydb_cluster.go.tmpl' + pre_update: 'templates/terraform/pre_update/go/alloydb_cluster.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/alloydb_cluster.go.tmpl' +skip_sweeper: true +examples: + - name: 'alloydb_cluster_basic' + primary_resource_id: 'default' + vars: + alloydb_cluster_name: 'alloydb-cluster' + - name: 'alloydb_cluster_full' + primary_resource_id: 'full' + vars: + alloydb_cluster_name: 'alloydb-cluster-full' + - name: 'alloydb_cluster_restore' + primary_resource_id: 'source' + vars: + alloydb_cluster_name: 'alloydb-source-cluster' + alloydb_backup_restored_cluster_name: 'alloydb-backup-restored' + alloydb_pitr_restored_cluster_name: 'alloydb-pitr-restored' + alloydb_backup_id: 'alloydb-backup' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedTestNetwork(t, "alloydb-instance-basic")' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true + - name: 'alloydb_secondary_cluster_basic' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + alloydb_primary_instance_name: 
'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + skip_test: true + - name: 'alloydb_secondary_cluster_basic_test' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + alloydb_primary_instance_name: 'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + network_name: 'alloydb-network' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + skip_docs: true +virtual_fields: + - name: 'deletion_policy' + description: | + Policy to determine if the cluster should be deleted forcefully. + Deleting a cluster forcefully, deletes the cluster and all its associated instances within the cluster. + Deleting a Secondary cluster with a secondary instance REQUIRES setting deletion_policy = "FORCE" otherwise an error is returned. This is needed as there is no support to delete just the secondary instance, and the only way to delete secondary instance is to delete the associated secondary cluster forcefully which also deletes the secondary instance. + type: Enum + default_value: "DEFAULT" +parameters: + - name: 'clusterId' + type: String + description: | + The ID of the alloydb cluster. + url_param_only: true + required: true + immutable: true + - name: 'location' + type: String + description: | + The location where the alloydb cluster should reside. + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The name of the cluster resource. + output: true + - name: 'uid' + type: String + description: | + The system-generated UID of the resource. + output: true + - name: 'labels' + type: KeyValueLabels + description: 'User-defined labels for the alloydb cluster.' 
+ immutable: false + - name: 'encryptionConfig' + type: NestedObject + description: | + EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). + properties: + - name: 'kmsKeyName' + type: String + description: | + The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. + immutable: true + - name: 'encryptionInfo' + type: NestedObject + description: | + EncryptionInfo describes the encryption information of a cluster or a backup. + output: true + properties: + - name: 'encryptionType' + type: Enum + description: 'Output only. Type of encryption.' + output: true + enum_values: + - 'TYPE_UNSPECIFIED' + - 'GOOGLE_DEFAULT_ENCRYPTION' + - 'CUSTOMER_MANAGED_ENCRYPTION' + - name: 'kmsKeyVersions' + type: Array + description: | + Output only. Cloud KMS key versions that are being used to protect the database or the backup. + output: true + item_type: + type: String + - name: 'continuousBackupInfo' + type: NestedObject + description: | + ContinuousBackupInfo describes the continuous backup properties of a cluster. + output: true + properties: + - name: 'enabledTime' + type: String + description: | + When ContinuousBackup was most recently enabled. Set to null if ContinuousBackup is not enabled. + output: true + - name: 'schedule' + type: Array + description: | + Days of the week on which a continuous backup is taken. Output only field. Ignored if passed into the request. + output: true + item_type: + type: String + - name: 'earliestRestorableTime' + type: String + description: | + The earliest restorable time that can be restored to. Output only field. + output: true + - name: 'encryptionInfo' + type: NestedObject + description: | + Output only. The encryption information for the WALs and backups required for ContinuousBackup. 
+ output: true + properties: + - name: 'encryptionType' + type: String + description: 'Output only. Type of encryption.' + output: true + - name: 'kmsKeyVersions' + type: Array + description: | + Output only. Cloud KMS key versions that are being used to protect the database or the backup. + output: true + item_type: + type: String + - name: 'network' + type: String + description: | + The relative resource name of the VPC network on which the instance can be accessed. It is specified in the following form: + + "projects/{projectNumber}/global/networks/{network_id}". + default_from_api: true + exactly_one_of: + - 'network' + - 'network_config.0.network' + - 'psc_config.0.psc_enabled' + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + deprecation_message: '`network` is deprecated and will be removed in a future major release. Instead, use `network_config` to define the network configuration.' + - name: 'networkConfig' + type: NestedObject + description: | + Metadata related to network configuration. + default_from_api: true + properties: + - name: 'network' + type: String + description: | + The resource link for the VPC network in which cluster resources are created and from which they are accessible via Private IP. The network must belong to the same project as the cluster. + It is specified in the form: "projects/{projectNumber}/global/networks/{network_id}". + exactly_one_of: + - 'network' + - 'network_config.0.network' + - 'psc_config.0.psc_enabled' + diff_suppress_func: 'tpgresource.ProjectNumberDiffSuppress' + - name: 'allocatedIpRange' + type: String + description: | + The name of the allocated IP range for the private IP AlloyDB cluster. For example: "google-managed-services-default". + If set, the instance IPs for this cluster will be created in the allocated range. + - name: 'displayName' + type: String + description: | + User-settable and human-readable display name for the Cluster. 
+ - name: 'etag' + type: String + description: 'For Resource freshness validation (https://google.aip.dev/154)' + - name: 'reconciling' + type: Boolean + description: | + Output only. Reconciling (https://google.aip.dev/128#reconciliation). + Set to true if the current state of Cluster does not match the user's intended state, and the service is actively updating the resource to reconcile them. + This can happen due to user-triggered updates or system actions like failover or maintenance. + output: true + - name: 'state' + type: String + description: 'Output only. The current serving state of the cluster.' + output: true + - name: 'annotations' + type: KeyValueAnnotations + description: | + Annotations to allow client tools to store small amount of arbitrary data. This is distinct from labels. https://google.aip.dev/128 + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + - name: 'databaseVersion' + type: String + description: | + The database engine major version. This is an optional field and it's populated at the Cluster creation time. This field cannot be changed after cluster creation. + default_from_api: true + - name: 'pscConfig' + type: NestedObject + description: 'Configuration for Private Service Connect (PSC) for the cluster.' + properties: + - name: 'pscEnabled' + type: Boolean + description: 'Create an instance that allows connections from Private Service Connect endpoints to the instance.' + - name: 'initialUser' + type: NestedObject + description: | + Initial user to setup during cluster creation. + ignore_read: true + custom_flatten: 'templates/terraform/custom_flatten/go/alloydb_cluster_input_user_flatten.go.tmpl' + properties: + - name: 'user' + type: String + description: | + The database username. + - name: 'password' + type: String + description: | + The initial password for the user. 
+ required: true + sensitive: true + - name: 'restoreBackupSource' + type: NestedObject + description: | + The source when restoring from a backup. Conflicts with 'restore_continuous_backup_source', both can't be set together. + immutable: true + ignore_read: true + conflicts: + - restore_continuous_backup_source + properties: + - name: 'backupName' + type: String + description: | + The name of the backup that this cluster is restored from. + required: true + immutable: true + - name: 'restoreContinuousBackupSource' + type: NestedObject + description: | + The source when restoring via point in time recovery (PITR). Conflicts with 'restore_backup_source', both can't be set together. + immutable: true + ignore_read: true + conflicts: + - restore_backup_source + properties: + - name: 'cluster' + type: String + description: | + The name of the source cluster that this cluster is restored from. + required: true + immutable: true + - name: 'pointInTime' + type: String + description: | + The point in time that this cluster is restored to, in RFC 3339 format. + required: true + immutable: true + - name: 'continuousBackupConfig' + type: NestedObject + description: | + The continuous backup config for this cluster. + + If no policy is provided then the default policy will be used. The default policy takes one backup a day and retains backups for 14 days. + default_from_api: true + properties: + - name: 'enabled' + type: Boolean + description: | + Whether continuous backup recovery is enabled. If not set, defaults to true. + send_empty_value: true + default_value: true + - name: 'recoveryWindowDays' + type: Integer + description: | + The numbers of days that are eligible to restore from using PITR. To support the entire recovery window, backups and logs are retained for one day more than the recovery window. + + If not set, defaults to 14 days. 
+ default_from_api: true + - name: 'encryptionConfig' + type: NestedObject + description: | + EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). + properties: + - name: 'kmsKeyName' + type: String + description: | + The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. + - name: 'automatedBackupPolicy' + type: NestedObject + description: | + The automated backup policy for this cluster. AutomatedBackupPolicy is disabled by default. + default_from_api: true + properties: + - name: 'backupWindow' + type: String + description: | + The length of the time window during which a backup can be taken. If a backup does not succeed within this time window, it will be canceled and considered failed. + + The backup window must be at least 5 minutes long. There is no upper bound on the window. If not set, it will default to 1 hour. + + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". + default_from_api: true + - name: 'location' + type: String + description: | + The location where the backup will be stored. Currently, the only supported option is to store the backup in the same region as the cluster. + default_from_api: true + - name: 'labels' + type: KeyValuePairs + description: + 'Labels to apply to backups created using this configuration.' + - name: 'encryptionConfig' + type: NestedObject + description: | + EncryptionConfig describes the encryption config of a cluster or a backup that is encrypted with a CMEK (customer-managed encryption key). + properties: + - name: 'kmsKeyName' + type: String + description: | + The fully-qualified resource name of the KMS key. Each Cloud KMS key is regionalized and has the following format: projects/[PROJECT]/locations/[REGION]/keyRings/[RING]/cryptoKeys/[KEY_NAME]. 
+ - name: 'weeklySchedule' + type: NestedObject + description: 'Weekly schedule for the Backup.' + default_from_api: true + properties: + - name: 'daysOfWeek' + type: Array + description: 'The days of the week to perform a backup. At least one day of the week must be provided.' + item_type: + type: Enum + description: 'The days of the week to perform a backup. At least one day of the week must be provided.' + enum_values: + - 'MONDAY' + - 'TUESDAY' + - 'WEDNESDAY' + - 'THURSDAY' + - 'FRIDAY' + - 'SATURDAY' + - 'SUNDAY' + min_size: 1 + - name: 'startTimes' + type: Array + description: | + The times during the day to start a backup. At least one start time must be provided. The start times are assumed to be in UTC and to be an exact hour (e.g., 04:00:00). + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/alloydb_cluster_input_automated_backup_policy_start_times_flatten.go.tmpl' + item_type: + type: NestedObject + properties: + - name: 'hours' + type: Integer + description: | + Hours of day in 24 hour format. Should be from 0 to 23. An API may choose to allow the value "24:00:00" for scenarios like business closing time. + - name: 'minutes' + type: Integer + description: | + Minutes of hour of day. Currently, only the value 0 is supported. + - name: 'seconds' + type: Integer + description: | + Seconds of minutes of the time. Currently, only the value 0 is supported. + - name: 'nanos' + type: Integer + description: | + Fractions of seconds in nanoseconds. Currently, only the value 0 is supported. + - name: 'timeBasedRetention' + type: NestedObject + description: | + Time-based Backup retention policy. Conflicts with 'quantity_based_retention', both can't be set together. + conflicts: + - automated_backup_policy.0.quantity_based_retention + properties: + - name: 'retentionPeriod' + type: String + description: | + The retention period. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". 
+ - name: 'quantityBasedRetention' + type: NestedObject + description: | + Quantity-based Backup retention policy to retain recent backups. Conflicts with 'time_based_retention', both can't be set together. + conflicts: + - automated_backup_policy.0.time_based_retention + properties: + - name: 'count' + type: Integer + description: | + The number of backups to retain. + - name: 'enabled' + type: Boolean + description: | + Whether automated backups are enabled. + default_from_api: true + - name: 'backupSource' + type: NestedObject + description: 'Cluster created from backup.' + output: true + properties: + - name: 'backupName' + type: String + description: 'The name of the backup resource.' + - name: 'migrationSource' + type: NestedObject + description: 'Cluster created via DMS migration.' + output: true + properties: + - name: 'hostPort' + type: String + description: + 'The host and port of the on-premises instance in host:port format' + - name: 'referenceId' + type: String + description: + 'Place holder for the external source identifier(e.g DMS job name) + that created the cluster.' + - name: 'sourceType' + type: String + description: 'Type of migration source.' + - name: 'clusterType' + type: Enum + description: | + The type of cluster. If not set, defaults to PRIMARY. + default_value: "PRIMARY" + enum_values: + - 'PRIMARY' + - 'SECONDARY' + - name: 'secondaryConfig' + type: NestedObject + description: | + Configuration of the secondary cluster for Cross Region Replication. This should be set if and only if the cluster is of type SECONDARY. + properties: + - name: 'primaryClusterName' + type: String + description: | + Name of the primary cluster must be in the format + 'projects/{project}/locations/{location}/clusters/{cluster_id}' + required: true + - name: 'maintenanceUpdatePolicy' + type: NestedObject + description: | + MaintenanceUpdatePolicy defines the policy for system updates. 
+ properties: + - name: 'maintenanceWindows' + type: Array + description: | + Preferred windows to perform maintenance. Currently limited to 1. + item_type: + description: | + specifies a preferred day and time for maintenance. + type: NestedObject + properties: + - name: 'day' + type: Enum + description: | + Preferred day of the week for maintenance, e.g. MONDAY, TUESDAY, etc. + required: true + enum_values: + - 'MONDAY' + - 'TUESDAY' + - 'WEDNESDAY' + - 'THURSDAY' + - 'FRIDAY' + - 'SATURDAY' + - 'SUNDAY' + - name: 'startTime' + type: NestedObject + description: | + Preferred time to start the maintenance operation on the specified day. Maintenance will start within 1 hour of this time. + required: true + properties: + - name: 'hours' + type: Integer + description: | + Hours of day in 24 hour format. Should be from 0 to 23. + required: true + - name: 'minutes' + type: Integer + description: | + Minutes of hour of day. Currently, only the value 0 is supported. + - name: 'seconds' + type: Integer + description: | + Seconds of minutes of the time. Currently, only the value 0 is supported. + - name: 'nanos' + type: Integer + description: | + Fractions of seconds in nanoseconds. Currently, only the value 0 is supported. diff --git a/mmv1/products/alloydb/go_Instance.yaml b/mmv1/products/alloydb/go_Instance.yaml new file mode 100644 index 000000000000..b6d15291e136 --- /dev/null +++ b/mmv1/products/alloydb/go_Instance.yaml @@ -0,0 +1,337 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Instance' +description: 'A managed alloydb cluster instance.' +references: + guides: + 'AlloyDB': 'https://cloud.google.com/alloydb/docs/' + api: 'https://cloud.google.com/alloydb/docs/reference/rest/v1/projects.locations.clusters.instances/create' +docs: + warning: | + Deleting an instance with instanceType = SECONDARY does not delete the secondary instance, and abandons it instead. + Use deletion_policy = "FORCE" in the associated secondary cluster and delete the cluster forcefully to delete the secondary cluster as well its associated secondary instance. + Users can undo the delete secondary instance action by importing the deleted secondary instance by calling terraform import. +base_url: '{{cluster}}/instances?instanceId={{instance_id}}' +self_link: '{{cluster}}/instances/{{instance_id}}' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/instances/{{instance_id}}' +timeouts: + insert_minutes: 120 + update_minutes: 120 + delete_minutes: 120 +autogen_async: true +async: + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 120 + update_minutes: 120 + delete_minutes: 120 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error' + message: 'message' + include_project: true +custom_code: + pre_create: 'templates/terraform/pre_create/go/alloydb_instance.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/alloydb_instance.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/alloydb_instance.go.tmpl' +skip_sweeper: true +examples: + - name: 'alloydb_instance_basic' + primary_resource_id: 'default' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + 
network_name: 'alloydb-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true + - name: 'alloydb_secondary_instance_basic' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + alloydb_primary_instance_name: 'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + alloydb_secondary_instance_name: 'alloydb-secondary-instance' + network_name: 'alloydb-secondary-network' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_test: true + - name: 'alloydb_instance_basic_test' + primary_resource_id: 'default' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + network_name: 'alloydb-network' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_docs: true + - name: 'alloydb_secondary_instance_basic_test' + primary_resource_id: 'secondary' + vars: + alloydb_primary_cluster_name: 'alloydb-primary-cluster' + alloydb_primary_instance_name: 'alloydb-primary-instance' + alloydb_secondary_cluster_name: 'alloydb-secondary-cluster' + alloydb_secondary_instance_name: 'alloydb-secondary-instance' + network_name: 'alloydb-secondary-network' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + ignore_read_extra: + - 'reconciling' + - 'update_time' + skip_docs: true +parameters: + - name: 'cluster' + type: ResourceRef + description: | + Identifies the alloydb cluster. Must be in the format + 'projects/{project}/locations/{location}/clusters/{cluster_id}' + url_param_only: true + required: true + immutable: true + resource: 'Cluster' + imports: 'name' + - name: 'instanceId' + type: String + description: | + The ID of the alloydb instance. 
+ url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The name of the instance resource. + output: true + - name: 'createTime' + type: Time + description: | + Time the Instance was created in UTC. + output: true + - name: 'updateTime' + type: Time + description: | + Time the Instance was updated in UTC. + output: true + - name: 'uid' + type: String + description: | + The system-generated UID of the resource. + output: true + - name: 'labels' + type: KeyValueLabels + description: 'User-defined labels for the alloydb instance.' + immutable: false + - name: 'annotations' + type: KeyValueAnnotations + description: + 'Annotations to allow client tools to store small amount of arbitrary + data. This is distinct from labels.' + - name: 'state' + type: String + description: | + The current state of the alloydb instance. + output: true + - name: 'displayName' + type: String + description: | + User-settable and human-readable display name for the Instance. + ignore_read: true + - name: 'gceZone' + type: String + description: | + The Compute Engine zone that the instance should serve from, per https://cloud.google.com/compute/docs/regions-zones This can ONLY be specified for ZONAL instances. If present for a REGIONAL instance, an error will be thrown. If this is absent for a ZONAL instance, instance is created in a random zone with available capacity. + - name: 'reconciling' + type: Boolean + description: | + Set to true if the current state of Instance does not match the user's intended state, and the service is actively updating the resource to reconcile them. This can happen due to user-triggered updates or system actions like failover or maintenance. + output: true + - name: 'databaseFlags' + type: KeyValuePairs + description: + 'Database flags. Set at instance level. * They are copied from primary + instance on read instance creation. 
 * Read instances can set new or
+ override existing flags that are relevant for reads, e.g. for enabling
+ columnar cache on a read instance. Flags set on read instance may or may
+ not be present on primary.'
+ default_from_api: true
+ - name: 'availabilityType'
+ type: Enum
+ description: |
+ 'Availability type of an Instance. Defaults to REGIONAL for both primary and read instances.
+ Note that primary and read instances can have different availability types.
+ Only READ_POOL instance supports ZONAL type. Users can't specify the zone for READ_POOL instance.
+ Zone is automatically chosen from the list of zones in the region specified.
+ Read pool of size 1 can only have zonal availability. Read pools with node count of 2 or more
+ can have regional availability (nodes are present in 2 or more zones in a region).'
+ default_from_api: true
+ enum_values:
+ - 'AVAILABILITY_TYPE_UNSPECIFIED'
+ - 'ZONAL'
+ - 'REGIONAL'
+ - name: 'instanceType'
+ type: Enum
+ description: |
+ The type of the instance.
+ If the instance type is READ_POOL, provide the associated PRIMARY/SECONDARY instance in the `depends_on` meta-data attribute.
+ If the instance type is SECONDARY, point to the cluster_type of the associated secondary cluster instead of mentioning SECONDARY.
+ Example: {instance_type = google_alloydb_cluster.<secondary_cluster_name>.cluster_type} instead of {instance_type = SECONDARY}
+ If the instance type is SECONDARY, the terraform delete instance operation does not delete the secondary instance but abandons it instead.
+ Use deletion_policy = "FORCE" in the associated secondary cluster and delete the cluster forcefully to delete the secondary cluster as well as its associated secondary instance.
+ Users can undo the delete secondary instance action by importing the deleted secondary instance by calling terraform import.
+ required: true + immutable: true + enum_values: + - 'PRIMARY' + - 'READ_POOL' + - 'SECONDARY' + - name: 'ipAddress' + type: String + description: | + The IP address for the Instance. This is the connection endpoint for an end-user application. + output: true + - name: 'queryInsightsConfig' + type: NestedObject + description: 'Configuration for query insights.' + default_from_api: true + properties: + - name: 'queryStringLength' + type: Integer + description: 'Query string length. The default value is 1024. Any integer between 256 and 4500 is considered valid.' + - name: 'recordApplicationTags' + type: Boolean + description: 'Record application tags for an instance. This flag is turned "on" by default.' + - name: 'recordClientAddress' + type: Boolean + description: 'Record client address for an instance. Client address is PII information. This flag is turned "on" by default.' + - name: 'queryPlansPerMinute' + type: Integer + description: 'Number of query execution plans captured by Insights per minute for all queries combined. The default value is 5. Any integer between 0 and 20 is considered valid.' + - name: 'readPoolConfig' + type: NestedObject + description: 'Read pool specific config. If the instance type is READ_POOL, this configuration must be provided.' + properties: + - name: 'nodeCount' + type: Integer + description: + 'Read capacity, i.e. number of nodes in a read pool instance.' + - name: 'machineConfig' + type: NestedObject + description: + 'Configurations for the machines that host the underlying database engine.' + default_from_api: true + properties: + - name: 'cpuCount' + type: Integer + description: "The number of CPU's in the VM instance." + + default_from_api: true + - name: 'clientConnectionConfig' + type: NestedObject + description: | + Client connection specific configurations. 
+ default_from_api: true
+ properties:
+ - name: 'requireConnectors'
+ type: Boolean
+ description: |
+ Configuration to enforce connectors only (ex: AuthProxy) connections to the database.
+ - name: 'sslConfig'
+ type: NestedObject
+ description: |
+ SSL config option for this instance.
+ default_from_api: true
+ properties:
+ - name: 'sslMode'
+ type: Enum
+ description: 'SSL mode. Specifies client-server SSL/TLS connection behavior.'
+ default_from_api: true
+ enum_values:
+ - 'ENCRYPTED_ONLY'
+ - 'ALLOW_UNENCRYPTED_AND_ENCRYPTED'
+ - name: 'pscInstanceConfig'
+ type: NestedObject
+ description: |
+ Configuration for Private Service Connect (PSC) for the instance.
+ properties:
+ - name: 'serviceAttachmentLink'
+ type: String
+ description: |
+ The service attachment created when Private Service Connect (PSC) is enabled for the instance.
+ The name of the resource will be in the format of
+ `projects/<alloydb-tenant-project-number>/regions/<region-name>/serviceAttachments/<service-attachment-name>`
+ output: true
+ - name: 'allowedConsumerProjects'
+ type: Array
+ description: |
+ List of consumer projects that are allowed to create PSC endpoints to service-attachments to this instance.
+ These should be specified as project numbers only.
+ item_type:
+ type: String
+ item_validation:
+ regex: '^\d+$'
+ - name: 'pscDnsName'
+ type: String
+ description: |
+ The DNS name of the instance for PSC connectivity.
+ Name convention: <uid>.<uid>.<region>.alloydb-psc.goog
+ output: true
+ - name: 'networkConfig'
+ type: NestedObject
+ description: |
+ Instance level network configuration.
+ properties:
+ - name: 'authorizedExternalNetworks'
+ type: Array
+ description: |
+ A list of external networks authorized to access this instance. This
+ field is only allowed to be set when `enable_public_ip` is set to
+ true.
+ required_with:
+ - 'networkConfig.0.enablePublicIp'
+ item_type:
+ type: NestedObject
+ properties:
+ - name: 'cidrRange'
+ type: String
+ description: 'CIDR range for one authorized network of the instance.'
+ - name: 'enablePublicIp' + type: Boolean + description: | + Enabling public ip for the instance. If a user wishes to disable this, + please also clear the list of the authorized external networks set on + the same instance. + - name: 'publicIpAddress' + type: String + description: | + The public IP addresses for the Instance. This is available ONLY when + networkConfig.enablePublicIp is set to true. This is the connection + endpoint for an end-user application. + output: true diff --git a/mmv1/products/alloydb/go_User.yaml b/mmv1/products/alloydb/go_User.yaml new file mode 100644 index 000000000000..9c518aee6691 --- /dev/null +++ b/mmv1/products/alloydb/go_User.yaml @@ -0,0 +1,128 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'User' +description: 'A database user in an AlloyDB cluster.' 
+references: + guides: + 'AlloyDB': 'https://cloud.google.com/alloydb/docs/' + api: 'https://cloud.google.com/alloydb/docs/reference/rest/v1/projects.locations.clusters.users/create' +docs: +base_url: '{{cluster}}/users' +self_link: '{{cluster}}/users/{{user_id}}' +create_url: '{{cluster}}/users?userId={{user_id}}' +update_url: '{{cluster}}/users?userId={{user_id}}' +update_verb: 'POST' +import_format: + - 'projects/{{project}}/locations/{{location}}/clusters/{{cluster}}/users/{{user_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +custom_code: + custom_import: 'templates/terraform/custom_import/go/alloydb_user.go.tmpl' +skip_sweeper: true +examples: + - name: 'alloydb_user_builtin' + primary_resource_id: 'user1' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_cluster_pass: 'cluster_secret' + alloydb_instance_name: 'alloydb-instance' + alloydb_user_name: 'user1' + alloydb_user_pass: 'user_secret' + network_name: 'alloydb-network' + ignore_read_extra: + - 'password' + skip_test: true + - name: 'alloydb_user_builtin_test' + primary_resource_id: 'user1' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_cluster_pass: 'cluster_secret' + alloydb_instance_name: 'alloydb-instance' + alloydb_user_name: 'user1' + alloydb_user_pass: 'user_secret' + network_name: 'alloydb-network' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + ignore_read_extra: + - 'password' + skip_docs: true + - name: 'alloydb_user_iam' + primary_resource_id: 'user2' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + alloydb_cluster_pass: 'cluster_secret' + alloydb_user_name: 'user2@foo.com' + network_name: 'alloydb-network' + skip_test: true + - name: 'alloydb_user_iam_test' + primary_resource_id: 'user2' + vars: + alloydb_cluster_name: 'alloydb-cluster' + alloydb_instance_name: 'alloydb-instance' + 
alloydb_cluster_pass: 'cluster_secret' + alloydb_user_name: 'user2@foo.com' + network_name: 'alloydb-network' + test_vars_overrides: + 'network_name': 'acctest.BootstrapSharedServiceNetworkingConnection(t, "alloydbinstance-network-config-1")' + skip_docs: true +parameters: + - name: 'cluster' + type: ResourceRef + description: | + Identifies the alloydb cluster. Must be in the format + 'projects/{project}/locations/{location}/clusters/{cluster_id}' + url_param_only: true + required: true + immutable: true + resource: 'Cluster' + imports: 'name' + - name: 'userId' + type: String + description: | + The database role name of the user. + url_param_only: true + required: true + immutable: true + - name: 'userType' + type: Enum + description: | + The type of this user. + required: true + immutable: true + enum_values: + - 'ALLOYDB_BUILT_IN' + - 'ALLOYDB_IAM_USER' +properties: + - name: 'name' + type: String + description: | + Name of the resource in the form of projects/{project}/locations/{location}/clusters/{cluster}/users/{user}. + output: true + - name: 'password' + type: String + description: | + Password for this database user. + ignore_read: true + - name: 'databaseRoles' + type: Array + description: | + List of database roles this database user has. + item_type: + type: String diff --git a/mmv1/products/alloydb/go_product.yaml b/mmv1/products/alloydb/go_product.yaml new file mode 100644 index 000000000000..082d410a4bca --- /dev/null +++ b/mmv1/products/alloydb/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Alloydb' +display_name: 'AlloyDB' +versions: + - name: 'beta' + base_url: 'https://alloydb.googleapis.com/v1beta/' + - name: 'ga' + base_url: 'https://alloydb.googleapis.com/v1/' +scopes: + - 'https://www.googleapis.com/auth/cloud-identity' diff --git a/mmv1/templates/terraform/schema_property.go.tmpl b/mmv1/templates/terraform/schema_property.go.tmpl index 57bde43a980b..056ca339a765 100644 --- a/mmv1/templates/terraform/schema_property.go.tmpl +++ b/mmv1/templates/terraform/schema_property.go.tmpl @@ -106,6 +106,7 @@ Default value: {{ .ItemType.DefaultValue -}} {{ else if eq .ItemType.Type "String" -}} Elem: &schema.Schema{ Type: schema.Type{{ .ItemTypeClass -}}, + {{ template "ItemValidation" . -}} }, {{ else if eq .ItemType.Type "Enum" -}} Elem: &schema.Schema{ @@ -124,6 +125,7 @@ Default value: {{ .ItemType.DefaultValue -}} {{ else -}} Type: {{ .TFType .ItemType.Type }}, {{ end -}} + {{ template "ItemValidation" . 
-}} }, {{ end -}} {{ if .IsSet -}} @@ -180,3 +182,14 @@ Default value: {{ .ItemType.DefaultValue -}} }, {{- end -}} {{- end -}} +{{- define "ItemValidation" -}} + {{ if not .Output -}} + {{ if .ItemValidation -}} + {{ if .ItemValidation.Regex -}} + ValidateFunc: verify.ValidateRegexp(`{{ .ItemValidation.Regex -}}`), + {{ else if .ItemValidation.Function -}} + ValidateFunc: {{ .ItemValidation.Function -}}, + {{ end -}} + {{ end -}} + {{- end }} +{{- end -}} \ No newline at end of file diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 4a944e2a281c..88385854e478 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -240,6 +240,9 @@ async: message: '<%= object.async.error.message %>' <% end -%> <% end -%> +<% unless !object.async.include_project -%> + include_project: <%= object.async.include_project %> +<% end -%> <% end -%> <% end -%> <% diff --git a/mmv1/templates/terraform/yaml_conversion_field.erb b/mmv1/templates/terraform/yaml_conversion_field.erb index ef751e24e860..74d1ca54ba60 100644 --- a/mmv1/templates/terraform/yaml_conversion_field.erb +++ b/mmv1/templates/terraform/yaml_conversion_field.erb @@ -208,6 +208,15 @@ <% unless property.max_size.nil? -%> max_size: <%= property.max_size %> <% end -%> +<% unless property.item_validation.nil? -%> + item_validation: +<% unless property.item_validation.regex.nil? -%> + regex: '<%= property.item_validation.regex %>' +<% end -%> +<% unless property.item_validation.function.nil? -%> + function: '<%= property.item_validation.function %>' +<% end -%> +<% end -%> <% end -%> <% if property.is_a?(Api::Type::ResourceRef) -%> <% unless property.resource.nil? 
-%> From f53dfd68dbce629a86360a7785bc0aa20a78647d Mon Sep 17 00:00:00 2001 From: JoyceYingZhu Date: Tue, 2 Jul 2024 10:57:01 -0700 Subject: [PATCH 264/356] Add custom update to update metadata in a separate call (#11081) --- mmv1/products/vertexai/Index.yaml | 2 +- .../custom_update/vertex_ai_index.go.erb | 143 ++++++++++++++++++ 2 files changed, 144 insertions(+), 1 deletion(-) create mode 100644 mmv1/templates/terraform/custom_update/vertex_ai_index.go.erb diff --git a/mmv1/products/vertexai/Index.yaml b/mmv1/products/vertexai/Index.yaml index 28efea9b6e76..e1082fb09920 100644 --- a/mmv1/products/vertexai/Index.yaml +++ b/mmv1/products/vertexai/Index.yaml @@ -69,7 +69,7 @@ examples: - metadata.0.contents_delta_uri - metadata.0.is_complete_overwrite custom_code: !ruby/object:Provider::Terraform::CustomCode - pre_update: templates/terraform/pre_update/vertex_ai_index.go.erb + custom_update: templates/terraform/custom_update/vertex_ai_index.go.erb parameters: - !ruby/object:Api::Type::String name: region diff --git a/mmv1/templates/terraform/custom_update/vertex_ai_index.go.erb b/mmv1/templates/terraform/custom_update/vertex_ai_index.go.erb new file mode 100644 index 000000000000..5cc8a2c70e5d --- /dev/null +++ b/mmv1/templates/terraform/custom_update/vertex_ai_index.go.erb @@ -0,0 +1,143 @@ +userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) +if err != nil { + return err +} + +billingProject := "" + +project, err := tpgresource.GetProject(d, config) +if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) +} +billingProject = project + +obj := make(map[string]interface{}) +displayNameProp, err := expandVertexAIIndexDisplayName(d.Get("display_name"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp +} +descriptionProp, err := 
expandVertexAIIndexDescription(d.Get("description"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp +} +metadataProp, err := expandVertexAIIndexMetadata(d.Get("metadata"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp +} +labelsProp, err := expandVertexAIIndexEffectiveLabels(d.Get("effective_labels"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp +} + +url, err := tpgresource.ReplaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{region}}/indexes/{{name}}") +if err != nil { + return err +} + +log.Printf("[DEBUG] Updating Index %q: %#v", d.Id(), obj) +headers := make(http.Header) +updateMask := []string{} + +if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") +} + +if d.HasChange("description") { + updateMask = append(updateMask, "description") +} + +if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") +} + +// err == nil indicates that the billing_project value was found +if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp +} + +// if updateMask is empty we are not updating anything so skip the post +if len(updateMask) > 0 { + log.Printf("[DEBUG] Updating first Index with updateMask: %#v", updateMask) + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return 
err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating first Index %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating first Index %q: %#v", d.Id(), res) + } + + err = VertexAIOperationWaitTime( + config, res, project, "Updating Index", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } +} + +secondUpdateMask := []string{} +// 'If `contents_delta_gcs_uri` is set as part of `index.metadata`, +// then no other Index fields can be also updated as part of the same update call.' +// Metadata update need to be done in a separate update call. +if d.HasChange("metadata") { + secondUpdateMask = append(secondUpdateMask, "metadata") +} + +// if secondUpdateMask is empty we are not updating anything so skip the post +if len(secondUpdateMask) > 0 { + log.Printf("[DEBUG] Updating second Index with updateMask: %#v", secondUpdateMask) + // Override updateMask with secondUpdateMask + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(secondUpdateMask, ",")}) + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error Updating second Index %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished Updating second Index %q: %#v", d.Id(), res) + } + + err = VertexAIOperationWaitTime( + config, res, project, "Updating Index", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } +} + +return resourceVertexAIIndexRead(d, 
meta) From c6b1031f4ddedec128651a8e177854102267e259 Mon Sep 17 00:00:00 2001 From: Shuya Ma <87669292+shuyama1@users.noreply.github.com> Date: Tue, 2 Jul 2024 12:18:23 -0700 Subject: [PATCH 265/356] only check if commit exists in sync branches in downstream builds (#11097) --- .ci/magician/cmd/generate_downstream.go | 33 +++++++++++++------------ 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/.ci/magician/cmd/generate_downstream.go b/.ci/magician/cmd/generate_downstream.go index fd28cf0978db..64ef6857f538 100644 --- a/.ci/magician/cmd/generate_downstream.go +++ b/.ci/magician/cmd/generate_downstream.go @@ -97,23 +97,24 @@ func execGenerateDownstream(baseBranch, command, repo, version, ref string, gh G if baseBranch == "" { baseBranch = "main" } - - var syncBranchPrefix string - if repo == "terraform" { - if version == "beta" { - syncBranchPrefix = "tpgb-sync" - } else if version == "ga" { - syncBranchPrefix = "tpg-sync" + if command == "downstream" { + var syncBranchPrefix string + if repo == "terraform" { + if version == "beta" { + syncBranchPrefix = "tpgb-sync" + } else if version == "ga" { + syncBranchPrefix = "tpg-sync" + } + } else if repo == "terraform-google-conversion" { + syncBranchPrefix = "tgc-sync" + } else if repo == "tf-oics" { + syncBranchPrefix = "tf-oics-sync" + } + syncBranch := getSyncBranch(syncBranchPrefix, baseBranch) + if syncBranchHasCommit(ref, syncBranch, rnr) { + fmt.Printf("Sync branch %s already has commit %s, skipping generation\n", syncBranch, ref) + os.Exit(0) } - } else if repo == "terraform-google-conversion" { - syncBranchPrefix = "tgc-sync" - } else if repo == "tf-oics" { - syncBranchPrefix = "tf-oics-sync" - } - syncBranch := getSyncBranch(syncBranchPrefix, baseBranch) - if syncBranchHasCommit(ref, syncBranch, rnr) { - fmt.Printf("Sync branch %s already has commit %s, skipping generation\n", syncBranch, ref) - os.Exit(0) } mmLocalPath := filepath.Join(rnr.GetCWD(), "..", "..") From 
8a8b747838c011346e78393512947d07744d62c5 Mon Sep 17 00:00:00 2001 From: Samir Ribeiro <42391123+Samir-Cit@users.noreply.github.com> Date: Tue, 2 Jul 2024 16:37:16 -0300 Subject: [PATCH 266/356] Add Address Group support for Cloud Armor (#11059) --- .../networksecurity/AddressGroup.yaml | 21 +++++++++++++++++++ ...security_address_groups_cloud_armor.tf.erb | 10 +++++++++ 2 files changed, 31 insertions(+) create mode 100644 mmv1/templates/terraform/examples/network_security_address_groups_cloud_armor.tf.erb diff --git a/mmv1/products/networksecurity/AddressGroup.yaml b/mmv1/products/networksecurity/AddressGroup.yaml index 55962e476403..ab9f6fc7f42c 100644 --- a/mmv1/products/networksecurity/AddressGroup.yaml +++ b/mmv1/products/networksecurity/AddressGroup.yaml @@ -71,6 +71,14 @@ examples: resource_name: "my-address-groups" test_env_vars: project: :PROJECT_NAME + - !ruby/object:Provider::Terraform::Examples + min_version: "beta" + name: "network_security_address_groups_cloud_armor" + primary_resource_id: "default" + vars: + resource_name: "my-address-groups" + test_env_vars: + project: :PROJECT_NAME parameters: - !ruby/object:Api::Type::String name: parent @@ -133,3 +141,16 @@ properties: required: true description: | Capacity of the Address Group. + - !ruby/object:Api::Type::Array + min_version: "beta" + name: "purpose" + description: | + List of supported purposes of the Address Group. + item_type: !ruby/object:Api::Type::Enum + name: 'undefined' + description: | + This field only has a name and description because of MM + limitations. It should not appear in downstreams. 
+ values: + - :DEFAULT + - :CLOUD_ARMOR diff --git a/mmv1/templates/terraform/examples/network_security_address_groups_cloud_armor.tf.erb b/mmv1/templates/terraform/examples/network_security_address_groups_cloud_armor.tf.erb new file mode 100644 index 000000000000..123dd50e92b6 --- /dev/null +++ b/mmv1/templates/terraform/examples/network_security_address_groups_cloud_armor.tf.erb @@ -0,0 +1,10 @@ +resource "google_network_security_address_group" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta + name = "<%= ctx[:vars]['resource_name'] %>" + parent = "projects/<%= ctx[:test_env_vars]['project'] %>" + location = "global" + type = "IPV4" + capacity = "100" + purpose = ["CLOUD_ARMOR"] + items = ["208.80.154.224/32"] +} From 237d389e02bea45664a82c24e68abeb7273a70f0 Mon Sep 17 00:00:00 2001 From: Thomas Rodgers Date: Tue, 2 Jul 2024 13:20:08 -0700 Subject: [PATCH 267/356] Migrate orgpolicy policy to mmv1 (#10759) --- mmv1/products/orgpolicy/Policy.yaml | 222 +++++++++ .../constants/org_policy_policy.go.erb | 5 + .../terraform/custom_expand/enum_bool.go.erb | 25 + .../terraform/custom_flatten/enum_bool.go.erb | 6 + .../encoders/org_policy_policy.go.erb | 17 + .../org_policy_policy_dry_run_spec.tf.erb | 6 +- .../examples/org_policy_policy_enforce.tf.erb | 16 + .../examples/org_policy_policy_folder.tf.erb | 17 + .../org_policy_policy_organization.tf.erb | 8 + .../examples/org_policy_policy_project.tf.erb | 30 ++ .../pre_read/org_policy_policy.go.erb | 21 + .../pre_update/org_policy_policy.go.tmpl | 30 ++ .../resource_org_policy_policy_test.go | 455 ++++++++++++++++++ .../transport/error_retry_predicates.go | 15 + .../basic.cloudresourcemanager.folder.json | 4 - .../basic.cloudresourcemanager.project.json | 4 - .../api/orgpolicy/samples/enforce.policy.json | 11 - .../api/orgpolicy/samples/enforce_policy.yaml | 27 -- .../api/orgpolicy/samples/folder.policy.json | 12 - .../api/orgpolicy/samples/folder_policy.yaml | 31 -- .../samples/organization.policy.json | 7 - 
.../samples/organization_dry_run.policy.json | 13 - .../samples/organization_dry_run_policy.yaml | 26 - .../samples/organization_policy.yaml | 26 - .../api/orgpolicy/samples/project.policy.json | 23 - .../api/orgpolicy/samples/project_policy.yaml | 31 -- .../samples/update_enforce.policy.json | 11 - .../samples/update_folder.policy.json | 23 - .../samples/update_organization.policy.json | 12 - .../update_organization_dry_run.policy.json | 14 - .../samples/update_project.policy.json | 23 - tpgtools/overrides/orgpolicy/beta/policy.yaml | 15 - .../orgpolicy/beta/tpgtools_product.yaml | 5 - tpgtools/overrides/orgpolicy/policy.yaml | 15 - .../samples/policy/dry_run_spec.yaml | 3 - .../orgpolicy/samples/policy/meta.yaml | 8 - 36 files changed, 870 insertions(+), 347 deletions(-) create mode 100644 mmv1/products/orgpolicy/Policy.yaml create mode 100644 mmv1/templates/terraform/constants/org_policy_policy.go.erb create mode 100644 mmv1/templates/terraform/custom_expand/enum_bool.go.erb create mode 100644 mmv1/templates/terraform/custom_flatten/enum_bool.go.erb create mode 100644 mmv1/templates/terraform/encoders/org_policy_policy.go.erb rename tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.tf.tmpl => mmv1/templates/terraform/examples/org_policy_policy_dry_run_spec.tf.erb (79%) create mode 100644 mmv1/templates/terraform/examples/org_policy_policy_enforce.tf.erb create mode 100644 mmv1/templates/terraform/examples/org_policy_policy_folder.tf.erb create mode 100644 mmv1/templates/terraform/examples/org_policy_policy_organization.tf.erb create mode 100644 mmv1/templates/terraform/examples/org_policy_policy_project.tf.erb create mode 100644 mmv1/templates/terraform/pre_read/org_policy_policy.go.erb create mode 100644 mmv1/templates/terraform/pre_update/org_policy_policy.go.tmpl create mode 100644 mmv1/third_party/terraform/services/orgpolicy/resource_org_policy_policy_test.go delete mode 100755 tpgtools/api/orgpolicy/samples/basic.cloudresourcemanager.folder.json 
delete mode 100755 tpgtools/api/orgpolicy/samples/basic.cloudresourcemanager.project.json delete mode 100755 tpgtools/api/orgpolicy/samples/enforce.policy.json delete mode 100755 tpgtools/api/orgpolicy/samples/enforce_policy.yaml delete mode 100755 tpgtools/api/orgpolicy/samples/folder.policy.json delete mode 100755 tpgtools/api/orgpolicy/samples/folder_policy.yaml delete mode 100755 tpgtools/api/orgpolicy/samples/organization.policy.json delete mode 100644 tpgtools/api/orgpolicy/samples/organization_dry_run.policy.json delete mode 100644 tpgtools/api/orgpolicy/samples/organization_dry_run_policy.yaml delete mode 100755 tpgtools/api/orgpolicy/samples/organization_policy.yaml delete mode 100755 tpgtools/api/orgpolicy/samples/project.policy.json delete mode 100755 tpgtools/api/orgpolicy/samples/project_policy.yaml delete mode 100755 tpgtools/api/orgpolicy/samples/update_enforce.policy.json delete mode 100755 tpgtools/api/orgpolicy/samples/update_folder.policy.json delete mode 100755 tpgtools/api/orgpolicy/samples/update_organization.policy.json delete mode 100644 tpgtools/api/orgpolicy/samples/update_organization_dry_run.policy.json delete mode 100755 tpgtools/api/orgpolicy/samples/update_project.policy.json delete mode 100644 tpgtools/overrides/orgpolicy/beta/policy.yaml delete mode 100644 tpgtools/overrides/orgpolicy/beta/tpgtools_product.yaml delete mode 100644 tpgtools/overrides/orgpolicy/policy.yaml delete mode 100644 tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.yaml delete mode 100644 tpgtools/overrides/orgpolicy/samples/policy/meta.yaml diff --git a/mmv1/products/orgpolicy/Policy.yaml b/mmv1/products/orgpolicy/Policy.yaml new file mode 100644 index 000000000000..0cbd23b4590e --- /dev/null +++ b/mmv1/products/orgpolicy/Policy.yaml @@ -0,0 +1,222 @@ +# Copyright 2023 Google Inc. +# Licensed under the Apache License, Version 2.0 (the License); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: Policy +base_url: '{{parent}}/policies' +create_url: '{{parent}}/policies' +self_link: '{{parent}}/policies/{{name}}' +import_format: + ['{{%parent}}/policies/{{name}}'] +update_verb: :PATCH +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': 'https://cloud.google.com/resource-manager/docs/organization-policy/creating-managing-custom-constraints' + 'Supported Services': 'https://cloud.google.com/resource-manager/docs/organization-policy/custom-constraint-supported-services' + api: 'https://cloud.google.com/resource-manager/docs/reference/orgpolicy/rest/v2/organizations.policies' +description: "Defines an organization policy which is used to specify constraints for configurations of Google Cloud resources." 
+error_retry_predicates: ['transport_tpg.IsOrgpolicyRetryableError'] +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'org_policy_policy_enforce' + primary_resource_id: 'primary' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'org_policy_policy_folder' + primary_resource_id: 'primary' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'org_policy_policy_organization' + primary_resource_id: 'primary' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'org_policy_policy_project' + primary_resource_id: 'primary' + skip_test: true + - !ruby/object:Provider::Terraform::Examples + name: 'org_policy_policy_dry_run_spec' + primary_resource_id: 'primary' + skip_test: true +custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: templates/terraform/constants/org_policy_policy.go.erb + encoder: templates/terraform/encoders/org_policy_policy.go.erb + pre_read: templates/terraform/pre_read/org_policy_policy.go.erb + pre_update: templates/terraform/pre_update/org_policy_policy.go.tmpl +parameters: + - !ruby/object:Api::Type::String + name: parent + description: The parent of the resource. + url_param_only: true + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' +properties: + - !ruby/object:Api::Type::String + name: name + description: 'Immutable. The resource name of the Policy. Must be one of the following forms, where constraint_name is the name of the constraint which this Policy configures: * `projects/{project_number}/policies/{constraint_name}` * `folders/{folder_id}/policies/{constraint_name}` * `organizations/{organization_id}/policies/{constraint_name}` For example, "projects/123/policies/compute.disableSerialPortAccess". Note: `projects/{project_id}/policies/{constraint_name}` is also an acceptable name for API requests, but responses will return the name using the equivalent project number.' 
+ required: true + immutable: true + ignore_read: true + diff_suppress_func: 'tpgresource.CompareResourceNames' + - !ruby/object:Api::Type::NestedObject + name: spec + description: Basic information about the Organization Policy. + properties: + - !ruby/object:Api::Type::String + name: etag + description: An opaque tag indicating the current version of the `Policy`, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the `Policy` is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current `Policy` to use when executing a read-modify-write loop. When the `Policy` is returned from a `GetEffectivePolicy` request, the `etag` will be unset. + output: true + - !ruby/object:Api::Type::String + name: updateTime + description: Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that `Policy`. + output: true + - !ruby/object:Api::Type::Array + name: rules + description: 'Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set `enforced` to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence.' + item_type: !ruby/object:Api::Type::NestedObject + name: rule + description: 'Up to 10 PolicyRules are allowed. In Policies for boolean constraints, the following requirements apply: - There must be one and only one PolicyRule where condition is unset. - BooleanPolicyRules with conditions must set `enforced` to the opposite of the PolicyRule without a condition. - During policy evaluation, PolicyRules with conditions that are true for a target resource take precedence.' 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: values + description: List of values to be used for this PolicyRule. This field can be set only in Policies for list constraints. + properties: + - !ruby/object:Api::Type::Array + name: allowedValues + description: List of values allowed at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: deniedValues + description: List of values denied at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: allowAll + description: Setting this to `"TRUE"` means that all values are allowed. This field can be set only in Policies for list constraints. + custom_expand: 'templates/terraform/custom_expand/enum_bool.go.erb' + custom_flatten: 'templates/terraform/custom_flatten/enum_bool.go.erb' + send_empty_value: true + - !ruby/object:Api::Type::String + name: denyAll + description: Setting this to `"TRUE"` means that all values are denied. This field can be set only in Policies for list constraints. + custom_expand: 'templates/terraform/custom_expand/enum_bool.go.erb' + custom_flatten: 'templates/terraform/custom_flatten/enum_bool.go.erb' + send_empty_value: true + - !ruby/object:Api::Type::String + name: enforce + description: If `"TRUE"`, then the `Policy` is enforced. If `"FALSE"`, then any configuration is acceptable. This field can be set only in Policies for boolean constraints. + custom_expand: 'templates/terraform/custom_expand/enum_bool.go.erb' + custom_flatten: 'templates/terraform/custom_flatten/enum_bool.go.erb' + send_empty_value: true + - !ruby/object:Api::Type::NestedObject + name: condition + description: 'A condition which determines whether this rule is used in the evaluation of the policy. When set, the `expression` field in the `Expr'' must include from 1 to 10 subexpressions, joined by the "||" or "&&" operators. Each subexpression must be of the form "resource.matchTag(''/tag_key_short_name, ''tag_value_short_name'')". 
or "resource.matchTagId(''tagKeys/key_id'', ''tagValues/value_id'')". where key_name and value_name are the resource names for Label Keys and Values. These names are available from the Tag Manager Service. An example expression is: "resource.matchTag(''123456789/environment, ''prod'')". or "resource.matchTagId(''tagKeys/123'', ''tagValues/456'')".' + properties: + - !ruby/object:Api::Type::String + name: expression + description: Textual representation of an expression in Common Expression Language syntax. + diff_suppress_func: resourceOrgpolicyPolicyRulesConditionExpressionDiffSuppress + - !ruby/object:Api::Type::String + name: title + description: Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: description + description: Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: location + description: Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::Boolean + name: inheritFromParent + description: Determines the inheritance behavior for this `Policy`. If `inherit_from_parent` is true, PolicyRules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this Policy becomes the new root for evaluation. This field can be set only for Policies which configure list constraints. + send_empty_value: true + - !ruby/object:Api::Type::Boolean + name: reset + description: Ignores policies set above this resource and restores the `constraint_default` enforcement behavior of the specific `Constraint` at this resource. This field can be set in policies for either list or boolean constraints. 
If set, `rules` must be empty and `inherit_from_parent` must be set to false. + send_empty_value: true + - !ruby/object:Api::Type::NestedObject + name: dryRunSpec + description: Dry-run policy. Audit-only policy, can be used to monitor how the policy would have impacted the existing and future resources if it's enforced. + properties: + - !ruby/object:Api::Type::String + name: etag + description: An opaque tag indicating the current version of the policy, used for concurrency control. This field is ignored if used in a `CreatePolicy` request. When the policy` is returned from either a `GetPolicy` or a `ListPolicies` request, this `etag` indicates the version of the current policy to use when executing a read-modify-write loop. When the policy is returned from a `GetEffectivePolicy` request, the `etag` will be unset. + output: true + - !ruby/object:Api::Type::String + name: updateTime + description: Output only. The time stamp this was previously updated. This represents the last time a call to `CreatePolicy` or `UpdatePolicy` was made for that policy. + output: true + - !ruby/object:Api::Type::Array + name: rules + description: 'In policies for boolean constraints, the following requirements apply: - There must be one and only one policy rule where condition is unset. - Boolean policy rules with conditions must set `enforced` to the opposite of the policy rule without a condition. - During policy evaluation, policy rules with conditions that are true for a target resource take precedence.' + item_type: !ruby/object:Api::Type::NestedObject + name: rule + description: 'In policies for boolean constraints, the following requirements apply: - There must be one and only one policy rule where condition is unset. - Boolean policy rules with conditions must set `enforced` to the opposite of the policy rule without a condition. - During policy evaluation, policy rules with conditions that are true for a target resource take precedence.' 
+ properties: + - !ruby/object:Api::Type::NestedObject + name: values + description: List of values to be used for this policy rule. This field can be set only in policies for list constraints. + properties: + - !ruby/object:Api::Type::Array + name: allowedValues + description: List of values allowed at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::Array + name: deniedValues + description: List of values denied at this resource. + item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: allowAll + description: Setting this to `"TRUE"` means that all values are allowed. This field can be set only in Policies for list constraints. + custom_expand: 'templates/terraform/custom_expand/enum_bool.go.erb' + custom_flatten: 'templates/terraform/custom_flatten/enum_bool.go.erb' + send_empty_value: true + - !ruby/object:Api::Type::String + name: denyAll + description: Setting this to `"TRUE"` means that all values are denied. This field can be set only in Policies for list constraints. + custom_expand: 'templates/terraform/custom_expand/enum_bool.go.erb' + custom_flatten: 'templates/terraform/custom_flatten/enum_bool.go.erb' + send_empty_value: true + - !ruby/object:Api::Type::String + name: enforce + description: If `"TRUE"`, then the `Policy` is enforced. If `"FALSE"`, then any configuration is acceptable. This field can be set only in Policies for boolean constraints. + custom_expand: 'templates/terraform/custom_expand/enum_bool.go.erb' + custom_flatten: 'templates/terraform/custom_flatten/enum_bool.go.erb' + send_empty_value: true + - !ruby/object:Api::Type::NestedObject + name: condition + description: 'A condition which determines whether this rule is used in the evaluation of the policy. When set, the `expression` field in the `Expr'' must include from 1 to 10 subexpressions, joined by the "||" or "&&" operators. Each subexpression must be of the form "resource.matchTag(''/tag_key_short_name, ''tag_value_short_name'')". 
or "resource.matchTagId(''tagKeys/key_id'', ''tagValues/value_id'')". where key_name and value_name are the resource names for Label Keys and Values. These names are available from the Tag Manager Service. An example expression is: "resource.matchTag(''123456789/environment, ''prod'')". or "resource.matchTagId(''tagKeys/123'', ''tagValues/456'')".' + properties: + - !ruby/object:Api::Type::String + name: expression + description: Textual representation of an expression in Common Expression Language syntax. + diff_suppress_func: resourceOrgpolicyPolicyRulesConditionExpressionDiffSuppress + - !ruby/object:Api::Type::String + name: title + description: Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. + - !ruby/object:Api::Type::String + name: description + description: Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. + - !ruby/object:Api::Type::String + name: location + description: Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. + - !ruby/object:Api::Type::Boolean + name: inheritFromParent + description: Determines the inheritance behavior for this policy. If `inherit_from_parent` is true, policy rules set higher up in the hierarchy (up to the closest root) are inherited and present in the effective policy. If it is false, then no rules are inherited, and this policy becomes the new root for evaluation. This field can be set only for policies which configure list constraints. + send_empty_value: true + - !ruby/object:Api::Type::Boolean + name: reset + description: Ignores policies set above this resource and restores the `constraint_default` enforcement behavior of the specific constraint at this resource. This field can be set in policies for either list or boolean constraints. 
If set, `rules` must be empty and `inherit_from_parent` must be set to false. + send_empty_value: true + - !ruby/object:Api::Type::String + name: etag + description: Optional. An opaque tag indicating the current state of the policy, used for concurrency control. This 'etag' is computed by the server based on the value of other fields, and may be sent on update and delete requests to ensure the client has an up-to-date value before proceeding. + output: true diff --git a/mmv1/templates/terraform/constants/org_policy_policy.go.erb b/mmv1/templates/terraform/constants/org_policy_policy.go.erb new file mode 100644 index 000000000000..e6bd256ca587 --- /dev/null +++ b/mmv1/templates/terraform/constants/org_policy_policy.go.erb @@ -0,0 +1,5 @@ +func resourceOrgpolicyPolicyRulesConditionExpressionDiffSuppress(_, old, new string, d *schema.ResourceData) bool { + oldReplaced := strings.ReplaceAll(strings.ReplaceAll(old, "Labels", "TagId"), "label", "tag") + newReplaced := strings.ReplaceAll(strings.ReplaceAll(new, "Labels", "TagId"), "label", "tag") + return oldReplaced == newReplaced +} diff --git a/mmv1/templates/terraform/custom_expand/enum_bool.go.erb b/mmv1/templates/terraform/custom_expand/enum_bool.go.erb new file mode 100644 index 000000000000..e8153ccee90e --- /dev/null +++ b/mmv1/templates/terraform/custom_expand/enum_bool.go.erb @@ -0,0 +1,25 @@ +<%- # the license inside this block applies to this file + # Copyright 2021 Google Inc. + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ # See the License for the specific language governing permissions and + # limitations under the License. +-%> +func expand<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + b, err := strconv.ParseBool(v.(string)) + if err != nil { + return nil, nil + } + return b, nil +} diff --git a/mmv1/templates/terraform/custom_flatten/enum_bool.go.erb b/mmv1/templates/terraform/custom_flatten/enum_bool.go.erb new file mode 100644 index 000000000000..ef3917b5f086 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/enum_bool.go.erb @@ -0,0 +1,6 @@ +func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return "" + } + return strings.ToUpper(strconv.FormatBool(v.(bool))) +} diff --git a/mmv1/templates/terraform/encoders/org_policy_policy.go.erb b/mmv1/templates/terraform/encoders/org_policy_policy.go.erb new file mode 100644 index 000000000000..249fa5ab61c7 --- /dev/null +++ b/mmv1/templates/terraform/encoders/org_policy_policy.go.erb @@ -0,0 +1,17 @@ +<%- # the license inside this block applies to this file + # Copyright 2024 Google Inc. + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+-%> +name := d.Get("name").(string) +d.Set("name", tpgresource.GetResourceNameFromSelfLink(name)) +return obj, nil diff --git a/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.tf.tmpl b/mmv1/templates/terraform/examples/org_policy_policy_dry_run_spec.tf.erb similarity index 79% rename from tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.tf.tmpl rename to mmv1/templates/terraform/examples/org_policy_policy_dry_run_spec.tf.erb index 1c66f04c389b..a5a1bc790d2c 100644 --- a/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.tf.tmpl +++ b/mmv1/templates/terraform/examples/org_policy_policy_dry_run_spec.tf.erb @@ -1,6 +1,6 @@ resource "google_org_policy_custom_constraint" "constraint" { name = "custom.disableGkeAutoUpgrade%{random_suffix}" - parent = "organizations/{{org_id}}" + parent = "organizations/123456789" display_name = "Disable GKE auto upgrade" description = "Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced." 
@@ -11,8 +11,8 @@ resource "google_org_policy_custom_constraint" "constraint" { } resource "google_org_policy_policy" "primary" { - name = "organizations/{{org_id}}/policies/${google_org_policy_custom_constraint.constraint.name}" - parent = "organizations/{{org_id}}" + name = "organizations/123456789/policies/${google_org_policy_custom_constraint.constraint.name}" + parent = "organizations/123456789" spec { rules { diff --git a/mmv1/templates/terraform/examples/org_policy_policy_enforce.tf.erb b/mmv1/templates/terraform/examples/org_policy_policy_enforce.tf.erb new file mode 100644 index 000000000000..fa739896cb30 --- /dev/null +++ b/mmv1/templates/terraform/examples/org_policy_policy_enforce.tf.erb @@ -0,0 +1,16 @@ +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/iam.disableServiceAccountKeyUpload" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + enforce = "FALSE" + } + } +} + +resource "google_project" "basic" { + project_id = "id" + name = "id" + org_id = "123456789" +} diff --git a/mmv1/templates/terraform/examples/org_policy_policy_folder.tf.erb b/mmv1/templates/terraform/examples/org_policy_policy_folder.tf.erb new file mode 100644 index 000000000000..c924f1226c53 --- /dev/null +++ b/mmv1/templates/terraform/examples/org_policy_policy_folder.tf.erb @@ -0,0 +1,17 @@ +resource "google_org_policy_policy" "primary" { + name = "${google_folder.basic.name}/policies/gcp.resourceLocations" + parent = google_folder.basic.name + + spec { + inherit_from_parent = true + + rules { + deny_all = "TRUE" + } + } +} + +resource "google_folder" "basic" { + parent = "organizations/123456789" + display_name = "folder" +} diff --git a/mmv1/templates/terraform/examples/org_policy_policy_organization.tf.erb b/mmv1/templates/terraform/examples/org_policy_policy_organization.tf.erb new file mode 100644 index 000000000000..d5d73b2ac6c7 --- /dev/null +++ 
b/mmv1/templates/terraform/examples/org_policy_policy_organization.tf.erb @@ -0,0 +1,8 @@ +resource "google_org_policy_policy" "primary" { + name = "organizations/123456789/policies/gcp.detailedAuditLoggingMode" + parent = "organizations/123456789" + + spec { + reset = true + } +} diff --git a/mmv1/templates/terraform/examples/org_policy_policy_project.tf.erb b/mmv1/templates/terraform/examples/org_policy_policy_project.tf.erb new file mode 100644 index 000000000000..de076a51c3f9 --- /dev/null +++ b/mmv1/templates/terraform/examples/org_policy_policy_project.tf.erb @@ -0,0 +1,30 @@ +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/gcp.resourceLocations" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + condition { + description = "A sample condition for the policy" + expression = "resource.matchLabels('labelKeys/123', 'labelValues/345')" + location = "sample-location.log" + title = "sample-condition" + } + + values { + allowed_values = ["projects/allowed-project"] + denied_values = ["projects/denied-project"] + } + } + + rules { + allow_all = "TRUE" + } + } +} + +resource "google_project" "basic" { + project_id = "id" + name = "id" + org_id = "123456789" +} diff --git a/mmv1/templates/terraform/pre_read/org_policy_policy.go.erb b/mmv1/templates/terraform/pre_read/org_policy_policy.go.erb new file mode 100644 index 000000000000..eea754c96478 --- /dev/null +++ b/mmv1/templates/terraform/pre_read/org_policy_policy.go.erb @@ -0,0 +1,21 @@ +<%- # the license inside this block applies to this file + # Copyright 2024 Google Inc. + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. 
+ # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. +-%> +// Call the encoder to shorten name even if the resource is created by another provider version. +resourceOrgPolicyPolicyEncoder(d, nil, nil) +// Re-create the url. +url, err = tpgresource.ReplaceVars(d, config, "{{OrgPolicyBasePath}}{{parent}}/policies/{{name}}") +if err != nil { + return err +} diff --git a/mmv1/templates/terraform/pre_update/org_policy_policy.go.tmpl b/mmv1/templates/terraform/pre_update/org_policy_policy.go.tmpl new file mode 100644 index 000000000000..325e52fd16b6 --- /dev/null +++ b/mmv1/templates/terraform/pre_update/org_policy_policy.go.tmpl @@ -0,0 +1,30 @@ +<%- # the license inside this block applies to this file + # Copyright 2024 Google Inc. + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+-%> +updateMask := []string{} + +if d.HasChange("spec") { + updateMask = append(updateMask, "policy.spec") +} + +if d.HasChange("dry_run_spec") { + updateMask = append(updateMask, "policy.dryRunSpec") +} + +// updateMask is a URL parameter but not present in the schema, so ReplaceVars +// won't set it +url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) +if err != nil { + return err +} diff --git a/mmv1/third_party/terraform/services/orgpolicy/resource_org_policy_policy_test.go b/mmv1/third_party/terraform/services/orgpolicy/resource_org_policy_policy_test.go new file mode 100644 index 000000000000..078705bd22d9 --- /dev/null +++ b/mmv1/third_party/terraform/services/orgpolicy/resource_org_policy_policy_test.go @@ -0,0 +1,455 @@ +package orgpolicy_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func TestAccOrgPolicyPolicy_EnforcePolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckOrgPolicyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyPolicy_EnforcePolicy(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + 
}, + }) +} +func TestAccOrgPolicyPolicy_FolderPolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckOrgPolicyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyPolicy_FolderPolicy(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + { + Config: testAccOrgPolicyPolicy_FolderPolicyUpdate0(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + }, + }) +} +func TestAccOrgPolicyPolicy_OrganizationPolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckOrgPolicyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyPolicy_OrganizationPolicy(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + { + Config: testAccOrgPolicyPolicy_OrganizationPolicyUpdate0(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, 
+ }, + }, + }) +} +func TestAccOrgPolicyPolicy_ProjectPolicy(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckOrgPolicyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyPolicy_ProjectPolicy(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + { + Config: testAccOrgPolicyPolicy_ProjectPolicyUpdate0(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + }, + }) +} +func TestAccOrgPolicyPolicy_DryRunSpecHandWritten(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckOrgPolicyPolicyDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccOrgPolicyPolicy_DryRunSpecHandWritten(context), + }, + { + ResourceName: "google_org_policy_policy.primary", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "spec.0.rules.0.condition.0.expression"}, + }, + }, + }) +} + +func testAccOrgPolicyPolicy_EnforcePolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_policy" "primary" { + name = 
"projects/${google_project.basic.name}/policies/iam.disableServiceAccountKeyUpload" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + enforce = "FALSE" + } + } +} + +resource "google_project" "basic" { + project_id = "tf-test-id%{random_suffix}" + name = "tf-test-id%{random_suffix}" + org_id = "%{org_id}" +} + + +`, context) +} + +func testAccOrgPolicyPolicy_FolderPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "${google_folder.basic.name}/policies/gcp.resourceLocations" + parent = google_folder.basic.name + + spec { + inherit_from_parent = true + + rules { + deny_all = "TRUE" + } + } +} + +resource "google_folder" "basic" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder%{random_suffix}" +} + + +`, context) +} + +func testAccOrgPolicyPolicy_FolderPolicyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "${google_folder.basic.name}/policies/gcp.resourceLocations" + parent = google_folder.basic.name + + spec { + inherit_from_parent = false + + rules { + condition { + description = "A sample condition for the policy" + expression = "resource.matchLabels('labelKeys/123', 'labelValues/345')" + title = "sample-condition" + } + + values { + allowed_values = ["projects/allowed-project"] + denied_values = ["projects/denied-project"] + } + } + + rules { + allow_all = "TRUE" + } + } +} + +resource "google_folder" "basic" { + parent = "organizations/%{org_id}" + display_name = "tf-test-folder%{random_suffix}" +} + + +`, context) +} + +func testAccOrgPolicyPolicy_OrganizationPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_custom_constraint" "constraint" { + name = "custom.tfTest%{random_suffix}" + parent = "organizations/%{org_id}" + display_name = "Disable GKE auto upgrade" + description = "Only allow GKE 
NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced." + + action_type = "ALLOW" + condition = "resource.management.autoUpgrade == false" + method_types = ["CREATE", "UPDATE"] + resource_types = ["container.googleapis.com/NodePool"] +} + +resource "google_org_policy_policy" "primary" { + name = "organizations/%{org_id}/policies/${google_org_policy_custom_constraint.constraint.name}" + parent = "organizations/%{org_id}" + + spec { + reset = true + } +} +`, context) +} + +func testAccOrgPolicyPolicy_OrganizationPolicyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_custom_constraint" "constraint" { + name = "custom.tfTest%{random_suffix}" + parent = "organizations/%{org_id}" + display_name = "Disable GKE auto upgrade" + description = "Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced." + + action_type = "ALLOW" + condition = "resource.management.autoUpgrade == false" + method_types = ["CREATE", "UPDATE"] + resource_types = ["container.googleapis.com/NodePool"] +} + +resource "google_org_policy_policy" "primary" { + name = "organizations/%{org_id}/policies/${google_org_policy_custom_constraint.constraint.name}" + parent = "organizations/%{org_id}" + + spec { + reset = false + + rules { + enforce = "TRUE" + } + } +} +`, context) +} + +func testAccOrgPolicyPolicy_ProjectPolicy(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/gcp.resourceLocations" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + condition { + description = "A sample condition for the policy" + expression = "resource.matchLabels('labelKeys/123', 'labelValues/345')" + location = "sample-location.log" + title = "sample-condition" + } + + values { + allowed_values = 
["projects/allowed-project"] + denied_values = ["projects/denied-project"] + } + } + + rules { + allow_all = "TRUE" + } + } +} + +resource "google_project" "basic" { + project_id = "tf-test-id%{random_suffix}" + name = "tf-test-id%{random_suffix}" + org_id = "%{org_id}" +} + + +`, context) +} + +func testAccOrgPolicyPolicy_ProjectPolicyUpdate0(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/gcp.resourceLocations" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + condition { + description = "A new sample condition for the policy" + expression = "false" + location = "new-sample-location.log" + title = "new-sample-condition" + } + + values { + allowed_values = ["projects/new-allowed-project"] + denied_values = ["projects/new-denied-project"] + } + } + + rules { + deny_all = "TRUE" + } + } +} + +resource "google_project" "basic" { + project_id = "tf-test-id%{random_suffix}" + name = "tf-test-id%{random_suffix}" + org_id = "%{org_id}" +} + + +`, context) +} + +func testAccOrgPolicyPolicy_DryRunSpecHandWritten(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_org_policy_custom_constraint" "constraint" { + name = "custom.disableGkeAutoUpgrade%{random_suffix}" + parent = "organizations/%{org_id}" + display_name = "Disable GKE auto upgrade" + description = "Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not enabled where this custom constraint is enforced." 
+ + action_type = "ALLOW" + condition = "resource.management.autoUpgrade == false" + method_types = ["CREATE"] + resource_types = ["container.googleapis.com/NodePool"] +} + +resource "google_org_policy_policy" "primary" { + name = "organizations/%{org_id}/policies/${google_org_policy_custom_constraint.constraint.name}" + parent = "organizations/%{org_id}" + + spec { + rules { + enforce = "FALSE" + } + } + dry_run_spec { + inherit_from_parent = false + reset = false + rules { + enforce = "FALSE" + } + } +} + +`, context) +} + +func testAccCheckOrgPolicyPolicyDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_org_policy_policy" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := acctest.GoogleProviderConfig(t) + + url, err := tpgresource.ReplaceVarsForTest(config, rs, "{{OrgPolicyBasePath}}{{parent}}/policies/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "GET", + Project: billingProject, + RawURL: url, + UserAgent: config.UserAgent, + ErrorRetryPredicates: []transport_tpg.RetryErrorPredicateFunc{transport_tpg.IsOrgpolicyRetryableError}, + }) + if err == nil { + return fmt.Errorf("OrgPolicyPolicy still exists at %s", url) + } + } + + return nil + } +} diff --git a/mmv1/third_party/terraform/transport/error_retry_predicates.go b/mmv1/third_party/terraform/transport/error_retry_predicates.go index ff42b4458c81..5f9c9c3ae6cd 100644 --- a/mmv1/third_party/terraform/transport/error_retry_predicates.go +++ b/mmv1/third_party/terraform/transport/error_retry_predicates.go @@ -492,6 +492,21 @@ func IsAppEngineRetryableError(err error) (bool, string) { return false, "" } +// Retry if Orgpolicy operation returns a 
403 with a specific message +// indicating the parent resource does not exist. +func IsOrgpolicyRetryableError(err error) (bool, string) { + if gerr, ok := err.(*googleapi.Error); ok { + if gerr.Code != 403 { + return false, "" + } + pattern := regexp.MustCompile("Permission 'orgpolicy\\.policy\\.[a-z]*' denied on resource '//[a-z]*\\.googleapis\\.com/(projects|folders)/[a-z0-9-]*/policies/[a-zA-Z.]*' \\(or it may not exist\\)\\.") + if pattern.MatchString(gerr.Body) { + return true, "Waiting for parent resource to be ready" + } + } + return false, "" +} + // Bigtable uses gRPC and thus does not return errors of type *googleapi.Error. // Instead the errors returned are *status.Error. See the types of codes returned // here (https://pkg.go.dev/google.golang.org/grpc/codes#Code). diff --git a/tpgtools/api/orgpolicy/samples/basic.cloudresourcemanager.folder.json b/tpgtools/api/orgpolicy/samples/basic.cloudresourcemanager.folder.json deleted file mode 100755 index be10e310985f..000000000000 --- a/tpgtools/api/orgpolicy/samples/basic.cloudresourcemanager.folder.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "parent": "organizations/{{org_id}}", - "displayName": "{{folder}}" -} diff --git a/tpgtools/api/orgpolicy/samples/basic.cloudresourcemanager.project.json b/tpgtools/api/orgpolicy/samples/basic.cloudresourcemanager.project.json deleted file mode 100755 index ecb8396416a9..000000000000 --- a/tpgtools/api/orgpolicy/samples/basic.cloudresourcemanager.project.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "name": "{{id}}", - "parent": "organizations/{{org_id}}" -} diff --git a/tpgtools/api/orgpolicy/samples/enforce.policy.json b/tpgtools/api/orgpolicy/samples/enforce.policy.json deleted file mode 100755 index 78ec2b9f8647..000000000000 --- a/tpgtools/api/orgpolicy/samples/enforce.policy.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "name": "projects/{{ref:basic.cloudresourcemanager.project.json:name}}/policies/iam.disableServiceAccountKeyUpload", - "parent": 
"projects/{{ref:basic.cloudresourcemanager.project.json:name}}", - "spec": { - "rules": [ - { - "enforce": false - } - ] - } -} diff --git a/tpgtools/api/orgpolicy/samples/enforce_policy.yaml b/tpgtools/api/orgpolicy/samples/enforce_policy.yaml deleted file mode 100755 index 2527f30a897a..000000000000 --- a/tpgtools/api/orgpolicy/samples/enforce_policy.yaml +++ /dev/null @@ -1,27 +0,0 @@ -# Copyright 2021 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -dependencies: -- samples/basic.cloudresourcemanager.project.json -description: A test of an enforce orgpolicy policy for a project -name: enforce_policy -resource: samples/enforce.policy.json -type: policy -variables: -- name: id - type: resource_name -- name: org_id - type: org_id -versions: -- ga -- beta diff --git a/tpgtools/api/orgpolicy/samples/folder.policy.json b/tpgtools/api/orgpolicy/samples/folder.policy.json deleted file mode 100755 index 37f224f72fce..000000000000 --- a/tpgtools/api/orgpolicy/samples/folder.policy.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "name": "{{ref:basic.cloudresourcemanager.folder.json:name}}/policies/gcp.resourceLocations", - "parent": "{{ref:basic.cloudresourcemanager.folder.json:name}}", - "spec": { - "rules": [ - { - "denyAll": true - } - ], - "inheritFromParent": true - } -} diff --git a/tpgtools/api/orgpolicy/samples/folder_policy.yaml b/tpgtools/api/orgpolicy/samples/folder_policy.yaml deleted file mode 100755 index 4a43fbb9052b..000000000000 --- 
a/tpgtools/api/orgpolicy/samples/folder_policy.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2021 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -dependencies: -- samples/basic.cloudresourcemanager.folder.json -description: A test of an orgpolicy policy for a folder -name: folder_policy -resource: samples/folder.policy.json -type: policy -updates: -- dependencies: - - samples/basic.cloudresourcemanager.folder.json - resource: samples/update_folder.policy.json -variables: -- name: org_id - type: org_id -- name: folder - type: resource_name -versions: -- ga -- beta diff --git a/tpgtools/api/orgpolicy/samples/organization.policy.json b/tpgtools/api/orgpolicy/samples/organization.policy.json deleted file mode 100755 index 7166fc4138fa..000000000000 --- a/tpgtools/api/orgpolicy/samples/organization.policy.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "organizations/{{org_id}}/policies/gcp.detailedAuditLoggingMode", - "parent": "organizations/{{org_id}}", - "spec": { - "reset": true - } -} diff --git a/tpgtools/api/orgpolicy/samples/organization_dry_run.policy.json b/tpgtools/api/orgpolicy/samples/organization_dry_run.policy.json deleted file mode 100644 index ef93418792e8..000000000000 --- a/tpgtools/api/orgpolicy/samples/organization_dry_run.policy.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "name": "organizations/{{org_id}}/policies/gcp.resourceLocations", - "parent": "organizations/{{org_id}}", - "dryRunSpec": { - "rules": [ - { - 
"denyAll": true - } - ], - "reset": true, - "inheritFromParent": false - } -} \ No newline at end of file diff --git a/tpgtools/api/orgpolicy/samples/organization_dry_run_policy.yaml b/tpgtools/api/orgpolicy/samples/organization_dry_run_policy.yaml deleted file mode 100644 index 809c10dde32e..000000000000 --- a/tpgtools/api/orgpolicy/samples/organization_dry_run_policy.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2023 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -name: organization_dry_run_policy -description: A test of an dry run policy for an organization -type: policy -versions: -- ga -- beta -resource: samples/organization_dry_run.policy.json -updates: -- resource: samples/update_organization_dry_run.policy.json - dependencies: [] -variables: -- name: org_id - type: org_id \ No newline at end of file diff --git a/tpgtools/api/orgpolicy/samples/organization_policy.yaml b/tpgtools/api/orgpolicy/samples/organization_policy.yaml deleted file mode 100755 index 9a8425662e39..000000000000 --- a/tpgtools/api/orgpolicy/samples/organization_policy.yaml +++ /dev/null @@ -1,26 +0,0 @@ -# Copyright 2021 Google LLC. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -description: A test of an orgpolicy policy for an organization -name: organization_policy -resource: samples/organization.policy.json -type: policy -updates: -- dependencies: [] - resource: samples/update_organization.policy.json -variables: -- name: org_id - type: org_id -versions: -- ga -- beta diff --git a/tpgtools/api/orgpolicy/samples/project.policy.json b/tpgtools/api/orgpolicy/samples/project.policy.json deleted file mode 100755 index c09ff64a34ab..000000000000 --- a/tpgtools/api/orgpolicy/samples/project.policy.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "name": "projects/{{ref:basic.cloudresourcemanager.project.json:name}}/policies/gcp.resourceLocations", - "parent": "projects/{{ref:basic.cloudresourcemanager.project.json:name}}", - "spec": { - "rules": [ - { - "condition": { - "title": "sample-condition", - "description": "A sample condition for the policy", - "expression": "resource.matchLabels('labelKeys/123', 'labelValues/345')", - "location": "sample-location.log" - }, - "values": { - "allowedValues": ["projects/allowed-project"], - "deniedValues": ["projects/denied-project"] - } - }, - { - "allowAll": true - } - ] - } -} diff --git a/tpgtools/api/orgpolicy/samples/project_policy.yaml b/tpgtools/api/orgpolicy/samples/project_policy.yaml deleted file mode 100755 index 07de469a48ee..000000000000 --- a/tpgtools/api/orgpolicy/samples/project_policy.yaml +++ /dev/null @@ -1,31 +0,0 @@ -# Copyright 2021 Google LLC. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -dependencies: -- samples/basic.cloudresourcemanager.project.json -description: A test of an orgpolicy policy for a project -name: project_policy -resource: samples/project.policy.json -type: policy -updates: -- dependencies: - - samples/basic.cloudresourcemanager.project.json - resource: samples/update_project.policy.json -variables: -- name: id - type: resource_name -- name: org_id - type: org_id -versions: -- ga -- beta diff --git a/tpgtools/api/orgpolicy/samples/update_enforce.policy.json b/tpgtools/api/orgpolicy/samples/update_enforce.policy.json deleted file mode 100755 index 93c20359349a..000000000000 --- a/tpgtools/api/orgpolicy/samples/update_enforce.policy.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "name": "projects/{{ref:basic.cloudresourcemanager.project.json:name}}/policies/iam.disableServiceAccountKeyUpload", - "parent": "projects/{{ref:basic.cloudresourcemanager.project.json:name}}", - "spec": { - "rules": [ - { - "enforce": true - } - ] - } -} diff --git a/tpgtools/api/orgpolicy/samples/update_folder.policy.json b/tpgtools/api/orgpolicy/samples/update_folder.policy.json deleted file mode 100755 index 5fa14597ba24..000000000000 --- a/tpgtools/api/orgpolicy/samples/update_folder.policy.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "name": "{{ref:basic.cloudresourcemanager.folder.json:name}}/policies/gcp.resourceLocations", - "parent": "{{ref:basic.cloudresourcemanager.folder.json:name}}", - "spec": { - "rules": [ - 
{ - "condition": { - "title": "sample-condition", - "description": "A sample condition for the policy", - "expression": "resource.matchLabels('labelKeys/123', 'labelValues/345')" - }, - "values": { - "allowedValues": ["projects/allowed-project"], - "deniedValues": ["projects/denied-project"] - } - }, - { - "allowAll": true - } - ], - "inheritFromParent": false - } -} diff --git a/tpgtools/api/orgpolicy/samples/update_organization.policy.json b/tpgtools/api/orgpolicy/samples/update_organization.policy.json deleted file mode 100755 index e3e9b3d83f01..000000000000 --- a/tpgtools/api/orgpolicy/samples/update_organization.policy.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "name": "organizations/{{org_id}}/policies/gcp.detailedAuditLoggingMode", - "parent": "organizations/{{org_id}}", - "spec": { - "rules": [ - { - "enforce": true - } - ], - "reset": false - } -} diff --git a/tpgtools/api/orgpolicy/samples/update_organization_dry_run.policy.json b/tpgtools/api/orgpolicy/samples/update_organization_dry_run.policy.json deleted file mode 100644 index fa4c032116be..000000000000 --- a/tpgtools/api/orgpolicy/samples/update_organization_dry_run.policy.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "name": "organizations/{{org_id}}/policies/gcp.resourceLocations", - "parent": "organizations/{{org_id}}", - "dryRunSpec": { - "rules": [ - { - "allowAll": true, - "enforce": true - } - ], - "reset": false, - "inheritFromParent": true - } -} \ No newline at end of file diff --git a/tpgtools/api/orgpolicy/samples/update_project.policy.json b/tpgtools/api/orgpolicy/samples/update_project.policy.json deleted file mode 100755 index 3e64a93e30ac..000000000000 --- a/tpgtools/api/orgpolicy/samples/update_project.policy.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "name": "projects/{{ref:basic.cloudresourcemanager.project.json:name}}/policies/gcp.resourceLocations", - "parent": "projects/{{ref:basic.cloudresourcemanager.project.json:name}}", - "spec": { - "rules": [ - { - "condition": { - "title": 
"new-sample-condition", - "description": "A new sample condition for the policy", - "expression": "false", - "location": "new-sample-location.log" - }, - "values": { - "allowedValues": ["projects/new-allowed-project"], - "deniedValues": ["projects/new-denied-project"] - } - }, - { - "denyAll": true - } - ] - } -} diff --git a/tpgtools/overrides/orgpolicy/beta/policy.yaml b/tpgtools/overrides/orgpolicy/beta/policy.yaml deleted file mode 100644 index 08ff9e6b371f..000000000000 --- a/tpgtools/overrides/orgpolicy/beta/policy.yaml +++ /dev/null @@ -1,15 +0,0 @@ -- type: CUSTOM_IMPORT_FUNCTION - details: - function: tpgdclresource.ResourceOrgPolicyPolicyCustomImport -- type: ENUM_BOOL - field: spec.rules.allow_all -- type: ENUM_BOOL - field: spec.rules.deny_all -- type: ENUM_BOOL - field: spec.rules.enforce -- type: ENUM_BOOL - field: dry_run_spec.rules.allow_all -- type: ENUM_BOOL - field: dry_run_spec.rules.deny_all -- type: ENUM_BOOL - field: dry_run_spec.rules.enforce \ No newline at end of file diff --git a/tpgtools/overrides/orgpolicy/beta/tpgtools_product.yaml b/tpgtools/overrides/orgpolicy/beta/tpgtools_product.yaml deleted file mode 100644 index 21c439217d43..000000000000 --- a/tpgtools/overrides/orgpolicy/beta/tpgtools_product.yaml +++ /dev/null @@ -1,5 +0,0 @@ -## product level overrides - -- type: PRODUCT_BASE_PATH - details: - skip: true diff --git a/tpgtools/overrides/orgpolicy/policy.yaml b/tpgtools/overrides/orgpolicy/policy.yaml deleted file mode 100644 index 08ff9e6b371f..000000000000 --- a/tpgtools/overrides/orgpolicy/policy.yaml +++ /dev/null @@ -1,15 +0,0 @@ -- type: CUSTOM_IMPORT_FUNCTION - details: - function: tpgdclresource.ResourceOrgPolicyPolicyCustomImport -- type: ENUM_BOOL - field: spec.rules.allow_all -- type: ENUM_BOOL - field: spec.rules.deny_all -- type: ENUM_BOOL - field: spec.rules.enforce -- type: ENUM_BOOL - field: dry_run_spec.rules.allow_all -- type: ENUM_BOOL - field: dry_run_spec.rules.deny_all -- type: ENUM_BOOL - field: 
dry_run_spec.rules.enforce \ No newline at end of file diff --git a/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.yaml b/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.yaml deleted file mode 100644 index 48afc1aedb93..000000000000 --- a/tpgtools/overrides/orgpolicy/samples/policy/dry_run_spec.yaml +++ /dev/null @@ -1,3 +0,0 @@ -variables: - - name: "org_id" - type: "org_id" \ No newline at end of file diff --git a/tpgtools/overrides/orgpolicy/samples/policy/meta.yaml b/tpgtools/overrides/orgpolicy/samples/policy/meta.yaml deleted file mode 100644 index fe632768fe17..000000000000 --- a/tpgtools/overrides/orgpolicy/samples/policy/meta.yaml +++ /dev/null @@ -1,8 +0,0 @@ -ignore_read: - - name - - "spec.0.rules.0.condition.0.expression" -# The feature for this sample is not ready -test_hide: -- organization_dry_run_policy.yaml -doc_hide: -- organization_dry_run_policy.yaml From 621ab00228e2402416bf75276bca5fb6080cdadb Mon Sep 17 00:00:00 2001 From: Sam Levenick Date: Tue, 2 Jul 2024 16:54:11 -0400 Subject: [PATCH 268/356] Update membership.go (#11099) --- .ci/magician/github/membership.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.ci/magician/github/membership.go b/.ci/magician/github/membership.go index d320a8c6a1ca..c38a935c1360 100644 --- a/.ci/magician/github/membership.go +++ b/.ci/magician/github/membership.go @@ -62,11 +62,6 @@ var ( startDate: newDate(2024, 4, 11, pdtLoc), endDate: newDate(2024, 6, 14, pdtLoc), }, - { - id: "slevenick", - startDate: newDate(2024, 4, 20, pdtLoc), - endDate: newDate(2024, 4, 27, pdtLoc), - }, { id: "ScottSuarez", startDate: newDate(2024, 4, 30, pdtLoc), @@ -87,6 +82,11 @@ var ( startDate: newDate(2024, 6, 26, pdtLoc), endDate: newDate(2024, 7, 22, pdtLoc), }, + { + id: "slevenick", + startDate: newDate(2024, 7, 5, pdtLoc), + endDate: newDate(2024, 7, 16, pdtLoc), + }, } ) From baa571435805ea0fa62fce56aad5230a8533d71f Mon Sep 17 00:00:00 2001 From: kkram01 Date: Wed, 3 Jul 
2024 02:48:16 +0530 Subject: [PATCH 269/356] add project_number field to featureview (#10857) --- .../FeatureOnlineStoreFeatureview.yaml | 16 ++ ..._feature_view_ignore_project_number.go.erb | 17 +++ ...linestore_featureview_cross_project.tf.erb | 141 ++++++++++++++++++ 3 files changed, 174 insertions(+) create mode 100644 mmv1/templates/terraform/custom_flatten/vertex_ai_feature_view_ignore_project_number.go.erb create mode 100644 mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_featureview_cross_project.tf.erb diff --git a/mmv1/products/vertexai/FeatureOnlineStoreFeatureview.yaml b/mmv1/products/vertexai/FeatureOnlineStoreFeatureview.yaml index ef350a823964..cd9503c51069 100644 --- a/mmv1/products/vertexai/FeatureOnlineStoreFeatureview.yaml +++ b/mmv1/products/vertexai/FeatureOnlineStoreFeatureview.yaml @@ -60,6 +60,17 @@ examples: primary_resource_id: 'featureview_featureregistry' vars: name: 'example_feature_view_feature_registry' + - !ruby/object:Provider::Terraform::Examples + name: 'vertex_ai_featureonlinestore_featureview_cross_project' + external_providers: ["time"] + primary_resource_id: 'cross_project_featureview' + ignore_read_extra: + - feature_registry_source.0.project_number + test_env_vars: + org_id: :ORG_ID + billing_account: :BILLING_ACCT + vars: + name: 'example_cross_project_featureview' - !ruby/object:Provider::Terraform::Examples name: 'vertex_ai_featureonlinestore_featureview_with_vector_search' primary_resource_id: 'featureview_vector_search' @@ -160,6 +171,11 @@ properties: description: | Identifiers of features under the feature group. item_type: Api::Type::String + - !ruby/object:Api::Type::String + name: 'projectNumber' + description: | + The project number of the parent project of the feature Groups. 
+ custom_flatten: templates/terraform/custom_flatten/vertex_ai_feature_view_ignore_project_number.go.erb - !ruby/object:Api::Type::NestedObject name: 'vectorSearchConfig' description: | diff --git a/mmv1/templates/terraform/custom_flatten/vertex_ai_feature_view_ignore_project_number.go.erb b/mmv1/templates/terraform/custom_flatten/vertex_ai_feature_view_ignore_project_number.go.erb new file mode 100644 index 000000000000..f932939c0ab2 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/vertex_ai_feature_view_ignore_project_number.go.erb @@ -0,0 +1,17 @@ +<%# The license inside this block applies to this file. + # Copyright 2023 Google Inc. + # Licensed under the Apache License, Version 2.0 (the "License"); + # you may not use this file except in compliance with the License. + # You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+-%> +func flatten<%= prefix -%><%= titlelize_property(property) -%>(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("feature_registry_source.0.project_number") +} diff --git a/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_featureview_cross_project.tf.erb b/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_featureview_cross_project.tf.erb new file mode 100644 index 000000000000..cd3b9f230e57 --- /dev/null +++ b/mmv1/templates/terraform/examples/vertex_ai_featureonlinestore_featureview_cross_project.tf.erb @@ -0,0 +1,141 @@ +data "google_project" "test_project" { +} + +resource "google_project" "project" { + project_id = "tf-test%{random_suffix}" + name = "tf-test%{random_suffix}" + org_id = "<%= ctx[:test_env_vars]['org_id'] %>" + billing_account = "<%= ctx[:test_env_vars]['billing_account'] %>" +} + +resource "time_sleep" "wait_60_seconds" { + depends_on = [google_project.project] + + create_duration = "60s" +} + +resource "time_sleep" "wait_30_seconds" { + depends_on = [google_bigquery_dataset_iam_member.viewer] + + create_duration = "30s" +} + +resource "google_project_service" "vertexai" { + service = "aiplatform.googleapis.com" + project = google_project.project.project_id + timeouts { + create = "30m" + update = "40m" + } + disable_on_destroy = false + # Needed for CI tests for permissions to propagate, should not be needed for actual usage + depends_on = [time_sleep.wait_60_seconds] +} + +resource "google_bigquery_dataset_iam_member" "viewer" { + project = data.google_project.test_project.project_id + dataset_id = google_bigquery_dataset.sample_dataset.dataset_id + role = "roles/bigquery.dataViewer" + member = "serviceAccount:service-${google_project.project.number}@gcp-sa-aiplatform.iam.gserviceaccount.com" + depends_on = [google_vertex_ai_feature_online_store.featureonlinestore] +} + +resource "google_vertex_ai_feature_online_store" "featureonlinestore" { + name = "<%= 
ctx[:vars]['name'] %>" + project = google_project.project.project_id + labels = { + foo = "bar" + } + region = "us-central1" + bigtable { + auto_scaling { + min_node_count = 1 + max_node_count = 2 + cpu_utilization_target = 80 + } + } + depends_on = [google_project_service.vertexai] +} + +resource "google_bigquery_dataset" "sample_dataset" { + dataset_id = "<%= ctx[:vars]['name'] %>" + friendly_name = "test" + description = "This is a test description" + location = "US" +} + +resource "google_bigquery_table" "sample_table" { + deletion_protection = false + dataset_id = google_bigquery_dataset.sample_dataset.dataset_id + table_id = "<%= ctx[:vars]['name'] %>" + + schema = <", + "type": "STRING", + "mode": "NULLABLE" + }, + { + "name": "feature_timestamp", + "type": "TIMESTAMP", + "mode": "NULLABLE" + } +] +EOF +} + +resource "google_vertex_ai_feature_group" "sample_feature_group" { + name = "<%= ctx[:vars]['name'] %>" + description = "A sample feature group" + region = "us-central1" + labels = { + label-one = "value-one" + } + big_query { + big_query_source { + # The source table must have a column named 'feature_timestamp' of type TIMESTAMP. 
+ input_uri = "bq://${google_bigquery_table.sample_table.project}.${google_bigquery_table.sample_table.dataset_id}.${google_bigquery_table.sample_table.table_id}" + } + entity_id_columns = ["feature_id"] + } +} + + + +resource "google_vertex_ai_feature_group_feature" "sample_feature" { + name = "<%= ctx[:vars]['name'] %>" + region = "us-central1" + feature_group = google_vertex_ai_feature_group.sample_feature_group.name + description = "A sample feature" + labels = { + label-one = "value-one" + } +} + + +resource "google_vertex_ai_feature_online_store_featureview" "<%= ctx[:primary_resource_id] %>" { + name = "<%= ctx[:vars]['name'] %>" + project = google_project.project.project_id + region = "us-central1" + feature_online_store = google_vertex_ai_feature_online_store.featureonlinestore.name + sync_config { + cron = "0 0 * * *" + } + feature_registry_source { + + feature_groups { + feature_group_id = google_vertex_ai_feature_group.sample_feature_group.name + feature_ids = [google_vertex_ai_feature_group_feature.sample_feature.name] +} + project_number = data.google_project.test_project.number + + } + depends_on = [google_project_service.vertexai, time_sleep.wait_30_seconds] +} + From d102e14626fdbc2e8bda31cb60a36c4681bdff9c Mon Sep 17 00:00:00 2001 From: Maurice Wittek Date: Wed, 3 Jul 2024 17:59:08 +0200 Subject: [PATCH 270/356] Move Globaladdress.Labels to GA (#11093) --- mmv1/products/compute/GlobalAddress.yaml | 1 - mmv1/products/compute/go_GlobalAddress.yaml | 1 - 2 files changed, 2 deletions(-) diff --git a/mmv1/products/compute/GlobalAddress.yaml b/mmv1/products/compute/GlobalAddress.yaml index de7ef1a6bd01..c268004792ac 100644 --- a/mmv1/products/compute/GlobalAddress.yaml +++ b/mmv1/products/compute/GlobalAddress.yaml @@ -93,7 +93,6 @@ properties: Labels to apply to this address. A list of key->value pairs. 
update_verb: :POST update_url: 'projects/{{project}}/global/addresses/{{name}}/setLabels' - min_version: beta - !ruby/object:Api::Type::Fingerprint name: 'labelFingerprint' description: | diff --git a/mmv1/products/compute/go_GlobalAddress.yaml b/mmv1/products/compute/go_GlobalAddress.yaml index a53c5b276399..7e56e69dc9e4 100644 --- a/mmv1/products/compute/go_GlobalAddress.yaml +++ b/mmv1/products/compute/go_GlobalAddress.yaml @@ -91,7 +91,6 @@ properties: type: KeyValueLabels description: | Labels to apply to this address. A list of key->value pairs. - min_version: 'beta' immutable: false update_url: 'projects/{{project}}/global/addresses/{{name}}/setLabels' update_verb: 'POST' From 29605d39508a21e65d3f9a97d8ccb373c01e8c04 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Wed, 3 Jul 2024 09:20:05 -0700 Subject: [PATCH 271/356] Fix compute images in tests (#11095) --- .../examples/apphub_workload_basic.tf.erb | 2 +- .../examples/apphub_workload_full.tf.erb | 2 +- ...rnal_http_lb_mig_backend_custom_header.tf.erb | 2 +- .../external_ssl_proxy_lb_mig_backend.tf.erb | 2 +- .../external_tcp_proxy_lb_mig_backend.tf.erb | 2 +- ...obal_internal_http_lb_with_mig_backend.tf.erb | 4 ++-- .../examples/go/apphub_workload_basic.tf.tmpl | 2 +- .../examples/go/apphub_workload_full.tf.tmpl | 2 +- ...nal_http_lb_mig_backend_custom_header.tf.tmpl | 2 +- .../go/external_ssl_proxy_lb_mig_backend.tf.tmpl | 2 +- .../go/external_tcp_proxy_lb_mig_backend.tf.tmpl | 2 +- ...bal_internal_http_lb_with_mig_backend.tf.tmpl | 4 ++-- .../go/int_https_lb_https_redirect.tf.tmpl | 4 ++-- .../go/internal_http_lb_with_mig_backend.tf.tmpl | 4 ++-- .../internal_tcp_udp_lb_with_mig_backend.tf.tmpl | 4 ++-- ...ork_services_lb_route_extension_basic.tf.tmpl | 2 +- ...k_services_lb_traffic_extension_basic.tf.tmpl | 2 +- .../regional_external_http_load_balancer.tf.tmpl | 2 +- .../go/target_instance_custom_network.tf.tmpl | 2 +- .../examples/int_https_lb_https_redirect.tf.erb | 4 ++-- 
.../internal_http_lb_with_mig_backend.tf.erb | 4 ++-- .../internal_tcp_udp_lb_with_mig_backend.tf.erb | 4 ++-- ...work_services_lb_route_extension_basic.tf.erb | 2 +- ...rk_services_lb_traffic_extension_basic.tf.erb | 2 +- .../regional_external_http_load_balancer.tf.erb | 2 +- .../target_instance_custom_network.tf.erb | 2 +- ...ata_source_apphub_discovered_workload_test.go | 2 +- .../apphub/resource_apphub_workload_test.go | 2 +- ...resource_compute_forwarding_rule_test.go.tmpl | 4 ++-- ...pute_instance_from_machine_image_test.go.tmpl | 14 +++++++------- ...source_compute_instance_template_test.go.tmpl | 16 ++++++++-------- .../go/resource_compute_instance_test.go.tmpl | 16 ++++++++-------- ...compute_region_instance_template_test.go.tmpl | 16 ++++++++-------- .../resource_compute_forwarding_rule_test.go.erb | 4 ++-- ...mpute_instance_from_machine_image_test.go.erb | 14 +++++++------- ...esource_compute_instance_template_test.go.erb | 16 ++++++++-------- .../resource_compute_instance_test.go.erb | 16 ++++++++-------- ..._compute_region_instance_template_test.go.erb | 16 ++++++++-------- .../compute/resource_compute_snapshot_test.go | 2 +- ...e_network_services_lb_route_extension_test.go | 4 ++-- ...network_services_lb_traffic_extension_test.go | 4 ++-- 41 files changed, 107 insertions(+), 107 deletions(-) diff --git a/mmv1/templates/terraform/examples/apphub_workload_basic.tf.erb b/mmv1/templates/terraform/examples/apphub_workload_basic.tf.erb index 0c9a57b3ce94..0a18d7b2798f 100644 --- a/mmv1/templates/terraform/examples/apphub_workload_basic.tf.erb +++ b/mmv1/templates/terraform/examples/apphub_workload_basic.tf.erb @@ -84,7 +84,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb b/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb 
index a81dd6326175..1f33aa4b7f6a 100644 --- a/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb +++ b/mmv1/templates/terraform/examples/apphub_workload_full.tf.erb @@ -105,7 +105,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/external_http_lb_mig_backend_custom_header.tf.erb b/mmv1/templates/terraform/examples/external_http_lb_mig_backend_custom_header.tf.erb index 64ef8ef62a79..3cad95b5c4d5 100644 --- a/mmv1/templates/terraform/examples/external_http_lb_mig_backend_custom_header.tf.erb +++ b/mmv1/templates/terraform/examples/external_http_lb_mig_backend_custom_header.tf.erb @@ -82,7 +82,7 @@ resource "google_compute_instance_template" "default" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/external_ssl_proxy_lb_mig_backend.tf.erb b/mmv1/templates/terraform/examples/external_ssl_proxy_lb_mig_backend.tf.erb index 56a6d98288a8..7c1c975e8038 100644 --- a/mmv1/templates/terraform/examples/external_ssl_proxy_lb_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/external_ssl_proxy_lb_mig_backend.tf.erb @@ -120,7 +120,7 @@ resource "google_compute_instance_template" "default" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/external_tcp_proxy_lb_mig_backend.tf.erb b/mmv1/templates/terraform/examples/external_tcp_proxy_lb_mig_backend.tf.erb index 98b971e9fd46..519a8e587633 100644 --- a/mmv1/templates/terraform/examples/external_tcp_proxy_lb_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/external_tcp_proxy_lb_mig_backend.tf.erb @@ -82,7 +82,7 @@ resource "google_compute_instance_template" 
"default" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/global_internal_http_lb_with_mig_backend.tf.erb b/mmv1/templates/terraform/examples/global_internal_http_lb_with_mig_backend.tf.erb index 52c06538d796..021d98ee7b97 100644 --- a/mmv1/templates/terraform/examples/global_internal_http_lb_with_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/global_internal_http_lb_with_mig_backend.tf.erb @@ -84,7 +84,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -177,7 +177,7 @@ resource "google_compute_instance" "vm-test" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/templates/terraform/examples/go/apphub_workload_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/apphub_workload_basic.tf.tmpl index 4f1d7067e14b..9006e8db8ea4 100644 --- a/mmv1/templates/terraform/examples/go/apphub_workload_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apphub_workload_basic.tf.tmpl @@ -84,7 +84,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl b/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl index 4a6d65efbcab..9d179903e1a8 100644 --- a/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apphub_workload_full.tf.tmpl @@ -105,7 +105,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = 
true boot = true } diff --git a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl index 1f81adddbce2..488cb2603138 100644 --- a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl @@ -83,7 +83,7 @@ resource "google_compute_instance_template" "default" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl index 19cd2b27e99a..dada3d4e2bfb 100644 --- a/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl @@ -121,7 +121,7 @@ resource "google_compute_instance_template" "default" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl index 08408558c900..714938b64518 100644 --- a/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl @@ -83,7 +83,7 @@ resource "google_compute_instance_template" "default" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl 
index e02dd389ad34..cc6d212cfbbc 100644 --- a/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl @@ -85,7 +85,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -178,7 +178,7 @@ resource "google_compute_instance" "vm-test" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl b/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl index 325ae36b4a6e..f0e27c29df46 100644 --- a/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl @@ -138,7 +138,7 @@ resource "google_compute_instance_template" "default" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -231,7 +231,7 @@ resource "google_compute_instance" "default" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl index 17b0e5512661..17c4821eb155 100644 --- a/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl @@ -90,7 +90,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -184,7 +184,7 @@ resource 
"google_compute_instance" "vm-test" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl index b6fb1079cf3b..d28ddcf47664 100644 --- a/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl @@ -60,7 +60,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -173,7 +173,7 @@ resource "google_compute_instance" "vm_test" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl index b47eec932fa1..fbb8cf36001d 100644 --- a/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl @@ -101,7 +101,7 @@ resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl index abc31633df6b..43b8c0660272 100644 --- a/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl @@ -90,7 +90,7 @@ 
resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl b/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl index 7c1bd95b0e04..d4cb8da4e16a 100644 --- a/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl @@ -76,7 +76,7 @@ resource "google_compute_instance_template" "default" { boot = true device_name = "persistent-disk-0" mode = "READ_WRITE" - source_image = "projects/debian-cloud/global/images/family/debian-10" + source_image = "projects/debian-cloud/global/images/family/debian-12" type = "PERSISTENT" } labels = { diff --git a/mmv1/templates/terraform/examples/go/target_instance_custom_network.tf.tmpl b/mmv1/templates/terraform/examples/go/target_instance_custom_network.tf.tmpl index 51be27d66939..41d29bc7246a 100644 --- a/mmv1/templates/terraform/examples/go/target_instance_custom_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_instance_custom_network.tf.tmpl @@ -12,7 +12,7 @@ data "google_compute_network" "target-vm" { data "google_compute_image" "vmimage" { provider = google-beta - family = "debian-10" + family = "debian-12" project = "debian-cloud" } diff --git a/mmv1/templates/terraform/examples/int_https_lb_https_redirect.tf.erb b/mmv1/templates/terraform/examples/int_https_lb_https_redirect.tf.erb index ceb616c816b2..3e140403fc30 100644 --- a/mmv1/templates/terraform/examples/int_https_lb_https_redirect.tf.erb +++ b/mmv1/templates/terraform/examples/int_https_lb_https_redirect.tf.erb @@ -137,7 +137,7 @@ resource "google_compute_instance_template" "default" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete 
= true boot = true } @@ -230,7 +230,7 @@ resource "google_compute_instance" "default" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/templates/terraform/examples/internal_http_lb_with_mig_backend.tf.erb b/mmv1/templates/terraform/examples/internal_http_lb_with_mig_backend.tf.erb index 67090495776b..92c56b49f52f 100644 --- a/mmv1/templates/terraform/examples/internal_http_lb_with_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/internal_http_lb_with_mig_backend.tf.erb @@ -89,7 +89,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -183,7 +183,7 @@ resource "google_compute_instance" "vm-test" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/templates/terraform/examples/internal_tcp_udp_lb_with_mig_backend.tf.erb b/mmv1/templates/terraform/examples/internal_tcp_udp_lb_with_mig_backend.tf.erb index ef673e831077..191acdbb30f3 100644 --- a/mmv1/templates/terraform/examples/internal_tcp_udp_lb_with_mig_backend.tf.erb +++ b/mmv1/templates/terraform/examples/internal_tcp_udp_lb_with_mig_backend.tf.erb @@ -59,7 +59,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -172,7 +172,7 @@ resource "google_compute_instance" "vm_test" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb b/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb index 9eb60a35f993..c399190f5aca 100644 --- 
a/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb +++ b/mmv1/templates/terraform/examples/network_services_lb_route_extension_basic.tf.erb @@ -100,7 +100,7 @@ resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/network_services_lb_traffic_extension_basic.tf.erb b/mmv1/templates/terraform/examples/network_services_lb_traffic_extension_basic.tf.erb index 899eaab56e66..5c5a4e78fc55 100644 --- a/mmv1/templates/terraform/examples/network_services_lb_traffic_extension_basic.tf.erb +++ b/mmv1/templates/terraform/examples/network_services_lb_traffic_extension_basic.tf.erb @@ -90,7 +90,7 @@ resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/templates/terraform/examples/regional_external_http_load_balancer.tf.erb b/mmv1/templates/terraform/examples/regional_external_http_load_balancer.tf.erb index 59fb83b35491..e8b369119734 100644 --- a/mmv1/templates/terraform/examples/regional_external_http_load_balancer.tf.erb +++ b/mmv1/templates/terraform/examples/regional_external_http_load_balancer.tf.erb @@ -64,7 +64,7 @@ resource "google_compute_instance_template" "default" { boot = true device_name = "persistent-disk-0" mode = "READ_WRITE" - source_image = "projects/debian-cloud/global/images/family/debian-10" + source_image = "projects/debian-cloud/global/images/family/debian-12" type = "PERSISTENT" } labels = { diff --git a/mmv1/templates/terraform/examples/target_instance_custom_network.tf.erb b/mmv1/templates/terraform/examples/target_instance_custom_network.tf.erb index 5a2c74e1562a..f84dd6007ac6 100644 --- a/mmv1/templates/terraform/examples/target_instance_custom_network.tf.erb +++ 
b/mmv1/templates/terraform/examples/target_instance_custom_network.tf.erb @@ -12,7 +12,7 @@ data "google_compute_network" "target-vm" { data "google_compute_image" "vmimage" { provider = google-beta - family = "debian-10" + family = "debian-12" project = "debian-cloud" } diff --git a/mmv1/third_party/terraform/services/apphub/data_source_apphub_discovered_workload_test.go b/mmv1/third_party/terraform/services/apphub/data_source_apphub_discovered_workload_test.go index c305079251a0..58a56b1fb664 100644 --- a/mmv1/third_party/terraform/services/apphub/data_source_apphub_discovered_workload_test.go +++ b/mmv1/third_party/terraform/services/apphub/data_source_apphub_discovered_workload_test.go @@ -101,7 +101,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/third_party/terraform/services/apphub/resource_apphub_workload_test.go b/mmv1/third_party/terraform/services/apphub/resource_apphub_workload_test.go index 2897307d1a93..e5be61d33afe 100644 --- a/mmv1/third_party/terraform/services/apphub/resource_apphub_workload_test.go +++ b/mmv1/third_party/terraform/services/apphub/resource_apphub_workload_test.go @@ -136,7 +136,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_forwarding_rule_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_forwarding_rule_test.go.tmpl index de0f67775ddc..3c9f8f0598a5 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_forwarding_rule_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_forwarding_rule_test.go.tmpl @@ -447,7 +447,7 @@ resource 
"google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -560,7 +560,7 @@ resource "google_compute_instance" "vm_test" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl index 3e43a631427d..99107d8a9b65 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_from_machine_image_test.go.tmpl @@ -251,7 +251,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -306,7 +306,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -368,7 +368,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -422,7 +422,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -481,7 +481,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -552,7 +552,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -629,7 +629,7 @@ resource "google_compute_instance" "vm" { project = google_project.project.project_id boot_disk { initialize_params { - 
image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl index 9d4d7dfae60d..86941e061494 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl @@ -2606,8 +2606,8 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_with375GbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance_template" "foobar" { name = "tf-test-instance-template-%s" @@ -2641,8 +2641,8 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_with18TbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance_template" "foobar" { @@ -3297,8 +3297,8 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_shieldedVmConfig(suffix string, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance_template" "foobar" { @@ -3483,8 +3483,8 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_enableDisplay(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = 
"centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance_template" "foobar" { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl index 3e71b164f462..19d5ecc9b76a 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl @@ -6468,7 +6468,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_advancedMachineFeatures(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-10" + family = "debian-12" project = "debian-cloud" } @@ -6496,7 +6496,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_advancedMachineFeaturesUpdated(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-10" + family = "debian-12" project = "debian-cloud" } @@ -7619,8 +7619,8 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_shieldedVmConfig(instance string, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance" "foobar" { @@ -7920,8 +7920,8 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_enableDisplay(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance" "foobar" { @@ -7949,8 +7949,8 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_enableDisplayUpdated(instance 
string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance" "foobar" { diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl index ddbcb20d5f58..e7b80d0a346d 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl @@ -2114,8 +2114,8 @@ resource "google_compute_region_instance_template" "foobar" { func testAccComputeRegionInstanceTemplate_with375GbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_region_instance_template" "foobar" { name = "tf-test-instance-template-%s" @@ -2143,8 +2143,8 @@ resource "google_compute_region_instance_template" "foobar" { func testAccComputeRegionInstanceTemplate_with18TbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_region_instance_template" "foobar" { @@ -2823,8 +2823,8 @@ resource "google_compute_region_instance_template" "foobar" { func testAccComputeRegionInstanceTemplate_shieldedVmConfig(suffix string, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_region_instance_template" "foobar" { @@ 
-3015,8 +3015,8 @@ resource "google_compute_region_instance_template" "foobar" { func testAccComputeRegionInstanceTemplate_enableDisplay(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_region_instance_template" "foobar" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_forwarding_rule_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_forwarding_rule_test.go.erb index d39c8291820d..ce662cf78061 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_forwarding_rule_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_forwarding_rule_test.go.erb @@ -448,7 +448,7 @@ resource "google_compute_instance_template" "instance_template" { } } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -561,7 +561,7 @@ resource "google_compute_instance" "vm_test" { } boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } } diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb index c918e8f9cabe..96ed96c31e34 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_from_machine_image_test.go.erb @@ -252,7 +252,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -307,7 +307,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = 
"debian-cloud/debian-12" } } @@ -369,7 +369,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -423,7 +423,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -482,7 +482,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -553,7 +553,7 @@ resource "google_compute_instance" "vm" { boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } @@ -630,7 +630,7 @@ resource "google_compute_instance" "vm" { project = google_project.project.project_id boot_disk { initialize_params { - image = "debian-cloud/debian-10" + image = "debian-cloud/debian-12" } } diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb index f66aee66a6a9..12a109e91711 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_template_test.go.erb @@ -2603,8 +2603,8 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_with375GbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance_template" "foobar" { name = "tf-test-instance-template-%s" @@ -2638,8 +2638,8 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_with18TbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" 
- project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance_template" "foobar" { @@ -3294,8 +3294,8 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_shieldedVmConfig(suffix string, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance_template" "foobar" { @@ -3480,8 +3480,8 @@ resource "google_compute_instance_template" "foobar" { func testAccComputeInstanceTemplate_enableDisplay(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance_template" "foobar" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb index 2f8eadb0c39d..b689d4da52e6 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_instance_test.go.erb @@ -6465,7 +6465,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_advancedMachineFeatures(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-10" + family = "debian-12" project = "debian-cloud" } @@ -6493,7 +6493,7 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_advancedMachineFeaturesUpdated(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-10" + family = "debian-12" project = "debian-cloud" } @@ -7616,8 +7616,8 @@ resource "google_compute_instance" "foobar" { func 
testAccComputeInstance_shieldedVmConfig(instance string, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance" "foobar" { @@ -7917,8 +7917,8 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_enableDisplay(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance" "foobar" { @@ -7946,8 +7946,8 @@ resource "google_compute_instance" "foobar" { func testAccComputeInstance_enableDisplayUpdated(instance string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_instance" "foobar" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb index 4d79d38f47db..580fc41abe1c 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_region_instance_template_test.go.erb @@ -2111,8 +2111,8 @@ resource "google_compute_region_instance_template" "foobar" { func testAccComputeRegionInstanceTemplate_with375GbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_region_instance_template" "foobar" { name = "tf-test-instance-template-%s" @@ -2140,8 +2140,8 @@ resource 
"google_compute_region_instance_template" "foobar" { func testAccComputeRegionInstanceTemplate_with18TbScratchDisk(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family ="debian-12" + project = "debian-cloud" } resource "google_compute_region_instance_template" "foobar" { @@ -2820,8 +2820,8 @@ resource "google_compute_region_instance_template" "foobar" { func testAccComputeRegionInstanceTemplate_shieldedVmConfig(suffix string, enableSecureBoot bool, enableVtpm bool, enableIntegrityMonitoring bool) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_region_instance_template" "foobar" { @@ -3012,8 +3012,8 @@ resource "google_compute_region_instance_template" "foobar" { func testAccComputeRegionInstanceTemplate_enableDisplay(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "centos-7" - project = "centos-cloud" + family = "debian-12" + project = "debian-cloud" } resource "google_compute_region_instance_template" "foobar" { diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_snapshot_test.go b/mmv1/third_party/terraform/services/compute/resource_compute_snapshot_test.go index 76bed0cfac43..4d7f0dfd13d7 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_snapshot_test.go +++ b/mmv1/third_party/terraform/services/compute/resource_compute_snapshot_test.go @@ -94,7 +94,7 @@ resource "google_compute_snapshot" "foobar" { func testAccComputeSnapshot_encryptionCMEK(snapshotName string, diskName string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { - family = "debian-10" + family = "debian-12" project = "debian-cloud" } diff --git 
a/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_route_extension_test.go b/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_route_extension_test.go index 615125529460..5d9ad8a5dee0 100644 --- a/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_route_extension_test.go +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_route_extension_test.go @@ -145,7 +145,7 @@ resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -504,7 +504,7 @@ resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } diff --git a/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_traffic_extension_test.go b/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_traffic_extension_test.go index 23be8baec0e1..e4706174725f 100644 --- a/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_traffic_extension_test.go +++ b/mmv1/third_party/terraform/services/networkservices/resource_network_services_lb_traffic_extension_test.go @@ -135,7 +135,7 @@ resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } @@ -451,7 +451,7 @@ resource "google_compute_instance_template" "instance_template" { } disk { - source_image = "debian-cloud/debian-10" + source_image = "debian-cloud/debian-12" auto_delete = true boot = true } From 05a0cb385e37a9f76a8f09fbffa3b45ce60e4327 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Wed, 3 Jul 2024 12:05:34 -0700 Subject: [PATCH 272/356] fixed permadiffs on 
`environment_variables` in cloudfunctions2 function (#11092) --- mmv1/products/cloudfunctions2/Function.yaml | 2 ++ .../constants/cloudfunctions2_function.go.erb | 14 ++++++++++++++ 2 files changed, 16 insertions(+) create mode 100644 mmv1/templates/terraform/constants/cloudfunctions2_function.go.erb diff --git a/mmv1/products/cloudfunctions2/Function.yaml b/mmv1/products/cloudfunctions2/Function.yaml index 1f2fe20fb289..ecfcac7a7e6b 100644 --- a/mmv1/products/cloudfunctions2/Function.yaml +++ b/mmv1/products/cloudfunctions2/Function.yaml @@ -52,6 +52,7 @@ import_format: taint_resource_on_failed_create: true autogen_async: true custom_code: !ruby/object:Provider::Terraform::CustomCode + constants: 'templates/terraform/constants/cloudfunctions2_function.go.erb' encoder: 'templates/terraform/encoders/cloudfunctions2_runtime_update_policy.go.erb' examples: - !ruby/object:Provider::Terraform::Examples @@ -559,6 +560,7 @@ properties: description: 'Environment variables that shall be available during function execution.' + diff_suppress_func: 'environmentVariablesDiffSuppress' - !ruby/object:Api::Type::Integer name: 'maxInstanceCount' description: | diff --git a/mmv1/templates/terraform/constants/cloudfunctions2_function.go.erb b/mmv1/templates/terraform/constants/cloudfunctions2_function.go.erb new file mode 100644 index 000000000000..c447509eff60 --- /dev/null +++ b/mmv1/templates/terraform/constants/cloudfunctions2_function.go.erb @@ -0,0 +1,14 @@ +// Suppress diffs for the system environment variables +func environmentVariablesDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if k == "service_config.0.environment_variables.LOG_EXECUTION_ID" && new == "" { + return true + } + + // Let diff be determined by environment_variables (above) + if strings.HasPrefix(k, "service_config.0.environment_variables.%") { + return true + } + + // For other keys, don't suppress diff. 
+ return false +} From 88197fb2b61fd0d58050d8d071594e5b5750a0c2 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Wed, 3 Jul 2024 13:46:53 -0700 Subject: [PATCH 273/356] Revert "Fix `content` in `google_storage_bucket_object_content` datasource" (#11106) --- ...ta_source_storage_bucket_object_content.go | 1 + ...urce_storage_bucket_object_content_test.go | 27 ------------------- ...torage_bucket_object_content.html.markdown | 2 +- 3 files changed, 2 insertions(+), 28 deletions(-) diff --git a/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content.go b/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content.go index fb1e36827c4d..af56c3c89295 100644 --- a/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content.go +++ b/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content.go @@ -18,6 +18,7 @@ func DataSourceGoogleStorageBucketObjectContent() *schema.Resource { tpgresource.AddRequiredFieldsToSchema(dsSchema, "bucket") tpgresource.AddRequiredFieldsToSchema(dsSchema, "name") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "content") return &schema.Resource{ Read: dataSourceGoogleStorageBucketObjectContentRead, diff --git a/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content_test.go b/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content_test.go index e6e6e4fa3301..5b40c4ee1089 100644 --- a/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content_test.go +++ b/mmv1/third_party/terraform/services/storage/data_source_storage_bucket_object_content_test.go @@ -47,30 +47,3 @@ resource "google_storage_bucket" "contenttest" { force_destroy = true }`, content, bucket) } - -func TestAccDataSourceStorageBucketObjectContent_Issue15717(t *testing.T) { - - bucket := "tf-bucket-object-content-" + acctest.RandString(t, 10) - content := "qwertyuioasdfghjk1234567!!@#$*" - 
- config := fmt.Sprintf(` -%s - -output "output" { - value = replace(data.google_storage_bucket_object_content.default.content, "q", "Q") -}`, testAccDataSourceStorageBucketObjectContent_Basic(content, bucket)) - - acctest.VcrTest(t, resource.TestCase{ - PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), - Steps: []resource.TestStep{ - { - Config: config, - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttrSet("data.google_storage_bucket_object_content.default", "content"), - resource.TestCheckResourceAttr("data.google_storage_bucket_object_content.default", "content", content), - ), - }, - }, - }) -} diff --git a/mmv1/third_party/terraform/website/docs/d/storage_bucket_object_content.html.markdown b/mmv1/third_party/terraform/website/docs/d/storage_bucket_object_content.html.markdown index 203c6d42fdd6..551f22540bbd 100644 --- a/mmv1/third_party/terraform/website/docs/d/storage_bucket_object_content.html.markdown +++ b/mmv1/third_party/terraform/website/docs/d/storage_bucket_object_content.html.markdown @@ -41,4 +41,4 @@ The following arguments are supported: The following attributes are exported: -* `content` - (Computed) The content of the object. +* `content` - (Computed) [Content-Language](https://tools.ietf.org/html/rfc7231#section-3.1.3.2) of the object content. 
From 691695bf2327180e127ce4e19ff93e1db0558987 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fatih=20T=C3=BCrken?= <103541666+turkenf@users.noreply.github.com> Date: Wed, 3 Jul 2024 23:58:16 +0300 Subject: [PATCH 274/356] Add missing quota in bigtable_authorized_view doc example (#11103) --- .../website/docs/r/bigtable_authorized_view.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/third_party/terraform/website/docs/r/bigtable_authorized_view.html.markdown b/mmv1/third_party/terraform/website/docs/r/bigtable_authorized_view.html.markdown index b69ad6f4b349..a08d26f7039d 100644 --- a/mmv1/third_party/terraform/website/docs/r/bigtable_authorized_view.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/bigtable_authorized_view.html.markdown @@ -64,7 +64,7 @@ resource "google_bigtable_authorized_view" "authorized_view" { } subset_view { - row_prefixes = [base64encode("prefix#)] + row_prefixes = [base64encode("prefix#")] family_subsets { family_name = "family-first" From aa0fe465572e7407d3352a34841f346916de786f Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Wed, 3 Jul 2024 16:20:03 -0700 Subject: [PATCH 275/356] Fixed the permadiffs in purpose of networksecurity address (#11105) --- mmv1/products/networksecurity/AddressGroup.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/mmv1/products/networksecurity/AddressGroup.yaml b/mmv1/products/networksecurity/AddressGroup.yaml index ab9f6fc7f42c..20e7bd5f3f0b 100644 --- a/mmv1/products/networksecurity/AddressGroup.yaml +++ b/mmv1/products/networksecurity/AddressGroup.yaml @@ -146,6 +146,7 @@ properties: name: "purpose" description: | List of supported purposes of the Address Group. 
+ default_from_api: true item_type: !ruby/object:Api::Type::Enum name: 'undefined' description: | From 0c19eb40341fb5ccbbb876c44315552444b66e8f Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Mon, 8 Jul 2024 07:37:30 -0700 Subject: [PATCH 276/356] Go rewrite apigateway product (#11102) --- mmv1/api/resource/iam_policy.rb | 2 +- .../apigateway/{ApiResource.yaml => Api.yaml} | 0 mmv1/products/apigateway/go_Api.yaml | 115 +++++++ mmv1/products/apigateway/go_ApiConfig.yaml | 289 ++++++++++++++++++ mmv1/products/apigateway/go_Gateway.yaml | 132 ++++++++ mmv1/products/apigateway/go_product.yaml | 34 +++ mmv1/template-converter.go | 1 + .../go/api_gateway_api_config.tf.tmpl | 1 + .../terraform/iam/go/iam_attributes.go.tmpl | 2 +- ...ttributes.tf.erb => iam_attributes.go.erb} | 0 .../terraform/resource_iam.html.markdown.tmpl | 3 +- mmv1/templates/terraform/yaml_conversion.erb | 3 + .../go/compute_instance_helpers.go.tmpl | 28 +- .../go/resource_compute_instance.go.tmpl | 10 +- ...resource_compute_instance_template.go.tmpl | 6 +- ...rce_compute_instance_template_test.go.tmpl | 8 - .../go/resource_compute_instance_test.go.tmpl | 8 - ...e_compute_region_instance_template.go.tmpl | 2 +- ...pute_region_instance_template_test.go.tmpl | 9 - ...rkstations_workstation_config_test.go.tmpl | 4 +- 20 files changed, 597 insertions(+), 60 deletions(-) rename mmv1/products/apigateway/{ApiResource.yaml => Api.yaml} (100%) create mode 100644 mmv1/products/apigateway/go_Api.yaml create mode 100644 mmv1/products/apigateway/go_ApiConfig.yaml create mode 100644 mmv1/products/apigateway/go_Gateway.yaml create mode 100644 mmv1/products/apigateway/go_product.yaml rename mmv1/templates/terraform/iam/{iam_attributes.tf.erb => iam_attributes.go.erb} (100%) diff --git a/mmv1/api/resource/iam_policy.rb b/mmv1/api/resource/iam_policy.rb index 2acb14185d0d..951bd123da00 100644 --- a/mmv1/api/resource/iam_policy.rb +++ b/mmv1/api/resource/iam_policy.rb @@ -138,7 +138,7 @@ def validate check 
:import_format, type: Array, item_type: String check( :example_config_body, - type: String, default: 'templates/terraform/iam/iam_attributes.tf.erb' + type: String, default: 'templates/terraform/iam/iam_attributes.go.erb' ) check :iam_policy_version, type: String check :min_version, type: String diff --git a/mmv1/products/apigateway/ApiResource.yaml b/mmv1/products/apigateway/Api.yaml similarity index 100% rename from mmv1/products/apigateway/ApiResource.yaml rename to mmv1/products/apigateway/Api.yaml diff --git a/mmv1/products/apigateway/go_Api.yaml b/mmv1/products/apigateway/go_Api.yaml new file mode 100644 index 000000000000..97832cc6be8d --- /dev/null +++ b/mmv1/products/apigateway/go_Api.yaml @@ -0,0 +1,115 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Api' +description: | + A consumable API that can be used by multiple Gateways. 
+min_version: 'beta' +references: + guides: + 'Official Documentation': 'https://cloud.google.com/api-gateway/docs/quickstart' + api: 'https://cloud.google.com/api-gateway/docs/reference/rest/v1beta/projects.locations.apis' +docs: +base_url: 'projects/{{project}}/locations/global/apis' +self_link: 'projects/{{project}}/locations/global/apis/{{api_id}}' +create_url: 'projects/{{project}}/locations/global/apis?apiId={{api_id}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - apiId +iam_policy: + method_name_separator: ':' + allowed_iam_role: 'roles/apigateway.viewer' + parent_resource_attribute: 'api' + base_url: 'projects/{{project}}/locations/global/apis/{{api}}' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/global/apis/{{api}}' + - '{{project}}/{{api}}' + - '{{api}}' +custom_code: +examples: + - name: 'apigateway_api_basic' + primary_resource_id: 'api' + primary_resource_name: 'fmt.Sprintf("tf-test-my-api%s", context["random_suffix"])' + min_version: 'beta' + vars: + api_id: 'my-api' + - name: 'apigateway_api_full' + primary_resource_id: 'api' + primary_resource_name: 'fmt.Sprintf("tf-test-my-api%s", context["random_suffix"])' + min_version: 'beta' + vars: + api_id: 'my-api' + skip_docs: true +parameters: + - name: 'apiId' + type: String + description: | + Identifier to assign to the API. Must be unique within scope of the parent resource(project) + min_version: 'beta' + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The resource name of the API. 
Format `projects/{{project}}/locations/global/apis/{{apiId}}` + min_version: 'beta' + output: true + - name: 'displayName' + type: String + description: | + A user-visible name for the API. + min_version: 'beta' + default_from_api: true + - name: 'managedService' + type: String + description: | + Immutable. The name of a Google Managed Service ( https://cloud.google.com/service-infrastructure/docs/glossary#managed). + If not specified, a new Service will automatically be created in the same project as this API. + min_version: 'beta' + immutable: true + default_from_api: true + - name: 'createTime' + type: Time + description: Creation timestamp in RFC3339 text format. + min_version: 'beta' + output: true + - name: 'labels' + type: KeyValueLabels + description: | + Resource labels to represent user-provided metadata. + min_version: 'beta' + immutable: false diff --git a/mmv1/products/apigateway/go_ApiConfig.yaml b/mmv1/products/apigateway/go_ApiConfig.yaml new file mode 100644 index 000000000000..353500ec8445 --- /dev/null +++ b/mmv1/products/apigateway/go_ApiConfig.yaml @@ -0,0 +1,289 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ApiConfig' +description: | + An API Configuration is an association of an API Controller Config and a Gateway Config +min_version: 'beta' +references: + guides: + 'Official Documentation': 'https://cloud.google.com/api-gateway/docs/creating-api-config' + api: 'https://cloud.google.com/api-gateway/docs/reference/rest/v1beta/projects.locations.apis.configs' +docs: + optional_properties: | + * `api_config_id_prefix` - (Optional) Creates a unique name beginning with the + specified prefix. If this and api_config_id are unspecified, a random value is chosen for the name. +base_url: 'projects/{{project}}/locations/global/apis/{{api}}/configs' +self_link: 'projects/{{project}}/locations/global/apis/{{api}}/configs/{{api_config_id}}' +create_url: 'projects/{{project}}/locations/global/apis/{{api}}/configs?apiConfigId={{api_config_id}}' +update_verb: 'PATCH' +update_mask: true + +read_query_params: '?view=FULL' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - apiConfigId +iam_policy: + skip_import_test: true + method_name_separator: ':' + allowed_iam_role: 'roles/apigateway.viewer' + parent_resource_attribute: 'api_config' + base_url: 'projects/{{project}}/locations/global/apis/{{api}}/configs/{{api_config}}' + example_config_body: 'templates/terraform/iam/example_config_body/go/api_gateway_api_config.tf.tmpl' + import_format: + - 'projects/{{project}}/locations/global/apis/{{api}}/configs/{{api_config}}' + - '{{project}}/{{api}}/{{api_config}}' + - '{{api}}/{{api_config}}' + - '{{api_config}}' +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/api_config.tmpl' 
+ encoder: 'templates/terraform/encoders/go/api_config.go.tmpl' +examples: + - name: 'apigateway_api_config_basic' + primary_resource_id: 'api_cfg' + primary_resource_name: 'fmt.Sprintf("tf-test-my-api%s", context["random_suffix"]), fmt.Sprintf("tf-test-my-config%s", context["random_suffix"])' + min_version: 'beta' + vars: + api_id: 'my-api' + config_id: 'my-config' + - name: 'apigateway_api_config_full' + primary_resource_id: 'api_cfg' + primary_resource_name: 'fmt.Sprintf("tf-test-my-api%s", context["random_suffix"])' + min_version: 'beta' + vars: + api_id: 'my-api' + config_id: 'my-config' + skip_docs: true + - name: 'apigateway_api_config_grpc' + primary_resource_id: 'api_cfg' + primary_resource_name: 'fmt.Sprintf("tf-test-my-api%s", context["random_suffix"])' + min_version: 'beta' + vars: + api_id: 'my-api' + config_id: 'my-config' + ignore_read_extra: + - 'grpc_services.0.file_descriptor_set' + - name: 'apigateway_api_config_grpc_full' + primary_resource_id: 'api_cfg' + min_version: 'beta' + vars: + api_id: 'my-api' + config_id: 'my-config' + skip_docs: true +parameters: + - name: 'api' + type: String + description: | + The API to attach the config to. + min_version: 'beta' + url_param_only: true + required: true + immutable: true + - name: 'apiConfigId' + type: String + description: | + Identifier to assign to the API Config. Must be unique within scope of the parent resource(api). + min_version: 'beta' + url_param_only: true + immutable: true + default_from_api: true +properties: + - name: 'name' + type: String + description: The resource name of the API Config. + min_version: 'beta' + output: true + - name: 'displayName' + type: String + description: | + A user-visible name for the API. + min_version: 'beta' + default_from_api: true + - name: 'serviceConfigId' + type: String + description: | + The ID of the associated Service Config (https://cloud.google.com/service-infrastructure/docs/glossary#config). 
+ min_version: 'beta' + output: true + - name: 'labels' + type: KeyValueLabels + description: | + Resource labels to represent user-provided metadata. + min_version: 'beta' + immutable: false + - name: 'gatewayConfig' + type: NestedObject + description: | + Immutable. Gateway specific configuration. + If not specified, backend authentication will be set to use OIDC authentication using the default compute service account + min_version: 'beta' + immutable: true + ignore_read: true + properties: + - name: 'backendConfig' + type: NestedObject + description: | + Backend settings that are applied to all backends of the Gateway. + min_version: 'beta' + required: true + properties: + - name: 'googleServiceAccount' + type: String + description: | + Google Cloud IAM service account used to sign OIDC tokens for backends that have authentication configured + (https://cloud.google.com/service-infrastructure/docs/service-management/reference/rest/v1/services.configs#backend). + min_version: 'beta' + required: true + immutable: true + - name: 'openapiDocuments' + type: Array + description: | + OpenAPI specification documents. If specified, grpcServices and managedServiceConfigs must not be included. + min_version: 'beta' + exactly_one_of: + - 'openapi_documents' + - 'grpc_services' + item_type: + type: NestedObject + properties: + - name: 'document' + type: NestedObject + description: The OpenAPI Specification document file. + min_version: 'beta' + required: true + properties: + - name: 'path' + type: String + description: | + The file path (full or relative path). This is typically the path of the file when it is uploaded. + min_version: 'beta' + required: true + immutable: true + - name: 'contents' + type: String + description: | + Base64 encoded content of the file. + min_version: 'beta' + required: true + immutable: true + validation: + function: 'verify.ValidateBase64String' + - name: 'grpcServices' + type: Array + description: | + gRPC service definition files. 
If specified, openapiDocuments must not be included. + min_version: 'beta' + ignore_read: true + exactly_one_of: + - 'openapi_documents' + - 'grpc_services' + required_with: + - 'managed_service_configs' + item_type: + type: NestedObject + properties: + - name: 'fileDescriptorSet' + type: NestedObject + description: | + Input only. File descriptor set, generated by protoc. + To generate, use protoc with imports and source info included. For an example test.proto file, the following command would put the value in a new file named out.pb. + + $ protoc --include_imports --include_source_info test.proto -o out.pb + min_version: 'beta' + required: true + immutable: true + properties: + - name: 'path' + type: String + description: | + The file path (full or relative path). This is typically the path of the file when it is uploaded. + min_version: 'beta' + required: true + immutable: true + - name: 'contents' + type: String + description: | + Base64 encoded content of the file. + min_version: 'beta' + required: true + immutable: true + validation: + function: 'verify.ValidateBase64String' + - name: 'source' + type: Array + description: | + Uncompiled proto files associated with the descriptor set, used for display purposes (server-side compilation is not supported). These should match the inputs to 'protoc' command used to generate fileDescriptorSet. + min_version: 'beta' + item_type: + type: NestedObject + properties: + - name: 'path' + type: String + description: | + The file path (full or relative path). This is typically the path of the file when it is uploaded. + min_version: 'beta' + required: true + immutable: true + - name: 'contents' + type: String + description: | + Base64 encoded content of the file. + min_version: 'beta' + required: true + immutable: true + validation: + function: 'verify.ValidateBase64String' + - name: 'managedServiceConfigs' + type: Array + description: | + Optional. Service Configuration files. 
At least one must be included when using gRPC service definitions. See https://cloud.google.com/endpoints/docs/grpc/grpc-service-config#service_configuration_overview for the expected file contents. + If multiple files are specified, the files are merged with the following rules: * All singular scalar fields are merged using "last one wins" semantics in the order of the files uploaded. * Repeated fields are concatenated. * Singular embedded messages are merged using these rules for nested fields. + min_version: 'beta' + required_with: + - 'grpc_services' + item_type: + type: NestedObject + properties: + - name: 'path' + type: String + description: | + The file path (full or relative path). This is typically the path of the file when it is uploaded. + min_version: 'beta' + required: true + immutable: true + - name: 'contents' + type: String + description: | + Base64 encoded content of the file. + min_version: 'beta' + required: true + immutable: true diff --git a/mmv1/products/apigateway/go_Gateway.yaml b/mmv1/products/apigateway/go_Gateway.yaml new file mode 100644 index 000000000000..ad642df0cd63 --- /dev/null +++ b/mmv1/products/apigateway/go_Gateway.yaml @@ -0,0 +1,132 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Gateway' +description: | + A consumable API that can be used by multiple Gateways. 
+min_version: 'beta' +references: + guides: + 'Official Documentation': 'https://cloud.google.com/api-gateway/docs/quickstart' + api: 'https://cloud.google.com/api-gateway/docs/reference/rest/v1beta/projects.locations.apis' +docs: +base_url: 'projects/{{project}}/locations/{{region}}/gateways' +self_link: 'projects/{{project}}/locations/{{region}}/gateways/{{gateway_id}}' +create_url: 'projects/{{project}}/locations/{{region}}/gateways?gatewayId={{gateway_id}}' +update_verb: 'PATCH' +update_mask: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +identity: + - gatewayId +iam_policy: + skip_import_test: true + method_name_separator: ':' + allowed_iam_role: 'roles/apigateway.viewer' + parent_resource_attribute: 'gateway' + base_url: 'projects/{{project}}/locations/{{region}}/gateways/{{gateway}}' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{region}}/gateways/{{gateway}}' + - '{{project}}/{{region}}/{{gateway}}' + - '{{region}}/{{gateway}}' + - '{{gateway}}' +custom_code: +examples: + - name: 'apigateway_gateway_basic' + primary_resource_id: 'api_gw' + primary_resource_name: 'fmt.Sprintf("tf-test-my-gateway%s", context["random_suffix"])' + min_version: 'beta' + vars: + api_id: 'my-api' + config_id: 'my-config' + gateway_id: 'my-gateway' + - name: 'apigateway_gateway_full' + primary_resource_id: 'api_gw' + primary_resource_name: 'fmt.Sprintf("tf-test-my-gateway%s", context["random_suffix"])' + min_version: 'beta' + vars: + api_id: 'my-api' + config_id: 'my-config' + gateway_id: 'my-gateway' + skip_docs: true +parameters: + - name: 'region' + type: String + description: | + The region of the 
gateway for the API. + min_version: 'beta' + url_param_only: true + immutable: true + default_from_api: true + - name: 'gatewayId' + type: String + description: | + Identifier to assign to the Gateway. Must be unique within scope of the parent resource(project). + min_version: 'beta' + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Resource name of the Gateway. Format: projects/{project}/locations/{region}/gateways/{gateway} + min_version: 'beta' + output: true + - name: 'displayName' + type: String + description: | + A user-visible name for the API. + min_version: 'beta' + default_from_api: true + - name: 'apiConfig' + type: String + description: | + Resource name of the API Config for this Gateway. Format: projects/{project}/locations/global/apis/{api}/configs/{apiConfig}. + When changing api configs please ensure the new config is a new resource and the + [lifecycle](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle) rule `create_before_destroy` is set. + min_version: 'beta' + required: true + diff_suppress_func: 'tpgresource.CompareResourceNames' + - name: 'defaultHostname' + type: String + description: + The default API Gateway host name of the form + {gatewayId}-{hash}.{region_code}.gateway.dev. + min_version: 'beta' + output: true + - name: 'labels' + type: KeyValueLabels + description: | + Resource labels to represent user-provided metadata. + min_version: 'beta' + immutable: false diff --git a/mmv1/products/apigateway/go_product.yaml b/mmv1/products/apigateway/go_product.yaml new file mode 100644 index 000000000000..4e2b99e2e925 --- /dev/null +++ b/mmv1/products/apigateway/go_product.yaml @@ -0,0 +1,34 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ApiGateway' +display_name: 'API Gateway' +versions: + - name: 'beta' + base_url: 'https://apigateway.googleapis.com/v1beta/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' +async: + type: "OpAsync" + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index b6809faad156..3193d5d0841c 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -692,6 +692,7 @@ func checkExceptionList(filePath string) bool { "constants/router_nat_validate_action_active_range.go", "unordered_list_customize_diff", "default_if_empty", + "iam/example_config_body/go/api_gateway_api_config.tf.tmpl", // TODO: remove the following files from the exception list after all of the services are migrated to Go // It will generate diffs when partial services are migrated. 
diff --git a/mmv1/templates/terraform/iam/example_config_body/go/api_gateway_api_config.tf.tmpl b/mmv1/templates/terraform/iam/example_config_body/go/api_gateway_api_config.tf.tmpl index 6c023f4146fb..56ac3cb6a3b8 100644 --- a/mmv1/templates/terraform/iam/example_config_body/go/api_gateway_api_config.tf.tmpl +++ b/mmv1/templates/terraform/iam/example_config_body/go/api_gateway_api_config.tf.tmpl @@ -1,2 +1,3 @@ + {{- ""}} api = google_api_gateway_api_config.api_cfg.api api_config = google_api_gateway_api_config.api_cfg.api_config_id \ No newline at end of file diff --git a/mmv1/templates/terraform/iam/go/iam_attributes.go.tmpl b/mmv1/templates/terraform/iam/go/iam_attributes.go.tmpl index e82109494228..091a0ac5ff44 100644 --- a/mmv1/templates/terraform/iam/go/iam_attributes.go.tmpl +++ b/mmv1/templates/terraform/iam/go/iam_attributes.go.tmpl @@ -2,4 +2,4 @@ {{- $ids := $.IamSelfLinkIdentifiers }} {{- range $i, $attribue := $.IamAttributes}} {{ $attribue }} = {{ $.IamParentSourceType }}.{{ $primaryResourceId }}.{{ (index $ids $i)}} -{{- end -}} \ No newline at end of file +{{- end }} \ No newline at end of file diff --git a/mmv1/templates/terraform/iam/iam_attributes.tf.erb b/mmv1/templates/terraform/iam/iam_attributes.go.erb similarity index 100% rename from mmv1/templates/terraform/iam/iam_attributes.tf.erb rename to mmv1/templates/terraform/iam/iam_attributes.go.erb diff --git a/mmv1/templates/terraform/resource_iam.html.markdown.tmpl b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl index 312e02f6b37a..09d1c0dda3e7 100644 --- a/mmv1/templates/terraform/resource_iam.html.markdown.tmpl +++ b/mmv1/templates/terraform/resource_iam.html.markdown.tmpl @@ -201,9 +201,8 @@ resource "{{ $.IamTerraformName }}_member" "member" { ## Argument Reference The following arguments are supported: - {{ range $param := $.IamSelfLinkProperties }} -{{- if eq $param.Name "name" -}} +{{- if eq $param.Name "name" }} * `{{ $.IamParentResourceName }}` - (Required) Used to find the 
parent resource to bind the IAM policy to {{- else if or (eq (underscore $param.Name) "region") (eq (underscore $param.Name) "zone") }} * `{{ underscore $param.Name }}` - (Optional) {{ $param.Description }} Used to find the parent resource to bind the IAM policy to. If not specified, diff --git a/mmv1/templates/terraform/yaml_conversion.erb b/mmv1/templates/terraform/yaml_conversion.erb index 88385854e478..b3f02d9b5cff 100644 --- a/mmv1/templates/terraform/yaml_conversion.erb +++ b/mmv1/templates/terraform/yaml_conversion.erb @@ -325,6 +325,9 @@ iam_policy: <% unless object.iam_policy.base_url.nil? -%> base_url: '<%= object.iam_policy.base_url %>' <% end -%> +<% unless object.iam_policy.example_config_body.nil? -%> + example_config_body: '<%= object.convert_go_file( object.iam_policy.example_config_body ) %>' +<% end -%> <% unless object.iam_policy.self_link.nil? -%> self_link: '<%= object.iam_policy.self_link %>' <% end -%> diff --git a/mmv1/third_party/terraform/services/compute/go/compute_instance_helpers.go.tmpl b/mmv1/third_party/terraform/services/compute/go/compute_instance_helpers.go.tmpl index d6bced2b7fee..682eff707497 100644 --- a/mmv1/third_party/terraform/services/compute/go/compute_instance_helpers.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/compute_instance_helpers.go.tmpl @@ -133,7 +133,6 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { scheduling.InstanceTerminationAction = v.(string) scheduling.ForceSendFields = append(scheduling.ForceSendFields, "InstanceTerminationAction") } -{{- if ne $.TargetVersionName "ga" }} if v, ok := original["max_run_duration"]; ok { transformedMaxRunDuration, err := expandComputeMaxRunDuration(v) if err != nil { @@ -142,9 +141,6 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { scheduling.MaxRunDuration = transformedMaxRunDuration scheduling.ForceSendFields = append(scheduling.ForceSendFields, "MaxRunDuration") } - if v, ok := original["maintenance_interval"]; 
ok { - scheduling.MaintenanceInterval = v.(string) - } if v, ok := original["on_instance_stop_action"]; ok { transformedOnInstanceStopAction, err := expandComputeOnInstanceStopAction(v) @@ -154,6 +150,10 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { scheduling.OnInstanceStopAction = transformedOnInstanceStopAction scheduling.ForceSendFields = append(scheduling.ForceSendFields, "OnInstanceStopAction") } +{{- if ne $.TargetVersionName "ga" }} + if v, ok := original["maintenance_interval"]; ok { + scheduling.MaintenanceInterval = v.(string) + } {{- end }} if v, ok := original["local_ssd_recovery_timeout"]; ok { transformedLocalSsdRecoveryTimeout, err := expandComputeLocalSsdRecoveryTimeout(v) @@ -166,7 +166,6 @@ func expandScheduling(v interface{}) (*compute.Scheduling, error) { return scheduling, nil } -{{ if ne $.TargetVersionName `ga` -}} func expandComputeMaxRunDuration(v interface{}) (*compute.Duration, error) { l := v.([]interface{}) duration := compute.Duration{} @@ -218,7 +217,6 @@ func expandComputeOnInstanceStopAction(v interface{}) (*compute.SchedulingOnInst return &onInstanceStopAction, nil } -{{- end }} func expandComputeLocalSsdRecoveryTimeout(v interface{}) (*compute.Duration, error) { l := v.([]interface{}) @@ -266,16 +264,18 @@ func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} { schedulingMap["automatic_restart"] = *resp.AutomaticRestart } -{{ if ne $.TargetVersionName `ga` -}} if resp.MaxRunDuration != nil { schedulingMap["max_run_duration"] = flattenComputeMaxRunDuration(resp.MaxRunDuration) } - if resp.MaintenanceInterval != "" { - schedulingMap["maintenance_interval"] = resp.MaintenanceInterval - } + if resp.OnInstanceStopAction != nil { schedulingMap["on_instance_stop_action"] = flattenOnInstanceStopAction(resp.OnInstanceStopAction) } + +{{ if ne $.TargetVersionName `ga` -}} + if resp.MaintenanceInterval != "" { + schedulingMap["maintenance_interval"] = resp.MaintenanceInterval + } {{- end }} if 
resp.LocalSsdRecoveryTimeout != nil { @@ -295,7 +295,6 @@ func flattenScheduling(resp *compute.Scheduling) []map[string]interface{} { return []map[string]interface{}{schedulingMap} } -{{ if ne $.TargetVersionName `ga` -}} func flattenComputeMaxRunDuration(v *compute.Duration) []interface{} { if v == nil { return nil @@ -314,7 +313,6 @@ func flattenOnInstanceStopAction(v *compute.SchedulingOnInstanceStopAction) []in transformed["discard_local_ssd"] = v.DiscardLocalSsd return []interface{}{transformed} } -{{- end }} func flattenComputeLocalSsdRecoveryTimeout(v *compute.Duration) []interface{} { if v == nil { @@ -687,11 +685,7 @@ func schedulingHasChangeRequiringReboot(d *schema.ResourceData) bool { oScheduling := o.([]interface{})[0].(map[string]interface{}) newScheduling := n.([]interface{})[0].(map[string]interface{}) -{{ if ne $.TargetVersionName `ga` -}} return hasNodeAffinitiesChanged(oScheduling, newScheduling) || hasMaxRunDurationChanged(oScheduling, newScheduling) -{{- else }} - return hasNodeAffinitiesChanged(oScheduling, newScheduling) -{{- end }} } // Terraform doesn't correctly calculate changes on schema.Set, so we do it manually @@ -733,7 +727,6 @@ func schedulingHasChangeWithoutReboot(d *schema.ResourceData) bool { return false } -{{ if ne $.TargetVersionName `ga` -}} func hasMaxRunDurationChanged(oScheduling, nScheduling map[string]interface{}) bool { oMrd := oScheduling["max_run_duration"].([]interface{}) nMrd := nScheduling["max_run_duration"].([]interface{}) @@ -757,7 +750,6 @@ func hasMaxRunDurationChanged(oScheduling, nScheduling map[string]interface{}) b return false } -{{- end }} func hasNodeAffinitiesChanged(oScheduling, newScheduling map[string]interface{}) bool { oldNAs := oScheduling["node_affinities"].(*schema.Set).List() diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl index 5789b30f00ef..e917d05913f6 100644 --- 
a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance.go.tmpl @@ -82,10 +82,10 @@ var ( "scheduling.0.min_node_cpus", "scheduling.0.provisioning_model", "scheduling.0.instance_termination_action", -{{- if ne $.TargetVersionName "ga" }} "scheduling.0.max_run_duration", - "scheduling.0.maintenance_interval", "scheduling.0.on_instance_stop_action", +{{- if ne $.TargetVersionName "ga" }} + "scheduling.0.maintenance_interval", {{- end }} "scheduling.0.local_ssd_recovery_timeout", } @@ -825,7 +825,6 @@ func ResourceComputeInstance() *schema.Resource { AtLeastOneOf: schedulingKeys, Description: `Specifies the action GCE should take when SPOT VM is preempted.`, }, -{{- if ne $.TargetVersionName "ga" }} "max_run_duration" : { Type: schema.TypeList, Optional: true, @@ -871,6 +870,7 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, +{{- if ne $.TargetVersionName "ga" }} "maintenance_interval": { Type: schema.TypeString, Optional: true, @@ -2432,11 +2432,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err desiredStatus := d.Get("desired_status").(string) if statusBeforeUpdate == "RUNNING" && desiredStatus != "TERMINATED" && !d.Get("allow_stopping_for_update").(bool) { -{{- if ne $.TargetVersionName "ga" }} return fmt.Errorf("Changing the machine_type, min_cpu_platform, service_account, enable_display, shielded_instance_config, scheduling.node_affinities, scheduling.max_run_duration " + -{{- else }} - return fmt.Errorf("Changing the machine_type, min_cpu_platform, service_account, enable_display, shielded_instance_config, scheduling.node_affinities " + -{{- end }} "or network_interface.[#d].(network/subnetwork/subnetwork_project) or advanced_machine_features on a started instance requires stopping it. " + "To acknowledge this, please set allow_stopping_for_update = true in your config. 
" + "You can also stop it by setting desired_status = \"TERMINATED\", but the instance will not be restarted after the update.") diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl index 5937612209e1..7914f0c9dafe 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template.go.tmpl @@ -34,10 +34,10 @@ var ( "scheduling.0.min_node_cpus", "scheduling.0.provisioning_model", "scheduling.0.instance_termination_action", -{{- if ne $.TargetVersionName "ga" }} "scheduling.0.max_run_duration", - "scheduling.0.maintenance_interval", "scheduling.0.on_instance_stop_action", +{{- if ne $.TargetVersionName "ga" }} + "scheduling.0.maintenance_interval", {{- end }} "scheduling.0.local_ssd_recovery_timeout", } @@ -714,7 +714,6 @@ Google Cloud KMS.`, AtLeastOneOf: schedulingInstTemplateKeys, Description: `Specifies the action GCE should take when SPOT VM is preempted.`, }, -{{- if ne $.TargetVersionName "ga" }} "max_run_duration" : { Type: schema.TypeList, Optional: true, @@ -760,6 +759,7 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, +{{- if ne $.TargetVersionName "ga" }} "maintenance_interval" : { Type: schema.TypeString, Optional: true, diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl index 86941e061494..3706902ae507 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_template_test.go.tmpl @@ -1075,7 +1075,6 @@ func TestAccComputeInstanceTemplate_spot(t *testing.T) { }) } -{{ if ne $.TargetVersionName `ga` -}} func 
TestAccComputeInstanceTemplate_spot_maxRunDuration_deleteTerminationAction(t *testing.T) { t.Parallel() @@ -1218,7 +1217,6 @@ func TestAccComputeInstanceTemplate_spot_maxRunDuration(t *testing.T) { }, }) } -{{- end }} func TestAccComputeInstanceTemplate_localSsdRecoveryTimeout(t *testing.T) { t.Parallel() @@ -1769,7 +1767,6 @@ func testAccCheckComputeInstanceTemplateInstanceTerminationAction(instanceTempla } -{{ if ne $.TargetVersionName `ga` -}} func testAccCheckComputeInstanceTemplateMaxRunDuration(instanceTemplate *compute.InstanceTemplate, instance_max_run_duration_want compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { if !reflect.DeepEqual(*instanceTemplate.Properties.Scheduling.MaxRunDuration, instance_max_run_duration_want) { @@ -1779,7 +1776,6 @@ func testAccCheckComputeInstanceTemplateMaxRunDuration(instanceTemplate *compute return nil } } -{{- end }} func testAccCheckComputeInstanceTemplateLocalSsdRecoveryTimeout(instanceTemplate *compute.InstanceTemplate, instance_local_ssd_recovery_timeout_want compute.Duration) resource.TestCheckFunc { @@ -3845,12 +3841,10 @@ resource "google_compute_instance_template" "foobar" { automatic_restart = false provisioning_model = "SPOT" instance_termination_action = "%s" -{{- if ne $.TargetVersionName "ga" }} max_run_duration { nanos = 123 seconds = 60 } -{{- end }} } @@ -3865,7 +3859,6 @@ resource "google_compute_instance_template" "foobar" { `, suffix, instanceTerminationAction) } -{{ if ne $.TargetVersionName `ga` -}} func testAccComputeInstanceTemplate_maxRunDuration_onInstanceStopAction(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3913,7 +3906,6 @@ resource "google_compute_instance_template" "foobar" { } `, suffix) } -{{- end }} func testAccComputeInstanceTemplate_localSsdRecoveryTimeout(suffix string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl 
b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl index 19d5ecc9b76a..a1f438ae5356 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_instance_test.go.tmpl @@ -2635,7 +2635,6 @@ func TestAccComputeInstance_spotVM_update(t *testing.T) { }) } -{{ if ne $.TargetVersionName `ga` -}} func TestAccComputeInstance_maxRunDuration_update(t *testing.T) { t.Parallel() @@ -2823,7 +2822,6 @@ func TestAccComputeInstance_spotVM_maxRunDuration_update(t *testing.T) { }, }) } -{{- end }} func TestAccComputeInstance_localSsdRecoveryTimeout(t *testing.T) { t.Parallel() @@ -3779,7 +3777,6 @@ func testAccCheckComputeResourcePolicy(instance *compute.Instance, scheduleName } } -{{ if ne $.TargetVersionName `ga` -}} func testAccCheckComputeInstanceMaxRunDuration(instance *compute.Instance, instanceMaxRunDurationWant compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { if instance == nil { @@ -3796,7 +3793,6 @@ func testAccCheckComputeInstanceMaxRunDuration(instance *compute.Instance, insta return nil } } -{{- end }} func testAccCheckComputeInstanceLocalSsdRecoveryTimeout(instance *compute.Instance, instanceLocalSsdRecoveryTiemoutWant compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -8344,7 +8340,6 @@ resource "google_compute_instance" "foobar" { `, instance) } -{{ if ne $.TargetVersionName `ga` -}} func testAccComputeInstance_standardVM_maxRunDuration(instance string, instanceTerminationAction string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -8455,7 +8450,6 @@ resource "google_compute_instance" "foobar" { } `, instance, instanceTerminationAction) } -{{- end }} func testAccComputeInstance_spotVM_maxRunDuration(instance string, instanceTerminationAction string) string { @@ -8485,12 +8479,10 @@ resource "google_compute_instance" "foobar" { 
automatic_restart = false preemptible = true instance_termination_action = "%s" -{{- if ne $.TargetVersionName "ga" }} max_run_duration { nanos = 123 seconds = 60 } -{{- end }} } } `, instance, instanceTerminationAction) diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl index 6ffd6a99546d..3439bb4abb7a 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template.go.tmpl @@ -674,7 +674,6 @@ Google Cloud KMS.`, AtLeastOneOf: schedulingInstTemplateKeys, Description: `Specifies the action GCE should take when SPOT VM is preempted.`, }, -{{- if ne $.TargetVersionName "ga" }} "max_run_duration" : { Type: schema.TypeList, Optional: true, @@ -720,6 +719,7 @@ be from 0 to 999,999,999 inclusive.`, }, }, }, +{{- if ne $.TargetVersionName "ga" }} "maintenance_interval" : { Type: schema.TypeString, Optional: true, diff --git a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl index e7b80d0a346d..7785eaf29f2b 100644 --- a/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl +++ b/mmv1/third_party/terraform/services/compute/go/resource_compute_region_instance_template_test.go.tmpl @@ -1024,7 +1024,6 @@ func TestAccComputeRegionInstanceTemplate_spot(t *testing.T) { }) } -{{ if ne $.TargetVersionName `ga` -}} func TestAccComputeRegionInstanceTemplate_spot_maxRunDuration(t *testing.T) { t.Parallel() @@ -1092,7 +1091,6 @@ func TestAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(t }, }) } -{{- end }} func 
TestAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(t *testing.T) { t.Parallel() @@ -1484,7 +1482,6 @@ func testAccCheckComputeRegionInstanceTemplateInstanceTerminationAction(instance } } -{{ if ne $.TargetVersionName `ga` -}} func testAccCheckComputeRegionInstanceTemplateMaxRunDuration(instanceTemplate *compute.InstanceTemplate, instance_max_run_duration_want compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { if !reflect.DeepEqual(*instanceTemplate.Properties.Scheduling.MaxRunDuration, instance_max_run_duration_want) { @@ -1494,7 +1491,6 @@ func testAccCheckComputeRegionInstanceTemplateMaxRunDuration(instanceTemplate *c return nil } } -{{- end }} func testAccCheckComputeRegionInstanceTemplateLocalSsdRecoveryTimeout(instanceTemplate *compute.InstanceTemplate, instance_local_ssd_recovery_timeout_want compute.Duration) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -3387,13 +3383,10 @@ resource "google_compute_region_instance_template" "foobar" { automatic_restart = false provisioning_model = "SPOT" instance_termination_action = "DELETE" -{{- if ne $.TargetVersionName "ga" }} max_run_duration { nanos = 123 seconds = 60 } -{{- end }} - } metadata = { @@ -3407,7 +3400,6 @@ resource "google_compute_region_instance_template" "foobar" { `, suffix) } -{{ if ne $.TargetVersionName `ga` -}} func testAccComputeRegionInstanceTemplate_maxRunDuration_onInstanceStopAction(suffix string) string { return fmt.Sprintf(` data "google_compute_image" "my_image" { @@ -3456,7 +3448,6 @@ resource "google_compute_region_instance_template" "foobar" { } `, suffix) } -{{- end }} func testAccComputeRegionInstanceTemplate_localSsdRecoveryTimeout(suffix string) string { return fmt.Sprintf(` diff --git a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl index 
2cf6f91f2d2f..de3c3b343300 100644 --- a/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl +++ b/mmv1/third_party/terraform/services/workstations/go/resource_workstations_workstation_config_test.go.tmpl @@ -392,7 +392,6 @@ resource "google_workstations_workstation_config" "default" { gce_pd { disk_type = "pd-standard" source_image = google_compute_image.test_source_image.id - read_only = true } } @@ -767,6 +766,7 @@ resource "google_tags_tag_value" "tag_value1" { parent = "tagKeys/${google_tags_tag_key.tag_key1.name}" short_name = "%{value_short_name}" } + resource "google_compute_network" "default" { provider = google-beta name = "tf-test-workstation-cluster%{random_suffix}" @@ -1337,7 +1337,7 @@ func TestAccWorkstationsWorkstationConfig_vmTags(t *testing.T) { } func testAccWorkstationsWorkstationConfig_vmTags(context map[string]interface{}) string { -return acctest.Nprintf(` + return acctest.Nprintf(` data "google_project" "project" { provider = google-beta } From 67624be0cb3c3c0bca35bccae30fcf1f72377eeb Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Mon, 8 Jul 2024 16:25:09 +0100 Subject: [PATCH 277/356] Fix acc test `TestAccContainerNodePool_concurrent` with latest node version number (#11115) --- .../container/go/resource_container_node_pool_test.go.tmpl | 4 ++-- .../container/resource_container_node_pool_test.go.erb | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl index 352156d401a7..b9d633a9c8ce 100644 --- a/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl +++ b/mmv1/third_party/terraform/services/container/go/resource_container_node_pool_test.go.tmpl @@ -3976,7 +3976,7 @@ resource "google_container_node_pool" 
"np1" { location = "us-central1-a" cluster = google_container_cluster.cluster.name initial_node_count = 2 - version = "1.27.3-gke.1700" + version = "1.29.4-gke.1043002" } resource "google_container_node_pool" "np2" { @@ -3984,7 +3984,7 @@ resource "google_container_node_pool" "np2" { location = "us-central1-a" cluster = google_container_cluster.cluster.name initial_node_count = 2 - version = "1.27.3-gke.1700" + version = "1.29.4-gke.1043002" } `, cluster, networkName, subnetworkName, np1, np2) } diff --git a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb index c6b941511c48..6a8498b261d6 100644 --- a/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb +++ b/mmv1/third_party/terraform/services/container/resource_container_node_pool_test.go.erb @@ -3977,7 +3977,7 @@ resource "google_container_node_pool" "np1" { location = "us-central1-a" cluster = google_container_cluster.cluster.name initial_node_count = 2 - version = "1.27.3-gke.1700" + version = "1.29.4-gke.1043002" } resource "google_container_node_pool" "np2" { @@ -3985,7 +3985,7 @@ resource "google_container_node_pool" "np2" { location = "us-central1-a" cluster = google_container_cluster.cluster.name initial_node_count = 2 - version = "1.27.3-gke.1700" + version = "1.29.4-gke.1043002" } `, cluster, networkName, subnetworkName, np1, np2) } From 7e37f14c42f75d0c3b4721f5024464b6386cd5de Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Mon, 8 Jul 2024 13:07:05 -0500 Subject: [PATCH 278/356] go rewrite - refresh, appengine, and bigquery (#11119) --- mmv1/api/resource/examples.go | 6 + .../go_ApplicationUrlDispatchRules.yaml | 89 ++ mmv1/products/appengine/go_DomainMapping.yaml | 145 +++ mmv1/products/appengine/go_FirewallRule.yaml | 83 ++ .../appengine/go_FlexibleAppVersion.yaml | 844 +++++++++++++++++ mmv1/products/appengine/go_Service.yaml | 46 + 
.../appengine/go_ServiceNetworkSettings.yaml | 83 ++ .../appengine/go_ServiceSplitTraffic.yaml | 99 ++ .../appengine/go_StandardAppVersion.yaml | 448 +++++++++ mmv1/products/appengine/go_product.yaml | 24 + mmv1/products/bigquery/Dataset.yaml | 16 - mmv1/products/bigquery/DatasetAccess.yaml | 8 - mmv1/products/bigquery/Table.yaml | 14 +- mmv1/products/bigquery/go_Dataset.yaml | 407 ++++++++ mmv1/products/bigquery/go_DatasetAccess.yaml | 295 ++++++ mmv1/products/bigquery/go_Job.yaml | 877 ++++++++++++++++++ mmv1/products/bigquery/go_Routine.yaml | 325 +++++++ mmv1/products/bigquery/go_Table.yaml | 557 +++++++++++ mmv1/products/bigquery/go_product.yaml | 25 + mmv1/products/compute/go_Address.yaml | 1 + mmv1/products/compute/go_Autoscaler.yaml | 1 + mmv1/products/compute/go_BackendBucket.yaml | 2 + .../compute/go_BackendBucketSignedUrlKey.yaml | 1 + mmv1/products/compute/go_BackendService.yaml | 2 + .../go_BackendServiceSignedUrlKey.yaml | 1 + mmv1/products/compute/go_Disk.yaml | 2 + .../go_DiskResourcePolicyAttachment.yaml | 1 + .../compute/go_ExternalVpnGateway.yaml | 1 + mmv1/products/compute/go_Firewall.yaml | 1 + mmv1/products/compute/go_ForwardingRule.yaml | 1 + mmv1/products/compute/go_GlobalAddress.yaml | 1 + .../compute/go_GlobalForwardingRule.yaml | 1 + .../compute/go_GlobalNetworkEndpoint.yaml | 1 + .../go_GlobalNetworkEndpointGroup.yaml | 1 + mmv1/products/compute/go_HaVpnGateway.yaml | 1 + mmv1/products/compute/go_HealthCheck.yaml | 1 + mmv1/products/compute/go_HttpHealthCheck.yaml | 1 + .../products/compute/go_HttpsHealthCheck.yaml | 1 + mmv1/products/compute/go_Image.yaml | 2 + mmv1/products/compute/go_Instance.yaml | 2 + mmv1/products/compute/go_InstanceGroup.yaml | 1 + .../compute/go_InstanceGroupManager.yaml | 1 + .../compute/go_InstanceGroupMembership.yaml | 1 + .../compute/go_InstanceGroupNamedPort.yaml | 1 + .../products/compute/go_InstanceSettings.yaml | 1 + mmv1/products/compute/go_Interconnect.yaml | 1 + 
.../compute/go_InterconnectAttachment.yaml | 1 + mmv1/products/compute/go_MachineImage.yaml | 2 + .../compute/go_ManagedSslCertificate.yaml | 1 + mmv1/products/compute/go_Network.yaml | 1 + .../compute/go_NetworkAttachment.yaml | 1 + .../go_NetworkEdgeSecurityService.yaml | 1 + mmv1/products/compute/go_NetworkEndpoint.yaml | 1 + .../compute/go_NetworkEndpointGroup.yaml | 1 + .../products/compute/go_NetworkEndpoints.yaml | 1 + .../compute/go_NetworkFirewallPolicy.yaml | 1 + .../go_NetworkPeeringRoutesConfig.yaml | 1 + mmv1/products/compute/go_NodeGroup.yaml | 1 + mmv1/products/compute/go_NodeTemplate.yaml | 1 + mmv1/products/compute/go_PacketMirroring.yaml | 1 + .../compute/go_PerInstanceConfig.yaml | 1 + .../compute/go_ProjectCloudArmorTier.yaml | 1 + .../compute/go_PublicAdvertisedPrefix.yaml | 1 + .../compute/go_PublicDelegatedPrefix.yaml | 1 + .../products/compute/go_RegionAutoscaler.yaml | 1 + .../compute/go_RegionBackendService.yaml | 2 + .../products/compute/go_RegionCommitment.yaml | 1 + mmv1/products/compute/go_RegionDisk.yaml | 2 + ...go_RegionDiskResourcePolicyAttachment.yaml | 1 + .../compute/go_RegionHealthCheck.yaml | 1 + .../go_RegionInstanceGroupManager.yaml | 1 + .../compute/go_RegionNetworkEndpoint.yaml | 1 + .../go_RegionNetworkEndpointGroup.yaml | 1 + .../go_RegionNetworkFirewallPolicy.yaml | 1 + .../compute/go_RegionPerInstanceConfig.yaml | 1 + .../compute/go_RegionSecurityPolicy.yaml | 1 + .../compute/go_RegionSecurityPolicyRule.yaml | 1 + .../compute/go_RegionSslCertificate.yaml | 1 + mmv1/products/compute/go_RegionSslPolicy.yaml | 1 + .../compute/go_RegionTargetHttpProxy.yaml | 1 + .../compute/go_RegionTargetHttpsProxy.yaml | 1 + .../compute/go_RegionTargetTcpProxy.yaml | 1 + mmv1/products/compute/go_RegionUrlMap.yaml | 1 + mmv1/products/compute/go_Reservation.yaml | 1 + mmv1/products/compute/go_ResourcePolicy.yaml | 1 + mmv1/products/compute/go_Route.yaml | 1 + mmv1/products/compute/go_Router.yaml | 1 + 
mmv1/products/compute/go_RouterNat.yaml | 1 + .../compute/go_SecurityPolicyRule.yaml | 1 + .../compute/go_ServiceAttachment.yaml | 1 + mmv1/products/compute/go_Snapshot.yaml | 2 + mmv1/products/compute/go_SslCertificate.yaml | 1 + mmv1/products/compute/go_SslPolicy.yaml | 1 + mmv1/products/compute/go_Subnetwork.yaml | 2 + mmv1/products/compute/go_TargetGrpcProxy.yaml | 1 + mmv1/products/compute/go_TargetHttpProxy.yaml | 1 + .../products/compute/go_TargetHttpsProxy.yaml | 1 + mmv1/products/compute/go_TargetInstance.yaml | 1 + mmv1/products/compute/go_TargetSslProxy.yaml | 1 + mmv1/products/compute/go_TargetTcpProxy.yaml | 1 + mmv1/products/compute/go_UrlMap.yaml | 1 + mmv1/products/compute/go_VpnGateway.yaml | 1 + mmv1/products/compute/go_VpnTunnel.yaml | 1 + .../constants/go/bigquery_job.go.tmpl | 4 + .../go/cloudfunctions2_function.go.tmpl | 14 + .../constants/go/org_policy_policy.go.tmpl | 5 + .../custom_expand/go/enum_bool.go.tmpl | 23 + .../custom_flatten/go/enum_bool.go.tmpl | 6 + ...feature_view_ignore_project_number.go.tmpl | 15 + .../custom_update/go/vertex_ai_index.go.tmpl | 143 +++ .../encoders/go/org_policy_policy.go.tmpl | 15 + .../go/compute_packet_mirroring_full.tf.tmpl | 2 - .../examples/go/compute_reservation.tf.tmpl | 2 - ..._trigger_timespan_config_big_query.tf.tmpl | 43 + .../go/dns_managed_zone_basic.tf.tmpl | 2 - .../go/dns_managed_zone_private.tf.tmpl | 2 - ...ns_managed_zone_private_forwarding.tf.tmpl | 2 - .../go/dns_managed_zone_private_gke.tf.tmpl | 2 - .../dns_managed_zone_private_peering.tf.tmpl | 2 - .../go/dns_managed_zone_quickstart.tf.tmpl | 2 - ...dns_managed_zone_service_directory.tf.tmpl | 2 - .../examples/go/dns_policy_basic.tf.tmpl | 2 - .../examples/go/dns_record_set_basic.tf.tmpl | 2 - .../go/dns_response_policy_basic.tf.tmpl | 2 - .../go/dns_response_policy_rule_basic.tf.tmpl | 2 - ...xternal_cdn_lb_with_backend_bucket.tf.tmpl | 20 - .../go/external_http_lb_mig_backend.tf.tmpl | 18 - 
..._http_lb_mig_backend_custom_header.tf.tmpl | 2 - .../external_ssl_proxy_lb_mig_backend.tf.tmpl | 2 - .../external_tcp_proxy_lb_mig_backend.tf.tmpl | 2 - .../examples/go/external_vpn_gateway.tf.tmpl | 2 - .../go/firewall_with_target_tags.tf.tmpl | 2 - .../go/flask_google_cloud_quickstart.tf.tmpl | 8 - ..._internal_http_lb_with_mig_backend.tf.tmpl | 2 - .../go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl | 2 - .../go/instance_custom_hostname.tf.tmpl | 3 - .../go/instance_settings_basic.tf.tmpl | 2 - .../instance_virtual_display_enabled.tf.tmpl | 4 - .../go/int_https_lb_https_redirect.tf.tmpl | 2 - .../internal_http_lb_with_mig_backend.tf.tmpl | 2 - ...ternal_tcp_udp_lb_with_mig_backend.tf.tmpl | 2 - ..._custom_firewall_enforcement_order.tf.tmpl | 2 - .../examples/go/network_custom_mtu.tf.tmpl | 2 - ...gement_connectivity_test_addresses.tf.tmpl | 2 - ...gement_connectivity_test_instances.tf.tmpl | 2 - ...ecurity_address_groups_cloud_armor.tf.tmpl | 10 + ..._services_lb_route_extension_basic.tf.tmpl | 4 - ...ervices_lb_traffic_extension_basic.tf.tmpl | 3 - .../go/org_policy_policy_dry_run_spec.tf.tmpl | 29 + .../go/org_policy_policy_enforce.tf.tmpl | 16 + .../go/org_policy_policy_folder.tf.tmpl | 17 + .../go/org_policy_policy_organization.tf.tmpl | 8 + .../go/org_policy_policy_project.tf.tmpl | 30 + ...rivate_service_connect_google_apis.tf.tmpl | 6 - .../go/privateca_capool_all_fields.tf.tmpl | 2 - .../go/privateca_capool_basic.tf.tmpl | 2 - ...vateca_certificate_authority_basic.tf.tmpl | 2 - ...teca_certificate_authority_byo_key.tf.tmpl | 2 - ...a_certificate_authority_custom_ski.tf.tmpl | 2 - ..._certificate_authority_subordinate.tf.tmpl | 2 - .../go/privateca_certificate_config.tf.tmpl | 2 - .../go/privateca_certificate_csr.tf.tmpl | 2 - .../privateca_certificate_custom_ski.tf.tmpl | 2 - ...privateca_certificate_no_authority.tf.tmpl | 2 - ...rivateca_certificate_with_template.tf.tmpl | 2 - .../examples/go/privateca_quickstart.tf.tmpl | 2 - 
.../go/privateca_template_basic.tf.tmpl | 2 - .../go/region_autoscaler_basic.tf.tmpl | 2 - .../go/region_target_tcp_proxy_basic.tf.tmpl | 3 - ...region_url_map_path_template_match.tf.tmpl | 2 - ...gional_external_http_load_balancer.tf.tmpl | 28 - .../examples/go/spot_instance_basic.tf.tmpl | 3 - .../examples/go/sql_database_basic.tf.tmpl | 2 - .../go/sql_database_deletion_policy.tf.tmpl | 2 - .../go/sql_database_instance_my_sql.tf.tmpl | 4 - .../go/sql_database_instance_postgres.tf.tmpl | 4 - .../sql_database_instance_sqlserver.tf.tmpl | 4 - .../examples/go/sql_instance_cmek.tf.tmpl | 14 - .../examples/go/sql_instance_ha.tf.tmpl | 6 - .../go/sql_instance_iam_condition.tf.tmpl | 2 - .../examples/go/sql_instance_labels.tf.tmpl | 6 - .../examples/go/sql_instance_pitr.tf.tmpl | 4 - .../examples/go/sql_instance_ssl_cert.tf.tmpl | 10 - ..._mysql_instance_authorized_network.tf.tmpl | 2 - .../go/sql_mysql_instance_backup.tf.tmpl | 2 - ...sql_mysql_instance_backup_location.tf.tmpl | 2 - ...ql_mysql_instance_backup_retention.tf.tmpl | 2 - .../go/sql_mysql_instance_clone.tf.tmpl | 4 - .../go/sql_mysql_instance_flags.tf.tmpl | 2 - .../go/sql_mysql_instance_public_ip.tf.tmpl | 2 - .../go/sql_mysql_instance_pvp.tf.tmpl | 2 - .../go/sql_mysql_instance_replica.tf.tmpl | 4 - ...stgres_instance_authorized_network.tf.tmpl | 2 - .../go/sql_postgres_instance_backup.tf.tmpl | 2 - ..._postgres_instance_backup_location.tf.tmpl | 2 - ...postgres_instance_backup_retention.tf.tmpl | 2 - .../go/sql_postgres_instance_clone.tf.tmpl | 4 - .../go/sql_postgres_instance_flags.tf.tmpl | 2 - .../sql_postgres_instance_public_ip.tf.tmpl | 2 - .../go/sql_postgres_instance_pvp.tf.tmpl | 2 - .../go/sql_postgres_instance_replica.tf.tmpl | 4 - ...server_instance_authorized_network.tf.tmpl | 2 - .../go/sql_sqlserver_instance_backup.tf.tmpl | 2 - ...sqlserver_instance_backup_location.tf.tmpl | 2 - ...qlserver_instance_backup_retention.tf.tmpl | 2 - .../go/sql_sqlserver_instance_clone.tf.tmpl | 4 - 
.../go/sql_sqlserver_instance_flags.tf.tmpl | 2 - .../sql_sqlserver_instance_public_ip.tf.tmpl | 2 - .../go/sql_sqlserver_instance_replica.tf.tmpl | 4 - .../go/sql_sqlserver_vm_instance.tf.tmpl | 4 - .../examples/go/storage_hmac_key.tf.tmpl | 2 - .../go/storage_make_data_public.tf.tmpl | 2 - .../examples/go/storage_new_bucket.tf.tmpl | 8 - .../storage_object_lifecycle_setting.tf.tmpl | 2 - .../go/storage_pubsub_notifications.tf.tmpl | 2 - .../go/storage_static_website.tf.tmpl | 6 - .../go/target_grpc_proxy_basic.tf.tmpl | 2 - .../go/target_http_proxy_basic.tf.tmpl | 2 - ...http_proxy_http_keep_alive_timeout.tf.tmpl | 2 - .../target_http_proxy_https_redirect.tf.tmpl | 2 - .../go/target_https_proxy_basic.tf.tmpl | 2 - ...ttps_proxy_http_keep_alive_timeout.tf.tmpl | 2 - .../go/target_https_proxy_mtls.tf.tmpl | 2 - .../go/target_ssl_proxy_basic.tf.tmpl | 2 - .../go/target_tcp_proxy_basic.tf.tmpl | 2 - .../go/url_map_bucket_and_service.tf.tmpl | 2 - .../go/url_map_header_based_routing.tf.tmpl | 2 - .../url_map_parameter_based_routing.tf.tmpl | 2 - .../go/url_map_path_template_match.tf.tmpl | 2 - .../go/url_map_traffic_director_path.tf.tmpl | 2 - ..._map_traffic_director_path_partial.tf.tmpl | 2 - .../go/url_map_traffic_director_route.tf.tmpl | 2 - ...map_traffic_director_route_partial.tf.tmpl | 2 - ...inestore_featureview_cross_project.tf.tmpl | 141 +++ .../go/workstation_config_basic.tf.tmpl | 20 +- .../go/api_gateway_api_config.tf.tmpl | 1 - .../pre_read/go/org_policy_policy.go.tmpl | 19 + ...pute_region_instance_template_test.go.tmpl | 2 +- 238 files changed, 5000 insertions(+), 414 deletions(-) create mode 100644 mmv1/products/appengine/go_ApplicationUrlDispatchRules.yaml create mode 100644 mmv1/products/appengine/go_DomainMapping.yaml create mode 100644 mmv1/products/appengine/go_FirewallRule.yaml create mode 100644 mmv1/products/appengine/go_FlexibleAppVersion.yaml create mode 100644 mmv1/products/appengine/go_Service.yaml create mode 100644 
mmv1/products/appengine/go_ServiceNetworkSettings.yaml create mode 100644 mmv1/products/appengine/go_ServiceSplitTraffic.yaml create mode 100644 mmv1/products/appengine/go_StandardAppVersion.yaml create mode 100644 mmv1/products/appengine/go_product.yaml create mode 100644 mmv1/products/bigquery/go_Dataset.yaml create mode 100644 mmv1/products/bigquery/go_DatasetAccess.yaml create mode 100644 mmv1/products/bigquery/go_Job.yaml create mode 100644 mmv1/products/bigquery/go_Routine.yaml create mode 100644 mmv1/products/bigquery/go_Table.yaml create mode 100644 mmv1/products/bigquery/go_product.yaml create mode 100644 mmv1/templates/terraform/constants/go/bigquery_job.go.tmpl create mode 100644 mmv1/templates/terraform/constants/go/cloudfunctions2_function.go.tmpl create mode 100644 mmv1/templates/terraform/constants/go/org_policy_policy.go.tmpl create mode 100644 mmv1/templates/terraform/custom_expand/go/enum_bool.go.tmpl create mode 100644 mmv1/templates/terraform/custom_flatten/go/enum_bool.go.tmpl create mode 100644 mmv1/templates/terraform/custom_flatten/go/vertex_ai_feature_view_ignore_project_number.go.tmpl create mode 100644 mmv1/templates/terraform/custom_update/go/vertex_ai_index.go.tmpl create mode 100644 mmv1/templates/terraform/encoders/go/org_policy_policy.go.tmpl create mode 100644 mmv1/templates/terraform/examples/go/dlp_job_trigger_timespan_config_big_query.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/network_security_address_groups_cloud_armor.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/org_policy_policy_dry_run_spec.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/org_policy_policy_enforce.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/org_policy_policy_folder.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/org_policy_policy_organization.tf.tmpl create mode 100644 mmv1/templates/terraform/examples/go/org_policy_policy_project.tf.tmpl create mode 100644 
mmv1/templates/terraform/examples/go/vertex_ai_featureonlinestore_featureview_cross_project.tf.tmpl create mode 100644 mmv1/templates/terraform/pre_read/go/org_policy_policy.go.tmpl diff --git a/mmv1/api/resource/examples.go b/mmv1/api/resource/examples.go index c669e2e89570..15f942026a23 100644 --- a/mmv1/api/resource/examples.go +++ b/mmv1/api/resource/examples.go @@ -260,6 +260,12 @@ func ExecuteTemplate(e any, templatePath string, appendNewline bool) string { templates := []string{ templatePath, "templates/terraform/expand_resource_ref.tmpl", + "templates/terraform/custom_flatten/go/bigquery_table_ref.go.tmpl", + "templates/terraform/flatten_property_method.go.tmpl", + "templates/terraform/expand_property_method.go.tmpl", + "templates/terraform/update_mask.go.tmpl", + "templates/terraform/nested_query.go.tmpl", + "templates/terraform/unordered_list_customize_diff.go.tmpl", } templateFileName := filepath.Base(templatePath) diff --git a/mmv1/products/appengine/go_ApplicationUrlDispatchRules.yaml b/mmv1/products/appengine/go_ApplicationUrlDispatchRules.yaml new file mode 100644 index 000000000000..9ce1764019ef --- /dev/null +++ b/mmv1/products/appengine/go_ApplicationUrlDispatchRules.yaml @@ -0,0 +1,89 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ApplicationUrlDispatchRules' +description: | + Rules to match an HTTP request and dispatch that request to a service. +references: + guides: + api: 'https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps#UrlDispatchRule' +docs: +id_format: '{{project}}' +base_url: 'apps/{{project}}' +create_url: 'apps/{{project}}?updateMask=dispatch_rules' +create_verb: 'PATCH' +update_url: 'apps/{{project}}?updateMask=dispatch_rules' +update_verb: 'PATCH' +delete_url: 'apps/{{project}}?updateMask=dispatch_rules' +delete_verb: 'PATCH' +mutex: 'apps/{{project}}' +import_format: + - '{{project}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: + test_check_destroy: 'templates/terraform/custom_check_destroy/go/appengine.go.tmpl' +skip_sweeper: true +error_retry_predicates: + + - 'transport_tpg.IsAppEngineRetryableError' +examples: + - name: 'app_engine_application_url_dispatch_rules_basic' + primary_resource_id: 'web_service' + vars: + bucket_name: 'appengine-test-bucket' +parameters: +properties: + - name: 'dispatchRules' + type: Array + description: | + Rules to match an HTTP request and dispatch that request to a service. + required: true + item_type: + type: NestedObject + properties: + - name: 'domain' + type: String + description: | + Domain name to match against. The wildcard "*" is supported if specified before a period: "*.". + Defaults to matching all domains: "*". + default_value: "*" + - name: 'path' + type: String + description: | + Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. 
+ The sum of the lengths of the domain and path may not exceed 100 characters. + required: true + - name: 'service' + type: String + description: | + Pathname within the host. Must start with a "/". A single "*" can be included at the end of the path. + The sum of the lengths of the domain and path may not exceed 100 characters. + required: true diff --git a/mmv1/products/appengine/go_DomainMapping.yaml b/mmv1/products/appengine/go_DomainMapping.yaml new file mode 100644 index 000000000000..a1ea9451554d --- /dev/null +++ b/mmv1/products/appengine/go_DomainMapping.yaml @@ -0,0 +1,145 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'DomainMapping' +description: | + A domain serving an App Engine application. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/appengine/docs/standard/python/mapping-custom-domains' + api: 'https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.domainMappings' +docs: +id_format: 'apps/{{project}}/domainMappings/{{domain_name}}' +base_url: 'apps/{{project}}/domainMappings' +self_link: 'apps/{{project}}/domainMappings/{{domain_name}}' +update_verb: 'PATCH' +update_mask: true +mutex: 'apps/{{project}}' +import_format: + - 'apps/{{project}}/domainMappings/{{domain_name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + decoder: 'templates/terraform/decoders/go/app_engine_domain_mapping.go.tmpl' +examples: + - name: 'app_engine_domain_mapping_basic' + primary_resource_id: 'domain_mapping' + ignore_read_extra: + - 'ssl_settings.0.ssl_management_type' +parameters: + - name: 'overrideStrategy' + type: Enum + description: | + Whether the domain creation should override any existing mappings for this domain. + By default, overrides are rejected. + url_param_only: true + ignore_read: true + default_value: "STRICT" + enum_values: + - 'STRICT' + - 'OVERRIDE' + - name: 'domainName' + type: String + description: | + Relative name of the domain serving the application. Example: example.com. + api_name: id + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Full path to the DomainMapping resource in the API. Example: apps/myapp/domainMapping/example.com. + output: true + - name: 'sslSettings' + type: NestedObject + description: | + SSL configuration for this domain. If unconfigured, this domain will not serve with SSL. 
+ default_from_api: true + update_mask_fields: + - 'ssl_settings.certificate_id' + - 'ssl_settings.ssl_management_type' + properties: + - name: 'certificateId' + type: String + description: | + ID of the AuthorizedCertificate resource configuring SSL for the application. Clearing this field will + remove SSL support. + By default, a managed certificate is automatically created for every domain mapping. To omit SSL support + or to configure SSL manually, specify `SslManagementType.MANUAL` on a `CREATE` or `UPDATE` request. You must be + authorized to administer the `AuthorizedCertificate` resource to manually map it to a DomainMapping resource. + Example: 12345. + default_from_api: true + - name: 'sslManagementType' + type: Enum + description: | + SSL management type for this domain. If `AUTOMATIC`, a managed certificate is automatically provisioned. + If `MANUAL`, `certificateId` must be manually specified in order to configure SSL for this domain. + required: true + enum_values: + - 'AUTOMATIC' + - 'MANUAL' + - name: 'pendingManagedCertificateId' + type: Enum + description: | + ID of the managed `AuthorizedCertificate` resource currently being provisioned, if applicable. Until the new + managed certificate has been successfully provisioned, the previous SSL state will be preserved. Once the + provisioning process completes, the `certificateId` field will reflect the new managed certificate and this + field will be left empty. To remove SSL support while there is still a pending managed certificate, clear the + `certificateId` field with an update request. + output: true + enum_values: + - 'AUTOMATIC' + - 'MANUAL' + - name: 'resourceRecords' + type: Array + description: | + The resource records required to configure this domain mapping. These records must be added to the domain's DNS + configuration in order to serve the application via this domain mapping. 
+ output: true + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + Relative name of the object affected by this record. Only applicable for CNAME records. Example: 'www'. + - name: 'rrdata' + type: String + description: | + Data for this record. Values vary by record type, as defined in RFC 1035 (section 5) and RFC 1034 (section 3.6.1). + - name: 'type' + type: Enum + description: | + Resource record type. Example: `AAAA`. + enum_values: + - 'A' + - 'AAAA' + - 'CNAME' diff --git a/mmv1/products/appengine/go_FirewallRule.yaml b/mmv1/products/appengine/go_FirewallRule.yaml new file mode 100644 index 000000000000..8b733d9a49d2 --- /dev/null +++ b/mmv1/products/appengine/go_FirewallRule.yaml @@ -0,0 +1,83 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FirewallRule' +description: | + A single firewall rule that is evaluated against incoming traffic + and provides an action to take on matched requests. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/appengine/docs/standard/python/creating-firewalls#creating_firewall_rules' + api: 'https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.firewall.ingressRules' +docs: +base_url: 'apps/{{project}}/firewall/ingressRules' +self_link: 'apps/{{project}}/firewall/ingressRules/{{priority}}' +update_verb: 'PATCH' +update_mask: true +mutex: 'apps/{{project}}' +import_format: + - 'apps/{{project}}/firewall/ingressRules/{{priority}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'PollAsync' + check_response_func_existence: 'transport_tpg.PollCheckForExistence' + check_response_func_absence: 'transport_tpg.PollCheckForAbsence' + suppress_error: false + target_occurrences: 1 + actions: ['create'] +collection_url_key: 'ingressRules' +custom_code: +skip_sweeper: true +examples: + - name: 'app_engine_firewall_rule_basic' + primary_resource_id: 'rule' + vars: + project_id: 'ae-project' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' +parameters: + - name: 'priority' + type: Integer + description: | + A positive integer that defines the order of rule evaluation. + Rules with the lowest priority are evaluated first. + + A default rule at priority Int32.MaxValue matches all IPv4 and + IPv6 traffic when no previous rule matches. Only the action of + this rule can be modified by the user. +properties: + - name: 'description' + type: String + description: | + An optional string description of this rule. + required: false + - name: 'sourceRange' + type: String + description: | + IP address or range, defined using CIDR notation, of requests that this rule applies to. + required: true + - name: 'action' + type: Enum + description: | + The action to take if this rule matches. 
+ required: true + enum_values: + - 'UNSPECIFIED_ACTION' + - 'ALLOW' + - 'DENY' diff --git a/mmv1/products/appengine/go_FlexibleAppVersion.yaml b/mmv1/products/appengine/go_FlexibleAppVersion.yaml new file mode 100644 index 000000000000..da141620af6c --- /dev/null +++ b/mmv1/products/appengine/go_FlexibleAppVersion.yaml @@ -0,0 +1,844 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'FlexibleAppVersion' +description: | + Flexible App Version resource to create a new version of flexible GAE Application. Based on Google Compute Engine, + the App Engine flexible environment automatically scales your app up and down while also balancing the load. + Learn about the differences between the standard environment and the flexible environment + at https://cloud.google.com/appengine/docs/the-appengine-environments. + + ~> **Note:** The App Engine flexible environment service account uses the member ID `service-[YOUR_PROJECT_NUMBER]@gae-api-prod.google.com.iam.gserviceaccount.com` + It should have the App Engine Flexible Environment Service Agent role, which will be applied when the `appengineflex.googleapis.com` service is enabled. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/appengine/docs/flexible' + api: 'https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions' +docs: +id_format: 'apps/{{project}}/services/{{service}}/versions/{{version_id}}' +base_url: 'apps/{{project}}/services/{{service}}/versions' +self_link: 'apps/{{project}}/services/{{service}}/versions/{{version_id}}?view=FULL' +update_url: 'apps/{{project}}/services/{{service}}/versions' +update_verb: 'POST' +update_mask: false +delete_url: 'apps/{{project}}/services/{{service}}/versions/{{version_id}}' +mutex: 'apps/{{project}}' +import_format: + - 'apps/{{project}}/services/{{service}}/versions/{{version_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'appengine#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'versions' +custom_code: + encoder: 'templates/terraform/encoders/go/flex_app_version.go.tmpl' + custom_delete: 'templates/terraform/custom_delete/go/appversion_delete.go.tmpl' + test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl' +error_retry_predicates: + + - 'transport_tpg.IsAppEngineRetryableError' +examples: + - name: 'app_engine_flexible_app_version' + primary_resource_id: 'myapp_v1' + vars: + bucket_name: 'appengine-static-content' + project: 'appeng-flex' + account_id: 'my-account' + test_env_vars: + org_id: 'ORG_ID' + billing_account: 'BILLING_ACCT' + ignore_read_extra: + - 'noop_on_destroy' + - 'deployment.0.zip' +virtual_fields: + - name: 'noop_on_destroy' + description: | + If set to `true`, the application version will not be deleted. 
+ type: Boolean + default_value: false + - name: 'delete_service_on_destroy' + description: | + If set to `true`, the service will be deleted if it is the last version. + type: Boolean + default_value: false +parameters: + - name: 'service' + type: ResourceRef + description: | + AppEngine service resource. Can contain numbers, letters, and hyphens. + url_param_only: true + required: true + resource: 'Service' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + Full path to the Version resource in the API. Example, "v1". + output: true + - name: 'version_id' + type: String + description: | + Relative name of the version within the service. For example, `v1`. Version names can contain only lowercase letters, numbers, or hyphens. + Reserved names,"default", "latest", and any name with the prefix "ah-". + api_name: id + immutable: true + - name: 'inboundServices' + type: Array + description: | + A list of the types of messages that this application is able to receive. + is_set: true + item_type: + type: Enum + description: | + One type of message that this application is able to receive. + enum_values: + - 'INBOUND_SERVICE_MAIL' + - 'INBOUND_SERVICE_MAIL_BOUNCE' + - 'INBOUND_SERVICE_XMPP_ERROR' + - 'INBOUND_SERVICE_XMPP_MESSAGE' + - 'INBOUND_SERVICE_XMPP_SUBSCRIBE' + - 'INBOUND_SERVICE_XMPP_PRESENCE' + - 'INBOUND_SERVICE_CHANNEL_PRESENCE' + - 'INBOUND_SERVICE_WARMUP' + - name: 'instanceClass' + type: String + description: | + Instance class that is used to run this version. Valid values are + AutomaticScaling: F1, F2, F4, F4_1G + ManualScaling: B1, B2, B4, B8, B4_1G + Defaults to F1 for AutomaticScaling and B1 for ManualScaling. + - name: 'network' + type: NestedObject + description: Extra network settings + properties: + - name: 'forwardedPorts' + type: Array + description: | + List of ports, or port pairs, to forward from the virtual machine to the application container. 
+ item_type: + type: String + - name: 'instanceIpMode' + type: Enum + description: | + Prevent instances from receiving an ephemeral external IP address. + min_version: 'beta' + enum_values: + - 'EXTERNAL' + - 'INTERNAL' + - name: 'instanceTag' + type: String + description: | + Tag to apply to the instance during creation. + - name: 'name' + type: String + description: | + Google Compute Engine network where the virtual machines are created. Specify the short name, not the resource path. + required: true + - name: 'subnetwork' + type: String + description: | + Google Cloud Platform sub-network where the virtual machines are created. Specify the short name, not the resource path. + + If the network that the instance is being created in is a Legacy network, then the IP address is allocated from the IPv4Range. + If the network that the instance is being created in is an auto Subnet Mode Network, then only network name should be specified (not the subnetworkName) and the IP address is created from the IPCidrRange of the subnetwork that exists in that zone for that network. + If the network that the instance is being created in is a custom Subnet Mode Network, then the subnetworkName must be specified and the IP address is created from the IPCidrRange of the subnetwork. + If specified, the subnetwork must exist in the same region as the App Engine flexible environment application. + api_name: subnetworkName + - name: 'sessionAffinity' + type: Boolean + description: | + Enable session affinity. + - name: 'resources' + type: NestedObject + description: Machine resources for a version. + properties: + - name: 'cpu' + type: Integer + description: | + Number of CPU cores needed. + at_least_one_of: + - 'resources.0.cpu' + - 'resources.0.disk_gb' + - 'resources.0.memory_gb' + - 'resources.0.volumes' + - name: 'diskGb' + type: Integer + description: | + Disk size (GB) needed. 
+ at_least_one_of: + - 'resources.0.cpu' + - 'resources.0.disk_gb' + - 'resources.0.memory_gb' + - 'resources.0.volumes' + - name: 'memoryGb' + type: Double + description: | + Memory (GB) needed. + at_least_one_of: + - 'resources.0.cpu' + - 'resources.0.disk_gb' + - 'resources.0.memory_gb' + - 'resources.0.volumes' + - name: 'volumes' + type: Array + description: | + List of volumes that are mounted within the deployment. + at_least_one_of: + - 'resources.0.cpu' + - 'resources.0.disk_gb' + - 'resources.0.memory_gb' + - 'resources.0.volumes' + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + Unique name for the volume. + required: true + - name: 'volumeType' + type: String + description: | + Underlying volume type, e.g. 'tmpfs'. + required: true + - name: 'sizeGb' + type: Integer + description: | + Volume size in gigabytes. + required: true + - name: 'runtime' + type: String + description: | + Desired runtime. Example python27. + required: true + - name: 'runtimeChannel' + type: String + description: | + The channel of the runtime to use. Only available for some runtimes. + - name: 'flexibleRuntimeSettings' + type: NestedObject + description: Runtime settings for App Engine flexible environment. + properties: + - name: 'operatingSystem' + type: String + description: | + Operating System of the application runtime. + - name: 'runtimeVersion' + type: String + description: | + The runtime version of an App Engine flexible application. + - name: 'betaSettings' + type: KeyValuePairs + description: | + Metadata settings that are supplied to this version to enable beta runtime features. + ignore_read: true + - name: 'servingStatus' + type: Enum + description: | + Current serving status of this version. Only the versions with a SERVING status create instances and can be billed. 
+ default_value: "SERVING" + enum_values: + - 'SERVING' + - 'STOPPED' + - name: 'runtimeApiVersion' + type: String + description: | + The version of the API in the given runtime environment. + Please see the app.yaml reference for valid values at `https://cloud.google.com/appengine/docs/standard/<language>/config/appref`\ + Substitute `<language>` with `python`, `java`, `php`, `ruby`, `go` or `nodejs`. + default_from_api: true + - name: 'handlers' + type: Array + description: | + An ordered list of URL-matching patterns that should be applied to incoming requests. + The first matching URL handles the request and other request handlers are not attempted. + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'urlRegex' + type: String + description: | + URL prefix. Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. + All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path. + - name: 'securityLevel' + type: Enum + description: | + Security (HTTPS) enforcement for this URL. + required: false + enum_values: + - 'SECURE_DEFAULT' + - 'SECURE_NEVER' + - 'SECURE_OPTIONAL' + - 'SECURE_ALWAYS' + - name: 'login' + type: Enum + description: | + Methods to restrict access to a URL based on login status. + required: false + enum_values: + - 'LOGIN_OPTIONAL' + - 'LOGIN_ADMIN' + - 'LOGIN_REQUIRED' + - name: 'authFailAction' + type: Enum + description: | + Actions to take when the user is not logged in. + required: false + enum_values: + - 'AUTH_FAIL_ACTION_REDIRECT' + - 'AUTH_FAIL_ACTION_UNAUTHORIZED' + - name: 'redirectHttpResponseCode' + type: Enum + description: | + 30x code to use when performing redirects for the secure field. 
+ required: false + enum_values: + - 'REDIRECT_HTTP_RESPONSE_CODE_301' + - 'REDIRECT_HTTP_RESPONSE_CODE_302' + - 'REDIRECT_HTTP_RESPONSE_CODE_303' + - 'REDIRECT_HTTP_RESPONSE_CODE_307' + - name: 'script' + type: NestedObject + description: | + Executes a script to handle the requests that match this URL pattern. + Only the auto value is supported for Node.js in the App Engine standard environment, for example "script:" "auto". + properties: + - name: 'scriptPath' + type: String + description: | + Path to the script from the application root directory. + required: true + - name: 'staticFiles' + type: NestedObject + description: | + Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. + Static file handlers describe which files in the application directory are static files, and which URLs serve them. + properties: + - name: 'path' + type: String + description: | + Path to the static files matched by the URL pattern, from the application root directory. + The path can refer to text matched in groupings in the URL pattern. + - name: 'uploadPathRegex' + type: String + description: | + Regular expression that matches the file paths for all files that should be referenced by this handler. + - name: 'httpHeaders' + type: KeyValuePairs + description: | + HTTP headers to use for all responses from these URLs. + An object containing a list of "key:value" value pairs.". + - name: 'mimeType' + type: String + description: | + MIME type used to serve all files served by this handler. + Defaults to file-specific MIME types, which are derived from each file's filename extension. + - name: 'expiration' + type: String + description: | + Time a static file served by this handler should be cached by web proxies and browsers. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example "3.5s". 
+ Default is '0s' + default_value: "0s" + - name: 'requireMatchingFile' + type: Boolean + description: | + Whether this handler should match the request if the file referenced by the handler does not exist. + - name: 'applicationReadable' + type: Boolean + description: | + Whether files should also be uploaded as code data. By default, files declared in static file handlers are + uploaded as static data and are only served to end users; they cannot be read by the application. If enabled, + uploads are charged against both your code and static data storage resource quotas. + - name: 'runtimeMainExecutablePath' + type: String + description: | + The path or name of the app's main executable. + - name: 'serviceAccount' + type: String + description: | + The identity that the deployed version will run as. Admin API will use the App Engine Appspot service account as + default if this field is neither provided in app.yaml file nor through CLI flag. + default_from_api: true + - name: 'apiConfig' + type: NestedObject + description: | + Serving configuration for Google Cloud Endpoints. + properties: + - name: 'authFailAction' + type: Enum + description: | + Action to take when users access resources that require authentication. + default_value: "AUTH_FAIL_ACTION_REDIRECT" + enum_values: + - 'AUTH_FAIL_ACTION_REDIRECT' + - 'AUTH_FAIL_ACTION_UNAUTHORIZED' + - name: 'login' + type: Enum + description: | + Level of login required to access this resource. + default_value: "LOGIN_OPTIONAL" + enum_values: + - 'LOGIN_OPTIONAL' + - 'LOGIN_ADMIN' + - 'LOGIN_REQUIRED' + - name: 'script' + type: String + description: | + Path to the script from the application root directory. + required: true + - name: 'securityLevel' + type: Enum + description: | + Security (HTTPS) enforcement for this URL. + enum_values: + - 'SECURE_DEFAULT' + - 'SECURE_NEVER' + - 'SECURE_OPTIONAL' + - 'SECURE_ALWAYS' + - name: 'url' + type: String + description: | + URL to serve the endpoint at. 
+ - name: 'envVariables' + type: KeyValuePairs + description: |- + Environment variables available to the application. As these are not returned in the API request, Terraform will not detect any changes made outside of the Terraform config. + ignore_read: true + - name: 'defaultExpiration' + type: String + description: | + Duration that static files should be cached by web proxies and browsers. + Only applicable if the corresponding StaticFilesHandler does not specify its own expiration time. + - name: 'readinessCheck' + type: NestedObject + description: | + Configures readiness health checking for instances. Unhealthy instances are not put into the backend traffic rotation. + required: true + properties: + - name: 'path' + type: String + description: | + The request path. + required: true + - name: 'host' + type: String + description: | + Host header to send when performing a HTTP Readiness check. Example: "myapp.appspot.com" + - name: 'failureThreshold' + type: Double + description: | + Number of consecutive failed checks required before removing traffic. Default: 2. + default_value: 2.0 + - name: 'successThreshold' + type: Double + description: | + Number of consecutive successful checks required before receiving traffic. Default: 2. + default_value: 2.0 + - name: 'checkInterval' + type: String + description: | + Interval between health checks. Default: "5s". + default_value: "5s" + - name: 'timeout' + type: String + description: | + Time before the check is considered failed. Default: "4s" + default_value: "4s" + - name: 'appStartTimeout' + type: String + description: | + A maximum time limit on application initialization, measured from moment the application successfully + replies to a healthcheck until it is ready to serve traffic. Default: "300s" + default_value: "300s" + - name: 'livenessCheck' + type: NestedObject + description: | + Health checking configuration for VM instances. Unhealthy instances are killed and replaced with new instances. 
+ required: true + properties: + - name: 'path' + type: String + description: | + The request path. + required: true + - name: 'host' + type: String + description: | + Host header to send when performing a HTTP Readiness check. Example: "myapp.appspot.com" + - name: 'failureThreshold' + type: Double + description: | + Number of consecutive failed checks required before considering the VM unhealthy. Default: 4. + default_value: 4.0 + - name: 'successThreshold' + type: Double + description: | + Number of consecutive successful checks required before considering the VM healthy. Default: 2. + default_value: 2.0 + - name: 'checkInterval' + type: String + description: | + Interval between health checks. + default_value: "30s" + - name: 'timeout' + type: String + description: | + Time before the check is considered failed. Default: "4s" + default_value: "4s" + - name: 'initialDelay' + type: String + description: | + The initial delay before starting to execute the checks. Default: "300s" + default_value: "300s" + - name: 'nobuildFilesRegex' + type: String + description: | + Files that match this pattern will not be built into this version. Only applicable for Go runtimes. + - name: 'deployment' + type: NestedObject + description: | + Code and application artifacts that make up this version. + ignore_read: true + properties: + - name: 'zip' + type: NestedObject + description: 'Zip File' + at_least_one_of: + - 'deployment.0.zip' + - 'deployment.0.files' + - 'deployment.0.container' + properties: + - name: 'sourceUrl' + type: String + description: 'Source URL' + required: true + - name: 'filesCount' + type: Integer + description: 'files count' + - name: 'files' + type: Map + description: | + Manifest of the files stored in Google Cloud Storage that are included as part of this version. + All files must be readable using the credentials supplied with this call. 
+ at_least_one_of: + - 'deployment.0.zip' + - 'deployment.0.files' + - 'deployment.0.container' + key_name: 'name' + key_description: | + name of file + value_type: + type: NestedObject + properties: + - name: 'sha1Sum' + type: String + description: | + SHA1 checksum of the file + - name: 'sourceUrl' + type: String + description: | + Source URL + required: true + - name: 'container' + type: NestedObject + description: 'The Docker image for the container that runs the version.' + default_from_api: true + at_least_one_of: + - 'deployment.0.zip' + - 'deployment.0.files' + - 'deployment.0.container' + properties: + - name: 'image' + type: String + description: | + URI to the hosted container image in Google Container Registry. The URI must be fully qualified and include a tag or digest. + Examples: "gcr.io/my-project/image:tag" or "gcr.io/my-project/image@digest" + required: true + - name: 'cloudBuildOptions' + type: NestedObject + description: + Options for the build operations performed as a part of the version + deployment. Only applicable when creating a version using source code + directly. + at_least_one_of: + - 'deployment.0.zip' + - 'deployment.0.files' + - 'deployment.0.container' + properties: + - name: 'appYamlPath' + type: String + description: | + Path to the yaml file used in deployment, used to determine runtime configuration details. + required: true + - name: 'cloudBuildTimeout' + type: String + description: | + The Cloud Build timeout used as part of any dependent builds performed by version creation. Defaults to 10 minutes. + + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". + - name: 'endpointsApiService' + type: NestedObject + description: | + Cloud Endpoints configuration. If endpointsApiService is set, the Cloud Endpoints Extensible Service Proxy + will be provided to serve the API implemented by the app. + properties: + - name: 'name' + type: String + description: | + Endpoints service name which is the name of the "service" resource in the Service Management API. 
+ For example "myapi.endpoints.myproject.cloud.goog" + required: true + - name: 'configId' + type: String + description: | + Endpoints service configuration ID as specified by the Service Management API. For example "2016-09-19r1". + + By default, the rollout strategy for Endpoints is "FIXED". This means that Endpoints starts up with a particular configuration ID. + When a new configuration is rolled out, Endpoints must be given the new configuration ID. The configId field is used to give the configuration ID + and is required in this case. + + Endpoints also has a rollout strategy called "MANAGED". When using this, Endpoints fetches the latest configuration and does not need + the configuration ID. In this case, configId must be omitted. + - name: 'rolloutStrategy' + type: Enum + description: | + Endpoints rollout strategy. If FIXED, configId must be specified. If MANAGED, configId must be omitted. + default_value: "FIXED" + enum_values: + - 'FIXED' + - 'MANAGED' + - name: 'disableTraceSampling' + type: Boolean + description: + Enable or disable trace sampling. By default, this is set to false for + enabled. + default_value: false + - name: 'entrypoint' + type: NestedObject + description: | + The entrypoint for the application. + ignore_read: true + properties: + - name: 'shell' + type: String + description: | + The format should be a shell command that can be fed to bash -c. + required: true + - name: 'vpcAccessConnector' + type: NestedObject + description: | + Enables VPC connectivity for standard apps. + properties: + - name: 'name' + type: String + description: | + Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/us-central1/connectors/c1. + required: true + - name: 'automaticScaling' + type: NestedObject + description: | + Automatic scaling is based on request rate, response latencies, and other application metrics. 
+ exactly_one_of: + - 'automatic_scaling' + - 'manual_scaling' + properties: + - name: 'coolDownPeriod' + type: String + description: | + The time period that the Autoscaler should wait before it starts collecting information from a new instance. + This prevents the autoscaler from collecting information when the instance is initializing, + during which the collected usage would not be reliable. Default: 120s + default_value: "120s" + - name: 'cpuUtilization' + type: NestedObject + description: | + Target scaling by CPU usage. + required: true + properties: + - name: 'aggregationWindowLength' + type: String + description: | + Period of time over which CPU utilization is calculated. + - name: 'targetUtilization' + type: Double + description: | + Target CPU utilization ratio to maintain when scaling. Must be between 0 and 1. + required: true + - name: 'maxConcurrentRequests' + type: Integer + description: | + Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance. + + Defaults to a runtime-specific value. + default_from_api: true + - name: 'maxIdleInstances' + type: Integer + description: | + Maximum number of idle instances that should be maintained for this version. + - name: 'maxTotalInstances' + type: Integer + description: | + Maximum number of instances that should be started to handle requests for this version. Default: 20 + default_value: 20 + - name: 'maxPendingLatency' + type: String + description: | + Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it. + - name: 'minIdleInstances' + type: Integer + description: | + Minimum number of idle instances that should be maintained for this version. Only applicable for the default version of a service. + - name: 'minTotalInstances' + type: Integer + description: | + Minimum number of running instances that should be maintained for this version. 
Default: 2 + default_value: 2 + - name: 'minPendingLatency' + type: String + description: | + Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it. + - name: 'requestUtilization' + type: NestedObject + description: | + Target scaling by request utilization. + properties: + - name: 'targetRequestCountPerSecond' + type: String + description: | + Target requests per second. + at_least_one_of: + - 'automatic_scaling.0.request_utilization.0.target_request_count_per_second' + - 'automatic_scaling.0.request_utilization.0.target_concurrent_requests' + - name: 'targetConcurrentRequests' + type: Double + description: | + Target number of concurrent requests. + at_least_one_of: + - 'automatic_scaling.0.request_utilization.0.target_request_count_per_second' + - 'automatic_scaling.0.request_utilization.0.target_concurrent_requests' + - name: 'diskUtilization' + type: NestedObject + description: | + Target scaling by disk usage. + properties: + - name: 'targetWriteBytesPerSecond' + type: Integer + description: | + Target bytes written per second. + at_least_one_of: + - 'automatic_scaling.0.disk_utilization.0.target_write_bytes_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_write_ops_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_read_bytes_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_read_ops_per_second' + - name: 'targetWriteOpsPerSecond' + type: Integer + description: | + Target ops written per second. + at_least_one_of: + - 'automatic_scaling.0.disk_utilization.0.target_write_bytes_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_write_ops_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_read_bytes_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_read_ops_per_second' + - name: 'targetReadBytesPerSecond' + type: Integer + description: | + Target bytes read per second. 
+ at_least_one_of: + - 'automatic_scaling.0.disk_utilization.0.target_write_bytes_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_write_ops_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_read_bytes_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_read_ops_per_second' + - name: 'targetReadOpsPerSecond' + type: Integer + description: | + Target ops read per seconds. + at_least_one_of: + - 'automatic_scaling.0.disk_utilization.0.target_write_bytes_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_write_ops_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_read_bytes_per_second' + - 'automatic_scaling.0.disk_utilization.0.target_read_ops_per_second' + - name: 'networkUtilization' + type: NestedObject + description: | + Target scaling by network usage. + properties: + - name: 'targetSentBytesPerSecond' + type: Integer + description: | + Target bytes sent per second. + at_least_one_of: + - 'automatic_scaling.0.network_utilization.0.target_sent_bytes_per_second' + - 'automatic_scaling.0.network_utilization.0.target_sent_packets_per_second' + - 'automatic_scaling.0.network_utilization.0.target_received_bytes_per_second' + - 'automatic_scaling.0.network_utilization.0.target_received_packets_per_second' + - name: 'targetSentPacketsPerSecond' + type: Integer + description: | + Target packets sent per second. + at_least_one_of: + - 'automatic_scaling.0.network_utilization.0.target_sent_bytes_per_second' + - 'automatic_scaling.0.network_utilization.0.target_sent_packets_per_second' + - 'automatic_scaling.0.network_utilization.0.target_received_bytes_per_second' + - 'automatic_scaling.0.network_utilization.0.target_received_packets_per_second' + - name: 'targetReceivedBytesPerSecond' + type: Integer + description: | + Target bytes received per second. 
+ at_least_one_of: + - 'automatic_scaling.0.network_utilization.0.target_sent_bytes_per_second' + - 'automatic_scaling.0.network_utilization.0.target_sent_packets_per_second' + - 'automatic_scaling.0.network_utilization.0.target_received_bytes_per_second' + - 'automatic_scaling.0.network_utilization.0.target_received_packets_per_second' + - name: 'targetReceivedPacketsPerSecond' + type: Integer + description: | + Target packets received per second. + at_least_one_of: + - 'automatic_scaling.0.network_utilization.0.target_sent_bytes_per_second' + - 'automatic_scaling.0.network_utilization.0.target_sent_packets_per_second' + - 'automatic_scaling.0.network_utilization.0.target_received_bytes_per_second' + - 'automatic_scaling.0.network_utilization.0.target_received_packets_per_second' + - name: 'manualScaling' + type: NestedObject + description: | + A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time. + exactly_one_of: + - 'automatic_scaling' + - 'manual_scaling' + properties: + - name: 'instances' + type: Integer + description: | + Number of instances to assign to the service at the start. + + **Note:** When managing the number of instances at runtime through the App Engine Admin API or the (now deprecated) Python 2 + Modules API set_num_instances() you must use `lifecycle.ignore_changes = ["manual_scaling"[0].instances]` to prevent drift detection. + required: true diff --git a/mmv1/products/appengine/go_Service.yaml b/mmv1/products/appengine/go_Service.yaml new file mode 100644 index 000000000000..e8ec7bec64ac --- /dev/null +++ b/mmv1/products/appengine/go_Service.yaml @@ -0,0 +1,46 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Service' +description: | + A Service resource is a logical component of an application that can share state and communicate in a secure fashion with other services. + For example, an application that handles customer requests might include separate services to handle tasks such as backend data analysis or API requests from mobile devices. + Each service has a collection of versions that define a specific set of code used to implement the functionality of that service. +exclude: true +references: + guides: + 'Official Documentation': 'https://cloud.google.com/appengine/docs/admin-api/deploying-overview' + api: 'https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services' +docs: +base_url: 'apps/{{project}}/services' +self_link: 'apps/{{project}}/services/{{id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +parameters: +properties: + - name: 'name' + type: String + description: | + Full path to the Service resource in the API. Example apps/myapp/services/default. + This field is used in responses only. Any value specified here in a request is ignored. + output: true + - name: 'id' + type: String + description: | + Relative name of the service within the application. Example default. 
+ output: true diff --git a/mmv1/products/appengine/go_ServiceNetworkSettings.yaml b/mmv1/products/appengine/go_ServiceNetworkSettings.yaml new file mode 100644 index 000000000000..bcb415b7f01a --- /dev/null +++ b/mmv1/products/appengine/go_ServiceNetworkSettings.yaml @@ -0,0 +1,83 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServiceNetworkSettings' +description: | + A NetworkSettings resource is a container for ingress settings for a version or service. 
+references: + guides: + api: 'https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services' +docs: +id_format: 'apps/{{project}}/services/{{service}}' +base_url: 'apps/{{project}}/services' +self_link: 'apps/{{project}}/services/{{service}}' +create_url: 'apps/{{project}}/services/{{service}}?updateMask=networkSettings' +create_verb: 'PATCH' +update_url: 'apps/{{project}}/services/{{service}}' +update_verb: 'PATCH' +update_mask: true +skip_delete: true +mutex: 'apps/{{project}}' +import_format: + - 'apps/{{project}}/services/{{service}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'appengine#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: + test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl' +examples: + - name: 'app_engine_service_network_settings' + primary_resource_id: 'internalapp' + vars: + bucket_name: 'appengine-static-content' +parameters: +properties: + - name: 'service' + type: String + description: | + The name of the service these settings apply to. + api_name: id + required: true + - name: 'networkSettings' + type: NestedObject + description: | + Ingress settings for this service. Will apply to all versions. + required: true + properties: + - name: 'ingressTrafficAllowed' + type: Enum + description: | + The ingress settings for version or service. 
+ default_value: "INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED" + enum_values: + - 'INGRESS_TRAFFIC_ALLOWED_UNSPECIFIED' + - 'INGRESS_TRAFFIC_ALLOWED_ALL' + - 'INGRESS_TRAFFIC_ALLOWED_INTERNAL_ONLY' + - 'INGRESS_TRAFFIC_ALLOWED_INTERNAL_AND_LB' diff --git a/mmv1/products/appengine/go_ServiceSplitTraffic.yaml b/mmv1/products/appengine/go_ServiceSplitTraffic.yaml new file mode 100644 index 000000000000..64267de811d0 --- /dev/null +++ b/mmv1/products/appengine/go_ServiceSplitTraffic.yaml @@ -0,0 +1,99 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'ServiceSplitTraffic' +description: | + Traffic routing configuration for versions within a single service. Traffic splits define how traffic directed to the service is assigned to versions. 
+references: + guides: + api: 'https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services' +docs: +id_format: 'apps/{{project}}/services/{{service}}' +base_url: 'apps/{{project}}/services' +self_link: 'apps/{{project}}/services/{{service}}' +create_url: 'apps/{{project}}/services/{{service}}?migrateTraffic={{migrate_traffic}}&updateMask=split' +create_verb: 'PATCH' +update_url: 'apps/{{project}}/services/{{service}}?migrateTraffic={{migrate_traffic}}' +update_verb: 'PATCH' +update_mask: true +skip_delete: true +mutex: 'apps/{{project}}' +import_format: + - 'apps/{{project}}/services/{{service}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'appengine#operation' + path: 'name' + wait_ms: 1000 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +custom_code: + test_check_destroy: 'templates/terraform/custom_check_destroy/go/skip_delete_during_test.go.tmpl' +examples: + - name: 'app_engine_service_split_traffic' + primary_resource_id: 'liveapp' + vars: + service_id: 'default' + split.allocations.v1: '1' + bucket_name: 'appengine-static-content' + test_env_vars: + org_id: 'ORG_ID' +parameters: + - name: 'migrate_traffic' + type: Boolean + description: | + If set to true traffic will be migrated to this version. + url_param_only: true +properties: + - name: 'service' + type: String + description: | + The name of the service these settings apply to. + api_name: id + required: true + - name: 'split' + type: NestedObject + description: | + Mapping that defines fractional HTTP traffic diversion to different versions within the service. + required: true + ignore_read: true + properties: + - name: 'shardBy' + type: Enum + description: | + Mechanism used to determine which version a request is sent to. 
The traffic selection algorithm will be stable for either type until allocations are changed. + enum_values: + - 'UNSPECIFIED' + - 'COOKIE' + - 'IP' + - 'RANDOM' + - name: 'allocations' + type: KeyValuePairs + description: | + Mapping from version IDs within the service to fractional (0.000, 1] allocations of traffic for that version. Each version can be specified only once, but some versions in the service may not have any traffic allocation. Services that have traffic allocated cannot be deleted until either the service is deleted or their traffic allocation is removed. Allocations must sum to 1. Up to two decimal place precision is supported for IP-based splits and up to three decimal places is supported for cookie-based splits. + + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/float64_to_string.go.tmpl' diff --git a/mmv1/products/appengine/go_StandardAppVersion.yaml b/mmv1/products/appengine/go_StandardAppVersion.yaml new file mode 100644 index 000000000000..132fece11508 --- /dev/null +++ b/mmv1/products/appengine/go_StandardAppVersion.yaml @@ -0,0 +1,448 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'StandardAppVersion' +description: | + Standard App Version resource to create a new version of standard GAE Application. 
+ Learn about the differences between the standard environment and the flexible environment + at https://cloud.google.com/appengine/docs/the-appengine-environments. + Currently supporting Zip and File Containers. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/appengine/docs/standard' + api: 'https://cloud.google.com/appengine/docs/admin-api/reference/rest/v1/apps.services.versions' +docs: +id_format: 'apps/{{project}}/services/{{service}}/versions/{{version_id}}' +base_url: 'apps/{{project}}/services/{{service}}/versions' +self_link: 'apps/{{project}}/services/{{service}}/versions/{{version_id}}?view=FULL' +create_url: 'apps/{{project}}/services/{{service}}/versions' +update_url: 'apps/{{project}}/services/{{service}}/versions' +update_verb: 'POST' +update_mask: false +delete_url: 'apps/{{project}}/services/{{service}}/versions/{{version_id}}' +mutex: 'apps/{{project}}' +import_format: + - 'apps/{{project}}/services/{{service}}/versions/{{version_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: 'projects/{{project}}/global/operations/{{op_id}}' + kind: 'appengine#operation' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 + result: + path: 'targetLink' + resource_inside_response: false + error: + path: 'error/errors' + message: 'message' +collection_url_key: 'versions' +custom_code: + custom_delete: 'templates/terraform/custom_delete/go/appversion_delete.go.tmpl' + test_check_destroy: 'templates/terraform/custom_check_destroy/go/appengine.go.tmpl' +error_retry_predicates: + + - 'transport_tpg.IsAppEngineRetryableError' +examples: + - name: 'app_engine_standard_app_version' + primary_resource_id: 'myapp_v1' + vars: + project_id: 'ae-project' + bucket_name: 'appengine-static-content' + service_name: 'ae-service' + account_id: 'my-account' + test_env_vars: + 
org_id: 'ORG_ID' + ignore_read_extra: + - 'delete_service_on_destroy' +virtual_fields: + - name: 'noop_on_destroy' + description: | + If set to `true`, the application version will not be deleted. + type: Boolean + default_value: false + - name: 'delete_service_on_destroy' + description: | + If set to `true`, the service will be deleted if it is the last version. + type: Boolean + default_value: false +parameters: + - name: 'service' + type: ResourceRef + description: | + AppEngine service resource + url_param_only: true + required: true + resource: 'Service' + imports: 'name' +properties: + - name: 'name' + type: String + description: | + Full path to the Version resource in the API. Example, "v1". + output: true + - name: 'version_id' + type: String + description: | + Relative name of the version within the service. For example, `v1`. Version names can contain only lowercase letters, numbers, or hyphens. Reserved names,"default", "latest", and any name with the prefix "ah-". + api_name: id + immutable: true + - name: 'runtime' + type: String + description: | + Desired runtime. Example python27. + required: true + - name: 'serviceAccount' + type: String + description: | + The identity that the deployed version will run as. Admin API will use the App Engine Appspot service account as default if this field is neither provided in app.yaml file nor through CLI flag. + default_from_api: true + - name: 'threadsafe' + type: Boolean + description: | + Whether multiple requests can be dispatched to this version at once. + ignore_read: true + - name: 'appEngineApis' + type: Boolean + description: | + Allows App Engine second generation runtimes to access the legacy bundled services. + - name: 'runtimeApiVersion' + type: String + description: | + The version of the API in the given runtime environment. 
+ Please see the app.yaml reference for valid values at `https://cloud.google.com/appengine/docs/standard//config/appref`\ + Substitute `` with `python`, `java`, `php`, `ruby`, `go` or `nodejs`. + - name: 'handlers' + type: Array + description: | + An ordered list of URL-matching patterns that should be applied to incoming requests. + The first matching URL handles the request and other request handlers are not attempted. + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'urlRegex' + type: String + description: | + URL prefix. Uses regular expression syntax, which means regexp special characters must be escaped, but should not contain groupings. + All URLs that begin with this prefix are handled by this handler, using the portion of the URL after the prefix as part of the file path. + - name: 'securityLevel' + type: Enum + description: | + Security (HTTPS) enforcement for this URL. + required: false + enum_values: + - 'SECURE_DEFAULT' + - 'SECURE_NEVER' + - 'SECURE_OPTIONAL' + - 'SECURE_ALWAYS' + - name: 'login' + type: Enum + description: | + Methods to restrict access to a URL based on login status. + required: false + enum_values: + - 'LOGIN_OPTIONAL' + - 'LOGIN_ADMIN' + - 'LOGIN_REQUIRED' + - name: 'authFailAction' + type: Enum + description: | + Actions to take when the user is not logged in. + required: false + enum_values: + - 'AUTH_FAIL_ACTION_REDIRECT' + - 'AUTH_FAIL_ACTION_UNAUTHORIZED' + - name: 'redirectHttpResponseCode' + type: Enum + description: | + 30x code to use when performing redirects for the secure field. + required: false + enum_values: + - 'REDIRECT_HTTP_RESPONSE_CODE_301' + - 'REDIRECT_HTTP_RESPONSE_CODE_302' + - 'REDIRECT_HTTP_RESPONSE_CODE_303' + - 'REDIRECT_HTTP_RESPONSE_CODE_307' + - name: 'script' + type: NestedObject + description: | + Executes a script to handle the requests that match this URL pattern. 
+ Only the auto value is supported for Node.js in the App Engine standard environment, for example "script:" "auto". + properties: + - name: 'scriptPath' + type: String + description: | + Path to the script from the application root directory. + required: true + - name: 'staticFiles' + type: NestedObject + description: | + Files served directly to the user for a given URL, such as images, CSS stylesheets, or JavaScript source files. Static file handlers describe which files in the application directory are static files, and which URLs serve them. + properties: + - name: 'path' + type: String + description: | + Path to the static files matched by the URL pattern, from the application root directory. The path can refer to text matched in groupings in the URL pattern. + - name: 'uploadPathRegex' + type: String + description: | + Regular expression that matches the file paths for all files that should be referenced by this handler. + - name: 'httpHeaders' + type: KeyValuePairs + description: | + HTTP headers to use for all responses from these URLs. + An object containing a list of "key:value" value pairs.". + - name: 'mimeType' + type: String + description: | + MIME type used to serve all files served by this handler. + Defaults to file-specific MIME types, which are derived from each file's filename extension. + - name: 'expiration' + type: String + description: | + Time a static file served by this handler should be cached by web proxies and browsers. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example "3.5s". + - name: 'requireMatchingFile' + type: Boolean + description: | + Whether this handler should match the request if the file referenced by the handler does not exist. + - name: 'applicationReadable' + type: Boolean + description: | + Whether files should also be uploaded as code data. 
By default, files declared in static file handlers are uploaded as + static data and are only served to end users; they cannot be read by the application. If enabled, uploads are charged + against both your code and static data storage resource quotas. + - name: 'libraries' + type: Array + description: | + Configuration for third-party Python runtime libraries that are required by the application. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + Name of the library. Example "django". + - name: 'version' + type: String + description: | + Version of the library to select, or "latest". + - name: 'envVariables' + type: KeyValuePairs + description: | + Environment variables available to the application. + ignore_read: true + - name: 'deployment' + type: NestedObject + description: | + Code and application artifacts that make up this version. + required: true + ignore_read: true + properties: + - name: 'zip' + type: NestedObject + description: 'Zip File' + required: false + at_least_one_of: + - 'deployment.0.zip' + - 'deployment.0.files' + properties: + - name: 'sourceUrl' + type: String + description: 'Source URL' + required: true + - name: 'filesCount' + type: Integer + description: 'files count' + required: false + - name: 'files' + type: Map + description: | + Manifest of the files stored in Google Cloud Storage that are included as part of this version. + All files must be readable using the credentials supplied with this call. + required: false + at_least_one_of: + - 'deployment.0.zip' + - 'deployment.0.files' + key_name: 'name' + key_description: | + name of file + value_type: + type: NestedObject + properties: + - name: 'sha1Sum' + type: String + description: | + SHA1 checksum of the file + - name: 'sourceUrl' + type: String + description: | + Source URL + required: true + - name: 'entrypoint' + type: NestedObject + description: | + The entrypoint for the application. 
+ required: true + ignore_read: true + properties: + - name: 'shell' + type: String + description: | + The format should be a shell command that can be fed to bash -c. + required: true + - name: 'vpcAccessConnector' + type: NestedObject + description: | + Enables VPC connectivity for standard apps. + properties: + - name: 'name' + type: String + description: | + Full Serverless VPC Access Connector name e.g. /projects/my-project/locations/us-central1/connectors/c1. + required: true + - name: 'egressSetting' + type: String + description: | + The egress setting for the connector, controlling what traffic is diverted through it. + - name: 'inboundServices' + type: Array + description: | + A list of the types of messages that this application is able to receive. + is_set: true + item_type: + type: Enum + description: | + One type of message that this application is able to receive. + enum_values: + - 'INBOUND_SERVICE_MAIL' + - 'INBOUND_SERVICE_MAIL_BOUNCE' + - 'INBOUND_SERVICE_XMPP_ERROR' + - 'INBOUND_SERVICE_XMPP_MESSAGE' + - 'INBOUND_SERVICE_XMPP_SUBSCRIBE' + - 'INBOUND_SERVICE_XMPP_PRESENCE' + - 'INBOUND_SERVICE_CHANNEL_PRESENCE' + - 'INBOUND_SERVICE_WARMUP' + - name: 'instanceClass' + type: String + description: | + Instance class that is used to run this version. Valid values are + AutomaticScaling: F1, F2, F4, F4_1G + BasicScaling or ManualScaling: B1, B2, B4, B4_1G, B8 + Defaults to F1 for AutomaticScaling and B2 for ManualScaling and BasicScaling. If no scaling is specified, AutomaticScaling is chosen. + default_from_api: true + - name: 'automaticScaling' + type: NestedObject + description: | + Automatic scaling is based on request rate, response latencies, and other application metrics. 
+ conflicts: + - basic_scaling + - manual_scaling + custom_flatten: 'templates/terraform/custom_flatten/go/appengine_standardappversion_automatic_scaling_handlenil.go.tmpl' + properties: + - name: 'maxConcurrentRequests' + type: Integer + description: | + Number of concurrent requests an automatic scaling instance can accept before the scheduler spawns a new instance. + + Defaults to a runtime-specific value. + - name: 'maxIdleInstances' + type: Integer + description: | + Maximum number of idle instances that should be maintained for this version. + - name: 'maxPendingLatency' + type: String + description: | + Maximum amount of time that a request should wait in the pending queue before starting a new instance to handle it. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". + - name: 'minIdleInstances' + type: Integer + description: | + Minimum number of idle instances that should be maintained for this version. Only applicable for the default version of a service. + - name: 'minPendingLatency' + type: String + description: | + Minimum amount of time a request should wait in the pending queue before starting a new instance to handle it. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". + - name: 'standardSchedulerSettings' + type: NestedObject + description: | + Scheduler settings for standard environment. + properties: + - name: 'targetCpuUtilization' + type: Double + description: | + Target CPU utilization ratio to maintain when scaling. Should be a value in the range [0.50, 0.95], zero, or a negative value. + - name: 'targetThroughputUtilization' + type: Double + description: | + Target throughput utilization ratio to maintain when scaling. Should be a value in the range [0.50, 0.95], zero, or a negative value. + - name: 'minInstances' + type: Integer + description: | + Minimum number of instances to run for this version. Set to zero to disable minInstances configuration. 
+ - name: 'maxInstances' + type: Integer + description: | + Maximum number of instances to run for this version. Set to zero to disable maxInstances configuration. + - name: 'basicScaling' + type: NestedObject + description: | + Basic scaling creates instances when your application receives requests. Each instance will be shut down when the application becomes idle. Basic scaling is ideal for work that is intermittent or driven by user activity. + conflicts: + - automatic_scaling + - manual_scaling + properties: + - name: 'idleTimeout' + type: String + description: | + Duration of time after the last request that an instance must wait before the instance is shut down. + A duration in seconds with up to nine fractional digits, terminated by 's'. Example: "3.5s". Defaults to 900s. + default_value: "900s" + - name: 'maxInstances' + type: Integer + description: | + Maximum number of instances to create for this version. Must be in the range [1.0, 200.0]. + required: true + - name: 'manualScaling' + type: NestedObject + description: | + A service with manual scaling runs continuously, allowing you to perform complex initialization and rely on the state of its memory over time. + conflicts: + - automatic_scaling + - basic_scaling + properties: + - name: 'instances' + type: Integer + description: | + Number of instances to assign to the service at the start. + + **Note:** When managing the number of instances at runtime through the App Engine Admin API or the (now deprecated) Python 2 + Modules API set_num_instances() you must use `lifecycle.ignore_changes = ["manual_scaling"[0].instances]` to prevent drift detection. + +# StandardAppVersion and FlexibleAppVersion use the same API endpoint (apps.services.versions) +# They are split apart as some of the fields will are necessary for one and not the other, and +# other fields may have different defaults. However, some fields are the same. If fixing a bug +# in one, please check the other for the same fix. 
+ required: true diff --git a/mmv1/products/appengine/go_product.yaml b/mmv1/products/appengine/go_product.yaml new file mode 100644 index 000000000000..c8c6d3e2b51d --- /dev/null +++ b/mmv1/products/appengine/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'AppEngine' +display_name: 'App Engine' +versions: + - name: 'ga' + base_url: 'https://appengine.googleapis.com/v1/' + - name: 'beta' + base_url: 'https://appengine.googleapis.com/v1beta/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/bigquery/Dataset.yaml b/mmv1/products/bigquery/Dataset.yaml index 323e5f06d8b3..5e50f166bf05 100644 --- a/mmv1/products/bigquery/Dataset.yaml +++ b/mmv1/products/bigquery/Dataset.yaml @@ -153,17 +153,9 @@ properties: name: 'specialGroup' description: | A special group to grant access to. Possible values include: - - * `projectOwners`: Owners of the enclosing project. - - * `projectReaders`: Readers of the enclosing project. - - * `projectWriters`: Writers of the enclosing project. - - * `allAuthenticatedUsers`: All authenticated BigQuery users. - !ruby/object:Api::Type::String name: 'iamMember' @@ -277,8 +269,6 @@ properties: description: | The default lifetime of all tables in the dataset, in milliseconds. The minimum value is 3600000 milliseconds (one hour). 
- - Once this property is set, all newly-created tables in the dataset will have an `expirationTime` property set to the creation time plus the value in this property, and changing the value will only affect @@ -295,8 +285,6 @@ properties: description: | The default partition expiration for all partitioned tables in the dataset, in milliseconds. - - Once this property is set, all newly-created partitioned tables in the dataset will have an `expirationMs` property in the `timePartitioning` settings set to this value, and changing the value will only @@ -355,14 +343,10 @@ properties: description: | The geographic location where the dataset should reside. See [official docs](https://cloud.google.com/bigquery/docs/dataset-locations). - - There are two types of locations, regional or multi-regional. A regional location is a specific geographic place, such as Tokyo, and a multi-regional location is a large geographic area, such as the United States, that contains at least two geographic places. - - The default value is multi-regional location `US`. Changing this forces a new resource to be created. default_value: US diff --git a/mmv1/products/bigquery/DatasetAccess.yaml b/mmv1/products/bigquery/DatasetAccess.yaml index 6838cee0b263..6afd2d7acce8 100644 --- a/mmv1/products/bigquery/DatasetAccess.yaml +++ b/mmv1/products/bigquery/DatasetAccess.yaml @@ -158,17 +158,9 @@ properties: name: 'specialGroup' description: | A special group to grant access to. Possible values include: - - * `projectOwners`: Owners of the enclosing project. - - * `projectReaders`: Readers of the enclosing project. - - * `projectWriters`: Writers of the enclosing project. - - * `allAuthenticatedUsers`: All authenticated BigQuery users. 
exactly_one_of: - user_by_email diff --git a/mmv1/products/bigquery/Table.yaml b/mmv1/products/bigquery/Table.yaml index 9830238c0a5e..388d0cd0d9c1 100644 --- a/mmv1/products/bigquery/Table.yaml +++ b/mmv1/products/bigquery/Table.yaml @@ -43,6 +43,13 @@ examples: vars: dataset_id: 'dataset_id' table_id: 'table_id' +virtual_fields: + - !ruby/object:Api::Type::Boolean + name: 'allow_resource_tags_on_deletion' + min_version: beta + description: | + If set to true, it allows table deletion when there are still resource tags attached. + default_value: false parameters: # TODO(alexstephen): Remove once we have support for placing # nested object fields in URL @@ -540,10 +547,3 @@ properties: in the namespaced format, for example "123456789012/environment" where 123456789012 is the ID of the parent organization or project resource for this tag key. Tag value is expected to be the short name, for example "Production". -virtual_fields: - - !ruby/object:Api::Type::Boolean - name: 'allow_resource_tags_on_deletion' - min_version: beta - description: | - If set to true, it allows table deletion when there are still resource tags attached. - default_value: false diff --git a/mmv1/products/bigquery/go_Dataset.yaml b/mmv1/products/bigquery/go_Dataset.yaml new file mode 100644 index 000000000000..063c0f23623d --- /dev/null +++ b/mmv1/products/bigquery/go_Dataset.yaml @@ -0,0 +1,407 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Dataset' +kind: 'bigquery#dataset' +description: | + Datasets allow you to organize and control access to your tables. +references: + guides: + 'Datasets Intro': 'https://cloud.google.com/bigquery/docs/datasets-intro' + api: 'https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets' +docs: + warning: | + You must specify the role field using the legacy format `OWNER` instead of `roles/bigquery.dataOwner`. + The API does accept both formats but it will always return the legacy format which results in Terraform + showing permanent diff on each plan and apply operation. +base_url: 'projects/{{project}}/datasets' +self_link: 'projects/{{project}}/datasets/{{dataset_id}}' +has_self_link: true +delete_url: 'projects/{{project}}/datasets/{{dataset_id}}?deleteContents={{delete_contents_on_destroy}}' +import_format: + - 'projects/{{project}}/datasets/{{dataset_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: + constants: 'templates/terraform/constants/go/bigquery_dataset.go.tmpl' +skip_sweeper: true +examples: + - name: 'bigquery_dataset_basic' + primary_resource_id: 'dataset' + vars: + dataset_id: 'example_dataset' + account_name: 'bqowner' + - name: 'bigquery_dataset_with_max_time_travel_hours' + primary_resource_id: 'dataset' + vars: + dataset_id: 'example_dataset' + account_name: 'bqowner' + skip_docs: true + - name: 'bigquery_dataset_cmek' + primary_resource_id: 'dataset' + vars: + dataset_id: 'example_dataset' + key_name: 'example-key' + keyring_name: 'example-keyring' + skip_test: true + - name: 'bigquery_dataset_authorized_dataset' + primary_resource_id: 'dataset' + vars: + private: 'private' + public: 'public' + account_name: 'bqowner' + - name: 'bigquery_dataset_authorized_routine' + primary_resource_id: 'private' + vars: + private_dataset: 'private_dataset' + public_dataset: 'public_dataset' + public_routine: 
'public_routine' + test_env_vars: + service_account: 'SERVICE_ACCT' + - name: 'bigquery_dataset_case_insensitive_names' + primary_resource_id: 'dataset' + vars: + dataset_id: 'example_dataset' + account_name: 'bqowner' + skip_docs: true + - name: 'bigquery_dataset_default_collation_set' + primary_resource_id: 'dataset' + vars: + dataset_id: 'example_dataset' + account_name: 'bqowner' + skip_docs: true + - name: 'bigquery_dataset_external_reference_aws_test' + primary_resource_id: 'dataset' + vars: + dataset_id: 'example_dataset' + skip_docs: true + - name: 'bigquery_dataset_external_reference_aws' + primary_resource_id: 'dataset' + vars: + dataset_id: 'example_dataset' + skip_test: true + - name: 'bigquery_dataset_resource_tags' + primary_resource_id: 'dataset' + primary_resource_name: 'fmt.Sprintf("tf_test_dataset%s", context["random_suffix"])' + min_version: 'beta' + vars: + dataset_id: 'dataset' + tag_key1: 'tag_key1' + tag_value1: 'tag_value1' + tag_key2: 'tag_key2' + tag_value2: 'tag_value2' + skip_docs: true +virtual_fields: + - name: 'delete_contents_on_destroy' + description: | + If set to `true`, delete all the tables in the + dataset when destroying the resource; otherwise, + destroying the resource will fail if tables are present. + type: Boolean + default_value: false +parameters: +properties: + - name: 'maxTimeTravelHours' + type: String + description: + 'Defines the time travel window in hours. The value can be from 48 to 168 + hours (2 to 7 days).' + default_from_api: true + - name: 'access' + type: Array + description: + 'An array of objects that define dataset access for one or more entities.' + is_set: true + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'domain' + type: String + description: | + A domain to grant access to. 
Any users signed in with the + domain specified will be granted the specified access + - name: 'groupByEmail' + type: String + description: An email address of a Google Group to grant access to. + - name: 'role' + type: String + description: | + Describes the rights granted to the user specified by the other + member of the access object. Basic, predefined, and custom roles + are supported. Predefined roles that have equivalent basic roles + are swapped by the API to their basic counterparts. See + [official docs](https://cloud.google.com/bigquery/docs/access-control). + - name: 'specialGroup' + type: String + description: | + A special group to grant access to. Possible values include: + * `projectOwners`: Owners of the enclosing project. + * `projectReaders`: Readers of the enclosing project. + * `projectWriters`: Writers of the enclosing project. + * `allAuthenticatedUsers`: All authenticated BigQuery users. + - name: 'iamMember' + type: String + description: | + Some other type of member that appears in the IAM Policy but isn't a user, + group, domain, or special group. For example: `allUsers` + - name: 'userByEmail' + type: String + description: | + An email address of a user to grant access to. For example: + fred@example.com + - name: 'view' + type: NestedObject + description: | + A view from a different dataset to grant access to. Queries + executed against that view will have read access to tables in + this dataset. The role field is not required when this field is + set. If that view is updated by any user, access to the view + needs to be granted again via an update operation. + properties: + - name: 'datasetId' + type: String + description: The ID of the dataset containing this table. + required: true + - name: 'projectId' + type: String + description: The ID of the project containing this table. + required: true + - name: 'tableId' + type: String + description: | + The ID of the table. 
The ID must contain only letters (a-z, + A-Z), numbers (0-9), or underscores (_). The maximum length + is 1,024 characters. + required: true + - name: 'dataset' + type: NestedObject + description: | + Grants all resources of particular types in a particular dataset read access to the current dataset. + properties: + - name: 'dataset' + type: NestedObject + description: | + The dataset this entry applies to + required: true + properties: + - name: 'datasetId' + type: String + description: The ID of the dataset containing this table. + required: true + - name: 'projectId' + type: String + description: The ID of the project containing this table. + required: true + - name: 'targetTypes' + type: Array + description: | + Which resources in the dataset this entry applies to. Currently, only views are supported, + but additional target types may be added in the future. Possible values: VIEWS + required: true + item_type: + type: String + - name: 'routine' + type: NestedObject + description: | + A routine from a different dataset to grant access to. Queries + executed against that routine will have read access to tables in + this dataset. The role field is not required when this field is + set. If that routine is updated by any user, access to the routine + needs to be granted again via an update operation. + properties: + - name: 'datasetId' + type: String + description: The ID of the dataset containing this table. + required: true + - name: 'projectId' + type: String + description: The ID of the project containing this table. + required: true + - name: 'routineId' + type: String + description: | + The ID of the routine. The ID must contain only letters (a-z, + A-Z), numbers (0-9), or underscores (_). The maximum length + is 256 characters. + required: true + - name: 'creationTime' + type: Integer + description: | + The time when this dataset was created, in milliseconds since the + epoch. 
+ output: true + - name: 'datasetReference' + type: NestedObject + description: 'A reference that identifies the dataset.' + required: true + immutable: true + flatten_object: true + properties: + - name: 'datasetId' + type: String + description: | + A unique ID for this dataset, without the project name. The ID + must contain only letters (a-z, A-Z), numbers (0-9), or + underscores (_). The maximum length is 1,024 characters. + required: true + immutable: true + validation: + function: 'validateDatasetId' + - name: 'defaultTableExpirationMs' + type: Integer + description: | + The default lifetime of all tables in the dataset, in milliseconds. + The minimum value is 3600000 milliseconds (one hour). + Once this property is set, all newly-created tables in the dataset + will have an `expirationTime` property set to the creation time plus + the value in this property, and changing the value will only affect + new tables, not existing ones. When the `expirationTime` for a given + table is reached, that table will be deleted automatically. + If a table's `expirationTime` is modified or removed before the + table expires, or if you provide an explicit `expirationTime` when + creating a table, that value takes precedence over the default + expiration time indicated by this property. + validation: + function: 'validateDefaultTableExpirationMs' + - name: 'defaultPartitionExpirationMs' + type: Integer + description: | + The default partition expiration for all partitioned tables in + the dataset, in milliseconds. + Once this property is set, all newly-created partitioned tables in + the dataset will have an `expirationMs` property in the `timePartitioning` + settings set to this value, and changing the value will only + affect new tables, not existing ones. The storage in a partition will + have an expiration time of its partition time plus this value. 
+ Setting this property overrides the use of `defaultTableExpirationMs` + for partitioned tables: only one of `defaultTableExpirationMs` and + `defaultPartitionExpirationMs` will be used for any new partitioned + table. If you provide an explicit `timePartitioning.expirationMs` when + creating or updating a partitioned table, that value takes precedence + over the default partition expiration time indicated by this property. + - name: 'description' + type: String + description: A user-friendly description of the dataset + - name: 'etag' + type: String + description: | + A hash of the resource. + output: true + - name: 'externalDatasetReference' + type: NestedObject + description: | + Information about the external metadata storage where the dataset is defined. + immutable: true + properties: + - name: 'externalSource' + type: String + description: | + External source that backs this dataset. + required: true + immutable: true + - name: 'connection' + type: String + description: | + The connection id that is used to access the externalSource. + Format: projects/{projectId}/locations/{locationId}/connections/{connectionId} + required: true + immutable: true + - name: 'friendlyName' + type: String + description: A descriptive name for the dataset + send_empty_value: true + - name: 'labels' + type: KeyValueLabels + description: | + The labels associated with this dataset. You can use these to + organize and group your datasets. + immutable: false + - name: 'lastModifiedTime' + type: Integer + description: | + The date when this dataset or any of its tables was last modified, in + milliseconds since the epoch. + output: true + - name: 'location' + type: String + description: | + The geographic location where the dataset should reside. + See [official docs](https://cloud.google.com/bigquery/docs/dataset-locations). + There are two types of locations, regional or multi-regional. 
A regional + location is a specific geographic place, such as Tokyo, and a multi-regional + location is a large geographic area, such as the United States, that + contains at least two geographic places. + The default value is multi-regional location `US`. + Changing this forces a new resource to be created. + immutable: true + diff_suppress_func: 'tpgresource.CaseDiffSuppress' + custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_dataset_location.go.tmpl' + default_value: "US" + - name: 'defaultEncryptionConfiguration' + type: NestedObject + description: | + The default encryption key for all tables in the dataset. Once this property is set, + all newly-created partitioned tables in the dataset will have encryption key set to + this value, unless table creation request (or query) overrides the key. + properties: + - name: 'kmsKeyName' + type: String + description: | + Describes the Cloud KMS encryption key that will be used to protect destination + BigQuery table. The BigQuery Service Account associated with your project requires + access to this encryption key. + required: true + - name: 'isCaseInsensitive' + type: Boolean + description: | + TRUE if the dataset and its table names are case-insensitive, otherwise FALSE. + By default, this is FALSE, which means the dataset and its table names are + case-sensitive. This field does not affect routine references. + default_from_api: true + - name: 'defaultCollation' + type: String + description: | + Defines the default collation specification of future tables created + in the dataset. If a table is created in this dataset without table-level + default collation, then the table inherits the dataset default collation, + which is applied to the string fields that do not have explicit collation + specified. A change to this field affects only tables created afterwards, + and does not alter the existing tables. + + The following values are supported: + - 'und:ci': undetermined locale, case insensitive. 
+ - '': empty string. Default to case-sensitive behavior. + default_from_api: true + - name: 'storageBillingModel' + type: String + description: | + Specifies the storage billing model for the dataset. + Set this flag value to LOGICAL to use logical bytes for storage billing, + or to PHYSICAL to use physical bytes instead. + + LOGICAL is the default if this flag isn't specified. + default_from_api: true + - name: 'resourceTags' + type: KeyValuePairs + description: | + The tags attached to this dataset. Tag keys are globally unique. Tag key is expected to be + in the namespaced format, for example "123456789012/environment" where 123456789012 is the + ID of the parent organization or project resource for this tag key. Tag value is expected + to be the short name, for example "Production". See [Tag definitions](/iam/docs/tags-access-control#definitions) + for more details. + min_version: 'beta' diff --git a/mmv1/products/bigquery/go_DatasetAccess.yaml b/mmv1/products/bigquery/go_DatasetAccess.yaml new file mode 100644 index 000000000000..b126307ef693 --- /dev/null +++ b/mmv1/products/bigquery/go_DatasetAccess.yaml @@ -0,0 +1,295 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'DatasetAccess' +description: | + Gives dataset access for a single entity. 
This resource is intended to be used in cases where + it is not possible to compile a full list of access blocks to include in a + `google_bigquery_dataset` resource, to enable them to be added separately. + + ~> **Note:** If this resource is used alongside a `google_bigquery_dataset` resource, the + dataset resource must either have no defined `access` blocks or a `lifecycle` block with + `ignore_changes = [access]` so they don't fight over which accesses should be on the dataset. + Additionally, both resource cannot be modified in the same apply. +references: + guides: + 'Controlling access to datasets': 'https://cloud.google.com/bigquery/docs/dataset-access-controls' + api: 'https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets' +docs: + warning: | + You must specify the role field using the legacy format `OWNER` instead of `roles/bigquery.dataOwner`. + The API does accept both formats but it will always return the legacy format which results in Terraform + showing permanent diff on each plan and apply operation. 
+base_url: 'projects/{{project}}/datasets/{{dataset_id}}' +self_link: 'projects/{{project}}/datasets/{{dataset_id}}' +create_verb: 'PATCH' +delete_verb: 'PATCH' +immutable: true +mutex: '{{dataset_id}}' +exclude_import: true +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +identity: + - role + - userByEmail + - groupByEmail + - domain + - specialGroup + - iamMember + - view + - dataset + - routine +nested_query: + keys: + - access + is_list_of_ids: false + modify_by_patch: true +custom_code: + extra_schema_entry: 'templates/terraform/extra_schema_entry/go/bigquery_dataset_access.go.tmpl' + constants: 'templates/terraform/constants/go/bigquery_dataset_access.go.tmpl' + post_create: 'templates/terraform/post_create/go/bigquery_dataset_access.go.tmpl' +exclude_tgc: true +skip_sweeper: true +error_retry_predicates: + + - 'transport_tpg.IsBigqueryIAMQuotaError' +examples: + - name: 'bigquery_dataset_access_basic_user' + primary_resource_id: 'access' + vars: + dataset_id: 'example_dataset' + account_name: 'bqowner' + skip_test: true + - name: 'bigquery_dataset_access_view' + primary_resource_id: 'access' + vars: + dataset_id: 'example_dataset' + dataset_id2: 'example_dataset2' + table_id: 'example_table' + skip_test: true + - name: 'bigquery_dataset_access_authorized_dataset' + primary_resource_id: 'access' + vars: + private: 'private' + public: 'public' + skip_test: true + - name: 'bigquery_dataset_access_authorized_routine' + primary_resource_id: 'authorized_routine' + vars: + private_dataset: 'private_dataset' + public_dataset: 'public_dataset' + public_routine: 'public_routine' + skip_test: true +parameters: +properties: + - name: 'datasetId' + type: String + description: | + A unique ID for this dataset, without the project name. The ID + must contain only letters (a-z, A-Z), numbers (0-9), or + underscores (_). The maximum length is 1,024 characters. 
+ required: true + ignore_read: true + - name: 'role' + type: String + description: | + Describes the rights granted to the user specified by the other + member of the access object. Basic, predefined, and custom roles are + supported. Predefined roles that have equivalent basic roles are + swapped by the API to their basic counterparts, and will show a diff + post-create. See + [official docs](https://cloud.google.com/bigquery/docs/access-control). + diff_suppress_func: 'resourceBigQueryDatasetAccessRoleDiffSuppress' + custom_expand: 'templates/terraform/custom_expand/go/bigquery_access_role.go.tmpl' + - name: 'userByEmail' + type: String + description: | + An email address of a user to grant access to. For example: + fred@example.com + exactly_one_of: + - 'user_by_email' + - 'group_by_email' + - 'domain' + - 'special_group' + - 'iam_member' + - 'view' + - 'dataset' + - 'routine' + diff_suppress_func: 'resourceBigQueryDatasetAccessIamMemberDiffSuppress' + - name: 'groupByEmail' + type: String + description: An email address of a Google Group to grant access to. + exactly_one_of: + - 'user_by_email' + - 'group_by_email' + - 'domain' + - 'special_group' + - 'iam_member' + - 'view' + - 'dataset' + - 'routine' + diff_suppress_func: 'resourceBigQueryDatasetAccessIamMemberDiffSuppress' + - name: 'domain' + type: String + description: | + A domain to grant access to. Any users signed in with the + domain specified will be granted the specified access + exactly_one_of: + - 'user_by_email' + - 'group_by_email' + - 'domain' + - 'special_group' + - 'iam_member' + - 'view' + - 'dataset' + - 'routine' + diff_suppress_func: 'resourceBigQueryDatasetAccessIamMemberDiffSuppress' + - name: 'specialGroup' + type: String + description: | + A special group to grant access to. Possible values include: + * `projectOwners`: Owners of the enclosing project. + * `projectReaders`: Readers of the enclosing project. + * `projectWriters`: Writers of the enclosing project. 
+ * `allAuthenticatedUsers`: All authenticated BigQuery users. + exactly_one_of: + - 'user_by_email' + - 'group_by_email' + - 'domain' + - 'special_group' + - 'iam_member' + - 'view' + - 'dataset' + - 'routine' + diff_suppress_func: 'resourceBigQueryDatasetAccessIamMemberDiffSuppress' + - name: 'iamMember' + type: String + description: | + Some other type of member that appears in the IAM Policy but isn't a user, + group, domain, or special group. For example: `allUsers` + exactly_one_of: + - 'user_by_email' + - 'group_by_email' + - 'domain' + - 'special_group' + - 'iam_member' + - 'view' + - 'dataset' + - 'routine' + diff_suppress_func: 'resourceBigQueryDatasetAccessIamMemberDiffSuppress' + - name: 'view' + type: NestedObject + description: | + A view from a different dataset to grant access to. Queries + executed against that view will have read access to tables in + this dataset. The role field is not required when this field is + set. If that view is updated by any user, access to the view + needs to be granted again via an update operation. + exactly_one_of: + - 'user_by_email' + - 'group_by_email' + - 'domain' + - 'special_group' + - 'iam_member' + - 'view' + - 'dataset' + - 'routine' + properties: + - name: 'datasetId' + type: String + description: The ID of the dataset containing this table. + required: true + - name: 'projectId' + type: String + description: The ID of the project containing this table. + required: true + - name: 'tableId' + type: String + description: | + The ID of the table. The ID must contain only letters (a-z, + A-Z), numbers (0-9), or underscores (_). The maximum length + is 1,024 characters. + required: true + - name: 'dataset' + type: NestedObject + description: | + Grants all resources of particular types in a particular dataset read access to the current dataset. 
+ exactly_one_of: + - 'user_by_email' + - 'group_by_email' + - 'domain' + - 'special_group' + - 'iam_member' + - 'view' + - 'dataset' + - 'routine' + properties: + - name: 'dataset' + type: NestedObject + description: | + The dataset this entry applies to + required: true + properties: + - name: 'datasetId' + type: String + description: The ID of the dataset containing this table. + required: true + - name: 'projectId' + type: String + description: The ID of the project containing this table. + required: true + - name: 'targetTypes' + type: Array + description: | + Which resources in the dataset this entry applies to. Currently, only views are supported, + but additional target types may be added in the future. Possible values: VIEWS + required: true + item_type: + type: String + - name: 'routine' + type: NestedObject + description: | + A routine from a different dataset to grant access to. Queries + executed against that routine will have read access to tables in + this dataset. The role field is not required when this field is + set. If that routine is updated by any user, access to the routine + needs to be granted again via an update operation. + exactly_one_of: + - 'user_by_email' + - 'group_by_email' + - 'domain' + - 'special_group' + - 'iam_member' + - 'view' + - 'dataset' + - 'routine' + properties: + - name: 'datasetId' + type: String + description: The ID of the dataset containing this table. + required: true + - name: 'projectId' + type: String + description: The ID of the project containing this table. + required: true + - name: 'routineId' + type: String + description: | + The ID of the routine. The ID must contain only letters (a-z, + A-Z), numbers (0-9), or underscores (_). The maximum length + is 256 characters. 
+ required: true diff --git a/mmv1/products/bigquery/go_Job.yaml b/mmv1/products/bigquery/go_Job.yaml new file mode 100644 index 000000000000..f6057ff8c73e --- /dev/null +++ b/mmv1/products/bigquery/go_Job.yaml @@ -0,0 +1,877 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Job' +kind: 'bigquery#job' +description: | + Jobs are actions that BigQuery runs on your behalf to load data, export data, query data, or copy data. + Once a BigQuery job is created, it cannot be changed or deleted. 
+references: + guides: + 'BigQuery Jobs Intro': 'https://cloud.google.com/bigquery/docs/jobs-overview' + api: 'https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs' +docs: +id_format: 'projects/{{project}}/jobs/{{job_id}}' +base_url: 'projects/{{project}}/jobs' +self_link: 'projects/{{project}}/jobs/{{job_id}}?location={{location}}' +skip_delete: true +immutable: true +import_format: + - 'projects/{{project}}/jobs/{{job_id}}/location/{{location}}' + - 'projects/{{project}}/jobs/{{job_id}}' + - '{{project}}/{{job_id}}' + - '{{job_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +async: + type: 'PollAsync' + check_response_func_existence: 'transport_tpg.PollCheckForExistence' + check_response_func_absence: 'transport_tpg.PollCheckForAbsence' + suppress_error: false + target_occurrences: 1 + actions: ['create'] +custom_code: + constants: 'templates/terraform/constants/go/bigquery_job.go.tmpl' + encoder: 'templates/terraform/encoders/go/bigquery_job.go.tmpl' +schema_version: 1 +state_upgraders: true +examples: + - name: 'bigquery_job_query' + primary_resource_id: 'job' + vars: + job_id: 'job_query' + account_name: 'bqowner' + ignore_read_extra: + - 'etag' + - 'status.0.state' + - name: 'bigquery_job_query_table_reference' + primary_resource_id: 'job' + vars: + job_id: 'job_query' + account_name: 'bqowner' + ignore_read_extra: + - 'etag' + - 'query.0.default_dataset.0.dataset_id' + - 'query.0.destination_table.0.table_id' + - 'status.0.state' + - name: 'bigquery_job_load' + primary_resource_id: 'job' + vars: + job_id: 'job_load' + ignore_read_extra: + - 'etag' + - 'status.0.state' + - name: 'bigquery_job_load_geojson' + primary_resource_id: 'job' + vars: + job_id: 'job_load' + bucket_name: 'bq-geojson' + test_env_vars: + project: 'PROJECT_NAME' + ignore_read_extra: + - 'etag' + - 'status.0.state' + - name: 'bigquery_job_load_parquet' + primary_resource_id: 'job' + vars: + job_id: 'job_load' + ignore_read_extra: + - 'etag' + - 
'status.0.state' + - name: 'bigquery_job_load_table_reference' + primary_resource_id: 'job' + vars: + job_id: 'job_load' + ignore_read_extra: + - 'etag' + - 'load.0.destination_table.0.table_id' + - 'status.0.state' + skip_docs: true + - name: 'bigquery_job_copy' + primary_resource_id: 'job' + vars: + job_id: 'job_copy' + account_name: 'bqowner' + key_name: 'example-key' + keyring_name: 'example-keyring' + test_env_vars: + project: 'PROJECT_NAME' + ignore_read_extra: + - 'etag' + - 'status.0.state' + - name: 'bigquery_job_copy_table_reference' + primary_resource_id: 'job' + vars: + job_id: 'job_copy' + account_name: 'bqowner' + key_name: 'example-key' + keyring_name: 'example-keyring' + test_env_vars: + project: 'PROJECT_NAME' + ignore_read_extra: + - 'etag' + - 'copy.0.destination_table.0.table_id' + - 'copy.0.source_tables.0.table_id' + - 'copy.0.source_tables.1.table_id' + - 'status.0.state' + skip_docs: true + - name: 'bigquery_job_extract' + primary_resource_id: 'job' + vars: + job_id: 'job_extract' + account_name: 'bqowner' + ignore_read_extra: + - 'etag' + - 'status.0.state' + - name: 'bigquery_job_extract_table_reference' + primary_resource_id: 'job' + vars: + job_id: 'job_extract' + account_name: 'bqowner' + ignore_read_extra: + - 'etag' + - 'extract.0.source_table.0.table_id' + - 'status.0.state' + skip_docs: true +parameters: +properties: + - name: 'user_email' + type: String + description: | + Email address of the user who ran the job. + output: true + - name: 'configuration' + type: NestedObject + description: 'Describes the job configuration.' + required: true + flatten_object: true + properties: + - name: 'jobType' + type: String + description: | + The type of the job. + output: true + - name: 'jobTimeoutMs' + type: String + description: | + Job timeout in milliseconds. If this time limit is exceeded, BigQuery may attempt to terminate the job. + - name: 'labels' + type: KeyValueLabels + description: | + The labels associated with this job. 
You can use these to organize and group your jobs. + - name: 'query' + type: NestedObject + description: 'Configures a query job.' + exactly_one_of: + - 'configuration.0.query' + - 'configuration.0.load' + - 'configuration.0.copy' + - 'configuration.0.extract' + properties: + - name: 'query' + type: String + description: | + SQL query text to execute. The useLegacySql field can be used to indicate whether the query uses legacy SQL or standard SQL. + *NOTE*: queries containing [DML language](https://cloud.google.com/bigquery/docs/reference/standard-sql/data-manipulation-language) + (`DELETE`, `UPDATE`, `MERGE`, `INSERT`) must specify `create_disposition = ""` and `write_disposition = ""`. + required: true + - name: 'destinationTable' + type: NestedObject + description: | + Describes the table where the query results should be stored. + This property must be set for large results that exceed the maximum response size. + For queries that produce anonymous (cached) results, this field will be populated by BigQuery. + default_from_api: true + custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_table_ref_query_destinationtable.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/bigquery_table_ref.go.tmpl' + properties: + - name: 'projectId' + type: String + description: 'The ID of the project containing this table.' + required: false + default_from_api: true + - name: 'datasetId' + type: String + description: 'The ID of the dataset containing this table.' + required: false + default_from_api: true + - name: 'tableId' + type: String + description: | + The table. Can be specified `{{table_id}}` if `project_id` and `dataset_id` are also set, + or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'userDefinedFunctionResources' + type: Array + description: | + Describes user-defined function resources used in the query. 
+ item_type: + type: NestedObject + properties: + - name: 'resourceUri' + type: String + description: + 'A code resource to load from a Google Cloud Storage URI + (gs://bucket/path).' + - name: 'inlineCode' + type: String + description: | + An inline resource that contains code for a user-defined function (UDF). + Providing a inline code resource is equivalent to providing a URI for a file containing the same code. + - name: 'createDisposition' + type: Enum + description: | + Specifies whether the job is allowed to create new tables. The following values are supported: + CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. + CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. + Creation, truncation and append actions occur as one atomic update upon job completion + default_value: "CREATE_IF_NEEDED" + enum_values: + - 'CREATE_IF_NEEDED' + - 'CREATE_NEVER' + - name: 'writeDisposition' + type: Enum + description: | + Specifies the action that occurs if the destination table already exists. The following values are supported: + WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. + WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. + WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. + Each action is atomic and only occurs if BigQuery is able to complete the job successfully. + Creation, truncation and append actions occur as one atomic update upon job completion. + default_value: "WRITE_EMPTY" + enum_values: + - 'WRITE_TRUNCATE' + - 'WRITE_APPEND' + - 'WRITE_EMPTY' + - name: 'defaultDataset' + type: NestedObject + description: | + Specifies the default dataset to use for unqualified table names in the query. Note that this does not alter behavior of unqualified dataset names. 
+ custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_dataset_ref.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/bigquery_dataset_ref.go.tmpl' + properties: + - name: 'datasetId' + type: String + description: | + The dataset. Can be specified `{{dataset_id}}` if `project_id` is also set, + or of the form `projects/{{project}}/datasets/{{dataset_id}}` if not. + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'projectId' + type: String + description: 'The ID of the project containing this table.' + required: false + default_from_api: true + - name: 'priority' + type: Enum + description: | + Specifies a priority for the query. + default_value: "INTERACTIVE" + enum_values: + - 'INTERACTIVE' + - 'BATCH' + - name: 'allowLargeResults' + type: Boolean + description: | + If true and query uses legacy SQL dialect, allows the query to produce arbitrarily large result tables at a slight cost in performance. + Requires destinationTable to be set. For standard SQL queries, this flag is ignored and large results are always allowed. + However, you must still set destinationTable when result size exceeds the allowed maximum response size. + - name: 'useQueryCache' + type: Boolean + description: | + Whether to look for the result in the query cache. The query cache is a best-effort cache that will be flushed whenever + tables in the query are modified. Moreover, the query cache is only available when a query does not have a destination table specified. + The default value is true. + default_value: true + - name: 'flattenResults' + type: Boolean + description: | + If true and query uses legacy SQL dialect, flattens all nested and repeated fields in the query results. + allowLargeResults must be true if this is set to false. For standard SQL queries, this flag is ignored and results are never flattened. + - name: 'maximumBillingTier' + type: Integer + description: | + Limits the billing tier for this job. 
Queries that have resource usage beyond this tier will fail (without incurring a charge). + If unspecified, this will be set to your project default. + - name: 'maximumBytesBilled' + type: String + description: | + Limits the bytes billed for this job. Queries that will have bytes billed beyond this limit will fail (without incurring a charge). + If unspecified, this will be set to your project default. + - name: 'useLegacySql' + type: Boolean + description: | + Specifies whether to use BigQuery's legacy SQL dialect for this query. The default value is true. + If set to false, the query will use BigQuery's standard SQL. + send_empty_value: true + - name: 'parameterMode' + type: String + description: | + Standard SQL only. Set to POSITIONAL to use positional (?) query parameters or to NAMED to use named (@myparam) query parameters in this query. + - name: 'schemaUpdateOptions' + type: Array + description: | + Allows the schema of the destination table to be updated as a side effect of the query job. + Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; + when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, + specified by partition decorators. For normal tables, WRITE_TRUNCATE will always overwrite the schema. + One or more of the following values are specified: + ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. + ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable. + item_type: + type: String + - name: 'destinationEncryptionConfiguration' + type: NestedObject + description: | + Custom encryption configuration (e.g., Cloud KMS keys) + custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_kms_version.go.tmpl' + properties: + - name: 'kmsKeyName' + type: String + description: | + Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. 
+ The BigQuery Service Account associated with your project requires access to this encryption key. + required: true + - name: 'kmsKeyVersion' + type: String + description: | + Describes the Cloud KMS encryption key version used to protect destination BigQuery table. + output: true + - name: 'scriptOptions' + type: NestedObject + description: | + Options controlling the execution of scripts. + properties: + - name: 'statementTimeoutMs' + type: String + description: 'Timeout period for each statement in a script.' + at_least_one_of: + - 'configuration.0.query.0.script_options.0.statement_timeout_ms' + - 'configuration.0.query.0.script_options.0.statement_byte_budget' + - 'configuration.0.query.0.script_options.0.key_result_statement' + - name: 'statementByteBudget' + type: String + description: + 'Limit on the number of bytes billed per statement. Exceeding + this budget results in an error.' + at_least_one_of: + - 'configuration.0.query.0.script_options.0.statement_timeout_ms' + - 'configuration.0.query.0.script_options.0.statement_byte_budget' + - 'configuration.0.query.0.script_options.0.key_result_statement' + - name: 'keyResultStatement' + type: Enum + description: | + Determines which statement in the script represents the "key result", + used to populate the schema and query results of the script job. + at_least_one_of: + - 'configuration.0.query.0.script_options.0.statement_timeout_ms' + - 'configuration.0.query.0.script_options.0.statement_byte_budget' + - 'configuration.0.query.0.script_options.0.key_result_statement' + enum_values: + - 'LAST' + - 'FIRST_SELECT' + - name: 'load' + type: NestedObject + description: 'Configures a load job.' + exactly_one_of: + - 'configuration.0.query' + - 'configuration.0.load' + - 'configuration.0.copy' + - 'configuration.0.extract' + properties: + - name: 'sourceUris' + type: Array + description: | + The fully-qualified URIs that point to your data in Google Cloud. 
+ For Google Cloud Storage URIs: Each URI can contain one '\*' wildcard character + and it must come after the 'bucket' name. Size limits related to load jobs apply + to external data sources. For Google Cloud Bigtable URIs: Exactly one URI can be + specified and it has to be a fully specified and valid HTTPS URL for a Google Cloud Bigtable table. + For Google Cloud Datastore backups: Exactly one URI can be specified. Also, the '\*' wildcard character is not allowed. + required: true + item_type: + type: String + - name: 'destinationTable' + type: NestedObject + description: | + The destination table to load the data into. + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_table_ref_load_destinationtable.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/bigquery_table_ref.go.tmpl' + properties: + - name: 'projectId' + type: String + description: 'The ID of the project containing this table.' + required: false + default_from_api: true + - name: 'datasetId' + type: String + description: 'The ID of the dataset containing this table.' + required: false + default_from_api: true + - name: 'tableId' + type: String + description: | + The table. Can be specified `{{table_id}}` if `project_id` and `dataset_id` are also set, + or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'createDisposition' + type: Enum + description: | + Specifies whether the job is allowed to create new tables. The following values are supported: + CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. + CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. 
+ Creation, truncation and append actions occur as one atomic update upon job completion + default_value: "CREATE_IF_NEEDED" + enum_values: + - 'CREATE_IF_NEEDED' + - 'CREATE_NEVER' + - name: 'writeDisposition' + type: Enum + description: | + Specifies the action that occurs if the destination table already exists. The following values are supported: + WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. + WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. + WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. + Each action is atomic and only occurs if BigQuery is able to complete the job successfully. + Creation, truncation and append actions occur as one atomic update upon job completion. + default_value: "WRITE_EMPTY" + enum_values: + - 'WRITE_TRUNCATE' + - 'WRITE_APPEND' + - 'WRITE_EMPTY' + - name: 'nullMarker' + type: String + description: | + Specifies a string that represents a null value in a CSV file. For example, if you specify "\N", BigQuery interprets "\N" as a null value + when loading a CSV file. The default value is the empty string. If you set this property to a custom value, BigQuery throws an error if an + empty string is present for all data types except for STRING and BYTE. For STRING and BYTE columns, BigQuery interprets the empty string as + an empty value. + default_value: "" + - name: 'fieldDelimiter' + type: String + description: | + The separator for fields in a CSV file. The separator can be any ISO-8859-1 single-byte character. + To use a character in the range 128-255, you must encode the character as UTF8. BigQuery converts + the string to ISO-8859-1 encoding, and then uses the first byte of the encoded string to split the + data in its raw, binary state. BigQuery also supports the escape sequence "\t" to specify a tab separator. + The default value is a comma (','). 
+ default_from_api: true + - name: 'skipLeadingRows' + type: Integer + description: | + The number of rows at the top of a CSV file that BigQuery will skip when loading the data. + The default value is 0. This property is useful if you have header rows in the file that should be skipped. + When autodetect is on, the behavior is the following: + skipLeadingRows unspecified - Autodetect tries to detect headers in the first row. If they are not detected, + the row is read as data. Otherwise data is read starting from the second row. + skipLeadingRows is 0 - Instructs autodetect that there are no headers and data should be read starting from the first row. + skipLeadingRows = N > 0 - Autodetect skips N-1 rows and tries to detect headers in row N. If headers are not detected, + row N is just skipped. Otherwise row N is used to extract column names for the detected schema. + validation: + function: 'validation.IntAtLeast(0)' + default_value: 0 + - name: 'encoding' + type: String + description: | + The character encoding of the data. The supported values are UTF-8 or ISO-8859-1. + The default value is UTF-8. BigQuery decodes the data after the raw, binary data + has been split using the values of the quote and fieldDelimiter properties. + default_value: "UTF-8" + - name: 'quote' + type: String + description: | + The value that is used to quote data sections in a CSV file. BigQuery converts the string to ISO-8859-1 encoding, + and then uses the first byte of the encoded string to split the data in its raw, binary state. + The default value is a double-quote ('"'). If your data does not contain quoted sections, set the property value to an empty string. + If your data contains quoted newline characters, you must also set the allowQuotedNewlines property to true. + default_from_api: true + - name: 'maxBadRecords' + type: Integer + description: | + The maximum number of bad records that BigQuery can ignore when running the job. 
If the number of bad records exceeds this value, + an invalid error is returned in the job result. The default value is 0, which requires that all records are valid. + default_value: 0 + - name: 'allowQuotedNewlines' + type: Boolean + description: | + Indicates if BigQuery should allow quoted data sections that contain newline characters in a CSV file. + The default value is false. + default_value: false + - name: 'sourceFormat' + type: String + description: | + The format of the data files. For CSV files, specify "CSV". For datastore backups, specify "DATASTORE_BACKUP". + For newline-delimited JSON, specify "NEWLINE_DELIMITED_JSON". For Avro, specify "AVRO". For parquet, specify "PARQUET". + For orc, specify "ORC". [Beta] For Bigtable, specify "BIGTABLE". + The default value is CSV. + default_value: "CSV" + - name: 'jsonExtension' + type: String + description: | + If sourceFormat is set to newline-delimited JSON, indicates whether it should be processed as a JSON variant such as GeoJSON. + For a sourceFormat other than JSON, omit this field. If the sourceFormat is newline-delimited JSON: - for newline-delimited + GeoJSON: set to GEOJSON. + - name: 'allowJaggedRows' + type: Boolean + description: | + Accept rows that are missing trailing optional columns. The missing values are treated as nulls. + If false, records with missing trailing columns are treated as bad records, and if there are too many bad records, + an invalid error is returned in the job result. The default value is false. Only applicable to CSV, ignored for other formats. + default_value: false + - name: 'ignoreUnknownValues' + type: Boolean + description: | + Indicates if BigQuery should allow extra values that are not represented in the table schema. + If true, the extra values are ignored. If false, records with extra columns are treated as bad records, + and if there are too many bad records, an invalid error is returned in the job result. + The default value is false. 
The sourceFormat property determines what BigQuery treats as an extra value: + CSV: Trailing columns + JSON: Named values that don't match any column names + default_value: false + - name: 'projectionFields' + type: Array + description: | + If sourceFormat is set to "DATASTORE_BACKUP", indicates which entity properties to load into BigQuery from a Cloud Datastore backup. + Property names are case sensitive and must be top-level properties. If no properties are specified, BigQuery loads all properties. + If any named property isn't found in the Cloud Datastore backup, an invalid error is returned in the job result. + item_type: + type: String + - name: 'autodetect' + type: Boolean + description: | + Indicates if we should automatically infer the options and schema for CSV and JSON sources. + - name: 'schemaUpdateOptions' + type: Array + description: | + Allows the schema of the destination table to be updated as a side effect of the load job if a schema is autodetected or + supplied in the job configuration. Schema update options are supported in two cases: when writeDisposition is WRITE_APPEND; + when writeDisposition is WRITE_TRUNCATE and the destination table is a partition of a table, specified by partition decorators. + For normal tables, WRITE_TRUNCATE will always overwrite the schema. One or more of the following values are specified: + ALLOW_FIELD_ADDITION: allow adding a nullable field to the schema. + ALLOW_FIELD_RELAXATION: allow relaxing a required field in the original schema to nullable. + item_type: + type: String + - name: 'timePartitioning' + type: NestedObject + description: | + Time-based partitioning specification for the destination table. + properties: + - name: 'type' + type: String + description: | + The only type supported is DAY, which will generate one partition per day. Providing an empty string used to cause an error, + but in OnePlatform the field will be treated as unset. 
+ required: true + - name: 'expirationMs' + type: String + description: | + Number of milliseconds for which to keep the storage for a partition. A wrapper is used here because 0 is an invalid value. + - name: 'field' + type: String + description: | + If not set, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the table is partitioned by this field. + The field must be a top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or REQUIRED. + A wrapper is used here because an empty string is an invalid value. + - name: 'destinationEncryptionConfiguration' + type: NestedObject + description: | + Custom encryption configuration (e.g., Cloud KMS keys) + custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_kms_version.go.tmpl' + properties: + - name: 'kmsKeyName' + type: String + description: | + Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. + The BigQuery Service Account associated with your project requires access to this encryption key. + required: true + - name: 'kmsKeyVersion' + type: String + description: | + Describes the Cloud KMS encryption key version used to protect destination BigQuery table. + output: true + - name: 'parquetOptions' + type: NestedObject + description: | + Parquet Options for load and make external tables. + properties: + - name: 'enumAsString' + type: Boolean + description: | + If sourceFormat is set to PARQUET, indicates whether to infer Parquet ENUM logical type as STRING instead of BYTES by default. + - name: 'enableListInference' + type: Boolean + description: | + If sourceFormat is set to PARQUET, indicates whether to use schema inference specifically for Parquet LIST logical type. + at_least_one_of: + - 'configuration.0.load.0.parquet_options.0.enum_as_string' + - 'configuration.0.load.0.parquet_options.0.enable_list_inference' + - name: 'copy' + type: NestedObject + description: 'Copies a table.' 
+ exactly_one_of: + - 'configuration.0.query' + - 'configuration.0.load' + - 'configuration.0.copy' + - 'configuration.0.extract' + properties: + - name: 'sourceTables' + type: Array + description: | + Source tables to copy. + required: true + custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_table_ref_copy_sourcetables.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/bigquery_table_ref_array.go.tmpl' + item_type: + type: NestedObject + properties: + - name: 'projectId' + type: String + description: 'The ID of the project containing this table.' + required: false + default_from_api: true + - name: 'datasetId' + type: String + description: 'The ID of the dataset containing this table.' + required: false + default_from_api: true + - name: 'tableId' + type: String + description: | + The table. Can be specified `{{table_id}}` if `project_id` and `dataset_id` are also set, + or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'destinationTable' + type: NestedObject + description: 'The destination table.' + custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_table_ref_copy_destinationtable.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/bigquery_table_ref.go.tmpl' + properties: + - name: 'projectId' + type: String + description: 'The ID of the project containing this table.' + required: false + default_from_api: true + - name: 'datasetId' + type: String + description: 'The ID of the dataset containing this table.' + required: false + default_from_api: true + - name: 'tableId' + type: String + description: | + The table. Can be specified `{{table_id}}` if `project_id` and `dataset_id` are also set, + or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. 
+ required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'createDisposition' + type: Enum + description: | + Specifies whether the job is allowed to create new tables. The following values are supported: + CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the table. + CREATE_NEVER: The table must already exist. If it does not, a 'notFound' error is returned in the job result. + Creation, truncation and append actions occur as one atomic update upon job completion + default_value: "CREATE_IF_NEEDED" + enum_values: + - 'CREATE_IF_NEEDED' + - 'CREATE_NEVER' + - name: 'writeDisposition' + type: Enum + description: | + Specifies the action that occurs if the destination table already exists. The following values are supported: + WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the table data and uses the schema from the query result. + WRITE_APPEND: If the table already exists, BigQuery appends the data to the table. + WRITE_EMPTY: If the table already exists and contains data, a 'duplicate' error is returned in the job result. + Each action is atomic and only occurs if BigQuery is able to complete the job successfully. + Creation, truncation and append actions occur as one atomic update upon job completion. + default_value: "WRITE_EMPTY" + enum_values: + - 'WRITE_TRUNCATE' + - 'WRITE_APPEND' + - 'WRITE_EMPTY' + - name: 'destinationEncryptionConfiguration' + type: NestedObject + description: | + Custom encryption configuration (e.g., Cloud KMS keys) + custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_kms_version.go.tmpl' + properties: + - name: 'kmsKeyName' + type: String + description: | + Describes the Cloud KMS encryption key that will be used to protect destination BigQuery table. + The BigQuery Service Account associated with your project requires access to this encryption key. 
+ required: true + - name: 'kmsKeyVersion' + type: String + description: | + Describes the Cloud KMS encryption key version used to protect destination BigQuery table. + output: true + - name: 'extract' + type: NestedObject + description: 'Configures an extract job.' + exactly_one_of: + - 'configuration.0.query' + - 'configuration.0.load' + - 'configuration.0.copy' + - 'configuration.0.extract' + properties: + - name: 'destinationUris' + type: Array + description: | + A list of fully-qualified Google Cloud Storage URIs where the extracted table should be written. + required: true + item_type: + type: String + - name: 'printHeader' + type: Boolean + description: | + Whether to print out a header row in the results. Default is true. + default_value: true + - name: 'fieldDelimiter' + type: String + description: | + When extracting data in CSV format, this defines the delimiter to use between fields in the exported data. + Default is ',' + default_from_api: true + - name: 'destinationFormat' + type: String + description: | + The exported file format. Possible values include CSV, NEWLINE_DELIMITED_JSON and AVRO for tables and SAVED_MODEL for models. + The default value for tables is CSV. Tables with nested or repeated fields cannot be exported as CSV. + The default value for models is SAVED_MODEL. + default_from_api: true + - name: 'compression' + type: String + description: | + The compression type to use for exported files. Possible values include GZIP, DEFLATE, SNAPPY, and NONE. + The default value is NONE. DEFLATE and SNAPPY are only supported for Avro. + default_value: "NONE" + - name: 'useAvroLogicalTypes' + type: Boolean + description: | + Whether to use logical types when extracting to AVRO format. + - name: 'sourceTable' + type: NestedObject + description: | + A reference to the table being exported. 
+ exactly_one_of: + - 'configuration.0.extract.0.source_table' + - 'configuration.0.extract.0.source_model' + custom_flatten: 'templates/terraform/custom_flatten/go/bigquery_table_ref_extract_sourcetable.go.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/bigquery_table_ref.go.tmpl' + properties: + - name: 'projectId' + type: String + description: 'The ID of the project containing this table.' + required: false + default_from_api: true + - name: 'datasetId' + type: String + description: 'The ID of the dataset containing this table.' + required: false + default_from_api: true + - name: 'tableId' + type: String + description: | + The table. Can be specified `{{table_id}}` if `project_id` and `dataset_id` are also set, + or of the form `projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}` if not. + required: true + diff_suppress_func: 'tpgresource.CompareSelfLinkRelativePaths' + - name: 'sourceModel' + type: NestedObject + description: | + A reference to the model being exported. + exactly_one_of: + - 'configuration.0.extract.0.source_table' + - 'configuration.0.extract.0.source_model' + properties: + - name: 'projectId' + type: String + description: 'The ID of the project containing this model.' + required: true + - name: 'datasetId' + type: String + description: 'The ID of the dataset containing this model.' + required: true + - name: 'modelId' + type: String + description: 'The ID of the model.' + required: true + - name: 'jobReference' + type: NestedObject + description: | + Reference describing the unique-per-user name of the job. + flatten_object: true + properties: + - name: 'jobId' + type: String + description: | + The ID of the job. The ID must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or dashes (-). The maximum length is 1,024 characters. + required: true + - name: 'location' + type: String + description: | + The geographic location of the job. The default value is US. 
+ default_value: "US" + - name: 'status' + type: NestedObject + description: | + The status of this job. Examine this value when polling an asynchronous job to see if the job is complete. + output: true + properties: + - name: 'errorResult' + type: NestedObject + description: | + Final error result of the job. If present, indicates that the job has completed and was unsuccessful. + output: true + properties: + - name: 'reason' + type: String + description: A short error code that summarizes the error. + - name: 'location' + type: String + description: Specifies where the error occurred, if present. + - name: 'message' + type: String + description: A human-readable description of the error. + - name: 'errors' + type: Array + description: | + The first errors encountered during the running of the job. The final message + includes the number of errors that caused the process to stop. Errors here do + not necessarily mean that the job has not completed or was unsuccessful. + output: true + item_type: + type: NestedObject + properties: + - name: 'reason' + type: String + description: A short error code that summarizes the error. + - name: 'location' + type: String + description: Specifies where the error occurred, if present. + - name: 'message' + type: String + description: A human-readable description of the error. + - name: 'state' + type: String + description: | + Running state of the job. Valid states include 'PENDING', 'RUNNING', and 'DONE'. + output: true diff --git a/mmv1/products/bigquery/go_Routine.yaml b/mmv1/products/bigquery/go_Routine.yaml new file mode 100644 index 000000000000..2d012385c6c3 --- /dev/null +++ b/mmv1/products/bigquery/go_Routine.yaml @@ -0,0 +1,325 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Routine' +kind: 'bigquery#routine' +description: | + A user-defined function or a stored procedure that belongs to a Dataset +references: + guides: + 'Routines Intro': 'https://cloud.google.com/bigquery/docs/reference/rest/v2/routines' + api: 'https://cloud.google.com/bigquery/docs/reference/rest/v2/routines' +docs: +base_url: 'projects/{{project}}/datasets/{{dataset_id}}/routines' +self_link: 'projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}' +import_format: + - 'projects/{{project}}/datasets/{{dataset_id}}/routines/{{routine_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +custom_code: +examples: + - name: 'bigquery_routine_basic' + primary_resource_id: 'sproc' + primary_resource_name: 'fmt.Sprintf("tf_test_dataset_id%s", context["random_suffix"]), fmt.Sprintf("tf_test_table_id%s", context["random_suffix"])' + vars: + dataset_id: 'dataset_id' + routine_id: 'routine_id' + - name: 'bigquery_routine_json' + primary_resource_id: 'sproc' + primary_resource_name: 'fmt.Sprintf("tf_test_dataset_id%s", context["random_suffix"]), fmt.Sprintf("tf_test_table_id%s", context["random_suffix"])' + vars: + dataset_id: 'dataset_id' + routine_id: 'routine_id' + - name: 'bigquery_routine_tvf' + primary_resource_id: 'sproc' + primary_resource_name: 'fmt.Sprintf("tf_test_dataset_id%s", context["random_suffix"]), fmt.Sprintf("tf_test_table_id%s", context["random_suffix"])' + vars: + dataset_id: 'dataset_id' + routine_id: 'routine_id' + - 
name: 'bigquery_routine_pyspark' + primary_resource_id: 'pyspark' + vars: + dataset_id: 'dataset_id' + connection_id: 'connection_id' + routine_id: 'routine_id' + - name: 'bigquery_routine_pyspark_mainfile' + primary_resource_id: 'pyspark_mainfile' + vars: + dataset_id: 'dataset_id' + connection_id: 'connection_id' + routine_id: 'routine_id' + - name: 'bigquery_routine_spark_jar' + primary_resource_id: 'spark_jar' + vars: + dataset_id: 'dataset_id' + connection_id: 'connection_id' + routine_id: 'routine_id' + - name: 'bigquery_routine_data_governance_type' + primary_resource_id: 'custom_masking_routine' + vars: + dataset_id: 'dataset_id' + routine_id: 'routine_id' + - name: 'bigquery_routine_remote_function' + primary_resource_id: 'remote_function' + vars: + dataset_id: 'dataset_id' + connection_id: 'connection_id' + routine_id: 'routine_id' + skip_test: true +parameters: +properties: + - name: 'routineReference' + type: NestedObject + description: Reference describing the ID of this routine + required: true + custom_expand: 'templates/terraform/custom_expand/go/bigquery_routine_ref.go.tmpl' + flatten_object: true + properties: + - name: 'datasetId' + type: String + description: The ID of the dataset containing this routine + required: true + immutable: true + - name: 'routineId' + type: String + description: + The ID of the routine. The ID must contain only letters (a-z, + A-Z), numbers (0-9), or underscores (_). The maximum length is 256 + characters. + required: true + immutable: true + - name: 'routineType' + type: Enum + description: The type of routine. + required: true + immutable: true + enum_values: + - 'SCALAR_FUNCTION' + - 'PROCEDURE' + - 'TABLE_VALUED_FUNCTION' + - name: 'creationTime' + type: Integer + description: | + The time when this routine was created, in milliseconds since the + epoch. + output: true + - name: 'lastModifiedTime' + type: Integer + description: | + The time when this routine was modified, in milliseconds since the + epoch. 
+ output: true + - name: 'language' + type: Enum + description: | + The language of the routine. + enum_values: + - 'SQL' + - 'JAVASCRIPT' + - 'PYTHON' + - 'JAVA' + - 'SCALA' + - name: 'arguments' + type: Array + description: Input/output argument of a function or a stored procedure. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The name of this argument. Can be absent for function return argument. + - name: 'argumentKind' + type: Enum + description: Defaults to FIXED_TYPE. + default_value: "FIXED_TYPE" + enum_values: + - 'FIXED_TYPE' + - 'ANY_TYPE' + - name: 'mode' + type: Enum + description: | + Specifies whether the argument is input or output. Can be set for procedures only. + enum_values: + - 'IN' + - 'OUT' + - 'INOUT' + - name: 'dataType' + type: String + description: | + A JSON schema for the data type. Required unless argumentKind = ANY_TYPE. + ~>**NOTE**: Because this field expects a JSON string, any changes to the string + will create a diff, even if the JSON itself hasn't changed. If the API returns + a different value for the same schema, e.g. it switched the order of values + or replaced STRUCT field type with RECORD field type, we currently cannot + suppress the recurring diff this causes. As a workaround, we recommend using + the schema as returned by the API. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'returnType' + type: String + description: | + A JSON schema for the return type. Optional if language = "SQL"; required otherwise. + If absent, the return type is inferred from definitionBody at query time in each query + that references this routine. 
If present, then the evaluated result will be cast to + the specified returned type at query time. ~>**NOTE**: Because this field expects a JSON + string, any changes to the string will create a diff, even if the JSON itself hasn't + changed. If the API returns a different value for the same schema, e.g. it switched + the order of values or replaced STRUCT field type with RECORD field type, we currently + cannot suppress the recurring diff this causes. As a workaround, we recommend using + the schema as returned by the API. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'returnTableType' + type: String + description: | + Optional. Can be set only if routineType = "TABLE_VALUED_FUNCTION". + + If absent, the return table type is inferred from definitionBody at query time in each query + that references this routine. If present, then the columns in the evaluated table result will + be cast to match the column types specified in return table type, at query time. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'importedLibraries' + type: Array + description: | + Optional. If language = "JAVASCRIPT", this field stores the path of the + imported JAVASCRIPT libraries. + item_type: + type: String + - name: 'definitionBody' + type: String + description: | + The body of the routine. For functions, this is the expression in the AS clause. + If language=SQL, it is the substring inside (but excluding) the parentheses. 
+ required: true + - name: 'description' + type: String + description: The description of the routine if defined. + - name: 'determinismLevel' + type: Enum + description: The determinism level of the JavaScript UDF if defined. + enum_values: + - 'DETERMINISM_LEVEL_UNSPECIFIED' + - 'DETERMINISTIC' + - 'NOT_DETERMINISTIC' + - name: 'dataGovernanceType' + type: Enum + description: If set to DATA_MASKING, the function is validated and made available as a masking function. For more information, see https://cloud.google.com/bigquery/docs/user-defined-functions#custom-mask + enum_values: + - 'DATA_MASKING' + - name: 'sparkOptions' + type: NestedObject + description: | + Optional. If language is one of "PYTHON", "JAVA", "SCALA", this field stores the options for spark stored procedure. + properties: + - name: 'connection' + type: String + description: | + Fully qualified name of the user-provided Spark connection object. + Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}" + - name: 'runtimeVersion' + type: String + description: Runtime version. If not specified, the default runtime version is used. + - name: 'containerImage' + type: String + description: Custom container image for the runtime environment. + - name: 'properties' + type: KeyValuePairs + description: | + Configuration properties as a set of key/value pairs, which will be passed on to the Spark application. + For more information, see Apache Spark and the procedure option list. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + default_from_api: true + - name: 'mainFileUri' + type: String + description: | + The main file/jar URI of the Spark application. + Exactly one of the definitionBody field and the mainFileUri field must be set for Python. + Exactly one of mainClass and mainFileUri field should be set for Java/Scala language type. 
+ - name: 'pyFileUris' + type: Array + description: | + Python files to be placed on the PYTHONPATH for PySpark application. Supported file types: .py, .egg, and .zip. For more information about Apache Spark, see Apache Spark. + default_from_api: true + item_type: + type: String + - name: 'jarUris' + type: Array + description: | + JARs to include on the driver and executor CLASSPATH. For more information about Apache Spark, see Apache Spark. + default_from_api: true + item_type: + type: String + - name: 'fileUris' + type: Array + description: | + Files to be placed in the working directory of each executor. For more information about Apache Spark, see Apache Spark. + default_from_api: true + item_type: + type: String + - name: 'archiveUris' + type: Array + description: | + Archive files to be extracted into the working directory of each executor. For more information about Apache Spark, see Apache Spark. + default_from_api: true + item_type: + type: String + - name: 'mainClass' + type: String + description: | + The fully qualified name of a class in jarUris, for example, com.example.wordcount. + Exactly one of mainClass and main_jar_uri field should be set for Java/Scala language type. + - name: 'remoteFunctionOptions' + type: NestedObject + description: Remote function specific options. + properties: + - name: 'endpoint' + type: String + description: | + Endpoint of the user-provided remote service, e.g. + `https://us-east1-my_gcf_project.cloudfunctions.net/remote_add` + - name: 'connection' + type: String + description: | + Fully qualified name of the user-provided connection object which holds + the authentication information to send requests to the remote service. 
+ Format: "projects/{projectId}/locations/{locationId}/connections/{connectionId}" + - name: 'userDefinedContext' + type: KeyValuePairs + description: | + User-defined context as a set of key/value pairs, which will be sent as function + invocation context together with batched arguments in the requests to the remote + service. The total number of bytes of keys and values must be less than 8KB. + + An object containing a list of "key": value pairs. Example: + `{ "name": "wrench", "mass": "1.3kg", "count": "3" }`. + default_from_api: true + - name: 'maxBatchingRows' + type: String + description: | + Max number of rows in each batch sent to the remote service. If absent or if 0, + BigQuery dynamically decides the number of rows in a batch. diff --git a/mmv1/products/bigquery/go_Table.yaml b/mmv1/products/bigquery/go_Table.yaml new file mode 100644 index 000000000000..71056cc189f4 --- /dev/null +++ b/mmv1/products/bigquery/go_Table.yaml @@ -0,0 +1,557 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Table' +kind: 'bigquery#table' +description: | + A Table that belongs to a Dataset +exclude_resource: true +docs: +id_format: '{{table_id}}' +base_url: 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' +self_link: 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' +import_format: + - 'projects/{{project}}/datasets/{{dataset_id}}/tables/{{table_id}}' + - '{{table_id}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +iam_policy: + method_name_separator: ':' + parent_resource_type: 'google_bigquery_table' + fetch_iam_policy_verb: 'POST' + allowed_iam_role: 'roles/bigquery.dataOwner' + parent_resource_attribute: 'table_id' + iam_conditions_request_type: 'REQUEST_BODY' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + iam_policy_version: '1' +custom_code: +examples: + - name: 'bigquery_bigquery_table' + primary_resource_id: 'test' + primary_resource_name: 'fmt.Sprintf("tf_test_dataset_id%s", context["random_suffix"]), fmt.Sprintf("tf_test_table_id%s", context["random_suffix"])' + vars: + dataset_id: 'dataset_id' + table_id: 'table_id' +virtual_fields: + - name: 'allow_resource_tags_on_deletion' + description: | + If set to true, it allows table deletion when there are still resource tags attached. 
+ type: Boolean + default_value: false +parameters: + - name: 'dataset' + type: String + description: Name of the dataset +properties: + - name: 'tableReference' + type: NestedObject + description: Reference describing the ID of this table + properties: + - name: 'datasetId' + type: String + description: The ID of the dataset containing this table + - name: 'projectId' + type: String + description: The ID of the project containing this table + - name: 'tableId' + type: String + description: The ID of the the table + - name: 'clustering' + type: Array + description: | + One or more fields on which data should be clustered. Only + top-level, non-repeated, simple-type fields are supported. When + you cluster a table using multiple columns, the order of columns + you specify is important. The order of the specified columns + determines the sort order of the data. + item_type: + type: String + - name: 'creationTime' + type: Integer + description: | + The time when this dataset was created, in milliseconds since the + epoch. + output: true + - name: 'description' + type: String + description: A user-friendly description of the dataset + - name: 'friendlyName' + type: String + description: A descriptive name for this table + - name: 'id' + type: String + description: 'An opaque ID uniquely identifying the table.' + output: true + - name: 'labels' + type: KeyValueLabels + description: | + The labels associated with this dataset. You can use these to + organize and group your datasets + immutable: false + - name: 'lastModifiedTime' + type: Integer + description: | + The time when this table was last modified, in milliseconds since the + epoch. + output: true + - name: 'location' + type: String + description: | + The geographic location where the table resides. This value is + inherited from the dataset. 
+ output: true + - name: 'name' + type: String + description: 'Name of the table' + - name: 'numBytes' + type: Integer + description: | + The size of this table in bytes, excluding any data in the streaming + buffer. + output: true + - name: 'numLongTermBytes' + type: Integer + description: | + The number of bytes in the table that are considered "long-term + storage". + output: true + - name: 'numRows' + type: Integer + description: | + The number of rows of data in this table, excluding any data in the + streaming buffer. + - name: 'requirePartitionFilter' + type: Boolean + description: | + If set to true, queries over this table require a partition filter + that can be used for partition elimination to be specified. + output: true + - name: 'type' + type: Enum + description: 'Describes the table type' + output: true + enum_values: + - 'TABLE' + - 'VIEW' + - 'EXTERNAL' + - name: 'view' + type: NestedObject + description: The view definition. + properties: + - name: 'useLegacySql' + type: Boolean + description: | + Specifies whether to use BigQuery's legacy SQL for this view + - name: 'userDefinedFunctionResources' + type: Array + description: | + Describes user-defined function resources used in the query. + item_type: + type: NestedObject + properties: + - name: 'inlineCode' + type: String + description: | + An inline resource that contains code for a user-defined + function (UDF). Providing a inline code resource is + equivalent to providing a URI for a file containing the + same code. + # TODO: Convert into cross-product ResourceRef + - name: 'resourceUri' + type: String + description: | + A code resource to load from a Google Cloud Storage URI + (gs://bucket/path). + - name: 'timePartitioning' + type: NestedObject + description: | + If specified, configures time-based partitioning for this table. + properties: + - name: 'expirationMs' + type: Integer + description: | + Number of milliseconds for which to keep the storage for a + partition. 
If unspecified when the table is created in a dataset + that has `defaultPartitionExpirationMs`, it will inherit + the value of `defaultPartitionExpirationMs` from the dataset. + To specify a unlimited expiration, set the value to 0. + - name: 'field' + type: String + description: | + If not set, the table is partitioned by pseudo column, + referenced via either '_PARTITIONTIME' as TIMESTAMP type, or + '_PARTITIONDATE' as DATE type. If field is specified, the table + is instead partitioned by this field. The field must be a + top-level TIMESTAMP or DATE field. Its mode must be NULLABLE or + REQUIRED. + - name: 'type' + type: Enum + description: | + The only type supported is DAY, which will generate one partition + per day. + enum_values: + - 'DAY' + - name: 'streamingBuffer' + type: NestedObject + description: | + Contains information regarding this table's streaming buffer, if one + is present. This field will be absent if the table is not being + streamed to or if there is no data in the streaming buffer. + output: true + properties: + - name: 'estimatedBytes' + type: Integer + description: | + A lower-bound estimate of the number of bytes currently in the + streaming buffer. + output: true + - name: 'estimatedRows' + type: Integer + description: | + A lower-bound estimate of the number of rows currently in the + streaming buffer. + output: true + - name: 'oldestEntryTime' + type: Integer + description: | + Contains the timestamp of the oldest entry in the streaming + buffer, in milliseconds since the epoch, if the streaming buffer + is available. + output: true + - name: 'schema' + type: NestedObject + description: Describes the schema of this table + properties: + - name: 'fields' + type: Array + description: Describes the fields in a table. + item_type: + type: NestedObject + properties: + - name: 'description' + type: String + description: | + The field description. The maximum length is 1,024 + characters. 
+ - name: 'fields' + type: Array + description: | + Describes the nested schema fields if the type property is + set to RECORD. + item_type: + type: String + - name: 'mode' + type: Enum + description: The field mode + enum_values: + - 'NULLABLE' + - 'REQUIRED' + - 'REPEATED' + - name: 'name' + type: String + description: The field name + - name: 'type' + type: Enum + description: 'The field data type' + enum_values: + - 'STRING' + - 'BYTES' + - 'INTEGER' + - 'FLOAT' + - 'TIMESTAMP' + - 'DATE' + - 'TIME' + - 'DATETIME' + - 'RECORD' + - name: 'encryptionConfiguration' + type: NestedObject + description: Custom encryption configuration + properties: + - name: 'kmsKeyName' + type: String + description: | + Describes the Cloud KMS encryption key that will be used to + protect destination BigQuery table. The BigQuery Service Account + associated with your project requires access to this encryption + key. + - name: 'expirationTime' + type: Integer + description: | + The time when this table expires, in milliseconds since the epoch. If + not present, the table will persist indefinitely. + - name: 'externalDataConfiguration' + type: NestedObject + description: | + Describes the data format, location, and other properties of a table + stored outside of BigQuery. By defining these properties, the data + source can then be queried as if it were a standard BigQuery table. + properties: + - name: 'autodetect' + type: Boolean + description: | + Try to detect schema and format options automatically. Any option + specified explicitly will be honored. 
+ - name: 'compression' + type: Enum + description: The compression type of the data source + enum_values: + - 'GZIP' + - 'NONE' + - name: 'ignoreUnknownValues' + type: Boolean + description: | + Indicates if BigQuery should allow extra values that are not + represented in the table schema + - name: 'maxBadRecords' + type: Integer + description: | + The maximum number of bad records that BigQuery can ignore when reading data + default_value: 0 + - name: 'sourceFormat' + type: Enum + description: The data format + enum_values: + - 'CSV' + - 'GOOGLE_SHEETS' + - 'NEWLINE_DELIMITED_JSON' + - 'AVRO' + - 'DATASTORE_BACKUP' + - 'BIGTABLE' + - 'ORC' + - 'PARQUET' + - 'ICEBERG' + - name: 'sourceUris' + type: Array + description: | + The fully-qualified URIs that point to your data in Google Cloud. + For Google Cloud Storage URIs: Each URI can contain one '\*' + wildcard character and it must come after the 'bucket' name. Size + limits related to load jobs apply to external data sources. For + Google Cloud Bigtable URIs: Exactly one URI can be specified and it + has be a fully specified and valid HTTPS URL for a Google Cloud + Bigtable table. For Google Cloud Datastore backups, exactly one + URI can be specified. Also, the '\*' wildcard character is not + allowed. + item_type: + type: String + - name: 'schema' + type: NestedObject + description: + 'The schema for the data. Schema is required for CSV and JSON formats' + properties: + - name: 'fields' + type: Array + description: 'Describes the fields in a table.' + item_type: + type: NestedObject + properties: + - name: 'description' + type: String + description: The field description + - name: 'fields' + type: Array + description: | + Describes the nested schema fields if the type property + is set to RECORD + item_type: + type: String + - name: 'mode' + type: Enum + description: Field mode. 
+ enum_values: + - 'NULLABLE' + - 'REQUIRED' + - 'REPEATED' + - name: 'name' + type: String + description: Field name + - name: 'type' + type: Enum + description: Field data type + enum_values: + - 'STRING' + - 'BYTES' + - 'INTEGER' + - 'FLOAT' + - 'TIMESTAMP' + - 'DATE' + - 'TIME' + - 'DATETIME' + - 'RECORD' + - name: 'googleSheetsOptions' + type: NestedObject + description: + 'Additional options if sourceFormat is set to GOOGLE_SHEETS.' + properties: + - name: 'skipLeadingRows' + type: Integer + description: | + The number of rows at the top of a Google Sheet that BigQuery + will skip when reading the data. + default_value: 0 + - name: 'csvOptions' + type: NestedObject + description: Additional properties to set if sourceFormat is set to CSV. + properties: + - name: 'allowJaggedRows' + type: Boolean + description: | + Indicates if BigQuery should accept rows that are missing + trailing optional columns + - name: 'allowQuotedNewlines' + type: Boolean + description: | + Indicates if BigQuery should allow quoted data sections that + contain newline characters in a CSV file + - name: 'encoding' + type: Enum + description: 'The character encoding of the data' + enum_values: + - 'UTF-8' + - 'ISO-8859-1' + - name: 'fieldDelimiter' + type: String + description: 'The separator for fields in a CSV file' + - name: 'quote' + type: String + description: + 'The value that is used to quote data sections in a CSV file' + - name: 'skipLeadingRows' + type: Integer + description: | + The number of rows at the top of a CSV file that BigQuery + will skip when reading the data. + default_value: 0 + - name: 'bigtableOptions' + type: NestedObject + description: 'Additional options if sourceFormat is set to BIGTABLE.' 
+ properties: + - name: 'ignoreUnspecifiedColumnFamilies' + type: Boolean + description: | + If field is true, then the column families that are not specified in + columnFamilies list are not exposed in the table schema + - name: 'readRowkeyAsString' + type: Boolean + description: | + If field is true, then the rowkey column families will be + read and converted to string. + - name: 'columnFamilies' + type: Array + description: | + List of column families to expose in the table schema along + with their types. + item_type: + type: NestedObject + properties: + - name: 'columns' + type: Array + description: | + Lists of columns that should be exposed as individual + fields as opposed to a list of (column name, value) pairs. + item_type: + type: NestedObject + properties: + - name: 'encoding' + type: Enum + description: + The encoding of the values when the type is not STRING + enum_values: + - 'TEXT' + - 'BINARY' + - name: 'fieldName' + type: String + description: | + If the qualifier is not a valid BigQuery field + identifier, a valid identifier must be provided as + the column field name and is used as field name in + queries. + - name: 'onlyReadLatest' + type: Boolean + description: | + If this is set, only the latest version of value in this column are exposed + - name: 'qualifierString' + type: String + description: Qualifier of the column + required: true + - name: 'type' + type: Enum + description: + The type to convert the value in cells of this column + enum_values: + - 'BYTES' + - 'STRING' + - 'INTEGER' + - 'FLOAT' + - 'BOOLEAN' + - name: 'encoding' + type: Enum + description: + The encoding of the values when the type is not STRING + enum_values: + - 'TEXT' + - 'BINARY' + - name: 'familyId' + type: String + description: Identifier of the column family. 
+ - name: 'onlyReadLatest' + type: Boolean + description: | + If this is set only the latest version of value are + exposed for all columns in this column family + - name: 'type' + type: Enum + description: + The type to convert the value in cells of this column family + enum_values: + - 'BYTES' + - 'STRING' + - 'INTEGER' + - 'FLOAT' + - 'BOOLEAN' + - name: 'tableReplicationInfo' + type: NestedObject + description: | + Replication info of a table created using "AS REPLICA" DDL like: + `CREATE MATERIALIZED VIEW mv1 AS REPLICA OF src_mv`. + properties: + - name: 'sourceProjectId' + type: String + description: The ID of the source project. + required: true + - name: 'sourceDatasetId' + type: String + description: The ID of the source dataset. + required: true + - name: 'sourceTableId' + type: String + description: The ID of the source materialized view. + required: true + - name: 'replicationIntervalMs' + type: Integer + description: | + The interval at which the source materialized view is polled for updates. The default is + 300000. + default_value: 300000 + - name: 'resourceTags' + type: KeyValuePairs + description: | + The tags attached to this table. Tag keys are globally unique. Tag key is expected to be + in the namespaced format, for example "123456789012/environment" where 123456789012 is the + ID of the parent organization or project resource for this tag key. Tag value is expected + to be the short name, for example "Production". + min_version: 'beta' diff --git a/mmv1/products/bigquery/go_product.yaml b/mmv1/products/bigquery/go_product.yaml new file mode 100644 index 000000000000..804848d8e0f3 --- /dev/null +++ b/mmv1/products/bigquery/go_product.yaml @@ -0,0 +1,25 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'BigQuery' +legacy_name: 'bigquery' +display_name: 'BigQuery' +versions: + - name: 'ga' + base_url: 'https://bigquery.googleapis.com/bigquery/v2/' + - name: 'beta' + base_url: 'https://bigquery.googleapis.com/bigquery/v2/' +scopes: + - 'https://www.googleapis.com/auth/bigquery' diff --git a/mmv1/products/compute/go_Address.yaml b/mmv1/products/compute/go_Address.yaml index 5a280c74e32b..a83a22d3ad4e 100644 --- a/mmv1/products/compute/go_Address.yaml +++ b/mmv1/products/compute/go_Address.yaml @@ -43,6 +43,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Autoscaler.yaml b/mmv1/products/compute/go_Autoscaler.yaml index 86eec2f2de5c..58e7e9cf016d 100644 --- a/mmv1/products/compute/go_Autoscaler.yaml +++ b/mmv1/products/compute/go_Autoscaler.yaml @@ -34,6 +34,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_BackendBucket.yaml b/mmv1/products/compute/go_BackendBucket.yaml index 5296bba84b1b..2cd35c63275c 100644 --- a/mmv1/products/compute/go_BackendBucket.yaml +++ b/mmv1/products/compute/go_BackendBucket.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 
'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' @@ -50,6 +51,7 @@ async: collection_url_key: 'items' iam_policy: parent_resource_attribute: 'name' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' import_format: - 'projects/{{project}}/global/backendBuckets/{{name}}' - '{{name}}' diff --git a/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml index 77f911edccf0..3c98d9c4729e 100644 --- a/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml +++ b/mmv1/products/compute/go_BackendBucketSignedUrlKey.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_BackendService.yaml b/mmv1/products/compute/go_BackendService.yaml index 274582b20829..9c3b8fd42b92 100644 --- a/mmv1/products/compute/go_BackendService.yaml +++ b/mmv1/products/compute/go_BackendService.yaml @@ -34,6 +34,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' @@ -51,6 +52,7 @@ iam_policy: allowed_iam_role: 'roles/compute.admin' parent_resource_attribute: 'name' iam_conditions_request_type: 'QUERY_PARAM' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' min_version: 'beta' custom_code: constants: 'templates/terraform/constants/go/backend_service.go.tmpl' diff --git a/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml index b03ba69ac476..6878e0000f40 100644 --- a/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml +++ b/mmv1/products/compute/go_BackendServiceSignedUrlKey.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 
async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Disk.yaml b/mmv1/products/compute/go_Disk.yaml index 08a4bf0ea163..5ec50f06b5a8 100644 --- a/mmv1/products/compute/go_Disk.yaml +++ b/mmv1/products/compute/go_Disk.yaml @@ -44,6 +44,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' @@ -60,6 +61,7 @@ collection_url_key: 'items' iam_policy: parent_resource_attribute: 'name' base_url: 'projects/{{project}}/zones/{{zone}}/disks/{{name}}' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' import_format: - 'projects/{{project}}/zones/{{zone}}/disks/{{name}}' - '{{name}}' diff --git a/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml b/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml index 72cb814abaa1..b5e5fa203277 100644 --- a/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml +++ b/mmv1/products/compute/go_DiskResourcePolicyAttachment.yaml @@ -32,6 +32,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_ExternalVpnGateway.yaml b/mmv1/products/compute/go_ExternalVpnGateway.yaml index b31f58b110ff..ecf9d2d84dee 100644 --- a/mmv1/products/compute/go_ExternalVpnGateway.yaml +++ b/mmv1/products/compute/go_ExternalVpnGateway.yaml @@ -29,6 +29,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Firewall.yaml b/mmv1/products/compute/go_Firewall.yaml index c28d906a27b9..6deaee15a1ae 100644 --- 
a/mmv1/products/compute/go_Firewall.yaml +++ b/mmv1/products/compute/go_Firewall.yaml @@ -44,6 +44,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_ForwardingRule.yaml b/mmv1/products/compute/go_ForwardingRule.yaml index 417db5eed9f2..73f1a57d54f0 100644 --- a/mmv1/products/compute/go_ForwardingRule.yaml +++ b/mmv1/products/compute/go_ForwardingRule.yaml @@ -32,6 +32,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_GlobalAddress.yaml b/mmv1/products/compute/go_GlobalAddress.yaml index 7e56e69dc9e4..283f3e10064f 100644 --- a/mmv1/products/compute/go_GlobalAddress.yaml +++ b/mmv1/products/compute/go_GlobalAddress.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_GlobalForwardingRule.yaml b/mmv1/products/compute/go_GlobalForwardingRule.yaml index b9fe030fcdc3..901dc9347599 100644 --- a/mmv1/products/compute/go_GlobalForwardingRule.yaml +++ b/mmv1/products/compute/go_GlobalForwardingRule.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml index fe29d45d0ce4..f6b78196a286 100644 --- a/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml +++ b/mmv1/products/compute/go_GlobalNetworkEndpoint.yaml @@ -38,6 +38,7 @@ timeouts: update_minutes: 20 
delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml b/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml index 2e8f0b57b0b8..cbcc8744fea8 100644 --- a/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_GlobalNetworkEndpointGroup.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_HaVpnGateway.yaml b/mmv1/products/compute/go_HaVpnGateway.yaml index 4545e037be24..ba5acae6e1a2 100644 --- a/mmv1/products/compute/go_HaVpnGateway.yaml +++ b/mmv1/products/compute/go_HaVpnGateway.yaml @@ -33,6 +33,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_HealthCheck.yaml b/mmv1/products/compute/go_HealthCheck.yaml index 053c86241be7..f2ea88f02cba 100644 --- a/mmv1/products/compute/go_HealthCheck.yaml +++ b/mmv1/products/compute/go_HealthCheck.yaml @@ -43,6 +43,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_HttpHealthCheck.yaml b/mmv1/products/compute/go_HttpHealthCheck.yaml index 0a7780b177a1..7ee7bc77a0d7 100644 --- a/mmv1/products/compute/go_HttpHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpHealthCheck.yaml @@ -36,6 +36,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' 
diff --git a/mmv1/products/compute/go_HttpsHealthCheck.yaml b/mmv1/products/compute/go_HttpsHealthCheck.yaml index 6a29961e22bb..34adda55888c 100644 --- a/mmv1/products/compute/go_HttpsHealthCheck.yaml +++ b/mmv1/products/compute/go_HttpsHealthCheck.yaml @@ -36,6 +36,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Image.yaml b/mmv1/products/compute/go_Image.yaml index 731e4028534d..32afc9e70d92 100644 --- a/mmv1/products/compute/go_Image.yaml +++ b/mmv1/products/compute/go_Image.yaml @@ -45,6 +45,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' @@ -62,6 +63,7 @@ iam_policy: allowed_iam_role: 'roles/compute.imageUser' parent_resource_attribute: 'image' iam_conditions_request_type: 'QUERY_PARAM' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' custom_code: examples: - name: 'image_basic' diff --git a/mmv1/products/compute/go_Instance.yaml b/mmv1/products/compute/go_Instance.yaml index c8b2b4856e99..4ffbf5911cfc 100644 --- a/mmv1/products/compute/go_Instance.yaml +++ b/mmv1/products/compute/go_Instance.yaml @@ -27,6 +27,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' @@ -44,6 +45,7 @@ iam_policy: allowed_iam_role: 'roles/compute.osLogin' parent_resource_attribute: 'instance_name' iam_conditions_request_type: 'QUERY_PARAM' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' custom_code: examples: - name: 'instance_basic' diff --git a/mmv1/products/compute/go_InstanceGroup.yaml b/mmv1/products/compute/go_InstanceGroup.yaml index 634f78fecdeb..48e0fa36a110 
100644 --- a/mmv1/products/compute/go_InstanceGroup.yaml +++ b/mmv1/products/compute/go_InstanceGroup.yaml @@ -30,6 +30,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_InstanceGroupManager.yaml b/mmv1/products/compute/go_InstanceGroupManager.yaml index 7f560b060068..3a52e01f41f3 100644 --- a/mmv1/products/compute/go_InstanceGroupManager.yaml +++ b/mmv1/products/compute/go_InstanceGroupManager.yaml @@ -33,6 +33,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_InstanceGroupMembership.yaml b/mmv1/products/compute/go_InstanceGroupMembership.yaml index 80b4bd6e974f..fd910f82c8d2 100644 --- a/mmv1/products/compute/go_InstanceGroupMembership.yaml +++ b/mmv1/products/compute/go_InstanceGroupMembership.yaml @@ -47,6 +47,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_InstanceGroupNamedPort.yaml b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml index 0e7e8cc5dde3..fd1d5514a0e6 100644 --- a/mmv1/products/compute/go_InstanceGroupNamedPort.yaml +++ b/mmv1/products/compute/go_InstanceGroupNamedPort.yaml @@ -39,6 +39,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_InstanceSettings.yaml b/mmv1/products/compute/go_InstanceSettings.yaml index 4dcf97880ec1..3069656e3b43 100644 --- a/mmv1/products/compute/go_InstanceSettings.yaml +++ 
b/mmv1/products/compute/go_InstanceSettings.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Interconnect.yaml b/mmv1/products/compute/go_Interconnect.yaml index 0091df5f4c47..8655a38959cd 100644 --- a/mmv1/products/compute/go_Interconnect.yaml +++ b/mmv1/products/compute/go_Interconnect.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_InterconnectAttachment.yaml b/mmv1/products/compute/go_InterconnectAttachment.yaml index f422c5b8fbb3..8ec6cc6738ee 100644 --- a/mmv1/products/compute/go_InterconnectAttachment.yaml +++ b/mmv1/products/compute/go_InterconnectAttachment.yaml @@ -27,6 +27,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_MachineImage.yaml b/mmv1/products/compute/go_MachineImage.yaml index 80ab7f070583..5f9a605e6787 100644 --- a/mmv1/products/compute/go_MachineImage.yaml +++ b/mmv1/products/compute/go_MachineImage.yaml @@ -33,6 +33,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' @@ -50,6 +51,7 @@ iam_policy: allowed_iam_role: 'roles/compute.admin' parent_resource_attribute: 'machine_image' iam_conditions_request_type: 'QUERY_PARAM' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' custom_code: examples: - name: 'machine_image_basic' diff --git a/mmv1/products/compute/go_ManagedSslCertificate.yaml 
b/mmv1/products/compute/go_ManagedSslCertificate.yaml index c9d0d3457913..a83f40296587 100644 --- a/mmv1/products/compute/go_ManagedSslCertificate.yaml +++ b/mmv1/products/compute/go_ManagedSslCertificate.yaml @@ -48,6 +48,7 @@ timeouts: update_minutes: 30 delete_minutes: 30 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Network.yaml b/mmv1/products/compute/go_Network.yaml index 9a8704c9fa3d..cec97ad33d59 100644 --- a/mmv1/products/compute/go_Network.yaml +++ b/mmv1/products/compute/go_Network.yaml @@ -30,6 +30,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_NetworkAttachment.yaml b/mmv1/products/compute/go_NetworkAttachment.yaml index 0750d9be5a76..4d5d9677a147 100644 --- a/mmv1/products/compute/go_NetworkAttachment.yaml +++ b/mmv1/products/compute/go_NetworkAttachment.yaml @@ -29,6 +29,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' diff --git a/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml b/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml index 26634af91311..2a79092c8698 100644 --- a/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml +++ b/mmv1/products/compute/go_NetworkEdgeSecurityService.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_NetworkEndpoint.yaml b/mmv1/products/compute/go_NetworkEndpoint.yaml index 626d38e5138d..abe4429c67f6 100644 --- a/mmv1/products/compute/go_NetworkEndpoint.yaml +++ 
b/mmv1/products/compute/go_NetworkEndpoint.yaml @@ -44,6 +44,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_NetworkEndpointGroup.yaml b/mmv1/products/compute/go_NetworkEndpointGroup.yaml index e4d0610c43c1..c5d3c85558e8 100644 --- a/mmv1/products/compute/go_NetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_NetworkEndpointGroup.yaml @@ -43,6 +43,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_NetworkEndpoints.yaml b/mmv1/products/compute/go_NetworkEndpoints.yaml index 89fc3591b842..7242e983788f 100644 --- a/mmv1/products/compute/go_NetworkEndpoints.yaml +++ b/mmv1/products/compute/go_NetworkEndpoints.yaml @@ -50,6 +50,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_NetworkFirewallPolicy.yaml b/mmv1/products/compute/go_NetworkFirewallPolicy.yaml index 6cb137323ffd..6324ccdf3966 100644 --- a/mmv1/products/compute/go_NetworkFirewallPolicy.yaml +++ b/mmv1/products/compute/go_NetworkFirewallPolicy.yaml @@ -25,6 +25,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml index ff78cb57b720..bfc331e0f42d 100644 --- a/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml +++ b/mmv1/products/compute/go_NetworkPeeringRoutesConfig.yaml @@ -40,6 +40,7 @@ 
timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_NodeGroup.yaml b/mmv1/products/compute/go_NodeGroup.yaml index afef7468796c..a09bb63d9657 100644 --- a/mmv1/products/compute/go_NodeGroup.yaml +++ b/mmv1/products/compute/go_NodeGroup.yaml @@ -32,6 +32,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_NodeTemplate.yaml b/mmv1/products/compute/go_NodeTemplate.yaml index 2b09bff673b5..ae6d1d82a6ad 100644 --- a/mmv1/products/compute/go_NodeTemplate.yaml +++ b/mmv1/products/compute/go_NodeTemplate.yaml @@ -32,6 +32,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_PacketMirroring.yaml b/mmv1/products/compute/go_PacketMirroring.yaml index 065c5979be0c..99ff103ff7f7 100644 --- a/mmv1/products/compute/go_PacketMirroring.yaml +++ b/mmv1/products/compute/go_PacketMirroring.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_PerInstanceConfig.yaml b/mmv1/products/compute/go_PerInstanceConfig.yaml index 7134e2e74f1c..03858858a4e9 100644 --- a/mmv1/products/compute/go_PerInstanceConfig.yaml +++ b/mmv1/products/compute/go_PerInstanceConfig.yaml @@ -37,6 +37,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 
'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_ProjectCloudArmorTier.yaml b/mmv1/products/compute/go_ProjectCloudArmorTier.yaml index 049a8a4ffbe2..9b086511eed3 100644 --- a/mmv1/products/compute/go_ProjectCloudArmorTier.yaml +++ b/mmv1/products/compute/go_ProjectCloudArmorTier.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml b/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml index 309ca0401355..7d4440834824 100644 --- a/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml +++ b/mmv1/products/compute/go_PublicAdvertisedPrefix.yaml @@ -29,6 +29,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_PublicDelegatedPrefix.yaml b/mmv1/products/compute/go_PublicDelegatedPrefix.yaml index b9f6c6929e4b..5144500c2ac1 100644 --- a/mmv1/products/compute/go_PublicDelegatedPrefix.yaml +++ b/mmv1/products/compute/go_PublicDelegatedPrefix.yaml @@ -29,6 +29,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionAutoscaler.yaml b/mmv1/products/compute/go_RegionAutoscaler.yaml index 43c28318b020..b91645cf641a 100644 --- a/mmv1/products/compute/go_RegionAutoscaler.yaml +++ b/mmv1/products/compute/go_RegionAutoscaler.yaml @@ -34,6 +34,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git 
a/mmv1/products/compute/go_RegionBackendService.yaml b/mmv1/products/compute/go_RegionBackendService.yaml index f01a23bc1d77..f8b2e6a04482 100644 --- a/mmv1/products/compute/go_RegionBackendService.yaml +++ b/mmv1/products/compute/go_RegionBackendService.yaml @@ -30,6 +30,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' @@ -47,6 +48,7 @@ iam_policy: allowed_iam_role: 'roles/compute.admin' parent_resource_attribute: 'name' iam_conditions_request_type: 'QUERY_PARAM' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' min_version: 'beta' custom_code: constants: 'templates/terraform/constants/go/region_backend_service.go.tmpl' diff --git a/mmv1/products/compute/go_RegionCommitment.yaml b/mmv1/products/compute/go_RegionCommitment.yaml index f40c115711cd..f01b2ff61484 100644 --- a/mmv1/products/compute/go_RegionCommitment.yaml +++ b/mmv1/products/compute/go_RegionCommitment.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionDisk.yaml b/mmv1/products/compute/go_RegionDisk.yaml index 15c02435d62c..835fe3365a64 100644 --- a/mmv1/products/compute/go_RegionDisk.yaml +++ b/mmv1/products/compute/go_RegionDisk.yaml @@ -44,6 +44,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' @@ -60,6 +61,7 @@ collection_url_key: 'items' iam_policy: parent_resource_attribute: 'name' base_url: 'projects/{{project}}/regions/{{region}}/disks/{{name}}' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' import_format: - 
'projects/{{project}}/regions/{{region}}/disks/{{name}}' - '{{name}}' diff --git a/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml b/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml index c658bb931ec6..1c41e05ebb19 100644 --- a/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml +++ b/mmv1/products/compute/go_RegionDiskResourcePolicyAttachment.yaml @@ -32,6 +32,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionHealthCheck.yaml b/mmv1/products/compute/go_RegionHealthCheck.yaml index 061556ba80b1..62dcea119fdc 100644 --- a/mmv1/products/compute/go_RegionHealthCheck.yaml +++ b/mmv1/products/compute/go_RegionHealthCheck.yaml @@ -39,6 +39,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionInstanceGroupManager.yaml b/mmv1/products/compute/go_RegionInstanceGroupManager.yaml index cfc36bb029fb..46c46ed67bca 100644 --- a/mmv1/products/compute/go_RegionInstanceGroupManager.yaml +++ b/mmv1/products/compute/go_RegionInstanceGroupManager.yaml @@ -33,6 +33,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionNetworkEndpoint.yaml b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml index b74ab976f8f6..347a086a82bb 100644 --- a/mmv1/products/compute/go_RegionNetworkEndpoint.yaml +++ b/mmv1/products/compute/go_RegionNetworkEndpoint.yaml @@ -40,6 +40,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 
'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml index 57d6aa26deda..6a57b76776d1 100644 --- a/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml +++ b/mmv1/products/compute/go_RegionNetworkEndpointGroup.yaml @@ -36,6 +36,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml b/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml index 75b0f374ec67..e172f020417b 100644 --- a/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml +++ b/mmv1/products/compute/go_RegionNetworkFirewallPolicy.yaml @@ -25,6 +25,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml index c8c275451f3f..06eb6b903b0d 100644 --- a/mmv1/products/compute/go_RegionPerInstanceConfig.yaml +++ b/mmv1/products/compute/go_RegionPerInstanceConfig.yaml @@ -38,6 +38,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionSecurityPolicy.yaml b/mmv1/products/compute/go_RegionSecurityPolicy.yaml index 1792c4d2eaeb..f6473c18f264 100644 --- a/mmv1/products/compute/go_RegionSecurityPolicy.yaml +++ b/mmv1/products/compute/go_RegionSecurityPolicy.yaml @@ -33,6 +33,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 
'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml b/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml index 87e43e49bed5..370fb57bf869 100644 --- a/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml +++ b/mmv1/products/compute/go_RegionSecurityPolicyRule.yaml @@ -38,6 +38,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionSslCertificate.yaml b/mmv1/products/compute/go_RegionSslCertificate.yaml index 0d16bbc8dd59..f3c5a78db137 100644 --- a/mmv1/products/compute/go_RegionSslCertificate.yaml +++ b/mmv1/products/compute/go_RegionSslCertificate.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionSslPolicy.yaml b/mmv1/products/compute/go_RegionSslPolicy.yaml index c19fa6181e55..25f9dabca134 100644 --- a/mmv1/products/compute/go_RegionSslPolicy.yaml +++ b/mmv1/products/compute/go_RegionSslPolicy.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionTargetHttpProxy.yaml b/mmv1/products/compute/go_RegionTargetHttpProxy.yaml index 4bf7dd643dbe..e82dbaa539fe 100644 --- a/mmv1/products/compute/go_RegionTargetHttpProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetHttpProxy.yaml @@ -30,6 +30,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 
'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml b/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml index f54c29bae5f7..f3e5b50a1ee3 100644 --- a/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetHttpsProxy.yaml @@ -30,6 +30,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionTargetTcpProxy.yaml b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml index f1c24ee41313..b787d8fd3ae8 100644 --- a/mmv1/products/compute/go_RegionTargetTcpProxy.yaml +++ b/mmv1/products/compute/go_RegionTargetTcpProxy.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RegionUrlMap.yaml b/mmv1/products/compute/go_RegionUrlMap.yaml index 0094adf584ad..afa052ed8e67 100644 --- a/mmv1/products/compute/go_RegionUrlMap.yaml +++ b/mmv1/products/compute/go_RegionUrlMap.yaml @@ -26,6 +26,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Reservation.yaml b/mmv1/products/compute/go_Reservation.yaml index 1ae6cf037ade..48a4f45d81da 100644 --- a/mmv1/products/compute/go_Reservation.yaml +++ b/mmv1/products/compute/go_Reservation.yaml @@ -38,6 +38,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git 
a/mmv1/products/compute/go_ResourcePolicy.yaml b/mmv1/products/compute/go_ResourcePolicy.yaml index cec8edde5c7e..c0cd3c3bee7c 100644 --- a/mmv1/products/compute/go_ResourcePolicy.yaml +++ b/mmv1/products/compute/go_ResourcePolicy.yaml @@ -29,6 +29,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Route.yaml b/mmv1/products/compute/go_Route.yaml index 5d6cd4498bf3..af854f00bb37 100644 --- a/mmv1/products/compute/go_Route.yaml +++ b/mmv1/products/compute/go_Route.yaml @@ -58,6 +58,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Router.yaml b/mmv1/products/compute/go_Router.yaml index f0ab6e85ebef..530a6a8142c9 100644 --- a/mmv1/products/compute/go_Router.yaml +++ b/mmv1/products/compute/go_Router.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_RouterNat.yaml b/mmv1/products/compute/go_RouterNat.yaml index d53602cf5d13..bd91cff799c6 100644 --- a/mmv1/products/compute/go_RouterNat.yaml +++ b/mmv1/products/compute/go_RouterNat.yaml @@ -36,6 +36,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{regions}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_SecurityPolicyRule.yaml b/mmv1/products/compute/go_SecurityPolicyRule.yaml index dac5567f837d..38faecb63b84 100644 --- a/mmv1/products/compute/go_SecurityPolicyRule.yaml +++ 
b/mmv1/products/compute/go_SecurityPolicyRule.yaml @@ -37,6 +37,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_ServiceAttachment.yaml b/mmv1/products/compute/go_ServiceAttachment.yaml index 6c7d3fc345de..f716511ba81b 100644 --- a/mmv1/products/compute/go_ServiceAttachment.yaml +++ b/mmv1/products/compute/go_ServiceAttachment.yaml @@ -30,6 +30,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Snapshot.yaml b/mmv1/products/compute/go_Snapshot.yaml index 7f94a8ef9064..777187208741 100644 --- a/mmv1/products/compute/go_Snapshot.yaml +++ b/mmv1/products/compute/go_Snapshot.yaml @@ -45,6 +45,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: full_url: 'selfLink' @@ -60,6 +61,7 @@ async: collection_url_key: 'items' iam_policy: parent_resource_attribute: 'name' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' import_format: - 'projects/{{project}}/global/snapshots/{{name}}' - '{{name}}' diff --git a/mmv1/products/compute/go_SslCertificate.yaml b/mmv1/products/compute/go_SslCertificate.yaml index d57ea5e11265..0c12f39c56b0 100644 --- a/mmv1/products/compute/go_SslCertificate.yaml +++ b/mmv1/products/compute/go_SslCertificate.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_SslPolicy.yaml b/mmv1/products/compute/go_SslPolicy.yaml index 9641c0832a70..fedccef00273 100644 --- a/mmv1/products/compute/go_SslPolicy.yaml 
+++ b/mmv1/products/compute/go_SslPolicy.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_Subnetwork.yaml b/mmv1/products/compute/go_Subnetwork.yaml index b1e6108aed1b..288bb3b3429e 100644 --- a/mmv1/products/compute/go_Subnetwork.yaml +++ b/mmv1/products/compute/go_Subnetwork.yaml @@ -52,6 +52,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' @@ -69,6 +70,7 @@ iam_policy: allowed_iam_role: 'roles/compute.networkUser' parent_resource_attribute: 'subnetwork' iam_conditions_request_type: 'QUERY_PARAM' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' custom_code: extra_schema_entry: 'templates/terraform/extra_schema_entry/go/subnetwork.tmpl' constants: 'templates/terraform/constants/go/subnetwork.tmpl' diff --git a/mmv1/products/compute/go_TargetGrpcProxy.yaml b/mmv1/products/compute/go_TargetGrpcProxy.yaml index 06078ff914ec..74b096d7d4b5 100644 --- a/mmv1/products/compute/go_TargetGrpcProxy.yaml +++ b/mmv1/products/compute/go_TargetGrpcProxy.yaml @@ -33,6 +33,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_TargetHttpProxy.yaml b/mmv1/products/compute/go_TargetHttpProxy.yaml index 401f81e16f6b..aceddd3b42da 100644 --- a/mmv1/products/compute/go_TargetHttpProxy.yaml +++ b/mmv1/products/compute/go_TargetHttpProxy.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff 
--git a/mmv1/products/compute/go_TargetHttpsProxy.yaml b/mmv1/products/compute/go_TargetHttpsProxy.yaml index 296503b25d2b..f37b0918cc8e 100644 --- a/mmv1/products/compute/go_TargetHttpsProxy.yaml +++ b/mmv1/products/compute/go_TargetHttpsProxy.yaml @@ -31,6 +31,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_TargetInstance.yaml b/mmv1/products/compute/go_TargetInstance.yaml index c61038f4b112..3f9b985e344b 100644 --- a/mmv1/products/compute/go_TargetInstance.yaml +++ b/mmv1/products/compute/go_TargetInstance.yaml @@ -35,6 +35,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/zones/{{zone}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_TargetSslProxy.yaml b/mmv1/products/compute/go_TargetSslProxy.yaml index e5dba61992b5..f77aac55132b 100644 --- a/mmv1/products/compute/go_TargetSslProxy.yaml +++ b/mmv1/products/compute/go_TargetSslProxy.yaml @@ -32,6 +32,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_TargetTcpProxy.yaml b/mmv1/products/compute/go_TargetTcpProxy.yaml index bc8d89f4d45c..e135ab5eed35 100644 --- a/mmv1/products/compute/go_TargetTcpProxy.yaml +++ b/mmv1/products/compute/go_TargetTcpProxy.yaml @@ -32,6 +32,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_UrlMap.yaml b/mmv1/products/compute/go_UrlMap.yaml index 1890eafba34a..c6febafa9843 100644 --- a/mmv1/products/compute/go_UrlMap.yaml +++ 
b/mmv1/products/compute/go_UrlMap.yaml @@ -29,6 +29,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/global/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_VpnGateway.yaml b/mmv1/products/compute/go_VpnGateway.yaml index 07b33e5b2f78..54202d714178 100644 --- a/mmv1/products/compute/go_VpnGateway.yaml +++ b/mmv1/products/compute/go_VpnGateway.yaml @@ -33,6 +33,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/products/compute/go_VpnTunnel.yaml b/mmv1/products/compute/go_VpnTunnel.yaml index 52b206de98c9..366e8934e306 100644 --- a/mmv1/products/compute/go_VpnTunnel.yaml +++ b/mmv1/products/compute/go_VpnTunnel.yaml @@ -30,6 +30,7 @@ timeouts: update_minutes: 20 delete_minutes: 20 async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: 'projects/{{project}}/regions/{{region}}/operations/{{op_id}}' diff --git a/mmv1/templates/terraform/constants/go/bigquery_job.go.tmpl b/mmv1/templates/terraform/constants/go/bigquery_job.go.tmpl new file mode 100644 index 000000000000..e937c836021a --- /dev/null +++ b/mmv1/templates/terraform/constants/go/bigquery_job.go.tmpl @@ -0,0 +1,4 @@ +var ( + bigqueryDatasetRegexp = regexp.MustCompile("projects/(.+)/datasets/(.+)") + bigqueryTableRegexp = regexp.MustCompile("projects/(.+)/datasets/(.+)/tables/(.+)") +) \ No newline at end of file diff --git a/mmv1/templates/terraform/constants/go/cloudfunctions2_function.go.tmpl b/mmv1/templates/terraform/constants/go/cloudfunctions2_function.go.tmpl new file mode 100644 index 000000000000..c447509eff60 --- /dev/null +++ b/mmv1/templates/terraform/constants/go/cloudfunctions2_function.go.tmpl @@ -0,0 +1,14 @@ +// Suppress diffs for the system environment variables +func 
environmentVariablesDiffSuppress(k, old, new string, d *schema.ResourceData) bool { + if k == "service_config.0.environment_variables.LOG_EXECUTION_ID" && new == "" { + return true + } + + // Let diff be determined by environment_variables (above) + if strings.HasPrefix(k, "service_config.0.environment_variables.%") { + return true + } + + // For other keys, don't suppress diff. + return false +} diff --git a/mmv1/templates/terraform/constants/go/org_policy_policy.go.tmpl b/mmv1/templates/terraform/constants/go/org_policy_policy.go.tmpl new file mode 100644 index 000000000000..e6bd256ca587 --- /dev/null +++ b/mmv1/templates/terraform/constants/go/org_policy_policy.go.tmpl @@ -0,0 +1,5 @@ +func resourceOrgpolicyPolicyRulesConditionExpressionDiffSuppress(_, old, new string, d *schema.ResourceData) bool { + oldReplaced := strings.ReplaceAll(strings.ReplaceAll(old, "Labels", "TagId"), "label", "tag") + newReplaced := strings.ReplaceAll(strings.ReplaceAll(new, "Labels", "TagId"), "label", "tag") + return oldReplaced == newReplaced +} diff --git a/mmv1/templates/terraform/custom_expand/go/enum_bool.go.tmpl b/mmv1/templates/terraform/custom_expand/go/enum_bool.go.tmpl new file mode 100644 index 000000000000..0c5d3853da14 --- /dev/null +++ b/mmv1/templates/terraform/custom_expand/go/enum_bool.go.tmpl @@ -0,0 +1,23 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -}} +func expand{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + if v == nil { + return nil, nil + } + + b, err := strconv.ParseBool(v.(string)) + if err != nil { + return nil, nil + } + return b, nil +} diff --git a/mmv1/templates/terraform/custom_flatten/go/enum_bool.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/enum_bool.go.tmpl new file mode 100644 index 000000000000..4a47f1d6b392 --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/go/enum_bool.go.tmpl @@ -0,0 +1,6 @@ +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + if v == nil { + return "" + } + return strings.ToUpper(strconv.FormatBool(v.(bool))) +} diff --git a/mmv1/templates/terraform/custom_flatten/go/vertex_ai_feature_view_ignore_project_number.go.tmpl b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_feature_view_ignore_project_number.go.tmpl new file mode 100644 index 000000000000..8f68845fe2ea --- /dev/null +++ b/mmv1/templates/terraform/custom_flatten/go/vertex_ai_feature_view_ignore_project_number.go.tmpl @@ -0,0 +1,15 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ -}} +func flatten{{$.GetPrefix}}{{$.TitlelizeProperty}}(v interface{}, d *schema.ResourceData, config *transport_tpg.Config) interface{} { + return d.Get("feature_registry_source.0.project_number") +} diff --git a/mmv1/templates/terraform/custom_update/go/vertex_ai_index.go.tmpl b/mmv1/templates/terraform/custom_update/go/vertex_ai_index.go.tmpl new file mode 100644 index 000000000000..047f88f50aa8 --- /dev/null +++ b/mmv1/templates/terraform/custom_update/go/vertex_ai_index.go.tmpl @@ -0,0 +1,143 @@ +userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) +if err != nil { + return err +} + +billingProject := "" + +project, err := tpgresource.GetProject(d, config) +if err != nil { + return fmt.Errorf("Error fetching project for Index: %s", err) +} +billingProject = project + +obj := make(map[string]interface{}) +displayNameProp, err := expandVertexAIIndexDisplayName(d.Get("display_name"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("display_name"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp +} +descriptionProp, err := expandVertexAIIndexDescription(d.Get("description"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("description"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp +} +metadataProp, err := expandVertexAIIndexMetadata(d.Get("metadata"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("metadata"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, metadataProp)) { + obj["metadata"] = metadataProp +} +labelsProp, err := expandVertexAIIndexEffectiveLabels(d.Get("effective_labels"), d, config) +if err != nil { + return err +} else if v, ok := d.GetOkExists("effective_labels"); !tpgresource.IsEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp +} + +url, err := tpgresource.ReplaceVars(d, config, "{{"{{"}}VertexAIBasePath{{"}}"}}projects/{{"{{"}}project{{"}}"}}/locations/{{"{{"}}region{{"}}"}}/indexes/{{"{{"}}name{{"}}"}}") +if err != nil { + return err +} + +log.Printf("[DEBUG] Updating Index %q: %#v", d.Id(), obj) +headers := make(http.Header) +updateMask := []string{} + +if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") +} + +if d.HasChange("description") { + updateMask = append(updateMask, "description") +} + +if d.HasChange("effective_labels") { + updateMask = append(updateMask, "labels") +} + +// err == nil indicates that the billing_project value was found +if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp +} + +// if updateMask is empty we are not updating anything so skip the post +if len(updateMask) > 0 { + log.Printf("[DEBUG] Updating first Index with updateMask: %#v", updateMask) + // updateMask is a URL parameter but not present in the schema, so ReplaceVars + // won't set it + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error updating first Index %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating first Index %q: %#v", d.Id(), res) + } + + err = VertexAIOperationWaitTime( + config, res, project, "Updating Index", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } +} + +secondUpdateMask := []string{} +// 'If `contents_delta_gcs_uri` is set as part of `index.metadata`, +// then no other Index fields can be also 
updated as part of the same update call.' +// Metadata update need to be done in a separate update call. +if d.HasChange("metadata") { + secondUpdateMask = append(secondUpdateMask, "metadata") +} + +// if secondUpdateMask is empty we are not updating anything so skip the post +if len(secondUpdateMask) > 0 { + log.Printf("[DEBUG] Updating second Index with updateMask: %#v", secondUpdateMask) + // Override updateMask with secondUpdateMask + url, err = transport_tpg.AddQueryParams(url, map[string]string{"updateMask": strings.Join(secondUpdateMask, ",")}) + if err != nil { + return err + } + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "PATCH", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutUpdate), + Headers: headers, + }) + + if err != nil { + return fmt.Errorf("Error Updating second Index %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished Updating second Index %q: %#v", d.Id(), res) + } + + err = VertexAIOperationWaitTime( + config, res, project, "Updating Index", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } +} + +return resourceVertexAIIndexRead(d, meta) diff --git a/mmv1/templates/terraform/encoders/go/org_policy_policy.go.tmpl b/mmv1/templates/terraform/encoders/go/org_policy_policy.go.tmpl new file mode 100644 index 000000000000..39137b874958 --- /dev/null +++ b/mmv1/templates/terraform/encoders/go/org_policy_policy.go.tmpl @@ -0,0 +1,15 @@ +{{/* + The license inside this block applies to this file + Copyright 2024 Google Inc. + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ -}} +name := d.Get("name").(string) +d.Set("name", tpgresource.GetResourceNameFromSelfLink(name)) +return obj, nil diff --git a/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl index 5cabd103b4cd..4a82a492c4cd 100644 --- a/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/compute_packet_mirroring_full.tf.tmpl @@ -54,7 +54,6 @@ resource "google_compute_forwarding_rule" "default" { network_tier = "PREMIUM" } -# [START compute_vm_packet_mirror] resource "google_compute_packet_mirroring" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mirroring_name"}}" description = "bar" @@ -76,4 +75,3 @@ resource "google_compute_packet_mirroring" "{{$.PrimaryResourceId}}" { direction = "BOTH" } } -# [END compute_vm_packet_mirror] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl b/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl index 802269430c8c..c98518869326 100644 --- a/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/compute_reservation.tf.tmpl @@ -1,4 +1,3 @@ -# [START compute_reservation_create_local_reservation] resource "google_compute_reservation" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "gce_reservation_local"}}" @@ -17,4 +16,3 @@ resource "google_compute_reservation" "{{$.PrimaryResourceId}}" { } } -# [END compute_reservation_create_local_reservation] diff --git 
a/mmv1/templates/terraform/examples/go/dlp_job_trigger_timespan_config_big_query.tf.tmpl b/mmv1/templates/terraform/examples/go/dlp_job_trigger_timespan_config_big_query.tf.tmpl new file mode 100644 index 000000000000..b9e522cd2d52 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/dlp_job_trigger_timespan_config_big_query.tf.tmpl @@ -0,0 +1,43 @@ +resource "google_data_loss_prevention_job_trigger" "{{$.PrimaryResourceId}}" { + parent = "projects/{{index $.TestEnvVars "project"}}" + description = "BigQuery DLP Job Trigger with timespan config and row limit" + display_name = "bigquery-dlp-job-trigger-limit-timespan" + + triggers { + schedule { + recurrence_period_duration ="86400s" + } + } + + inspect_job { + inspect_template_name = "projects/test/locations/global/inspectTemplates/6425492983381733900" + storage_config { + big_query_options { + table_reference { + project_id = "project" + dataset_id = "dataset" + table_id = "table" + } + sample_method = "" + } + + timespan_config { + start_time = "2023-01-01T00:00:23Z" + timestamp_field { + name = "timestamp" + } + } + } + + actions { + save_findings { + output_config { + table { + project_id = "project" + dataset_id = "output" + } + } + } + } +} +} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl index 7f7a458ed13a..59fd73af0a75 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_basic] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "example-zone" dns_name = "example-${random_id.rnd.hex}.com." 
@@ -11,4 +10,3 @@ resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { resource "random_id" "rnd" { byte_length = 4 } -# [END dns_managed_zone_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl index 5c3a1784d3aa..2f48709d0e95 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "private.example.com." @@ -28,4 +27,3 @@ resource "google_compute_network" "network-2" { name = "{{index $.Vars "network_2_name"}}" auto_create_subnetworks = false } -# [END dns_managed_zone_private] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl index a8667c1d827d..0194bfb8f74f 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_forwarding.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private_forwarding] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "private.example.com." 
@@ -37,4 +36,3 @@ resource "google_compute_network" "network-2" { name = "{{index $.Vars "network_2_name"}}" auto_create_subnetworks = false } -# [END dns_managed_zone_private_forwarding] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl index bb062e78c832..6a4a3cc8d941 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_gke.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private_gke] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "private.example.com." @@ -67,4 +66,3 @@ resource "google_container_cluster" "cluster-1" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END dns_managed_zone_private_gke] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl index f8515d19c5b8..891a4512eadd 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_private_peering.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_private_peering] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "zone_name"}}" dns_name = "peering.example.com." 
@@ -28,4 +27,3 @@ resource "google_compute_network" "network-target" { name = "{{index $.Vars "network_target_name"}}" auto_create_subnetworks = false } -# [END dns_managed_zone_private_peering] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl index 6d0a5f6bebc2..580fe7096c3b 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_quickstart.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_domain_tutorial] # to setup a web-server resource "google_compute_instance" "default" { name = "{{index $.Vars "dns_compute_instance"}}" @@ -53,4 +52,3 @@ resource "google_dns_record_set" "default" { google_compute_instance.default.network_interface.0.access_config.0.nat_ip ] } -# [END dns_domain_tutorial] diff --git a/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl index 5fc030a212d7..916242554cd1 100644 --- a/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_managed_zone_service_directory.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_managed_zone_service_directory] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { provider = google-beta @@ -28,4 +27,3 @@ resource "google_compute_network" "network" { name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = false } -# [END dns_managed_zone_service_directory] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl index 7129c4992bfe..86ceb05b04d9 100644 --- a/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_policy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_policy_basic] 
resource "google_dns_policy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "policy_name"}}" enable_inbound_forwarding = true @@ -32,4 +31,3 @@ resource "google_compute_network" "network-2" { name = "{{index $.Vars "network_2_name"}}" auto_create_subnetworks = false } -# [END dns_policy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl index 57e48b8efff9..4ac1dc34847f 100644 --- a/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_record_set_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_record_set_basic] resource "google_dns_managed_zone" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sample_zone"}}" dns_name = "{{index $.Vars "sample_zone"}}.hashicorptest.com." @@ -12,4 +11,3 @@ resource "google_dns_record_set" "default" { rrdatas = ["10.0.0.1", "10.1.0.1"] ttl = 86400 } -# [END dns_record_set_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl index ac7e4c75ef2f..ab8da5ec4b1c 100644 --- a/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_response_policy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_response_policy_basic] resource "google_compute_network" "network-1" { name = "{{index $.Vars "network_1_name"}}" auto_create_subnetworks = false @@ -69,4 +68,3 @@ resource "google_dns_response_policy" "{{$.PrimaryResourceId}}" { gke_cluster_name = google_container_cluster.cluster-1.id } } -# [END dns_response_policy_basic] diff --git a/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl index c187913e052c..7e8e38c88372 100644 --- 
a/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/dns_response_policy_rule_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START dns_response_policy_rule_basic] resource "google_compute_network" "network-1" { name = "{{index $.Vars "network_1_name"}}" auto_create_subnetworks = false @@ -35,4 +34,3 @@ resource "google_dns_response_policy_rule" "{{$.PrimaryResourceId}}" { } } -# [END dns_response_policy_rule_basic] diff --git a/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl b/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl index 1e8e402edba8..d0abe1648d10 100644 --- a/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_cdn_lb_with_backend_bucket.tf.tmpl @@ -1,6 +1,5 @@ # CDN load balancer with Cloud bucket as backend -# [START cloudloadbalancing_cdn_with_backend_bucket_cloud_storage_bucket] # Cloud Storage bucket resource "google_storage_bucket" "default" { name = "{{index $.Vars "my_bucket"}}" @@ -16,18 +15,14 @@ resource "google_storage_bucket" "default" { } } -# [END cloudloadbalancing_cdn_with_backend_bucket_cloud_storage_bucket] -# [START cloudloadbalancing_cdn_with_backend_bucket_make_public] # make bucket public resource "google_storage_bucket_iam_member" "default" { bucket = google_storage_bucket.default.name role = "roles/storage.objectViewer" member = "allUsers" } -# [END cloudloadbalancing_cdn_with_backend_bucket_make_public] -# [START cloudloadbalancing_cdn_with_backend_bucket_index_page] resource "google_storage_bucket_object" "index_page" { name = "{{index $.Vars "index_page"}}" bucket = google_storage_bucket.default.name @@ -37,9 +32,7 @@ resource "google_storage_bucket_object" "index_page" { EOT } -# [END cloudloadbalancing_cdn_with_backend_bucket_index_page] -# [START cloudloadbalancing_cdn_with_backend_bucket_error_page] resource 
"google_storage_bucket_object" "error_page" { name = "{{index $.Vars "404_page"}}" bucket = google_storage_bucket.default.name @@ -49,9 +42,7 @@ resource "google_storage_bucket_object" "error_page" { EOT } -# [END cloudloadbalancing_cdn_with_backend_bucket_error_page] -# [START cloudloadbalancing_cdn_with_backend_bucket_image] # image object for testing, try to access http:///test.jpg resource "google_storage_bucket_object" "test_image" { name = "{{index $.Vars "test_object"}}" @@ -65,16 +56,12 @@ resource "google_storage_bucket_object" "test_image" { bucket = google_storage_bucket.default.name } -# [END cloudloadbalancing_cdn_with_backend_bucket_image] -# [START cloudloadbalancing_cdn_with_backend_bucket_ip_address] # reserve IP address resource "google_compute_global_address" "default" { name = "{{index $.Vars "example_ip"}}" } -# [END cloudloadbalancing_cdn_with_backend_bucket_ip_address] -# [START cloudloadbalancing_cdn_with_backend_bucket_forwarding_rule] # forwarding rule resource "google_compute_global_forwarding_rule" "default" { name = "{{index $.Vars "http_lb_forwarding_rule"}}" @@ -84,25 +71,19 @@ resource "google_compute_global_forwarding_rule" "default" { target = google_compute_target_http_proxy.default.id ip_address = google_compute_global_address.default.id } -# [END cloudloadbalancing_cdn_with_backend_bucket_forwarding_rule] -# [START cloudloadbalancing_cdn_with_backend_bucket_http_proxy] # http proxy resource "google_compute_target_http_proxy" "default" { name = "{{index $.Vars "http_lb_proxy"}}" url_map = google_compute_url_map.default.id } -# [END cloudloadbalancing_cdn_with_backend_bucket_http_proxy] -# [START cloudloadbalancing_cdn_with_backend_bucket_url_map] # url map resource "google_compute_url_map" "default" { name = "{{index $.Vars "http_lb"}}" default_service = google_compute_backend_bucket.default.id } -# [END cloudloadbalancing_cdn_with_backend_bucket_url_map] -# [START cloudloadbalancing_cdn_with_backend_bucket_backend_bucket] # 
backend bucket with CDN policy with default ttl settings resource "google_compute_backend_bucket" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "cat_backend_bucket"}}" @@ -118,4 +99,3 @@ resource "google_compute_backend_bucket" "{{$.PrimaryResourceId}}" { serve_while_stale = 86400 } } -# [END cloudloadbalancing_cdn_with_backend_bucket_backend_bucket] diff --git a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl index 3e3c2ea58296..3e0de516a7e5 100644 --- a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # External HTTP load balancer with an CDN-enabled managed instance group backend -# [START cloudloadbalancing_ext_http_gce_instance_template] resource "google_compute_instance_template" "default" { name = "{{index $.Vars "lb_backend_template"}}" disk { @@ -37,9 +36,7 @@ resource "google_compute_instance_template" "default" { } tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_http_gce_instance_template] -# [START cloudloadbalancing_ext_http_gce_instance_mig] resource "google_compute_instance_group_manager" "default" { name = "{{index $.Vars "lb_backend_example"}}" zone = "us-east1-b" @@ -54,10 +51,8 @@ resource "google_compute_instance_group_manager" "default" { base_instance_name = "vm" target_size = 2 } -# [END cloudloadbalancing_ext_http_gce_instance_mig] -# [START cloudloadbalancing_ext_http_gce_instance_firewall_rule] resource "google_compute_firewall" "default" { name = "{{index $.Vars "fw_allow_health_check"}}" direction = "INGRESS" @@ -70,16 +65,12 @@ resource "google_compute_firewall" "default" { protocol = "tcp" } } -# [END cloudloadbalancing_ext_http_gce_instance_firewall_rule] -# [START cloudloadbalancing_ext_http_gce_instance_ip_address] resource "google_compute_global_address" "default" { name = "{{index $.Vars 
"lb_ipv4_1"}}" ip_version = "IPV4" } -# [END cloudloadbalancing_ext_http_gce_instance_ip_address] -# [START cloudloadbalancing_ext_http_gce_instance_health_check] resource "google_compute_health_check" "default" { name = "{{index $.Vars "http_basic_check"}}" check_interval_sec = 5 @@ -93,9 +84,7 @@ resource "google_compute_health_check" "default" { timeout_sec = 5 unhealthy_threshold = 2 } -# [END cloudloadbalancing_ext_http_gce_instance_health_check] -# [START cloudloadbalancing_ext_http_gce_instance_backend_service] resource "google_compute_backend_service" "default" { name = "{{index $.Vars "web_backend_service"}}" connection_draining_timeout_sec = 0 @@ -111,23 +100,17 @@ resource "google_compute_backend_service" "default" { capacity_scaler = 1.0 } } -# [END cloudloadbalancing_ext_http_gce_instance_backend_service] -# [START cloudloadbalancing_ext_http_gce_instance_url_map] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "web_map_http"}}" default_service = google_compute_backend_service.default.id } -# [END cloudloadbalancing_ext_http_gce_instance_url_map] -# [START cloudloadbalancing_ext_http_gce_instance_target_http_proxy] resource "google_compute_target_http_proxy" "default" { name = "{{index $.Vars "http_lb_proxy"}}" url_map = google_compute_url_map.default.id } -# [END cloudloadbalancing_ext_http_gce_instance_target_http_proxy] -# [START cloudloadbalancing_ext_http_gce_instance_forwarding_rule] resource "google_compute_global_forwarding_rule" "default" { name = "{{index $.Vars "http_content_rule"}}" ip_protocol = "TCP" @@ -136,4 +119,3 @@ resource "google_compute_global_forwarding_rule" "default" { target = google_compute_target_http_proxy.default.id ip_address = google_compute_global_address.default.id } -# [END cloudloadbalancing_ext_http_gce_instance_forwarding_rule] diff --git a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl 
b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl index 488cb2603138..176fbbe36422 100644 --- a/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_http_lb_mig_backend_custom_header.tf.tmpl @@ -1,7 +1,6 @@ # External HTTP load balancer with a CDN-enabled managed instance group backend # and custom request and response headers -# [START cloudloadbalancing_ext_http_gce_custom_header] # VPC resource "google_compute_network" "default" { name = "{{index $.Vars "xlb_network_name"}}" @@ -154,4 +153,3 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_http_gce_custom_header] diff --git a/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl index dada3d4e2bfb..57c73c4d558f 100644 --- a/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_ssl_proxy_lb_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # External SSL proxy load balancer with managed instance group backend -# [START cloudloadbalancing_ext_ssl_proxy_lb] # VPC resource "google_compute_network" "default" { name = "{{index $.Vars "ssl_proxy_xlb_network"}}" @@ -184,5 +183,4 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_ssl_proxy_lb] diff --git a/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl index 714938b64518..b1da9819a42f 100644 --- a/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_tcp_proxy_lb_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # External TCP proxy load balancer with managed 
instance group backend -# [START cloudloadbalancing_ext_tcp_proxy_lb] # VPC resource "google_compute_network" "default" { name = "{{index $.Vars "tcp_proxy_xlb_network"}}" @@ -142,4 +141,3 @@ resource "google_compute_firewall" "default" { } target_tags = ["allow-health-check"] } -# [END cloudloadbalancing_ext_tcp_proxy_lb] diff --git a/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl b/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl index 0eae785631a7..9eeb7bf49af5 100644 --- a/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/external_vpn_gateway.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudvpn_ha_external] resource "google_compute_ha_vpn_gateway" "ha_gateway" { region = "us-central1" name = "{{index $.Vars "ha_vpn_gateway_name"}}" @@ -100,4 +99,3 @@ resource "google_compute_router_peer" "router1_peer2" { advertised_route_priority = 100 interface = google_compute_router_interface.router1_interface2.name } -# [END cloudvpn_ha_external] diff --git a/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl b/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl index 748888614305..44b17b9699ed 100644 --- a/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/firewall_with_target_tags.tf.tmpl @@ -1,4 +1,3 @@ -# [START vpc_firewall_create] resource "google_compute_firewall" "{{$.PrimaryResourceId}}" { project = "{{index $.TestEnvVars "project"}}" name = "{{index $.Vars "firewall_name"}}" @@ -13,4 +12,3 @@ resource "google_compute_firewall" "{{$.PrimaryResourceId}}" { source_tags = ["foo"] target_tags = ["web"] } -# [END vpc_firewall_create] diff --git a/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl b/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl index 7419704f987f..fc23157f4b37 100644 --- 
a/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/flask_google_cloud_quickstart.tf.tmpl @@ -1,4 +1,3 @@ -# [START compute_flask_quickstart_vm] # Create a single Compute Engine instance resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "flask_vm"}}" @@ -26,9 +25,7 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } } -# [END compute_flask_quickstart_vm] -# [START vpc_flask_quickstart_ssh_fw] resource "google_compute_firewall" "ssh" { name = "{{index $.Vars "allow_ssh"}}" allow { @@ -41,10 +38,8 @@ resource "google_compute_firewall" "ssh" { source_ranges = ["0.0.0.0/0"] target_tags = ["ssh"] } -# [END vpc_flask_quickstart_ssh_fw] -# [START vpc_flask_quickstart_5000_fw] resource "google_compute_firewall" "flask" { name = "{{index $.Vars "flask_app_firewall"}}" network = "default" @@ -55,12 +50,10 @@ resource "google_compute_firewall" "flask" { } source_ranges = ["0.0.0.0/0"] } -# [END vpc_flask_quickstart_5000_fw] # Create new multi-region storage bucket in the US # with versioning enabled -# [START storage_bucket_tf_with_versioning] resource "google_storage_bucket" "default" { name = "{{index $.Vars "bucket_tfstate"}}" force_destroy = false @@ -70,4 +63,3 @@ resource "google_storage_bucket" "default" { enabled = true } } -# [END storage_bucket_tf_with_versioning] diff --git a/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl index cc6d212cfbbc..b0f91a10dcc5 100644 --- a/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/global_internal_http_lb_with_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # Global Internal HTTP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" 
"gilb_network" { name = "{{index $.Vars "gilb_network_name"}}" @@ -182,4 +181,3 @@ resource "google_compute_instance" "vm-test" { } } } -# [END cloudloadbalancing_int_http_gce] diff --git a/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl b/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl index cc7826b64ce9..b048fc49571a 100644 --- a/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/ha_vpn_gateway_gcp_to_gcp.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudvpn_ha_gcp_to_gcp] resource "google_compute_ha_vpn_gateway" "{{$.PrimaryResourceId}}" { region = "us-central1" name = "{{index $.Vars "ha_vpn_gateway1_name"}}" @@ -178,4 +177,3 @@ resource "google_compute_router_peer" "router2_peer2" { advertised_route_priority = 100 interface = google_compute_router_interface.router2_interface2.name } -# [END cloudvpn_ha_gcp_to_gcp] diff --git a/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl b/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl index b67c592a6cb0..29e5e8ee0cbb 100644 --- a/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/instance_custom_hostname.tf.tmpl @@ -1,4 +1,3 @@ -# [START compute_custom_hostname_instance_create] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "custom_hostname_instance_name"}}" @@ -7,7 +6,6 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { # Set a custom hostname below hostname = "hashicorptest.com" - boot_disk { initialize_params { image = "debian-cloud/debian-11" @@ -21,4 +19,3 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } -# [END compute_custom_hostname_instance_create] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl index 
a05e7c8ab91d..3ef1792a394a 100644 --- a/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/instance_settings_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START instance_settings_basic] resource "google_compute_instance_settings" "{{$.PrimaryResourceId}}" { zone = "us-east7-b" @@ -9,4 +8,3 @@ resource "google_compute_instance_settings" "{{$.PrimaryResourceId}}" { } } -# [END instance_settings_basic] diff --git a/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl b/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl index a1ea2d5cbccb..5785f9546ff6 100644 --- a/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/instance_virtual_display_enabled.tf.tmpl @@ -1,13 +1,10 @@ -# [START compute_instance_virtual_display_enabled] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "instance_virtual_display"}}" machine_type = "f1-micro" zone = "us-central1-c" - # Set the below to true to enable virtual display enable_display = true - boot_disk { initialize_params { image = "debian-cloud/debian-11" @@ -21,4 +18,3 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } -# [END compute_instance_virtual_display_enabled] diff --git a/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl b/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl index f0e27c29df46..dcd31d306dbc 100644 --- a/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/int_https_lb_https_redirect.tf.tmpl @@ -1,6 +1,5 @@ # Internal HTTPS load balancer with HTTP-to-HTTPS redirect -# [START cloudloadbalancing_int_https_with_redirect] # VPC network resource "google_compute_network" "default" { @@ -283,4 +282,3 @@ resource "google_compute_region_url_map" "{{$.PrimaryResourceId}}" { } } } -# [END 
cloudloadbalancing_int_https_with_redirect] diff --git a/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl index 17c4821eb155..3883a2acd688 100644 --- a/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/internal_http_lb_with_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # Internal HTTP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "ilb_network" { name = "{{index $.Vars "ilb_network_name"}}" @@ -188,4 +187,3 @@ resource "google_compute_instance" "vm-test" { } } } -# [END cloudloadbalancing_int_http_gce] diff --git a/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl b/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl index d28ddcf47664..7205bc4ca755 100644 --- a/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/internal_tcp_udp_lb_with_mig_backend.tf.tmpl @@ -1,6 +1,5 @@ # Internal TCP/UDP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_tcp_udp_gce] # VPC resource "google_compute_network" "ilb_network" { name = "{{index $.Vars "ilb_network_name"}}" @@ -177,4 +176,3 @@ resource "google_compute_instance" "vm_test" { } } } -# [END cloudloadbalancing_int_tcp_udp_gce] diff --git a/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl b/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl index 622d27933c18..d621d2a084bf 100644 --- a/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_custom_firewall_enforcement_order.tf.tmpl @@ -1,8 +1,6 @@ -# [START vpc_auto_create] resource 
"google_compute_network" "{{$.PrimaryResourceId}}" { project = "{{index $.TestEnvVars "project"}}" name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = true network_firewall_policy_enforcement_order = "BEFORE_CLASSIC_FIREWALL" } -# [END vpc_auto_create] diff --git a/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl b/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl index 3e3dc85f6d18..0a075376d536 100644 --- a/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_custom_mtu.tf.tmpl @@ -1,8 +1,6 @@ -# [START vpc_auto_create] resource "google_compute_network" "{{$.PrimaryResourceId}}" { project = "{{index $.TestEnvVars "project"}}" name = "{{index $.Vars "network_name"}}" auto_create_subnetworks = true mtu = 1460 } -# [END vpc_auto_create] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl index 17f1a11e3825..aa9a5ce8ef3c 100644 --- a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_addresses.tf.tmpl @@ -1,4 +1,3 @@ -# [START networkmanagement_test_addresses] resource "google_network_management_connectivity_test" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "primary_resource_name"}}" source { @@ -43,4 +42,3 @@ resource "google_compute_address" "dest-addr" { address = "10.0.43.43" region = "us-central1" } -# [END networkmanagement_test_addresses] diff --git a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl index 682cbbd3ff4e..1ab2b32cea92 100644 --- 
a/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_management_connectivity_test_instances.tf.tmpl @@ -1,4 +1,3 @@ -# [START networkmanagement_test_instances] resource "google_network_management_connectivity_test" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "primary_resource_name"}}" source { @@ -57,4 +56,3 @@ data "google_compute_image" "debian_9" { family = "debian-11" project = "debian-cloud" } -# [END networkmanagement_test_instances] diff --git a/mmv1/templates/terraform/examples/go/network_security_address_groups_cloud_armor.tf.tmpl b/mmv1/templates/terraform/examples/go/network_security_address_groups_cloud_armor.tf.tmpl new file mode 100644 index 000000000000..f33650525d12 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/network_security_address_groups_cloud_armor.tf.tmpl @@ -0,0 +1,10 @@ +resource "google_network_security_address_group" "{{$.PrimaryResourceId}}" { + provider = google-beta + name = "{{index $.Vars "resource_name"}}" + parent = "projects/{{index $.TestEnvVars "project"}}" + location = "global" + type = "IPV4" + capacity = "100" + purpose = ["CLOUD_ARMOR"] + items = ["208.80.154.224/32"] +} diff --git a/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl index fbb8cf36001d..ed012d98ccb0 100644 --- a/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_services_lb_route_extension_basic.tf.tmpl @@ -1,5 +1,4 @@ # Internal HTTP load balancer with a managed instance group backend -# [START cloudloadbalancing_int_http_gce] # VPC network resource "google_compute_network" "ilb_network" { name = "{{index $.Vars "ilb_network_name"}}" @@ -188,9 +187,7 @@ resource "google_compute_firewall" "fw_ilb_to_backends" { google_compute_firewall.fw_iap 
] } -# [END cloudloadbalancing_int_http_gce] -# [START lb_route_extension] resource "google_network_services_lb_route_extension" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "lb_route_extension_name"}}" description = "my route extension" @@ -349,4 +346,3 @@ resource "google_compute_region_backend_service" "callouts_backend" { google_compute_region_backend_service.default ] } -# [END lb_route_extension] diff --git a/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl index 43b8c0660272..9943d494b2f1 100644 --- a/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_services_lb_traffic_extension_basic.tf.tmpl @@ -177,9 +177,7 @@ resource "google_compute_firewall" "fw_ilb_to_backends" { google_compute_firewall.fw_iap ] } -# [END cloudloadbalancing_int_http_gce] -# [START lb_traffic_extension] resource "google_network_services_lb_traffic_extension" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "lb_traffic_extension_name"}}" description = "my traffic extension" @@ -334,4 +332,3 @@ resource "google_compute_region_backend_service" "callouts_backend" { google_compute_region_backend_service.default ] } -# [END lb_traffic_extension] diff --git a/mmv1/templates/terraform/examples/go/org_policy_policy_dry_run_spec.tf.tmpl b/mmv1/templates/terraform/examples/go/org_policy_policy_dry_run_spec.tf.tmpl new file mode 100644 index 000000000000..a5a1bc790d2c --- /dev/null +++ b/mmv1/templates/terraform/examples/go/org_policy_policy_dry_run_spec.tf.tmpl @@ -0,0 +1,29 @@ +resource "google_org_policy_custom_constraint" "constraint" { + name = "custom.disableGkeAutoUpgrade%{random_suffix}" + parent = "organizations/123456789" + display_name = "Disable GKE auto upgrade" + description = "Only allow GKE NodePool resource to be created or updated if AutoUpgrade is not 
enabled where this custom constraint is enforced." + + action_type = "ALLOW" + condition = "resource.management.autoUpgrade == false" + method_types = ["CREATE"] + resource_types = ["container.googleapis.com/NodePool"] +} + +resource "google_org_policy_policy" "primary" { + name = "organizations/123456789/policies/${google_org_policy_custom_constraint.constraint.name}" + parent = "organizations/123456789" + + spec { + rules { + enforce = "FALSE" + } + } + dry_run_spec { + inherit_from_parent = false + reset = false + rules { + enforce = "FALSE" + } + } +} diff --git a/mmv1/templates/terraform/examples/go/org_policy_policy_enforce.tf.tmpl b/mmv1/templates/terraform/examples/go/org_policy_policy_enforce.tf.tmpl new file mode 100644 index 000000000000..fa739896cb30 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/org_policy_policy_enforce.tf.tmpl @@ -0,0 +1,16 @@ +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/iam.disableServiceAccountKeyUpload" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + enforce = "FALSE" + } + } +} + +resource "google_project" "basic" { + project_id = "id" + name = "id" + org_id = "123456789" +} diff --git a/mmv1/templates/terraform/examples/go/org_policy_policy_folder.tf.tmpl b/mmv1/templates/terraform/examples/go/org_policy_policy_folder.tf.tmpl new file mode 100644 index 000000000000..c924f1226c53 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/org_policy_policy_folder.tf.tmpl @@ -0,0 +1,17 @@ +resource "google_org_policy_policy" "primary" { + name = "${google_folder.basic.name}/policies/gcp.resourceLocations" + parent = google_folder.basic.name + + spec { + inherit_from_parent = true + + rules { + deny_all = "TRUE" + } + } +} + +resource "google_folder" "basic" { + parent = "organizations/123456789" + display_name = "folder" +} diff --git a/mmv1/templates/terraform/examples/go/org_policy_policy_organization.tf.tmpl 
b/mmv1/templates/terraform/examples/go/org_policy_policy_organization.tf.tmpl new file mode 100644 index 000000000000..d5d73b2ac6c7 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/org_policy_policy_organization.tf.tmpl @@ -0,0 +1,8 @@ +resource "google_org_policy_policy" "primary" { + name = "organizations/123456789/policies/gcp.detailedAuditLoggingMode" + parent = "organizations/123456789" + + spec { + reset = true + } +} diff --git a/mmv1/templates/terraform/examples/go/org_policy_policy_project.tf.tmpl b/mmv1/templates/terraform/examples/go/org_policy_policy_project.tf.tmpl new file mode 100644 index 000000000000..de076a51c3f9 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/org_policy_policy_project.tf.tmpl @@ -0,0 +1,30 @@ +resource "google_org_policy_policy" "primary" { + name = "projects/${google_project.basic.name}/policies/gcp.resourceLocations" + parent = "projects/${google_project.basic.name}" + + spec { + rules { + condition { + description = "A sample condition for the policy" + expression = "resource.matchLabels('labelKeys/123', 'labelValues/345')" + location = "sample-location.log" + title = "sample-condition" + } + + values { + allowed_values = ["projects/allowed-project"] + denied_values = ["projects/denied-project"] + } + } + + rules { + allow_all = "TRUE" + } + } +} + +resource "google_project" "basic" { + project_id = "id" + name = "id" + org_id = "123456789" +} diff --git a/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl b/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl index dc4e9fbca51e..7434666761aa 100644 --- a/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/private_service_connect_google_apis.tf.tmpl @@ -1,4 +1,3 @@ -# [START vpc_subnet_private_access] resource "google_compute_network" "network" { provider = google-beta project = "{{index $.TestEnvVars "project"}}" @@ -15,9 +14,7 @@ 
resource "google_compute_subnetwork" "vpc_subnetwork" { network = google_compute_network.network.id private_ip_google_access = true } -# [END vpc_subnet_private_access] -# [START compute_internal_ip_private_access] resource "google_compute_global_address" "default" { provider = google-beta project = google_compute_network.network.project @@ -27,9 +24,7 @@ resource "google_compute_global_address" "default" { network = google_compute_network.network.id address = "100.100.100.106" } -# [END compute_internal_ip_private_access] -# [START compute_forwarding_rule_private_access] resource "google_compute_global_forwarding_rule" "default" { provider = google-beta project = google_compute_network.network.project @@ -43,4 +38,3 @@ resource "google_compute_global_forwarding_rule" "default" { service_directory_region = "europe-west3" } } -# [END compute_forwarding_rule_private_access] diff --git a/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl index f09904910df4..1fce3dc083ef 100644 --- a/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_capool_all_fields.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca_pool_all_fields] resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" @@ -88,4 +87,3 @@ resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { } } } -# [END privateca_create_ca_pool_all_fields] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl index d7a4806d9119..c5f76a407a83 100644 --- a/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_capool_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca_pool] resource 
"google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" @@ -11,4 +10,3 @@ resource "google_privateca_ca_pool" "{{$.PrimaryResourceId}}" { foo = "bar" } } -# [END privateca_create_ca_pool] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl index d3a96e88f282..82e718a57170 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca] resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { // This example assumes this pool already exists. // Pools cannot be deleted in normal test circumstances, so we depend on static pools @@ -47,4 +46,3 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { algorithm = "RSA_PKCS1_4096_SHA256" } } -# [END privateca_create_ca] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl index 2aae4fc5f1af..3179b700af9f 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca_byo_key] resource "google_project_service_identity" "privateca_sa" { service = "privateca.googleapis.com" } @@ -69,4 +68,3 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { google_kms_crypto_key_iam_member.privateca_sa_keyuser_viewer, ] } -# [END privateca_create_ca_byo_key] \ No newline at end of file diff --git 
a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl index 92deecf16d0b..6e898ab25e80 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_custom_ski.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_ca] resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { // This example assumes this pool already exists. // Pools cannot be deleted in normal test circumstances, so we depend on static pools @@ -50,4 +49,3 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { cloud_kms_key_version = "{{index $.Vars "kms_key_name"}}/cryptoKeyVersions/1" } } -# [END privateca_create_ca] diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl index 8a6ec536f0b1..f468ba2d74b3 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_subordinate.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_subordinateca] resource "google_privateca_certificate_authority" "root-ca" { pool = "{{index $.Vars "pool_name"}}" certificate_authority_id = "{{index $.Vars "certificate_authority_id"}}-root" @@ -93,4 +92,3 @@ resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { } type = "SUBORDINATE" } -# [END privateca_create_subordinateca] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl index c7eb1742e124..fb81cda6f900 100644 --- 
a/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_config.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_config] resource "google_privateca_ca_pool" "default" { location = "us-central1" @@ -99,4 +98,3 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { } } } -# [END privateca_create_certificate_config] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl index 74e268f42ba1..e2357245e0b8 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_csr.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_csr] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -55,4 +54,3 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { lifetime = "860s" pem_csr = file("test-fixtures/rsa_csr.pem") } -# [END privateca_create_certificate_csr] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl index e760da42aede..81d37cec8816 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_custom_ski.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -90,4 +89,3 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { // need to be explicitly connected to it depends_on = [google_privateca_certificate_authority.default] } -# [END privateca_create_certificate] diff --git 
a/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl index 8e683242cda5..d09d2d1f7913 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_no_authority.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -87,4 +86,3 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { // need to be explicitly connected to it depends_on = [google_privateca_certificate_authority.default] } -# [END privateca_create_certificate] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl index ee36989471a2..9d18151adbf7 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_certificate_with_template.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_template] resource "google_privateca_ca_pool" "default" { location = "us-central1" name = "{{index $.Vars "ca_pool_id"}}" @@ -131,4 +130,3 @@ resource "google_privateca_certificate" "{{$.PrimaryResourceId}}" { pem_csr = file("test-fixtures/rsa_csr.pem") certificate_template = google_privateca_certificate_template.default.id } -# [END privateca_create_certificate_template] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl index fa96c3119def..9f4328102c52 100644 --- a/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_quickstart.tf.tmpl @@ -1,4 +1,3 @@ -# 
[START privateca_quickstart] provider google{} provider tls{} @@ -94,4 +93,3 @@ resource "google_privateca_certificate" "default" { name = "{{index $.Vars "my_certificate"}}" pem_csr = tls_cert_request.example.cert_request_pem } -# [END privateca_quickstart] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl index 03909a72b673..0661224a55fa 100644 --- a/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/privateca_template_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START privateca_create_certificate_template] resource "google_privateca_certificate_template" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "name"}}" location = "us-central1" @@ -71,4 +70,3 @@ resource "google_privateca_certificate_template" "{{$.PrimaryResourceId}}" { label-one = "value-one" } } -# [END privateca_create_certificate_template] diff --git a/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl index dcb2d94988e1..0623a608d612 100644 --- a/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_autoscaler_basic.tf.tmpl @@ -14,7 +14,6 @@ resource "google_compute_region_autoscaler" "{{$.PrimaryResourceId}}" { } } -# [START compute_instance_template_basic] resource "google_compute_instance_template" "foobar" { name = "{{index $.Vars "instance_template_name"}}" machine_type = "e2-standard-4" @@ -46,7 +45,6 @@ resource "google_compute_instance_template" "foobar" { ] } } -# [END compute_instance_template_basic] resource "google_compute_target_pool" "foobar" { name = "{{index $.Vars "target_pool_name"}}" diff --git a/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl 
index fbdd40f1e5c9..7c8c732c93d1 100644 --- a/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_target_tcp_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_region_target_tcp_proxy_basic] resource "google_compute_region_target_tcp_proxy" "default" { name = "{{index $.Vars "region_target_tcp_proxy_name"}}" region = "europe-west4" @@ -20,9 +19,7 @@ resource "google_compute_region_health_check" "default" { region = "europe-west4" timeout_sec = 1 check_interval_sec = 1 - tcp_health_check { port = "80" } } -# [END cloudloadbalancing_region_target_tcp_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl b/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl index 0490b422924a..f313cfa20b62 100644 --- a/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/region_url_map_path_template_match.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_url_map_path_template_match] resource "google_compute_region_url_map" "{{$.PrimaryResourceId}}" { region = "us-central1" @@ -87,4 +86,3 @@ resource "google_compute_region_health_check" "default" { } } -# [END cloudloadbalancing_url_map_path_template_match] diff --git a/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl b/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl index d4cb8da4e16a..bd95eafd6553 100644 --- a/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/regional_external_http_load_balancer.tf.tmpl @@ -1,14 +1,10 @@ -# [START cloudloadbalancing_rllxlb_example] -# [START cloudloadbalancing_vpc_network_rllxlb_example] resource "google_compute_network" "default" { name = "{{index $.Vars "lb_network"}}" auto_create_subnetworks = 
false routing_mode = "REGIONAL" } -# [END cloudloadbalancing_vpc_network_rllxlb_example] -# [START cloudloadbalancing_vpc_subnetwork_rllxlb_example] resource "google_compute_subnetwork" "default" { name = "{{index $.Vars "backend_subnet"}}" ip_cidr_range = "10.1.2.0/24" @@ -18,9 +14,7 @@ resource "google_compute_subnetwork" "default" { region = "us-west1" stack_type = "IPV4_ONLY" } -# [END cloudloadbalancing_vpc_subnetwork_rllxlb_example] -# [START cloudloadbalancing_vpc_proxy_subnetwork_rllxlb_example] resource "google_compute_subnetwork" "proxy_only" { name = "{{index $.Vars "proxy_only_subnet"}}" ip_cidr_range = "10.129.0.0/23" @@ -29,9 +23,7 @@ resource "google_compute_subnetwork" "proxy_only" { region = "us-west1" role = "ACTIVE" } -# [END cloudloadbalancing_vpc_proxy_subnetwork_rllxlb_example] -# [START cloudloadbalancing_health_firewall_rllxlb_example] resource "google_compute_firewall" "default" { name = "{{index $.Vars "fw_allow_health_check"}}" allow { @@ -43,9 +35,7 @@ resource "google_compute_firewall" "default" { source_ranges = ["130.211.0.0/22", "35.191.0.0/16"] target_tags = ["load-balanced-backend"] } -# [END cloudloadbalancing_health_firewall_rllxlb_example] -# [START cloudloadbalancing_proxy_firewall_rllxlb_example] resource "google_compute_firewall" "allow_proxy" { name = "{{index $.Vars "fw_allow_proxies"}}" allow { @@ -66,9 +56,7 @@ resource "google_compute_firewall" "allow_proxy" { source_ranges = ["10.129.0.0/23"] target_tags = ["load-balanced-backend"] } -# [END cloudloadbalancing_proxy_firewall_rllxlb_example] -# [START cloudloadbalancing_instance_template_rllxlb_example] resource "google_compute_instance_template" "default" { name = "{{index $.Vars "l7_xlb_backend_template"}}" disk { @@ -106,9 +94,7 @@ resource "google_compute_instance_template" "default" { } tags = ["load-balanced-backend"] } -# [END cloudloadbalancing_instance_template_rllxlb_example] -# [START cloudloadbalancing_instance_group_rllxlb_example] resource 
"google_compute_instance_group_manager" "default" { name = "{{index $.Vars "l7_xlb_backend_example"}}" zone = "us-west1-a" @@ -123,19 +109,15 @@ resource "google_compute_instance_group_manager" "default" { base_instance_name = "vm" target_size = 2 } -# [END cloudloadbalancing_instance_group_rllxlb_example] -# [START cloudloadbalancing_ip_address_rllxlb_example] resource "google_compute_address" "default" { name = "{{index $.Vars "address_name"}}" address_type = "EXTERNAL" network_tier = "STANDARD" region = "us-west1" } -# [END cloudloadbalancing_ip_address_rllxlb_example] -# [START cloudloadbalancing_health_check_rllxlb_example] resource "google_compute_region_health_check" "default" { name = "{{index $.Vars "l7_xlb_basic_check"}}" check_interval_sec = 5 @@ -149,9 +131,7 @@ resource "google_compute_region_health_check" "default" { timeout_sec = 5 unhealthy_threshold = 2 } -# [END cloudloadbalancing_health_check_rllxlb_example] -# [START cloudloadbalancing_backend_service_rllxlb_example] resource "google_compute_region_backend_service" "default" { name = "{{index $.Vars "l7_xlb_backend_service"}}" region = "us-west1" @@ -166,25 +146,19 @@ resource "google_compute_region_backend_service" "default" { capacity_scaler = 1.0 } } -# [END cloudloadbalancing_backend_service_rllxlb_example] -# [START cloudloadbalancing_url_map_rllxlb_example] resource "google_compute_region_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "regional_l7_xlb_map"}}" region = "us-west1" default_service = google_compute_region_backend_service.default.id } -# [END cloudloadbalancing_url_map_rllxlb_example] -# [START cloudloadbalancing_target_http_proxy_rllxlb_example] resource "google_compute_region_target_http_proxy" "default" { name = "{{index $.Vars "l7_xlb_proxy"}}" region = "us-west1" url_map = google_compute_region_url_map.default.id } -# [END cloudloadbalancing_target_http_proxy_rllxlb_example] -# [START cloudloadbalancing_forwarding_rule_rllxlb_example] resource 
"google_compute_forwarding_rule" "default" { name = "l7-xlb-forwarding-rule" provider = google-beta @@ -199,6 +173,4 @@ resource "google_compute_forwarding_rule" "default" { ip_address = google_compute_address.default.address network_tier = "STANDARD" } -# [END cloudloadbalancing_forwarding_rule_rllxlb_example] -# [END cloudloadbalancing_rllxlb_example] diff --git a/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl index 18c695305bc0..a235283e7196 100644 --- a/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/spot_instance_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START compute_spot_instance_create] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "spot_instance_name"}}" @@ -10,7 +9,6 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { image = "debian-cloud/debian-11" } } - scheduling { preemptible = true automatic_restart = false @@ -26,4 +24,3 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { } } -# [END compute_spot_instance_create] diff --git a/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl index 9c1915799fa8..fa73d6564e15 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_basic.tf.tmpl @@ -1,9 +1,7 @@ -# [START cloud_sql_database_create] resource "google_sql_database" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_name"}}" instance = google_sql_database_instance.instance.name } -# [END cloud_sql_database_create] # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "instance" { diff --git a/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl 
b/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl index f0704eab298c..94ee726216e9 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_deletion_policy.tf.tmpl @@ -1,10 +1,8 @@ -# [START cloud_sql_database_create] resource "google_sql_database" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_name"}}" instance = google_sql_database_instance.instance.name deletion_policy = "ABANDON" } -# [END cloud_sql_database_create] # See versions at https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/sql_database_instance#database_version resource "google_sql_database_instance" "instance" { diff --git a/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl index 124e40ca8f2a..21a9c6082906 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_instance_my_sql.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_80_db_n1_s2] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_instance_name"}}" region = "us-central1" @@ -8,9 +7,7 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_80_db_n1_s2] -# [START cloud_sql_mysql_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -27,4 +24,3 @@ resource "google_sql_user" "user" { enable_password_verification = true } } -# [END cloud_sql_mysql_instance_user] diff --git a/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl index 2a41ff5a4017..ca0440f929fd 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_instance_postgres.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_80_db_n1_s2] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_instance_name"}}" @@ -9,9 +8,7 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_80_db_n1_s2] -# [START cloud_sql_postgres_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -22,4 +19,3 @@ resource "google_sql_user" "user" { instance = google_sql_database_instance.instance.name password = random_password.pwd.result } -# [END cloud_sql_postgres_instance_user] diff --git a/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl index 370d70d28d3b..da41d5d0ade6 100644 --- a/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_database_instance_sqlserver.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_80_db_n1_s2] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "database_instance_name"}}" region = "us-central1" @@ -9,9 +8,7 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_80_db_n1_s2] -# [START cloud_sql_sqlserver_instance_user] resource "random_password" "pwd" { length = 16 special = false @@ -22,4 +19,3 @@ resource "google_sql_user" "user" { instance = google_sql_database_instance.instance.name password = random_password.pwd.result } -# [END cloud_sql_sqlserver_instance_user] diff --git 
a/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl index fa875cd2bf18..254ce6225bb3 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl @@ -1,28 +1,21 @@ -# [START cloud_sql_instance_service_identity] resource "google_project_service_identity" "gcp_sa_cloud_sql" { provider = google-beta service = "sqladmin.googleapis.com" } -# [END cloud_sql_instance_service_identity] -# [START cloud_sql_instance_keyring] resource "google_kms_key_ring" "keyring" { provider = google-beta name = "{{index $.Vars "keyring_name"}}" location = "us-central1" } -# [END cloud_sql_instance_keyring] -# [START cloud_sql_instance_key] resource "google_kms_crypto_key" "key" { provider = google-beta name = "{{index $.Vars "crypto_key_name"}}" key_ring = google_kms_key_ring.keyring.id purpose = "ENCRYPT_DECRYPT" } -# [END cloud_sql_instance_key] -# [START cloud_sql_instance_crypto_key] resource "google_kms_crypto_key_iam_member" "crypto_key" { provider = google-beta crypto_key_id = google_kms_crypto_key.key.id @@ -30,9 +23,7 @@ resource "google_kms_crypto_key_iam_member" "crypto_key" { member = "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}" } -# [END cloud_sql_instance_crypto_key] -# [START cloud_sql_mysql_instance_cmek] resource "google_sql_database_instance" "mysql_instance_with_cmek" { name = "{{index $.Vars "mysql_instance_cmek"}}" provider = google-beta @@ -44,9 +35,7 @@ resource "google_sql_database_instance" "mysql_instance_with_cmek" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_cmek] -# [START cloud_sql_postgres_instance_cmek] resource "google_sql_database_instance" "postgres_instance_with_cmek" { name = "{{index $.Vars "postgres_instance_cmek"}}" provider = google-beta @@ -58,9 +47,7 @@ resource "google_sql_database_instance" 
"postgres_instance_with_cmek" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_cmek] -# [START cloud_sql_sqlserver_instance_cmek] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_cmek"}}" provider = google-beta @@ -73,4 +60,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_cmek] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl index 6ac502c2ddd4..4c56199b3ed4 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_ha.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_ha] resource "google_sql_database_instance" "mysql_instance_ha" { name = "{{index $.Vars "mysql_instance_ha"}}" region = "asia-northeast1" @@ -14,9 +13,7 @@ resource "google_sql_database_instance" "mysql_instance_ha" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_ha] -# [START cloud_sql_postgres_instance_ha] resource "google_sql_database_instance" "postgres_instance_ha" { name = "{{index $.Vars "postgres_instance_ha"}}" region = "us-central1" @@ -32,9 +29,7 @@ resource "google_sql_database_instance" "postgres_instance_ha" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_ha] -# [START cloud_sql_sqlserver_instance_ha] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_ha"}}" region = "us-central1" @@ -50,4 +45,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_ha] diff --git 
a/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl index 9e80323369b8..b95a271bf5e6 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl @@ -6,7 +6,6 @@ resource "google_project_service_identity" "gcp_sa_cloud_sql" { service = "sqladmin.googleapis.com" } -# [START cloud_sql_instance_iam_conditions] data "google_iam_policy" "sql_iam_policy" { binding { role = "roles/cloudsql.client" @@ -25,7 +24,6 @@ resource "google_project_iam_policy" "project" { project = data.google_project.project.id policy_data = data.google_iam_policy.sql_iam_policy.policy_data } -# [END cloud_sql_instance_iam_conditions] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_iam_condition"}}" diff --git a/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl index 06a6bbc81a24..9fa98a22dad8 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_labels.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_labels] resource "google_sql_database_instance" "mysql_instance_labels" { name = "{{index $.Vars "mysql_instance_labels"}}" region = "us-central1" @@ -12,9 +11,7 @@ resource "google_sql_database_instance" "mysql_instance_labels" { } deletion_protection = "false" } -# [END cloud_sql_mysql_instance_labels] -# [START cloud_sql_postgres_instance_labels] resource "google_sql_database_instance" "postgres_instance_labels" { name = "{{index $.Vars "postgres_instance_labels"}}" region = "us-central1" @@ -28,9 +25,7 @@ resource "google_sql_database_instance" "postgres_instance_labels" { } deletion_protection = "false" } -# [END cloud_sql_postgres_instance_labels] -# [START 
cloud_sql_sqlserver_instance_labels] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_labels"}}" region = "us-central1" @@ -45,4 +40,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "false" } -# [END cloud_sql_sqlserver_instance_labels] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl index a06e64956bf6..3739b999506e 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_pitr.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_pitr] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_pitr"}}" region = "asia-northeast1" @@ -14,9 +13,7 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_pitr] -# [START cloud_sql_postgres_instance_pitr] resource "google_sql_database_instance" "postgres_instance_pitr" { name = "{{index $.Vars "postgres_instance__pitr"}}" region = "us-central1" @@ -32,4 +29,3 @@ resource "google_sql_database_instance" "postgres_instance_pitr" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_pitr] diff --git a/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl index e976fd8c142a..0ac9a24a3c81 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_ssl_cert.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_require_ssl] resource "google_sql_database_instance" "mysql_instance" { name = "{{index $.Vars "mysql_instance"}}" region = "asia-northeast1" @@ -11,16 +10,12 @@ resource 
"google_sql_database_instance" "mysql_instance" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_require_ssl] -# [START cloud_sql_mysql_instance_ssl_cert] resource "google_sql_ssl_cert" "mysql_client_cert" { common_name = "mysql_common_name" instance = google_sql_database_instance.mysql_instance.name } -# [END cloud_sql_mysql_instance_ssl_cert] -# [START cloud_sql_postgres_instance_require_ssl] resource "google_sql_database_instance" "postgres_instance" { name = "{{index $.Vars "postgres_instance"}}" region = "asia-northeast1" @@ -33,16 +28,12 @@ resource "google_sql_database_instance" "postgres_instance" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_require_ssl] -# [START cloud_sql_postgres_instance_ssl_cert] resource "google_sql_ssl_cert" "postgres_client_cert" { common_name = "postgres_common_name" instance = google_sql_database_instance.postgres_instance.name } -# [END cloud_sql_postgres_instance_ssl_cert] -# [START cloud_sql_sqlserver_instance_require_ssl] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance"}}" region = "asia-northeast1" @@ -56,4 +47,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_require_ssl] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl index 2e6490cd6fe4..1cd16f0e408b 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_authorized_network.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_authorized_network] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars 
"mysql_instance_with_authorized_network"}}" region = "us-central1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl index 67dfdc4fad50..fab77d0d506d 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_backup] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_backup"}}" region = "asia-northeast1" @@ -13,4 +12,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_backup] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl index 916d12bf09ac..96e138c64c2d 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_location.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_backup_location] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_backup_location"}}" region = "asia-northeast1" @@ -12,4 +11,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl 
b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl index 57e630c34e2f..ee7a1d41c254 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_backup_retention.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_backup_retention] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_backup_retention"}}" region = "asia-northeast1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl index 5ea986116ac5..134192af0268 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_clone.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_source] resource "google_sql_database_instance" "source" { name = "{{index $.Vars "mysql_instance_source_name"}}" region = "us-central1" @@ -8,9 +7,7 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_source] -# [START cloud_sql_mysql_instance_clone] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_instance_clone_name"}}" region = "us-central1" @@ -20,4 +17,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_clone] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl index 
a2421e78283e..188242db640d 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_flags.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_flags] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { database_version = "MYSQL_8_0" name = "{{index $.Vars "mysql_instance"}}" @@ -21,4 +20,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_flags] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl index 8165b86cd381..93d0efe450fa 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_public_ip.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_public_ip] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { database_version = "MYSQL_5_7" name = "{{index $.Vars "mysql_public_ip_instance_name"}}" @@ -21,4 +20,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl index de0fd811c86b..feef53e47ab8 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_pvp.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_pvp] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_pvp_instance_name"}}" region = "asia-northeast1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } 
deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_pvp] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl index 6b36e0c5c05a..9664d733332e 100644 --- a/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_mysql_instance_replica.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_mysql_instance_primary] resource "google_sql_database_instance" "primary" { name = "{{index $.Vars "mysql_primary_instance_name"}}" region = "europe-west4" @@ -12,9 +11,7 @@ resource "google_sql_database_instance" "primary" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_primary] -# [START cloud_sql_mysql_instance_replica] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "mysql_replica_instance_name"}}" master_instance_name = google_sql_database_instance.primary.name @@ -32,4 +29,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_mysql_instance_replica] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl index 77bd979fa7d1..1441006a0b88 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_authorized_network.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_authorized_network] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_with_authorized_network"}}" region = "us-central1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" 
"{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl index be494aa10a38..94043bb1c485 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_backup] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_backup"}}" region = "us-central1" @@ -12,4 +11,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_backup] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl index 523fe2128d10..93476520866b 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_location.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_backup_location] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_backup_location"}}" region = "us-central1" @@ -12,4 +11,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl index 5851ba5db1b5..b797a1f8e699 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_backup_retention.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_backup_retention] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_backup_retention"}}" region = "us-central1" @@ -15,4 +14,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl index d0a68e4b6c9c..fd519d57b23e 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_clone.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_source] resource "google_sql_database_instance" "source" { name = "{{index $.Vars "postgres_instance_source_name"}}" region = "us-central1" @@ -8,9 +7,7 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_source] -# [START cloud_sql_postgres_instance_clone] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance_clone_name"}}" region = "us-central1" @@ -20,4 +17,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_clone] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl index eec472895808..eed5805b0096 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_flags.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_flags] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_instance"}}" region = "us-central1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_flags] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl index 8fbb18db8910..4ab67a49c708 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_public_ip.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_public_ip] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { database_version = "POSTGRES_14" name = "{{index $.Vars "postgres_public_ip_instance_name"}}" @@ -19,4 +18,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl index d9036765b51c..5181bd859d46 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_pvp.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_pvp] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_pvp_instance_name"}}" region = "asia-northeast1" @@ -17,4 +16,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } 
deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_pvp] diff --git a/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl index 8f205eb48d40..180fa60611e5 100644 --- a/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_postgres_instance_replica.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_postgres_instance_primary] resource "google_sql_database_instance" "primary" { name = "{{index $.Vars "postgres_primary_instance_name"}}" region = "europe-west4" @@ -11,9 +10,7 @@ resource "google_sql_database_instance" "primary" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_primary] -# [START cloud_sql_postgres_instance_replica] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "postgres_replica_instance_name"}}" master_instance_name = google_sql_database_instance.primary.name @@ -31,4 +28,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_postgres_instance_replica] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl index bb36f15ae286..6cb660a5dfa9 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_authorized_network.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_authorized_network] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_with_authorized_network"}}" region = "us-central1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" 
"{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_authorized_network] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl index 80b315b42fe3..ea04446a312c 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_backup] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_backup"}}" region = "us-central1" @@ -13,4 +12,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_backup] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl index 46cbacab1c67..ba2d19cc8eed 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_location.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_backup_location] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_backup_location"}}" region = "us-central1" @@ -13,4 +12,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_backup_location] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl index 0898f0083008..bcf2751fbac2 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_backup_retention.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_backup_retention] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_backup_retention"}}" region = "us-central1" @@ -16,4 +15,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_backup_retention] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl index 121529a7065d..9122adc7ec14 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_clone.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_source] resource "google_sql_database_instance" "source" { name = "{{index $.Vars "sqlserver_instance_source_name"}}" region = "us-central1" @@ -9,9 +8,7 @@ resource "google_sql_database_instance" "source" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_source] -# [START cloud_sql_sqlserver_instance_clone] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance_clone_name"}}" region = "us-central1" @@ -22,4 +19,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_clone] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl index 559ea293afb1..31e878f1a9fc 100644 --- 
a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_flags.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_flags] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_instance"}}" region = "us-central1" @@ -21,4 +20,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_flags] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl index 789aba01520a..86b8090be47a 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_public_ip.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_public_ip] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_public_ip_instance_name"}}" region = "europe-west4" @@ -20,4 +19,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_public_ip] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl index 88d0acc1e8d9..93c26ef17944 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_instance_replica.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloud_sql_sqlserver_instance_primary] resource "google_sql_database_instance" "primary" { name = "{{index $.Vars "sqlserver_primary_instance_name"}}" region = "europe-west4" @@ -12,9 +11,7 @@ resource "google_sql_database_instance" "primary" { } 
deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_primary] -# [START cloud_sql_sqlserver_instance_replica] resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "sqlserver_replica_instance_name"}}" master_instance_name = google_sql_database_instance.primary.name @@ -32,4 +29,3 @@ resource "google_sql_database_instance" "{{$.PrimaryResourceId}}" { } deletion_protection = "{{index $.Vars "deletion_protection"}}" } -# [END cloud_sql_sqlserver_instance_replica] diff --git a/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl index 24ed99f68177..c1d7a1e1d5fa 100644 --- a/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_sqlserver_vm_instance.tf.tmpl @@ -14,7 +14,6 @@ resource "google_compute_subnetwork" "default" { network = google_compute_network.default.id } -# [START cloud_sql_sqlserver_vm_instance] resource "google_compute_instance" "{{$.PrimaryResourceId}}" { provider = google-beta name = "{{index $.Vars "sqlserver_vm"}}" @@ -39,9 +38,7 @@ resource "google_compute_instance" "{{$.PrimaryResourceId}}" { subnetwork = google_compute_subnetwork.default.id } } -# [END cloud_sql_sqlserver_vm_instance] -# [START cloud_sql_sqlserver_vm_firewall_rule] resource "google_compute_firewall" "sql_server_1433" { provider = google-beta name = "{{index $.Vars "sql_server_1433_3"}}" @@ -55,4 +52,3 @@ resource "google_compute_firewall" "sql_server_1433" { priority = 1000 source_ranges = ["0.0.0.0/0"] } -# [END cloud_sql_sqlserver_vm_firewall_rule] diff --git a/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl index 5d248640da3c..189802fbc656 100644 --- a/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/storage_hmac_key.tf.tmpl @@ -1,4 +1,3 @@ -# [START storage_hmac_key] # Create a new service account resource "google_service_account" "service_account" { account_id = "{{index $.Vars "account_id"}}" @@ -8,4 +7,3 @@ resource "google_service_account" "service_account" { resource "google_storage_hmac_key" "{{$.PrimaryResourceId}}" { service_account_email = google_service_account.service_account.email } -# [END storage_hmac_key] diff --git a/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl index c020cc71de31..edc5c07db1f5 100644 --- a/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_make_data_public.tf.tmpl @@ -5,7 +5,6 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { uniform_bucket_level_access = true } -# [START storage_make_data_public] # Make bucket public resource "google_storage_bucket_iam_member" "member" { provider = google-beta @@ -13,4 +12,3 @@ resource "google_storage_bucket_iam_member" "member" { role = "roles/storage.objectViewer" member = "allUsers" } -# [END storage_make_data_public] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl index 0e52b1e699a8..b6557318d04c 100644 --- a/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_new_bucket.tf.tmpl @@ -1,4 +1,3 @@ -# [START storage_create_new_bucket_tf] # Create new storage bucket in the US multi-region # with coldline storage resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { @@ -8,9 +7,7 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { uniform_bucket_level_access = true } -# [END storage_create_new_bucket_tf] -# [START storage_upload_object_tf] # Upload files # Discussion about using 
tf to upload a large number of objects # https://stackoverflow.com/questions/68455132/terraform-copy-multiple-files-to-bucket-at-the-same-time-bucket-creation @@ -24,9 +21,7 @@ resource "google_storage_bucket_object" "default" { content_type = "text/plain" bucket = google_storage_bucket.static.id } -# [END storage_upload_object_tf] -# [START storage_get_object_metadata_tf] # Get object metadata data "google_storage_bucket_object" "default" { name = google_storage_bucket_object.default.name @@ -36,9 +31,7 @@ data "google_storage_bucket_object" "default" { output "object_metadata" { value = data.google_storage_bucket_object.default } -# [END storage_get_object_metadata_tf] -# [START storage_get_bucket_metadata_tf] # Get bucket metadata data "google_storage_bucket" "default" { name = google_storage_bucket.static.id @@ -47,5 +40,4 @@ data "google_storage_bucket" "default" { output "bucket_metadata" { value = data.google_storage_bucket.default } -# [END storage_get_bucket_metadata_tf] diff --git a/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl index b8fd64b062e1..3beadc46890e 100644 --- a/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_object_lifecycle_setting.tf.tmpl @@ -1,4 +1,3 @@ -# [START storage_create_lifecycle_setting_tf] resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { provider = google-beta name = "{{index $.Vars "example_bucket"}}" @@ -14,4 +13,3 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { } } } -# [END storage_create_lifecycle_setting_tf] diff --git a/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl index e586cbbb59d6..b73d62d3826c 100644 --- a/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/storage_pubsub_notifications.tf.tmpl @@ -1,4 +1,3 @@ -# [START storage_create_pubsub_notifications_tf] // Create a Pub/Sub notification. resource "google_storage_notification" "notification" { provider = google-beta @@ -33,4 +32,3 @@ resource "google_pubsub_topic" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "your_topic_name"}}" provider = google-beta } -# [END storage_create_pubsub_notifications_tf] diff --git a/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl b/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl index 826b69285cca..6f7fa483a2b0 100644 --- a/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/storage_static_website.tf.tmpl @@ -1,4 +1,3 @@ -# [START storage_static_website_create_bucket_tf] # Create new storage bucket in the US multi-region # with coldline storage and settings for main_page_suffix and not_found_page resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { @@ -10,18 +9,14 @@ resource "google_storage_bucket" "{{$.PrimaryResourceId}}" { not_found_page = "{{index $.Vars "main_page_suffix"}}" } } -# [END storage_static_website_create_bucket_tf] -# [START storage_static_website_make_bucket_public_tf] # Make bucket public by granting allUsers READER access resource "google_storage_bucket_access_control" "public_rule" { bucket = google_storage_bucket.static_website.id role = "READER" entity = "allUsers" } -# [END storage_static_website_make_bucket_public_tf] -# [START storage_static_website_upload_files_tf] # Upload a simple index.html page to the bucket resource "google_storage_bucket_object" "indexpage" { name = "{{index $.Vars "main_page_suffix"}}" @@ -37,4 +32,3 @@ resource "google_storage_bucket_object" "errorpage" { content_type = "text/html" bucket = google_storage_bucket.static_website.id } -# [END storage_static_website_upload_files_tf] diff --git 
a/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl index 99596d6b90d3..900565b2a1d1 100644 --- a/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_grpc_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_grpc_proxy_basic] resource "google_compute_target_grpc_proxy" "default" { name = "{{index $.Vars "proxy_name"}}" url_map = google_compute_url_map.urlmap.id @@ -86,4 +85,3 @@ resource "google_compute_health_check" "default" { grpc_service_name = "testservice" } } -# [END cloudloadbalancing_target_grpc_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl index 5e5b374f711c..dcce57816ab9 100644 --- a/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_http_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_http_proxy_basic] resource "google_compute_target_http_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_http_proxy_name"}}" url_map = google_compute_url_map.default.id @@ -39,4 +38,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_http_proxy_basic] diff --git a/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl b/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl index 875d95a242f9..9927dcd35ffb 100644 --- a/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_http_proxy_http_keep_alive_timeout.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_http_proxy_http_keep_alive_timeout] resource 
"google_compute_target_http_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_http_proxy_name"}}" http_keep_alive_timeout_sec = 610 @@ -41,4 +40,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_http_proxy_http_keep_alive_timeout] diff --git a/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl b/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl index e07e79da166d..9b25f866cf5d 100644 --- a/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_http_proxy_https_redirect.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_http_proxy_https_redirect] resource "google_compute_target_http_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_http_proxy_name"}}" url_map = google_compute_url_map.default.id @@ -11,4 +10,3 @@ resource "google_compute_url_map" "default" { strip_query = false } } -# [END cloudloadbalancing_target_http_proxy_https_redirect] diff --git a/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl index 5cfab5833370..087268c78f28 100644 --- a/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_https_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_https_proxy_basic] resource "google_compute_target_https_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_https_proxy_name"}}" url_map = google_compute_url_map.default.id @@ -48,4 +47,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_https_proxy_basic] diff --git a/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl 
b/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl index fe52d17cb089..075c573196f7 100644 --- a/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_https_proxy_http_keep_alive_timeout.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_https_proxy_http_keep_alive_timeout] resource "google_compute_target_https_proxy" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "target_https_proxy_name"}}" http_keep_alive_timeout_sec = 610 @@ -50,4 +49,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_https_proxy_http_keep_alive_timeout] diff --git a/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl b/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl index 08f749b2793c..11c28dfea258 100644 --- a/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_https_proxy_mtls.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_https_proxy_mtls] data "google_project" "project" { provider = google-beta } @@ -90,4 +89,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END cloudloadbalancing_target_https_proxy_mtls] diff --git a/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl index 4167a0c8b1bc..7533c8ab2c1a 100644 --- a/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_ssl_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_ssl_proxy_basic] resource "google_compute_target_ssl_proxy" "default" { name = "{{index $.Vars "target_ssl_proxy_name"}}" backend_service = google_compute_backend_service.default.id @@ -25,4 +24,3 @@ resource 
"google_compute_health_check" "default" { port = "443" } } -# [END cloudloadbalancing_target_ssl_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl index 2c7689ab33c6..ebd96724985b 100644 --- a/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/target_tcp_proxy_basic.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_target_tcp_proxy_basic] resource "google_compute_target_tcp_proxy" "default" { name = "{{index $.Vars "target_tcp_proxy_name"}}" backend_service = google_compute_backend_service.default.id @@ -21,4 +20,3 @@ resource "google_compute_health_check" "default" { port = "443" } } -# [END cloudloadbalancing_target_tcp_proxy_basic] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl index f8b9d8efdb9a..42c461403bf6 100644 --- a/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_bucket_and_service.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_url_map_bucket_and_service] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -73,4 +72,3 @@ resource "google_storage_bucket" "static" { name = "{{index $.Vars "storage_bucket_name"}}" location = "US" } -# [END cloudloadbalancing_url_map_bucket_and_service] diff --git a/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl index 432b692d479d..7d58d12765c4 100644 --- a/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_header_based_routing.tf.tmpl @@ -1,4 +1,3 @@ 
-# [START trafficdirector_url_map_header_based_routing] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "header-based routing example" @@ -73,4 +72,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END trafficdirector_url_map_header_based_routing] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl index 02d79ef69565..5b9994af38f9 100644 --- a/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_parameter_based_routing.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_parameter_based_routing] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "parameter-based routing example" @@ -73,4 +72,3 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -# [END trafficdirector_url_map_parameter_based_routing] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl index 17dc901c7517..74fd0795d039 100644 --- a/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_path_template_match.tf.tmpl @@ -1,4 +1,3 @@ -# [START cloudloadbalancing_url_map_path_template_match] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -74,4 +73,3 @@ resource "google_storage_bucket" "static" { name = "{{index $.Vars "storage_bucket_name"}}" location = "US" } -# [END cloudloadbalancing_url_map_path_template_match] diff --git 
a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl index 72e92f8fb516..f401ca61a072 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_path] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -102,4 +101,3 @@ resource "google_compute_health_check" "default" { port = 80 } } -# [END trafficdirector_url_map_traffic_director_path] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl index ad2340dc3cc3..b35f1cca09fd 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_path_partial.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_path_partial] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -72,4 +71,3 @@ resource "google_compute_health_check" "default" { } } -# [END trafficdirector_url_map_traffic_director_path_partial] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl index a711552980c2..9f75e5b169de 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_route] resource "google_compute_url_map" 
"{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -82,4 +81,3 @@ resource "google_compute_health_check" "default" { port = 80 } } -# [END trafficdirector_url_map_traffic_director_route] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl index c50a260f71ab..1d1ea0c74ffd 100644 --- a/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/url_map_traffic_director_route_partial.tf.tmpl @@ -1,4 +1,3 @@ -# [START trafficdirector_url_map_traffic_director_route_partial] resource "google_compute_url_map" "{{$.PrimaryResourceId}}" { name = "{{index $.Vars "url_map_name"}}" description = "a description" @@ -53,4 +52,3 @@ resource "google_compute_health_check" "default" { port = 80 } } -# [END trafficdirector_url_map_traffic_director_route_partial] \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/go/vertex_ai_featureonlinestore_featureview_cross_project.tf.tmpl b/mmv1/templates/terraform/examples/go/vertex_ai_featureonlinestore_featureview_cross_project.tf.tmpl new file mode 100644 index 000000000000..05b8eab12560 --- /dev/null +++ b/mmv1/templates/terraform/examples/go/vertex_ai_featureonlinestore_featureview_cross_project.tf.tmpl @@ -0,0 +1,141 @@ +data "google_project" "test_project" { +} + +resource "google_project" "project" { + project_id = "tf-test%{random_suffix}" + name = "tf-test%{random_suffix}" + org_id = "{{index $.TestEnvVars "org_id"}}" + billing_account = "{{index $.TestEnvVars "billing_account"}}" +} + +resource "time_sleep" "wait_60_seconds" { + depends_on = [google_project.project] + + create_duration = "60s" +} + +resource "time_sleep" "wait_30_seconds" { + depends_on = [google_bigquery_dataset_iam_member.viewer] + + create_duration = "30s" +} + 
+resource "google_project_service" "vertexai" { + service = "aiplatform.googleapis.com" + project = google_project.project.project_id + timeouts { + create = "30m" + update = "40m" + } + disable_on_destroy = false + # Needed for CI tests for permissions to propagate, should not be needed for actual usage + depends_on = [time_sleep.wait_60_seconds] +} + +resource "google_bigquery_dataset_iam_member" "viewer" { + project = data.google_project.test_project.project_id + dataset_id = google_bigquery_dataset.sample_dataset.dataset_id + role = "roles/bigquery.dataViewer" + member = "serviceAccount:service-${google_project.project.number}@gcp-sa-aiplatform.iam.gserviceaccount.com" + depends_on = [google_vertex_ai_feature_online_store.featureonlinestore] +} + +resource "google_vertex_ai_feature_online_store" "featureonlinestore" { + name = "{{index $.Vars "name"}}" + project = google_project.project.project_id + labels = { + foo = "bar" + } + region = "us-central1" + bigtable { + auto_scaling { + min_node_count = 1 + max_node_count = 2 + cpu_utilization_target = 80 + } + } + depends_on = [google_project_service.vertexai] +} + +resource "google_bigquery_dataset" "sample_dataset" { + dataset_id = "{{index $.Vars "name"}}" + friendly_name = "test" + description = "This is a test description" + location = "US" +} + +resource "google_bigquery_table" "sample_table" { + deletion_protection = false + dataset_id = google_bigquery_dataset.sample_dataset.dataset_id + table_id = "{{index $.Vars "name"}}" + + schema = < Date: Mon, 8 Jul 2024 20:24:30 +0100 Subject: [PATCH 279/356] Make `TestAccComputeNetworkAttachment_networkAttachmentInstanceUsageExample` be beta-only (#11113) --- mmv1/products/compute/NetworkAttachment.yaml | 1 + mmv1/products/compute/go_NetworkAttachment.yaml | 1 + .../examples/network_attachment_instance_usage.tf.erb | 4 ++++ 3 files changed, 6 insertions(+) diff --git a/mmv1/products/compute/NetworkAttachment.yaml b/mmv1/products/compute/NetworkAttachment.yaml 
index dcfe283e0ca6..dbe41ddb2572 100644 --- a/mmv1/products/compute/NetworkAttachment.yaml +++ b/mmv1/products/compute/NetworkAttachment.yaml @@ -56,6 +56,7 @@ examples: billing_account: :BILLING_ACCT - !ruby/object:Provider::Terraform::Examples name: 'network_attachment_instance_usage' + min_version: beta primary_resource_id: 'default' vars: resource_name: 'basic-network-attachment' diff --git a/mmv1/products/compute/go_NetworkAttachment.yaml b/mmv1/products/compute/go_NetworkAttachment.yaml index 4d5d9677a147..1afc76217e11 100644 --- a/mmv1/products/compute/go_NetworkAttachment.yaml +++ b/mmv1/products/compute/go_NetworkAttachment.yaml @@ -57,6 +57,7 @@ examples: org_id: 'ORG_ID' billing_account: 'BILLING_ACCT' - name: 'network_attachment_instance_usage' + min_version: beta primary_resource_id: 'default' vars: resource_name: 'basic-network-attachment' diff --git a/mmv1/templates/terraform/examples/network_attachment_instance_usage.tf.erb b/mmv1/templates/terraform/examples/network_attachment_instance_usage.tf.erb index b1696a1201fe..6e5963d781c8 100644 --- a/mmv1/templates/terraform/examples/network_attachment_instance_usage.tf.erb +++ b/mmv1/templates/terraform/examples/network_attachment_instance_usage.tf.erb @@ -1,9 +1,11 @@ resource "google_compute_network" "default" { + provider = google-beta name = "<%= ctx[:vars]['network_name'] %>" auto_create_subnetworks = false } resource "google_compute_subnetwork" "default" { + provider = google-beta name = "<%= ctx[:vars]['subnetwork_name'] %>" region = "us-central1" @@ -12,6 +14,7 @@ resource "google_compute_subnetwork" "default" { } resource "google_compute_network_attachment" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta name = "<%= ctx[:vars]['resource_name'] %>" region = "us-central1" description = "my basic network attachment" @@ -21,6 +24,7 @@ resource "google_compute_network_attachment" "<%= ctx[:primary_resource_id] %>" } resource "google_compute_instance" "default" { + provider = google-beta 
name = "<%= ctx[:vars]['instance_name'] %>" zone = "us-central1-a" machine_type = "e2-micro" From ea93c089fdf310dd142175a17909a37705eb7978 Mon Sep 17 00:00:00 2001 From: SizzleHsu Date: Mon, 8 Jul 2024 13:40:23 -0700 Subject: [PATCH 280/356] Make ResourcePolicy updatable to avoid recreation of it. (#11091) --- mmv1/products/compute/ResourcePolicy.yaml | 3 +- ...te_disk_resource_policy_attachment_test.go | 43 +++++++++++- .../compute/resource_compute_disk_test.go.erb | 65 +++++++++++++++++++ 3 files changed, 108 insertions(+), 3 deletions(-) diff --git a/mmv1/products/compute/ResourcePolicy.yaml b/mmv1/products/compute/ResourcePolicy.yaml index e6ba60cf598c..f74e86817f6a 100644 --- a/mmv1/products/compute/ResourcePolicy.yaml +++ b/mmv1/products/compute/ResourcePolicy.yaml @@ -15,8 +15,9 @@ name: 'ResourcePolicy' kind: 'compute#resourcePolicy' base_url: projects/{{project}}/regions/{{region}}/resourcePolicies -immutable: true has_self_link: true +update_verb: :PATCH +update_url: projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}} collection_url_key: 'items' description: | A policy that can be attached to a resource to specify or schedule actions on that resource. 
diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_disk_resource_policy_attachment_test.go b/mmv1/third_party/terraform/services/compute/resource_compute_disk_resource_policy_attachment_test.go index 5d60bd8c9ae5..101a767f8751 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_disk_resource_policy_attachment_test.go +++ b/mmv1/third_party/terraform/services/compute/resource_compute_disk_resource_policy_attachment_test.go @@ -13,7 +13,6 @@ func TestAccComputeDiskResourcePolicyAttachment_update(t *testing.T) { diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) policyName := fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) - policyName2 := fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, @@ -29,7 +28,7 @@ func TestAccComputeDiskResourcePolicyAttachment_update(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccComputeDiskResourcePolicyAttachment_basic(diskName, policyName2), + Config: testAccComputeDiskResourcePolicyAttachment_update(diskName, policyName), }, { ResourceName: "google_compute_disk_resource_policy_attachment.foobar", @@ -80,3 +79,43 @@ resource "google_compute_disk_resource_policy_attachment" "foobar" { } `, diskName, policyName) } + +func testAccComputeDiskResourcePolicyAttachment_update(diskName, policyName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 1000 + type = "pd-extreme" + zone = "us-central1-c" + labels = { + my-label = "my-label-value" + } + provisioned_iops = 90000 +} + +resource "google_compute_resource_policy" "foobar" { + name = "%s" + region = "us-central1" + snapshot_schedule_policy { + schedule { + daily_schedule { + days_in_cycle = 1 + start_time = 
"05:00" + } + } + } +} + +resource "google_compute_disk_resource_policy_attachment" "foobar" { + name = google_compute_resource_policy.foobar.name + disk = google_compute_disk.foobar.name + zone = "us-central1-c" +} +`, diskName, policyName) +} diff --git a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb index 37180c71ddc4..44a21a2d7faf 100644 --- a/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb +++ b/mmv1/third_party/terraform/services/compute/resource_compute_disk_test.go.erb @@ -793,6 +793,38 @@ func TestAccComputeDisk_resourcePolicies(t *testing.T) { } <% end -%> +<% unless version == 'ga' -%> +func TestAccComputeDisk_updateResourcePolicies(t *testing.T) { + t.Parallel() + + diskName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + policyName := fmt.Sprintf("tf-test-policy-%s", acctest.RandString(t, 10)) + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccComputeDisk_resourcePolicies(diskName, policyName), + }, + { + ResourceName: "google_compute_disk.foobar", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccComputeDisk_updateResourcePolicies(diskName, policyName), + }, + { + ResourceName: "google_compute_disk.foobar", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +<% end -%> + <% unless version == 'ga' -%> func TestAccComputeDisk_multiWriter(t *testing.T) { t.Parallel() @@ -1222,6 +1254,39 @@ resource "google_compute_disk" "foobar" { } <% end -%> +<% unless version == 'ga' -%> +func testAccComputeDisk_updateResourcePolicies(diskName, policyName string) string { + return fmt.Sprintf(` +data "google_compute_image" "my_image" { + family = "debian-11" + project = "debian-cloud" +} + +resource 
"google_compute_resource_policy" "foo" { + name = "%s" + region = "us-central1" + snapshot_schedule_policy { + schedule { + daily_schedule { + days_in_cycle = 1 + start_time = "05:00" + } + } + } +} + +resource "google_compute_disk" "foobar" { + name = "%s" + image = data.google_compute_image.my_image.self_link + size = 50 + type = "pd-ssd" + zone = "us-central1-a" + resource_policies = [google_compute_resource_policy.foo.self_link] +} +`, policyName, diskName) +} +<% end -%> + <% unless version == 'ga' -%> func testAccComputeDisk_multiWriter(instance string, diskName string, enableMultiwriter bool) string { return fmt.Sprintf(` From 244ffe17a74221a4f3907aac6466451614a18162 Mon Sep 17 00:00:00 2001 From: bohrjoce Date: Mon, 8 Jul 2024 16:42:41 -0400 Subject: [PATCH 281/356] Add beta support for keyAccessJustificationsPolicy to KMS module (#10792) --- mmv1/products/kms/CryptoKey.yaml | 23 ++++++ .../kms/resource_kms_crypto_key_test.go | 80 +++++++++++++++++++ 2 files changed, 103 insertions(+) diff --git a/mmv1/products/kms/CryptoKey.yaml b/mmv1/products/kms/CryptoKey.yaml index b1b6fc705338..f3f7aeeab0eb 100644 --- a/mmv1/products/kms/CryptoKey.yaml +++ b/mmv1/products/kms/CryptoKey.yaml @@ -164,3 +164,26 @@ properties: The resource name of the backend environment associated with all CryptoKeyVersions within this CryptoKey. The resource name is in the format "projects/*/locations/*/ekmConnections/*" and only applies to "EXTERNAL_VPC" keys. default_from_api: true + - !ruby/object:Api::Type::NestedObject + name: 'keyAccessJustificationsPolicy' + min_version: beta + description: | + The policy used for Key Access Justifications Policy Enforcement. If this + field is present and this key is enrolled in Key Access Justifications + Policy Enforcement, the policy will be evaluated in encrypt, decrypt, and + sign operations, and the operation will fail if rejected by the policy. The + policy is defined by specifying zero or more allowed justification codes. 
+ https://cloud.google.com/assured-workloads/key-access-justifications/docs/justification-codes + By default, this field is absent, and all justification codes are allowed. + This field is currently in beta and is subject to change. + default_from_api: true + update_mask_fields: + - 'keyAccessJustificationsPolicy' + properties: + - !ruby/object:Api::Type::Array + item_type: Api::Type::String + name: 'allowedAccessReasons' + description: | + The list of allowed reasons for access to this CryptoKey. Zero allowed + access reasons means all encrypt, decrypt, and sign operations for + this CryptoKey will fail. diff --git a/mmv1/third_party/terraform/services/kms/resource_kms_crypto_key_test.go b/mmv1/third_party/terraform/services/kms/resource_kms_crypto_key_test.go index 558aee66e084..e369c69ff61e 100644 --- a/mmv1/third_party/terraform/services/kms/resource_kms_crypto_key_test.go +++ b/mmv1/third_party/terraform/services/kms/resource_kms_crypto_key_test.go @@ -317,6 +317,53 @@ func TestAccKmsCryptoKey_destroyDuration(t *testing.T) { }) } +func TestAccKmsCryptoKey_keyAccessJustificationsPolicy(t *testing.T) { + t.Parallel() + + projectId := fmt.Sprintf("tf-test-%d", acctest.RandInt(t)) + projectOrg := envvar.GetTestOrgFromEnv(t) + location := envvar.GetTestRegionFromEnv() + projectBillingAccount := envvar.GetTestBillingAccountFromEnv(t) + keyRingName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + cryptoKeyName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + allowedAccessReason := "CUSTOMER_INITIATED_SUPPORT" + updatedAllowedAccessReason := "GOOGLE_INITIATED_SERVICE" + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testGoogleKmsCryptoKey_keyAccessJustificationsPolicy(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, allowedAccessReason), + }, + { + ResourceName: 
"google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + { + Config: testGoogleKmsCryptoKey_keyAccessJustificationsPolicy(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, updatedAllowedAccessReason), + }, + { + ResourceName: "google_kms_crypto_key.crypto_key", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"labels", "terraform_labels"}, + }, + // Use a separate TestStep rather than a CheckDestroy because we need the project to still exist. + { + Config: testGoogleKmsCryptoKey_removed(projectId, projectOrg, projectBillingAccount, keyRingName), + Check: resource.ComposeTestCheckFunc( + testAccCheckGoogleKmsCryptoKeyWasRemovedFromState("google_kms_crypto_key.crypto_key"), + testAccCheckGoogleKmsCryptoKeyVersionsDestroyed(t, projectId, location, keyRingName, cryptoKeyName), + testAccCheckGoogleKmsCryptoKeyRotationDisabled(t, projectId, location, keyRingName, cryptoKeyName), + ), + }, + }, + }) +} + func TestAccKmsCryptoKey_importOnly(t *testing.T) { t.Parallel() @@ -789,6 +836,39 @@ resource "google_kms_crypto_key" "crypto_key" { `, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName) } +func testGoogleKmsCryptoKey_keyAccessJustificationsPolicy(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, allowed_access_reason string) string { + return fmt.Sprintf(` +resource "google_project" "acceptance" { + name = "%s" + project_id = "%s" + org_id = "%s" + billing_account = "%s" +} + +resource "google_project_service" "acceptance" { + project = google_project.acceptance.project_id + service = "cloudkms.googleapis.com" +} + +resource "google_kms_key_ring" "key_ring" { + project = google_project_service.acceptance.project + name = "%s" + location = "us-central1" +} + +resource "google_kms_crypto_key" "crypto_key" { + name = "%s" + key_ring = 
google_kms_key_ring.key_ring.id + labels = { + key = "value" + } + key_access_justifications_policy { + allowed_access_reasons = ["%s"] + } +} +`, projectId, projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName, allowed_access_reason) +} + func testGoogleKmsCryptoKey_importOnly(projectId, projectOrg, projectBillingAccount, keyRingName, cryptoKeyName string) string { return fmt.Sprintf(` resource "google_project" "acceptance" { From a72543a018c089c8579bce785ed34589af22e174 Mon Sep 17 00:00:00 2001 From: Zhenhua Li Date: Mon, 8 Jul 2024 16:36:55 -0700 Subject: [PATCH 282/356] Fix path in go convert custom templates exception list (#11121) --- mmv1/template-converter.go | 2 +- .../iam/example_config_body/go/api_gateway_api_config.tf.tmpl | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/mmv1/template-converter.go b/mmv1/template-converter.go index 3193d5d0841c..d6f815d77566 100644 --- a/mmv1/template-converter.go +++ b/mmv1/template-converter.go @@ -692,7 +692,7 @@ func checkExceptionList(filePath string) bool { "constants/router_nat_validate_action_active_range.go", "unordered_list_customize_diff", "default_if_empty", - "iam/example_config_body/go/api_gateway_api_config.tf.tmpl", + "iam/example_config_body/api_gateway_api_config.tf.erb", // TODO: remove the following files from the exception list after all of the services are migrated to Go // It will generate diffs when partial services are migrated. 
diff --git a/mmv1/templates/terraform/iam/example_config_body/go/api_gateway_api_config.tf.tmpl b/mmv1/templates/terraform/iam/example_config_body/go/api_gateway_api_config.tf.tmpl index 6c023f4146fb..56ac3cb6a3b8 100644 --- a/mmv1/templates/terraform/iam/example_config_body/go/api_gateway_api_config.tf.tmpl +++ b/mmv1/templates/terraform/iam/example_config_body/go/api_gateway_api_config.tf.tmpl @@ -1,2 +1,3 @@ + {{- ""}} api = google_api_gateway_api_config.api_cfg.api api_config = google_api_gateway_api_config.api_cfg.api_config_id \ No newline at end of file From 48062fba316ff3f5b9c2844ad53fdf05899154b5 Mon Sep 17 00:00:00 2001 From: Shubham Sahu <199201shubhamsahu@gmail.com> Date: Tue, 9 Jul 2024 19:35:51 +0530 Subject: [PATCH 283/356] Add documentation for Switchover operation (#11077) --- mmv1/products/alloydb/Cluster.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mmv1/products/alloydb/Cluster.yaml b/mmv1/products/alloydb/Cluster.yaml index f323441301b3..ea2fcce5a130 100644 --- a/mmv1/products/alloydb/Cluster.yaml +++ b/mmv1/products/alloydb/Cluster.yaml @@ -18,6 +18,13 @@ docs: !ruby/object:Provider::Terraform::Docs Users can promote a secondary cluster to a primary cluster with the help of `cluster_type`. To promote, users have to set the `cluster_type` property as `PRIMARY` and remove the `secondary_config` field from cluster configuration. [See Example](https://github.com/hashicorp/terraform-provider-google/pull/16413). + + Switchover is supported in terraform by refreshing the state of the terraform configurations. + The switchover operation still needs to be called outside of terraform. + After the switchover operation is completed successfully: + 1. Refresh the state of the AlloyDB resources by running `terraform apply -refresh-only --auto-approve` . + 2. Manually update the terraform configuration file(s) to match the actual state of the resources by modifying the `cluster_type` and `secondary_config` fields. + 3. 
Verify the sync of terraform state by running `terraform plan` and ensure that the infrastructure matches the configuration and no changes are required. self_link: 'projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}' base_url: 'projects/{{project}}/locations/{{location}}/clusters' create_url: 'projects/{{project}}/locations/{{location}}/clusters?clusterId={{cluster_id}}' From 91abd111f4861ea0a260d6e338a65cc054f5637c Mon Sep 17 00:00:00 2001 From: sahsagar-google <126025352+sahsagar-google@users.noreply.github.com> Date: Tue, 9 Jul 2024 09:59:07 -0700 Subject: [PATCH 284/356] New data source for google_gke_hub_membership_binding (#11032) --- .../provider/provider_mmv1_resources.go.erb | 1 + ...ource_google_gke_hub_membership_binding.go | 51 ++++++++++++++ ..._google_gke_hub_membership_binding_test.go | 70 +++++++++++++++++++ 3 files changed, 122 insertions(+) create mode 100644 mmv1/third_party/terraform/services/gkehub2/data_source_google_gke_hub_membership_binding.go create mode 100644 mmv1/third_party/terraform/services/gkehub2/data_source_google_gke_hub_membership_binding_test.go diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 73fa0d83fe75..2634edf4edbd 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -123,6 +123,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_dns_managed_zone": dns.DataSourceDnsManagedZone(), "google_dns_managed_zones": dns.DataSourceDnsManagedZones(), "google_dns_record_set": dns.DataSourceDnsRecordSet(), + "google_gke_hub_membership_binding": gkehub2.DataSourceGoogleGkeHubMembershipBinding(), "google_filestore_instance": filestore.DataSourceGoogleFilestoreInstance(), "google_iam_policy": resourcemanager.DataSourceGoogleIamPolicy(), "google_iam_role": resourcemanager.DataSourceGoogleIamRole(), 
diff --git a/mmv1/third_party/terraform/services/gkehub2/data_source_google_gke_hub_membership_binding.go b/mmv1/third_party/terraform/services/gkehub2/data_source_google_gke_hub_membership_binding.go new file mode 100644 index 000000000000..d12b8f3d6713 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub2/data_source_google_gke_hub_membership_binding.go @@ -0,0 +1,51 @@ +package gkehub2 + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" +) + +func DataSourceGoogleGkeHubMembershipBinding() *schema.Resource { + dsSchema := tpgresource.DatasourceSchemaFromResourceSchema(ResourceGKEHub2MembershipBinding().Schema) + tpgresource.AddRequiredFieldsToSchema(dsSchema, "membership_binding_id") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "membership_id") + tpgresource.AddRequiredFieldsToSchema(dsSchema, "location") + tpgresource.AddOptionalFieldsToSchema(dsSchema, "project") + + return &schema.Resource{ + Read: dataSourceGoogleGkeHubMembershipBindingRead, + Schema: dsSchema, + } +} + +func dataSourceGoogleGkeHubMembershipBindingRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + + id, err := tpgresource.ReplaceVars(d, config, "projects/{{project}}/locations/{{location}}/memberships/{{membership_id}}/bindings/{{membership_binding_id}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + + d.SetId(id) + + err = resourceGKEHub2MembershipBindingRead(d, meta) + if err != nil { + return err + } + + if err := tpgresource.SetDataSourceLabels(d); err != nil { + return err + } + + if err := tpgresource.SetDataSourceAnnotations(d); err != nil { + return err + } + + if d.Id() == "" { + return fmt.Errorf("%s not found", id) + } + return nil +} diff --git 
a/mmv1/third_party/terraform/services/gkehub2/data_source_google_gke_hub_membership_binding_test.go b/mmv1/third_party/terraform/services/gkehub2/data_source_google_gke_hub_membership_binding_test.go new file mode 100644 index 000000000000..094aad12c882 --- /dev/null +++ b/mmv1/third_party/terraform/services/gkehub2/data_source_google_gke_hub_membership_binding_test.go @@ -0,0 +1,70 @@ +package gkehub2_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccDataSourceGoogleGKEHub2MembershipBinding_basic(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "org_id": envvar.GetTestOrgFromEnv(t), + "billing_account": envvar.GetTestBillingAccountFromEnv(t), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: testAccCheckGKEHub2MembershipBindingDestroyProducer(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccDataSourceGoogleGKEHub2MembershipBinding_basic(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceState("data.google_gke_hub_membership_binding.example", "google_gke_hub_membership_binding.example"), + ), + }, + }, + }) +} + +func testAccDataSourceGoogleGKEHub2MembershipBinding_basic(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_gke_hub_membership" "example" { + membership_id = "tf-test-membership%{random_suffix}" +} + +resource "google_gke_hub_scope" "example" { + scope_id = "tf-test-scope%{random_suffix}" +} + +resource "google_gke_hub_membership_binding" "example" { + membership_binding_id = 
"tf-test-membership-binding%{random_suffix}" + scope = google_gke_hub_scope.example.name + membership_id = "tf-test-membership%{random_suffix}" + location = "global" + labels = { + keyb = "valueb" + keya = "valuea" + keyc = "valuec" + } + depends_on = [ + google_gke_hub_membership.example, + google_gke_hub_scope.example + ] +} + +data "google_gke_hub_membership_binding" "example" { + location = google_gke_hub_membership_binding.example.location + membership_id = google_gke_hub_membership_binding.example.membership_id + membership_binding_id = google_gke_hub_membership_binding.example.membership_binding_id +} +`, context) +} From e0ef75935164a4a48fb3464bb7bfa91f513db760 Mon Sep 17 00:00:00 2001 From: "YAMAMORI, Akihiro" <6166778+gecko655@users.noreply.github.com> Date: Wed, 10 Jul 2024 02:45:43 +0900 Subject: [PATCH 285/356] Fix wrong field value example in the iap client resource (#11124) --- mmv1/products/iap/Client.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mmv1/products/iap/Client.yaml b/mmv1/products/iap/Client.yaml index 232516d690d7..fa92d06e744c 100644 --- a/mmv1/products/iap/Client.yaml +++ b/mmv1/products/iap/Client.yaml @@ -58,7 +58,7 @@ parameters: description: | Identifier of the brand to which this client is attached to. The format is - `projects/{project_number}/brands/{brand_id}/identityAwareProxyClients/{client_id}`. + `projects/{project_number}/brands/{brand_id}`. 
immutable: true required: true url_param_only: true From f9598bbfc513e6fc92a6bed4cf08bc0843a6ff38 Mon Sep 17 00:00:00 2001 From: vijaykanthm Date: Tue, 9 Jul 2024 11:22:29 -0700 Subject: [PATCH 286/356] Add Resource SCC V1 Project Notification Config (#11004) --- .../ProjectNotificationConfig.yaml | 116 ++++++++++++++++++ ...c_project_notification_config_basic.tf.erb | 14 +++ ...ce_scc_project_notification_config_test.go | 63 ++++++++++ 3 files changed, 193 insertions(+) create mode 100644 mmv1/products/securitycenter/ProjectNotificationConfig.yaml create mode 100644 mmv1/templates/terraform/examples/scc_project_notification_config_basic.tf.erb create mode 100644 mmv1/third_party/terraform/services/securitycenter/resource_scc_project_notification_config_test.go diff --git a/mmv1/products/securitycenter/ProjectNotificationConfig.yaml b/mmv1/products/securitycenter/ProjectNotificationConfig.yaml new file mode 100644 index 000000000000..d76a7db3b229 --- /dev/null +++ b/mmv1/products/securitycenter/ProjectNotificationConfig.yaml @@ -0,0 +1,116 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +--- !ruby/object:Api::Resource +name: 'ProjectNotificationConfig' +base_url: projects/{{project}}/notificationConfigs +self_link: '{{name}}' +create_url: projects/{{project}}/notificationConfigs?configId={{config_id}} +update_verb: :PATCH +update_mask: true +description: | + A Cloud Security Command Center (Cloud SCC) notification configs. 
A + notification config is a Cloud SCC resource that contains the + configuration to send notifications for create/update events of + findings, assets and etc. + ~> **Note:** In order to use Cloud SCC resources, your organization must be enrolled + in [SCC Standard/Premium](https://cloud.google.com/security-command-center/docs/quickstart-security-command-center). + Without doing so, you may run into errors during resource creation. +references: !ruby/object:Api::Resource::ReferenceLinks + guides: + 'Official Documentation': 'https://cloud.google.com/security-command-center/docs' + api: 'https://cloud.google.com/security-command-center/docs/reference/rest/v1/projects.notificationConfigs' +examples: + - !ruby/object:Provider::Terraform::Examples + name: 'scc_project_notification_config_basic' + primary_resource_id: 'custom_notification_config' + vars: + topic_name: 'my-topic' + config_id: 'my-config' + test_env_vars: + project: :PROJECT_NAME + ignore_read_extra: + - 'project' +custom_code: !ruby/object:Provider::Terraform::CustomCode + custom_import: templates/terraform/custom_import/self_link_as_name_set_project.go.erb + post_create: templates/terraform/post_create/set_computed_name.erb +parameters: + - !ruby/object:Api::Type::String + name: configId + required: true + immutable: true + url_param_only: true + description: | + This must be unique within the organization. +properties: + - !ruby/object:Api::Type::String + name: name + output: true + description: | + The resource name of this notification config, in the format + `projects/{{projectId}}/notificationConfigs/{{config_id}}`. + - !ruby/object:Api::Type::String + name: description + description: | + The description of the notification config (max of 1024 characters). + validation: !ruby/object:Provider::Terraform::Validation + function: 'validation.StringLenBetween(0, 1024)' + - !ruby/object:Api::Type::String + name: pubsubTopic + required: true + description: | + The Pub/Sub topic to send notifications to. 
Its format is + "projects/[project_id]/topics/[topic]". + - !ruby/object:Api::Type::String + name: serviceAccount + output: true + description: | + The service account that needs "pubsub.topics.publish" permission to + publish to the Pub/Sub topic. + - !ruby/object:Api::Type::NestedObject + name: streamingConfig + required: true + description: | + The config for triggering streaming-based notifications. + update_mask_fields: + - 'streamingConfig.filter' + properties: + - !ruby/object:Api::Type::String + name: filter + required: true + description: | + Expression that defines the filter to apply across create/update + events of assets or findings as specified by the event type. The + expression is a list of zero or more restrictions combined via + logical operators AND and OR. Parentheses are supported, and OR + has higher precedence than AND. + + Restrictions have the form and may have + a - character in front of them to indicate negation. The fields + map to those defined in the corresponding resource. + + The supported operators are: + + * = for all value types. + * >, <, >=, <= for integer values. + * :, meaning substring matching, for strings. + + The supported value types are: + + * string literals in quotes. + * integer literals without quotes. + * boolean literals true and false without quotes. + + See + [Filtering notifications](https://cloud.google.com/security-command-center/docs/how-to-api-filter-notifications) + for information on how to write a filter. 
diff --git a/mmv1/templates/terraform/examples/scc_project_notification_config_basic.tf.erb b/mmv1/templates/terraform/examples/scc_project_notification_config_basic.tf.erb new file mode 100644 index 000000000000..d353c3b667f8 --- /dev/null +++ b/mmv1/templates/terraform/examples/scc_project_notification_config_basic.tf.erb @@ -0,0 +1,14 @@ +resource "google_pubsub_topic" "scc_project_notification" { + name = "<%= ctx[:vars]['topic_name'] %>" +} + +resource "google_scc_project_notification_config" "<%= ctx[:primary_resource_id] %>" { + config_id = "<%= ctx[:vars]['config_id'] %>" + project = "<%= ctx[:test_env_vars]['project'] %>" + description = "My custom Cloud Security Command Center Finding Notification Configuration" + pubsub_topic = google_pubsub_topic.scc_project_notification.id + + streaming_config { + filter = "category = \"OPEN_FIREWALL\" AND state = \"ACTIVE\"" + } +} diff --git a/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_notification_config_test.go b/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_notification_config_test.go new file mode 100644 index 000000000000..c3416289682f --- /dev/null +++ b/mmv1/third_party/terraform/services/securitycenter/resource_scc_project_notification_config_test.go @@ -0,0 +1,63 @@ +package securitycenter_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" + "github.com/hashicorp/terraform-provider-google/google/envvar" +) + +func TestAccSecurityCenterProjectNotificationConfig_updateStreamingConfigFilter(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "project": envvar.GetTestProjectFromEnv(), + "random_suffix": acctest.RandString(t, 10), + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + CheckDestroy: 
testAccCheckSecurityCenterProjectNotificationConfigDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccSecurityCenterProjectNotificationConfig_sccProjectNotificationConfigBasicExample(context), + }, + { + ResourceName: "google_scc_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "config_id"}, + }, + { + Config: testAccSecurityCenterProjectNotificationConfig_updateStreamingConfigFilter(context), + }, + { + ResourceName: "google_scc_project_notification_config.custom_notification_config", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"project", "config_id"}, + }, + }, + }) +} + +func testAccSecurityCenterProjectNotificationConfig_updateStreamingConfigFilter(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_pubsub_topic" "scc_project_notification" { + name = "tf-test-my-topic%{random_suffix}" +} + +resource "google_scc_project_notification_config" "custom_notification_config" { + config_id = "tf-test-my-config%{random_suffix}" + project = "%{project}" + description = "My custom Cloud Security Command Center Finding Notification Configuration" + pubsub_topic = google_pubsub_topic.scc_project_notification.id + + streaming_config { + filter = "category = \"OPEN_FIREWALL\"" + } +} +`, context) +} From 07fb117adfe662240e7719aa03c1cc63761ea34b Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Tue, 9 Jul 2024 20:12:51 +0100 Subject: [PATCH 287/356] Update ACM version to highest valid version number (#11110) --- ...rce_gke_hub_feature_membership_test.go.erb | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.erb b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.erb index 
a7ab46c640a6..269e2f079364 100644 --- a/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.erb +++ b/mmv1/third_party/terraform/services/gkehub/resource_gke_hub_feature_membership_test.go.erb @@ -107,7 +107,7 @@ resource "google_gke_hub_feature_membership" "feature_member_1" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" config_sync { source_format = "hierarchy" git { @@ -124,7 +124,7 @@ resource "google_gke_hub_feature_membership" "feature_member_2" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_second.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" config_sync { source_format = "hierarchy" git { @@ -156,7 +156,7 @@ resource "google_gke_hub_feature_membership" "feature_member_1" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" config_sync { source_format = "hierarchy" git { @@ -173,7 +173,7 @@ resource "google_gke_hub_feature_membership" "feature_member_2" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_second.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" config_sync { source_format = "hierarchy" git { @@ -211,7 +211,7 @@ resource "google_gke_hub_feature_membership" "feature_member_2" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_second.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" config_sync { source_format = "unstructured" git { @@ -239,7 +239,7 @@ resource "google_gke_hub_feature_membership" "feature_member_3" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_third.membership_id configmanagement { - 
version = "1.15.1" + version = "1.18.2" config_sync { source_format = "hierarchy" git { @@ -267,7 +267,7 @@ resource "google_gke_hub_feature_membership" "feature_member_4" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_fourth.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" policy_controller { enabled = true audit_interval_seconds = "100" @@ -304,7 +304,7 @@ resource "google_gke_hub_feature_membership" "feature_member_3" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_third.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" policy_controller { enabled = true audit_interval_seconds = "100" @@ -421,7 +421,7 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" config_sync { git { sync_repo = "https://github.com/hashicorp/terraform" @@ -487,7 +487,7 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" config_sync { git { sync_repo = "https://github.com/hashicorp/terraform" @@ -559,7 +559,7 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" config_sync { git { sync_repo = "https://github.com/hashicorp/terraform" @@ -648,7 +648,7 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_acmoci.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" 
config_sync { source_format = "unstructured" oci { @@ -697,7 +697,7 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_acmoci.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" config_sync { source_format = "hierarchy" oci { @@ -746,7 +746,7 @@ resource "google_gke_hub_feature_membership" "feature_member" { feature = google_gke_hub_feature.feature.name membership = google_gke_hub_membership.membership_acmoci.membership_id configmanagement { - version = "1.15.1" + version = "1.18.2" policy_controller { enabled = true audit_interval_seconds = "100" From 48507d229ca7d8d444df782c64efaa0665f95cee Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Tue, 9 Jul 2024 16:33:51 -0500 Subject: [PATCH 288/356] Sync version_6_upgrade.html.markdown (#11128) --- .../website/docs/guides/version_6_upgrade.html.markdown | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown b/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown index a32f7fa8fb58..089b4db3abc3 100644 --- a/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown +++ b/mmv1/third_party/terraform/website/docs/guides/version_6_upgrade.html.markdown @@ -113,3 +113,9 @@ Description of the change and how users should adjust their configuration (if ne ### `settings.ip_configuration.require_ssl` is now removed Removed in favor of field `settings.ip_configuration.ssl_mode`. + +## Resource: `google_pubsub_topic` + +### `schema_settings` no longer has a default value + +An empty value means the setting should be cleared. 
From 347145739efc5eebe07a3006b8246b29525d8384 Mon Sep 17 00:00:00 2001 From: varsharmavs Date: Tue, 9 Jul 2024 21:52:17 +0000 Subject: [PATCH 289/356] promote resource privileged_access_manager_entitlement to ga (#11094) Co-authored-by: Nick Elliot --- .../privilegedaccessmanager/Entitlement.yaml | 2 -- .../privilegedaccessmanager/product.yaml | 5 ++++- ...ivileged_access_manager_entitlement.go.erb | 2 +- ...ed_access_manager_entitlement_basic.tf.erb | 21 +++++++++++++------ ...ged_access_manager_entitlement_test.go.erb | 7 +------ 5 files changed, 21 insertions(+), 16 deletions(-) diff --git a/mmv1/products/privilegedaccessmanager/Entitlement.yaml b/mmv1/products/privilegedaccessmanager/Entitlement.yaml index 3baf807ce452..5d8e72387418 100644 --- a/mmv1/products/privilegedaccessmanager/Entitlement.yaml +++ b/mmv1/products/privilegedaccessmanager/Entitlement.yaml @@ -23,12 +23,10 @@ description: | An Entitlement defines the eligibility of a set of users to obtain a predefined access for some time possibly after going through an approval workflow. 
update_verb: :PATCH update_mask: true -min_version: beta autogen_async: true examples: - !ruby/object:Provider::Terraform::Examples name: "privileged_access_manager_entitlement_basic" - min_version: beta primary_resource_id: "tfentitlement" vars: entitlement_id: "example-entitlement" diff --git a/mmv1/products/privilegedaccessmanager/product.yaml b/mmv1/products/privilegedaccessmanager/product.yaml index 557e754ca62a..c669b2052162 100644 --- a/mmv1/products/privilegedaccessmanager/product.yaml +++ b/mmv1/products/privilegedaccessmanager/product.yaml @@ -14,8 +14,11 @@ --- !ruby/object:Api::Product versions: - !ruby/object:Api::Product::Version - base_url: https://privilegedaccessmanager.googleapis.com/v1beta/ + name: ga + base_url: https://privilegedaccessmanager.googleapis.com/v1/ + - !ruby/object:Api::Product::Version name: beta + base_url: https://privilegedaccessmanager.googleapis.com/v1beta/ name: PrivilegedAccessManager display_name: Privileged Access Manager scopes: diff --git a/mmv1/templates/terraform/constants/privileged_access_manager_entitlement.go.erb b/mmv1/templates/terraform/constants/privileged_access_manager_entitlement.go.erb index 43044924a06e..2b99a467de4e 100644 --- a/mmv1/templates/terraform/constants/privileged_access_manager_entitlement.go.erb +++ b/mmv1/templates/terraform/constants/privileged_access_manager_entitlement.go.erb @@ -20,4 +20,4 @@ func validateEntitlementId(v interface{}, k string) (ws []string, errors []error } return -} \ No newline at end of file +} diff --git a/mmv1/templates/terraform/examples/privileged_access_manager_entitlement_basic.tf.erb b/mmv1/templates/terraform/examples/privileged_access_manager_entitlement_basic.tf.erb index abed6ce9d28b..530f1f987f67 100644 --- a/mmv1/templates/terraform/examples/privileged_access_manager_entitlement_basic.tf.erb +++ b/mmv1/templates/terraform/examples/privileged_access_manager_entitlement_basic.tf.erb @@ -1,5 +1,4 @@ resource "google_privileged_access_manager_entitlement" 
"<%= ctx[:primary_resource_id] %>" { - provider = google-beta entitlement_id = "<%= ctx[:vars]['entitlement_id'] %>" location = "global" max_request_duration = "43200s" @@ -8,7 +7,9 @@ resource "google_privileged_access_manager_entitlement" "<%= ctx[:primary_resour unstructured{} } eligible_users { - principals = ["group:test@google.com"] + principals = [ + "group:test@google.com" + ] } privileged_access{ gcp_iam_access{ @@ -21,17 +22,25 @@ resource "google_privileged_access_manager_entitlement" "<%= ctx[:primary_resour } } additional_notification_targets { - admin_email_recipients = ["user@example.com"] - requester_email_recipients = ["user@example.com"] + admin_email_recipients = [ + "user@example.com", + ] + requester_email_recipients = [ + "user@example.com" + ] } approval_workflow { manual_approvals { require_approver_justification = true steps { approvals_needed = 1 - approver_email_recipients = ["user@example.com"] + approver_email_recipients = [ + "user@example.com" + ] approvers { - principals = ["group:test@google.com"] + principals = [ + "group:test@google.com" + ] } } } diff --git a/mmv1/third_party/terraform/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement_test.go.erb b/mmv1/third_party/terraform/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement_test.go.erb index ea752221157c..92fdb61dfcc7 100644 --- a/mmv1/third_party/terraform/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement_test.go.erb +++ b/mmv1/third_party/terraform/services/privilegedaccessmanager/resource_privileged_access_manager_entitlement_test.go.erb @@ -1,6 +1,5 @@ <% autogen_exception -%> package privilegedaccessmanager_test -<% unless version == 'ga' -%> import ( "testing" @@ -21,7 +20,7 @@ func TestAccPrivilegedAccessManagerEntitlement_privilegedAccessManagerEntitlemen acctest.VcrTest(t, resource.TestCase{ PreCheck: func() { acctest.AccTestPreCheck(t) }, - ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderBetaFactories(t), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), CheckDestroy: testAccCheckPrivilegedAccessManagerEntitlementDestroyProducer(t), Steps: []resource.TestStep{ { @@ -49,7 +48,6 @@ func TestAccPrivilegedAccessManagerEntitlement_privilegedAccessManagerEntitlemen func testAccPrivilegedAccessManagerEntitlement_privilegedAccessManagerEntitlementBasicExample_basic(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_privileged_access_manager_entitlement" "tfentitlement" { - provider = google-beta entitlement_id = "tf-test-example-entitlement%{random_suffix}" location = "global" max_request_duration = "43200s" @@ -93,7 +91,6 @@ resource "google_privileged_access_manager_entitlement" "tfentitlement" { func testAccPrivilegedAccessManagerEntitlement_privilegedAccessManagerEntitlementBasicExample_update(context map[string]interface{}) string { return acctest.Nprintf(` resource "google_privileged_access_manager_entitlement" "tfentitlement" { - provider = google-beta entitlement_id = "tf-test-example-entitlement%{random_suffix}" location = "global" max_request_duration = "4300s" @@ -133,5 +130,3 @@ resource "google_privileged_access_manager_entitlement" "tfentitlement" { } `, context) } - -<% end -%> From b33d6de70b36556872c5d0d870116b057878daf2 Mon Sep 17 00:00:00 2001 From: Chris Hawk Date: Tue, 9 Jul 2024 15:19:23 -0700 Subject: [PATCH 290/356] Add data_source_google_site_verification_token (#10999) --- mmv1/products/siteverification/product.yaml | 21 +++ .../components/inputs/services_beta.kt | 5 + .../components/inputs/services_ga.kt | 5 + .../provider/provider_mmv1_resources.go.erb | 2 + ...a_source_google_site_verification_token.go | 147 ++++++++++++++++++ ...rce_google_site_verification_token_test.go | 103 ++++++++++++ .../d/site_verification_token.html.markdown | 84 ++++++++++ 7 files changed, 367 insertions(+) create mode 100644 mmv1/products/siteverification/product.yaml create mode 
100644 mmv1/third_party/terraform/services/siteverification/data_source_google_site_verification_token.go create mode 100644 mmv1/third_party/terraform/services/siteverification/data_source_google_site_verification_token_test.go create mode 100644 mmv1/third_party/terraform/website/docs/d/site_verification_token.html.markdown diff --git a/mmv1/products/siteverification/product.yaml b/mmv1/products/siteverification/product.yaml new file mode 100644 index 000000000000..8960dd7f8c31 --- /dev/null +++ b/mmv1/products/siteverification/product.yaml @@ -0,0 +1,21 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+--- !ruby/object:Api::Product +name: SiteVerification +display_name: Site Verification +versions: + - !ruby/object:Api::Product::Version + name: ga + base_url: https://www.googleapis.com/siteVerification/v1/ +scopes: + - https://www.googleapis.com/auth/siteverification diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt index 92b46f197d84..88daf1edbb61 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_beta.kt @@ -661,6 +661,11 @@ var ServicesListBeta = mapOf( "displayName" to "Serviceusage", "path" to "./google-beta/services/serviceusage" ), + "siteverification" to mapOf( + "name" to "siteverification", + "displayName" to "Siteverification", + "path" to "./google-beta/services/siteverification" + ), "sourcerepo" to mapOf( "name" to "sourcerepo", "displayName" to "Sourcerepo", diff --git a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt index c0d118fdc1b6..1b0d2a1d8a64 100644 --- a/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt +++ b/mmv1/third_party/terraform/.teamcity/components/inputs/services_ga.kt @@ -656,6 +656,11 @@ var ServicesListGa = mapOf( "displayName" to "Serviceusage", "path" to "./google/services/serviceusage" ), + "siteverification" to mapOf( + "name" to "siteverification", + "displayName" to "Siteverification", + "path" to "./google/services/siteverification" + ), "sourcerepo" to mapOf( "name" to "sourcerepo", "displayName" to "Sourcerepo", diff --git a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb index 2634edf4edbd..4941b2ea2104 100644 --- a/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb +++ 
b/mmv1/third_party/terraform/provider/provider_mmv1_resources.go.erb @@ -10,6 +10,7 @@ import ( <% if version == 'ga' -%> "github.com/hashicorp/terraform-provider-google/google/services/composer" + "github.com/hashicorp/terraform-provider-google/google/services/siteverification" <% end -%> "github.com/hashicorp/terraform-provider-google/google/services/container" "github.com/hashicorp/terraform-provider-google/google/services/containeraws" @@ -183,6 +184,7 @@ var handwrittenDatasources = map[string]*schema.Resource{ "google_service_account_id_token": resourcemanager.DataSourceGoogleServiceAccountIdToken(), "google_service_account_jwt": resourcemanager.DataSourceGoogleServiceAccountJwt(), "google_service_account_key": resourcemanager.DataSourceGoogleServiceAccountKey(), + "google_site_verification_token": siteverification.DataSourceSiteVerificationToken(), "google_sourcerepo_repository": sourcerepo.DataSourceGoogleSourceRepoRepository(), "google_spanner_instance": spanner.DataSourceSpannerInstance(), "google_sql_ca_certs": sql.DataSourceGoogleSQLCaCerts(), diff --git a/mmv1/third_party/terraform/services/siteverification/data_source_google_site_verification_token.go b/mmv1/third_party/terraform/services/siteverification/data_source_google_site_verification_token.go new file mode 100644 index 000000000000..0e2cd9facf62 --- /dev/null +++ b/mmv1/third_party/terraform/services/siteverification/data_source_google_site_verification_token.go @@ -0,0 +1,147 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package siteverification + +import ( + "fmt" + "log" + "net/http" + "reflect" + "regexp" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-google/google/tpgresource" + transport_tpg "github.com/hashicorp/terraform-provider-google/google/transport" + "github.com/hashicorp/terraform-provider-google/google/verify" +) + +func DataSourceSiteVerificationToken() *schema.Resource { + return &schema.Resource{ + Read: dataSourceSiteVerificationTokenRead, + + Timeouts: &schema.ResourceTimeout{ + Read: schema.DefaultTimeout(5 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "identifier": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The site identifier. If the type is set to SITE, the identifier is a URL. If the type is +set to INET_DOMAIN, the identifier is a domain name.`, + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"INET_DOMAIN", "SITE"}), + Description: `The type of resource to be verified, either a domain or a web site. Possible values: ["INET_DOMAIN", "SITE"]`, + }, + "verification_method": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidateEnum([]string{"ANALYTICS", "DNS_CNAME", "DNS_TXT", "FILE", "META", "TAG_MANAGER"}), + Description: `The verification method for the Site Verification system to use to verify +this site or domain. 
Possible values: ["ANALYTICS", "DNS_CNAME", "DNS_TXT", "FILE", "META", "TAG_MANAGER"]`, + }, + "token": { + Type: schema.TypeString, + Computed: true, + Description: `The returned token for use in subsequent verification steps.`, + }, + }, + UseJSONNumber: true, + } +} + +func dataSourceSiteVerificationTokenRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*transport_tpg.Config) + userAgent, err := tpgresource.GenerateUserAgentString(d, config.UserAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + site := make(map[string]interface{}) + typeProp, err := expandSiteVerificationTokenType(d.Get("type"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("type"); !tpgresource.IsEmptyValue(reflect.ValueOf(typeProp)) && (ok || !reflect.DeepEqual(v, typeProp)) { + site["type"] = typeProp + } + identifierProp, err := expandSiteVerificationTokenIdentifier(d.Get("identifier"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("identifier"); !tpgresource.IsEmptyValue(reflect.ValueOf(identifierProp)) && (ok || !reflect.DeepEqual(v, identifierProp)) { + site["identifier"] = identifierProp + } + obj["site"] = site + verification_methodProp, err := expandSiteVerificationTokenVerificationMethod(d.Get("verification_method"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("verification_method"); !tpgresource.IsEmptyValue(reflect.ValueOf(verification_methodProp)) && (ok || !reflect.DeepEqual(v, verification_methodProp)) { + obj["verificationMethod"] = verification_methodProp + } + + url, err := tpgresource.ReplaceVars(d, config, "{{SiteVerificationBasePath}}token") + if err != nil { + return err + } + + log.Printf("[DEBUG] Reading Token: %#v", obj) + billingProject := "" + + if parts := regexp.MustCompile(`projects\/([^\/]+)\/`).FindStringSubmatch(url); parts != nil { + billingProject = parts[1] + } + + // err == nil indicates that the 
billing_project value was found + if bp, err := tpgresource.GetBillingProject(d, config); err == nil { + billingProject = bp + } + + headers := make(http.Header) + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + Config: config, + Method: "POST", + Project: billingProject, + RawURL: url, + UserAgent: userAgent, + Body: obj, + Timeout: d.Timeout(schema.TimeoutCreate), + Headers: headers, + }) + if err != nil { + return fmt.Errorf("Error reading Token: %s", err) + } + + // Store the ID now + id, err := tpgresource.ReplaceVars(d, config, "{{identifier}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + if token, ok := res["token"].(string); ok { + d.Set("token", token) + } + + log.Printf("[DEBUG] Finished reading Token %q: %#v", d.Id(), res) + + return nil +} + +func expandSiteVerificationTokenType(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSiteVerificationTokenIdentifier(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} + +func expandSiteVerificationTokenVerificationMethod(v interface{}, d tpgresource.TerraformResourceData, config *transport_tpg.Config) (interface{}, error) { + return v, nil +} diff --git a/mmv1/third_party/terraform/services/siteverification/data_source_google_site_verification_token_test.go b/mmv1/third_party/terraform/services/siteverification/data_source_google_site_verification_token_test.go new file mode 100644 index 000000000000..96b5bdfb7721 --- /dev/null +++ b/mmv1/third_party/terraform/services/siteverification/data_source_google_site_verification_token_test.go @@ -0,0 +1,103 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 +package siteverification_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-provider-google/google/acctest" +) + +func TestAccSiteVerificationToken_siteverificationTokenSite(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "site": "https://www.example.com", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + Steps: []resource.TestStep{ + { + Config: testAccSiteVerificationToken_siteverificationTokenSite(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.google_site_verification_token.site_meta", "token"), + resource.TestCheckResourceAttr("data.google_site_verification_token.site_meta", "type", "SITE"), + resource.TestCheckResourceAttr("data.google_site_verification_token.site_meta", "identifier", context["site"].(string)), + resource.TestCheckResourceAttr("data.google_site_verification_token.site_meta", "verification_method", "META"), + ), + }, + }, + }) +} + +func testAccSiteVerificationToken_siteverificationTokenSite(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + alias = "scoped" + user_project_override = true + scopes = [ + "https://www.googleapis.com/auth/siteverification", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", + ] +} + +data "google_site_verification_token" "site_meta" { + provider = google.scoped + type = "SITE" + identifier = "%{site}" + verification_method = "META" +} +`, context) +} + +func TestAccSiteVerificationToken_siteverificationTokenDomain(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "domain": "www.example.com", + } + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ + "time": {}, + }, + Steps: []resource.TestStep{ + { + Config: testAccSiteVerificationToken_siteverificationTokenDomain(context), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("data.google_site_verification_token.dns_text", "token"), + resource.TestCheckResourceAttr("data.google_site_verification_token.dns_text", "type", "INET_DOMAIN"), + resource.TestCheckResourceAttr("data.google_site_verification_token.dns_text", "identifier", context["domain"].(string)), + resource.TestCheckResourceAttr("data.google_site_verification_token.dns_text", "verification_method", "DNS_TXT"), + ), + }, + }, + }) +} + +func testAccSiteVerificationToken_siteverificationTokenDomain(context map[string]interface{}) string { + return acctest.Nprintf(` +provider "google" { + alias = "scoped" + user_project_override = true + scopes = [ + "https://www.googleapis.com/auth/siteverification", + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", + ] +} + +data "google_site_verification_token" "dns_text" { + provider = google.scoped + type = "INET_DOMAIN" + identifier = "%{domain}" + verification_method = "DNS_TXT" +} +`, context) +} diff --git a/mmv1/third_party/terraform/website/docs/d/site_verification_token.html.markdown b/mmv1/third_party/terraform/website/docs/d/site_verification_token.html.markdown new file mode 100644 index 000000000000..066370eda29d --- /dev/null +++ b/mmv1/third_party/terraform/website/docs/d/site_verification_token.html.markdown @@ -0,0 +1,84 @@ +subcategory: "Site Verification" +description: |- + A verification token is used to demonstrate ownership of a website or domain. +--- + +# google_site_verification_token + +A verification token is used to demonstrate ownership of a website or domain. 
+ + +To get more information about Token, see: + +* [API documentation](https://developers.google.com/site-verification/v1) +* How-to Guides + * [Getting Started](https://developers.google.com/site-verification/v1/getting_started) + + + +## Example Usage - Site Verification via Site META Tag + +```hcl +data "google_site_verification_token" "example" { + type = "SITE" + identifier = "https://www.example.com" + verification_method = "META" +} +``` + +## Example Usage - Site Verification via DNS TXT Record + +```hcl +data "google_site_verification_token" "example" { + type = "INET_DOMAIN" + identifier = "www.example.com" + verification_method = "DNS_TXT" +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `type` - + (Required) + The type of resource to be verified, either a domain or a web site. + Possible values are: `INET_DOMAIN`, `SITE`. + +* `identifier` - + (Required) + The site identifier. If the type is set to SITE, the identifier is a URL. If the type is + set to INET_DOMAIN, the identifier is a domain name. + +* `verification_method` - + (Required) + The verification method for the Site Verification system to use to verify + this site or domain. + Possible values are: `ANALYTICS`, `DNS_CNAME`, `DNS_TXT`, `FILE`, `META`, `TAG_MANAGER`. + + +- - - + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `token` - + The generated token for use in subsequent verification steps. + + +## Timeouts + +This data source provides the following +[Timeouts](https://developer.hashicorp.com/terraform/plugin/sdkv2/resources/retries-and-customizable-timeouts) configuration options: + +- `read` - Default is 5 minutes. + +## User Project Overrides + +This data source supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override). 
From 14548b08a01f90690a4c804394149d08f714d116 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Wed, 10 Jul 2024 02:16:06 +0100 Subject: [PATCH 291/356] Update SarahFrench PTO (#11127) --- .ci/magician/github/membership.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/magician/github/membership.go b/.ci/magician/github/membership.go index c38a935c1360..3255fd85eb96 100644 --- a/.ci/magician/github/membership.go +++ b/.ci/magician/github/membership.go @@ -69,8 +69,8 @@ var ( }, { id: "SarahFrench", - startDate: newDate(2024, 5, 17, bstLoc), - endDate: newDate(2024, 5, 19, bstLoc), + startDate: newDate(2024, 7, 10, bstLoc), + endDate: newDate(2024, 7, 28, bstLoc), }, { id: "shuyama1", From ed5ea59cbf5a8cf9ed189a5d24e16b1d595d24da Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Wed, 10 Jul 2024 02:16:47 +0100 Subject: [PATCH 292/356] Make `TestAccCloudRunV2Job_cloudrunv2JobRunJobExample ` test beta-only (#11114) --- mmv1/products/cloudrunv2/Job.yaml | 1 + mmv1/templates/terraform/examples/cloudrunv2_job_run_job.tf.erb | 1 + 2 files changed, 2 insertions(+) diff --git a/mmv1/products/cloudrunv2/Job.yaml b/mmv1/products/cloudrunv2/Job.yaml index 024f32e0a788..12fb755344ff 100644 --- a/mmv1/products/cloudrunv2/Job.yaml +++ b/mmv1/products/cloudrunv2/Job.yaml @@ -116,6 +116,7 @@ examples: cloud_run_job_name: 'cloudrun-job' - !ruby/object:Provider::Terraform::Examples name: 'cloudrunv2_job_run_job' + min_version: beta primary_resource_id: 'default' primary_resource_name: "fmt.Sprintf(\"tf-test-cloudrun-job%s\", context[\"random_suffix\"\ ])" diff --git a/mmv1/templates/terraform/examples/cloudrunv2_job_run_job.tf.erb b/mmv1/templates/terraform/examples/cloudrunv2_job_run_job.tf.erb index 8197e5833aeb..16725f25fd99 100644 --- a/mmv1/templates/terraform/examples/cloudrunv2_job_run_job.tf.erb +++ 
b/mmv1/templates/terraform/examples/cloudrunv2_job_run_job.tf.erb @@ -1,4 +1,5 @@ resource "google_cloud_run_v2_job" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta name = "<%= ctx[:vars]['cloud_run_job_name'] %>" location = "us-central1" start_execution_token = "start-once-created" From 19e0c3ede2b509b767d13ae4eac4635eabbbb056 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Wed, 10 Jul 2024 02:17:08 +0100 Subject: [PATCH 293/356] Make `TestAccDataprocMetastoreService_dataprocMetastoreServiceAutoscalingNoLimitConfigExample` beta-only (#11111) --- mmv1/products/metastore/Service.yaml | 1 + ...dataproc_metastore_service_autoscaling_no_limit_config.tf.erb | 1 + 2 files changed, 2 insertions(+) diff --git a/mmv1/products/metastore/Service.yaml b/mmv1/products/metastore/Service.yaml index 764d61d23cfe..1836c904df2b 100644 --- a/mmv1/products/metastore/Service.yaml +++ b/mmv1/products/metastore/Service.yaml @@ -160,6 +160,7 @@ examples: metastore_service_name: 'test-service' - !ruby/object:Provider::Terraform::Examples name: 'dataproc_metastore_service_autoscaling_no_limit_config' + min_version: beta primary_resource_id: 'test_resource' vars: metastore_service_name: 'test-service' diff --git a/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_no_limit_config.tf.erb b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_no_limit_config.tf.erb index 069df1b6e767..a27bc222d06d 100644 --- a/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_no_limit_config.tf.erb +++ b/mmv1/templates/terraform/examples/dataproc_metastore_service_autoscaling_no_limit_config.tf.erb @@ -1,4 +1,5 @@ resource "google_dataproc_metastore_service" "<%= ctx[:primary_resource_id] %>" { + provider = google-beta service_id = "<%= ctx[:vars]['metastore_service_name'] %>" location = "us-central1" From 1bd899b3cb5a2ec3d7e3128090509d61098b43a3 Mon Sep 17 00:00:00 2001 From: 
Sebastian Kunze Date: Wed, 10 Jul 2024 18:53:50 +0200 Subject: [PATCH 294/356] Add `member` property to `google_project_service_identity` (#11130) --- .../apigee_environment_patch_update_test.tf.erb | 2 +- .../terraform/examples/apigee_instance_full.tf.erb | 2 +- .../examples/apigee_instance_full_test.tf.erb | 2 +- .../terraform/examples/apigee_nat_address_basic.tf.erb | 2 +- .../examples/apigee_organization_cloud_full.tf.erb | 2 +- ..._organization_cloud_full_disable_vpc_peering.tf.erb | 2 +- ...nization_cloud_full_disable_vpc_peering_test.tf.erb | 2 +- .../apigee_organization_cloud_full_test.tf.erb | 2 +- .../examples/apigee_organization_drz_test.tf.erb | 2 +- .../examples/apigee_organization_retention_test.tf.erb | 2 +- .../terraform/examples/cloudfunctions2_cmek.tf.erb | 2 +- .../examples/cloudfunctions2_cmek_docs.tf.erb | 4 ++-- .../go/apigee_environment_patch_update_test.tf.tmpl | 2 +- .../examples/go/apigee_environment_type_test.tf.tmpl | 2 +- .../terraform/examples/go/apigee_instance_full.tf.tmpl | 2 +- .../examples/go/apigee_instance_full_test.tf.tmpl | 2 +- .../examples/go/apigee_nat_address_basic.tf.tmpl | 2 +- .../examples/go/apigee_organization_cloud_full.tf.tmpl | 2 +- ...organization_cloud_full_disable_vpc_peering.tf.tmpl | 2 +- ...ization_cloud_full_disable_vpc_peering_test.tf.tmpl | 2 +- .../go/apigee_organization_cloud_full_test.tf.tmpl | 2 +- .../examples/go/apigee_organization_drz_test.tf.tmpl | 2 +- .../go/apigee_organization_retention_test.tf.tmpl | 2 +- .../terraform/examples/go/cloudfunctions2_cmek.tf.tmpl | 2 +- .../examples/go/cloudfunctions2_cmek_docs.tf.tmpl | 4 ++-- ...twork_security_tls_inspection_policy_custom.tf.tmpl | 2 +- .../go/privateca_certificate_authority_byo_key.tf.tmpl | 4 ++-- .../terraform/examples/go/sql_instance_cmek.tf.tmpl | 2 +- .../examples/go/sql_instance_iam_condition.tf.tmpl | 2 +- ...etwork_security_tls_inspection_policy_custom.tf.erb | 2 +- .../privateca_certificate_authority_byo_key.tf.erb | 4 ++-- 
.../terraform/examples/sql_instance_cmek.tf.erb | 2 +- .../examples/sql_instance_iam_condition.tf.erb | 2 +- .../go/resource_apigee_environment_type_test.go.tmpl | 2 +- .../resource_apigee_environment_type_test.go.erb | 2 +- ...network_security_tls_inspection_policy_test.go.tmpl | 6 +++--- ..._network_security_tls_inspection_policy_test.go.erb | 6 +++--- .../go/resource_project_service_identity.go.tmpl | 8 ++++++++ .../go/resource_project_service_identity_test.go.tmpl | 10 ++++++++++ .../resource_project_service_identity.go.erb | 8 ++++++++ .../resource_project_service_identity_test.go.erb | 10 ++++++++++ .../spanner/go/resource_spanner_database_test.go.tmpl | 4 ++-- .../spanner/resource_spanner_database_test.go.erb | 4 ++-- .../docs/r/project_service_identity.html.markdown | 3 ++- 44 files changed, 87 insertions(+), 50 deletions(-) diff --git a/mmv1/templates/terraform/examples/apigee_environment_patch_update_test.tf.erb b/mmv1/templates/terraform/examples/apigee_environment_patch_update_test.tf.erb index 0bd61fe84237..b884c2c7f03f 100644 --- a/mmv1/templates/terraform/examples/apigee_environment_patch_update_test.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_environment_patch_update_test.tf.erb @@ -92,7 +92,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/templates/terraform/examples/apigee_instance_full.tf.erb b/mmv1/templates/terraform/examples/apigee_instance_full.tf.erb index 26445ef11b85..0caead6e431c 100644 --- a/mmv1/templates/terraform/examples/apigee_instance_full.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_instance_full.tf.erb @@ -42,7 +42,7 @@ resource "google_kms_crypto_key_iam_member" 
"apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/templates/terraform/examples/apigee_instance_full_test.tf.erb b/mmv1/templates/terraform/examples/apigee_instance_full_test.tf.erb index 8690e4320f25..7b549cc9f036 100644 --- a/mmv1/templates/terraform/examples/apigee_instance_full_test.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_instance_full_test.tf.erb @@ -92,7 +92,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/templates/terraform/examples/apigee_nat_address_basic.tf.erb b/mmv1/templates/terraform/examples/apigee_nat_address_basic.tf.erb index cbcbc32c10dc..5cae72d63b9d 100644 --- a/mmv1/templates/terraform/examples/apigee_nat_address_basic.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_nat_address_basic.tf.erb @@ -42,7 +42,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/templates/terraform/examples/apigee_organization_cloud_full.tf.erb b/mmv1/templates/terraform/examples/apigee_organization_cloud_full.tf.erb index 6d37ffa69dbc..459295a4fa34 100644 --- 
a/mmv1/templates/terraform/examples/apigee_organization_cloud_full.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_organization_cloud_full.tf.erb @@ -42,7 +42,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "org" { diff --git a/mmv1/templates/terraform/examples/apigee_organization_cloud_full_disable_vpc_peering.tf.erb b/mmv1/templates/terraform/examples/apigee_organization_cloud_full_disable_vpc_peering.tf.erb index 42f797ce28c4..f05b6dd2cce2 100644 --- a/mmv1/templates/terraform/examples/apigee_organization_cloud_full_disable_vpc_peering.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_organization_cloud_full_disable_vpc_peering.tf.erb @@ -24,7 +24,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "org" { diff --git a/mmv1/templates/terraform/examples/apigee_organization_cloud_full_disable_vpc_peering_test.tf.erb b/mmv1/templates/terraform/examples/apigee_organization_cloud_full_disable_vpc_peering_test.tf.erb index f39d3fd4e9ef..a6f70393fc76 100644 --- a/mmv1/templates/terraform/examples/apigee_organization_cloud_full_disable_vpc_peering_test.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_organization_cloud_full_disable_vpc_peering_test.tf.erb @@ -57,7 +57,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = 
"serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "<%= ctx[:primary_resource_id] %>" { diff --git a/mmv1/templates/terraform/examples/apigee_organization_cloud_full_test.tf.erb b/mmv1/templates/terraform/examples/apigee_organization_cloud_full_test.tf.erb index 5a859ec6b7e6..f5ae4774fcb7 100644 --- a/mmv1/templates/terraform/examples/apigee_organization_cloud_full_test.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_organization_cloud_full_test.tf.erb @@ -92,7 +92,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "<%= ctx[:primary_resource_id] %>" { diff --git a/mmv1/templates/terraform/examples/apigee_organization_drz_test.tf.erb b/mmv1/templates/terraform/examples/apigee_organization_drz_test.tf.erb index a7c15b4fc072..4189c4be072e 100644 --- a/mmv1/templates/terraform/examples/apigee_organization_drz_test.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_organization_drz_test.tf.erb @@ -96,7 +96,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "<%= ctx[:primary_resource_id] %>" { diff --git a/mmv1/templates/terraform/examples/apigee_organization_retention_test.tf.erb b/mmv1/templates/terraform/examples/apigee_organization_retention_test.tf.erb index 0de08ec9eb4b..e9e569222b51 100644 --- 
a/mmv1/templates/terraform/examples/apigee_organization_retention_test.tf.erb +++ b/mmv1/templates/terraform/examples/apigee_organization_retention_test.tf.erb @@ -92,7 +92,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "<%= ctx[:primary_resource_id] %>" { diff --git a/mmv1/templates/terraform/examples/cloudfunctions2_cmek.tf.erb b/mmv1/templates/terraform/examples/cloudfunctions2_cmek.tf.erb index 0db9b17ad3de..5b9823300b59 100644 --- a/mmv1/templates/terraform/examples/cloudfunctions2_cmek.tf.erb +++ b/mmv1/templates/terraform/examples/cloudfunctions2_cmek.tf.erb @@ -88,7 +88,7 @@ resource "google_kms_crypto_key_iam_member" "gcf_cmek_keyuser_5" { crypto_key_id = "<%= ctx[:vars]['kms_key_name'] %>" role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.ea_sa.email}" + member = google_project_service_identity.ea_sa.member } resource "google_artifact_registry_repository" "encoded-ar-repo" { diff --git a/mmv1/templates/terraform/examples/cloudfunctions2_cmek_docs.tf.erb b/mmv1/templates/terraform/examples/cloudfunctions2_cmek_docs.tf.erb index 8d719665a418..ce52ea01409e 100644 --- a/mmv1/templates/terraform/examples/cloudfunctions2_cmek_docs.tf.erb +++ b/mmv1/templates/terraform/examples/cloudfunctions2_cmek_docs.tf.erb @@ -59,7 +59,7 @@ resource "google_kms_crypto_key_iam_binding" "gcf_cmek_keyuser" { "serviceAccount:service-${data.google_project.project.number}@gcp-sa-artifactregistry.iam.gserviceaccount.com", "serviceAccount:service-${data.google_project.project.number}@gs-project-accounts.iam.gserviceaccount.com", 
"serviceAccount:service-${data.google_project.project.number}@serverless-robot-prod.iam.gserviceaccount.com", - "serviceAccount:${google_project_service_identity.ea_sa.email}", + google_project_service_identity.ea_sa.member, ] depends_on = [ @@ -110,4 +110,4 @@ resource "google_cloudfunctions2_function" "<%= ctx[:primary_resource_id] %>" { google_kms_crypto_key_iam_binding.gcf_cmek_keyuser ] -} \ No newline at end of file +} diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_patch_update_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_patch_update_test.tf.tmpl index bfecee94559b..62f000185328 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_patch_update_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_environment_patch_update_test.tf.tmpl @@ -92,7 +92,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/templates/terraform/examples/go/apigee_environment_type_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_environment_type_test.tf.tmpl index e4fe8512cb35..9bff358f70f0 100644 --- a/mmv1/templates/terraform/examples/go/apigee_environment_type_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_environment_type_test.tf.tmpl @@ -92,7 +92,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git 
a/mmv1/templates/terraform/examples/go/apigee_instance_full.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_instance_full.tf.tmpl index d0e67fcbbf04..068787dd7c7f 100644 --- a/mmv1/templates/terraform/examples/go/apigee_instance_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_instance_full.tf.tmpl @@ -42,7 +42,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/templates/terraform/examples/go/apigee_instance_full_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_instance_full_test.tf.tmpl index 1fc16d7634a4..0a2b9b0ce1f9 100644 --- a/mmv1/templates/terraform/examples/go/apigee_instance_full_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_instance_full_test.tf.tmpl @@ -92,7 +92,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/templates/terraform/examples/go/apigee_nat_address_basic.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_nat_address_basic.tf.tmpl index d1c08ba6006e..c9a6f727ae24 100644 --- a/mmv1/templates/terraform/examples/go/apigee_nat_address_basic.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_nat_address_basic.tf.tmpl @@ -42,7 +42,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = 
"serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full.tf.tmpl index 6d37ffa69dbc..459295a4fa34 100644 --- a/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full.tf.tmpl @@ -42,7 +42,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "org" { diff --git a/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_disable_vpc_peering.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_disable_vpc_peering.tf.tmpl index 42f797ce28c4..f05b6dd2cce2 100644 --- a/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_disable_vpc_peering.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_disable_vpc_peering.tf.tmpl @@ -24,7 +24,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "org" { diff --git a/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_disable_vpc_peering_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_disable_vpc_peering_test.tf.tmpl index 
278c882d4bb9..de620539eef3 100644 --- a/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_disable_vpc_peering_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_disable_vpc_peering_test.tf.tmpl @@ -57,7 +57,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "{{$.PrimaryResourceId}}" { diff --git a/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_test.tf.tmpl index ef1ad76b5f4d..70ef75f38fca 100644 --- a/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_organization_cloud_full_test.tf.tmpl @@ -92,7 +92,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "{{$.PrimaryResourceId}}" { diff --git a/mmv1/templates/terraform/examples/go/apigee_organization_drz_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_organization_drz_test.tf.tmpl index 89ddacc44e5a..280e00517261 100644 --- a/mmv1/templates/terraform/examples/go/apigee_organization_drz_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_organization_drz_test.tf.tmpl @@ -96,7 +96,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = 
"serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "{{$.PrimaryResourceId}}" { diff --git a/mmv1/templates/terraform/examples/go/apigee_organization_retention_test.tf.tmpl b/mmv1/templates/terraform/examples/go/apigee_organization_retention_test.tf.tmpl index 77dd0c21e33f..482896c22f35 100644 --- a/mmv1/templates/terraform/examples/go/apigee_organization_retention_test.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/apigee_organization_retention_test.tf.tmpl @@ -92,7 +92,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "{{$.PrimaryResourceId}}" { diff --git a/mmv1/templates/terraform/examples/go/cloudfunctions2_cmek.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudfunctions2_cmek.tf.tmpl index 1560ed169b8e..1b49f007b727 100644 --- a/mmv1/templates/terraform/examples/go/cloudfunctions2_cmek.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/cloudfunctions2_cmek.tf.tmpl @@ -88,7 +88,7 @@ resource "google_kms_crypto_key_iam_member" "gcf_cmek_keyuser_5" { crypto_key_id = "{{index $.Vars "kms_key_name"}}" role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.ea_sa.email}" + member = google_project_service_identity.ea_sa.member } resource "google_artifact_registry_repository" "encoded-ar-repo" { diff --git a/mmv1/templates/terraform/examples/go/cloudfunctions2_cmek_docs.tf.tmpl b/mmv1/templates/terraform/examples/go/cloudfunctions2_cmek_docs.tf.tmpl index 3adcbd8ddd6c..4186413d2d8a 100644 --- a/mmv1/templates/terraform/examples/go/cloudfunctions2_cmek_docs.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/cloudfunctions2_cmek_docs.tf.tmpl @@ -59,7 +59,7 @@ resource "google_kms_crypto_key_iam_binding" "gcf_cmek_keyuser" { "serviceAccount:service-${data.google_project.project.number}@gcp-sa-artifactregistry.iam.gserviceaccount.com", "serviceAccount:service-${data.google_project.project.number}@gs-project-accounts.iam.gserviceaccount.com", "serviceAccount:service-${data.google_project.project.number}@serverless-robot-prod.iam.gserviceaccount.com", - "serviceAccount:${google_project_service_identity.ea_sa.email}", + google_project_service_identity.ea_sa.member, ] depends_on = [ @@ -110,4 +110,4 @@ resource "google_cloudfunctions2_function" "{{$.PrimaryResourceId}}" { google_kms_crypto_key_iam_binding.gcf_cmek_keyuser ] -} \ No newline at end of file +} diff --git a/mmv1/templates/terraform/examples/go/network_security_tls_inspection_policy_custom.tf.tmpl b/mmv1/templates/terraform/examples/go/network_security_tls_inspection_policy_custom.tf.tmpl index 6eba9b741df1..9a7bd4c8a0e6 100644 --- a/mmv1/templates/terraform/examples/go/network_security_tls_inspection_policy_custom.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/network_security_tls_inspection_policy_custom.tf.tmpl @@ -75,7 +75,7 @@ resource "google_privateca_ca_pool_iam_member" "default" { provider = google-beta ca_pool = google_privateca_ca_pool.default.id role = "roles/privateca.certificateManager" - member = "serviceAccount:${google_project_service_identity.ns_sa.email}" + member = google_project_service_identity.ns_sa.member } resource "google_certificate_manager_trust_config" "default" { diff --git a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl index 3179b700af9f..5007897394c7 100644 --- a/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl +++ 
b/mmv1/templates/terraform/examples/go/privateca_certificate_authority_byo_key.tf.tmpl @@ -6,13 +6,13 @@ resource "google_kms_crypto_key_iam_member" "privateca_sa_keyuser_signerverifier crypto_key_id = "{{index $.Vars "kms_key_name"}}" role = "roles/cloudkms.signerVerifier" - member = "serviceAccount:${google_project_service_identity.privateca_sa.email}" + member = google_project_service_identity.privateca_sa.member } resource "google_kms_crypto_key_iam_member" "privateca_sa_keyuser_viewer" { crypto_key_id = "{{index $.Vars "kms_key_name"}}" role = "roles/viewer" - member = "serviceAccount:${google_project_service_identity.privateca_sa.email}" + member = google_project_service_identity.privateca_sa.member } resource "google_privateca_certificate_authority" "{{$.PrimaryResourceId}}" { diff --git a/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl index 254ce6225bb3..29e4ab2e606a 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_cmek.tf.tmpl @@ -21,7 +21,7 @@ resource "google_kms_crypto_key_iam_member" "crypto_key" { crypto_key_id = google_kms_crypto_key.key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}" + member = google_project_service_identity.gcp_sa_cloud_sql.member } resource "google_sql_database_instance" "mysql_instance_with_cmek" { diff --git a/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl b/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl index b95a271bf5e6..98fc75ef44ac 100644 --- a/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl +++ b/mmv1/templates/terraform/examples/go/sql_instance_iam_condition.tf.tmpl @@ -10,7 +10,7 @@ data "google_iam_policy" "sql_iam_policy" { binding { role = "roles/cloudsql.client" members = [ - 
"serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}", + google_project_service_identity.gcp_sa_cloud_sql.member, ] condition { expression = "resource.name == 'google_sql_database_instance.default.id' && resource.service == 'sqladmin.googleapis.com'" diff --git a/mmv1/templates/terraform/examples/network_security_tls_inspection_policy_custom.tf.erb b/mmv1/templates/terraform/examples/network_security_tls_inspection_policy_custom.tf.erb index 903241efbf86..1d1f785230bc 100644 --- a/mmv1/templates/terraform/examples/network_security_tls_inspection_policy_custom.tf.erb +++ b/mmv1/templates/terraform/examples/network_security_tls_inspection_policy_custom.tf.erb @@ -75,7 +75,7 @@ resource "google_privateca_ca_pool_iam_member" "default" { provider = google-beta ca_pool = google_privateca_ca_pool.default.id role = "roles/privateca.certificateManager" - member = "serviceAccount:${google_project_service_identity.ns_sa.email}" + member = google_project_service_identity.ns_sa.member } resource "google_certificate_manager_trust_config" "default" { diff --git a/mmv1/templates/terraform/examples/privateca_certificate_authority_byo_key.tf.erb b/mmv1/templates/terraform/examples/privateca_certificate_authority_byo_key.tf.erb index c91014c663ed..765d92780bcc 100644 --- a/mmv1/templates/terraform/examples/privateca_certificate_authority_byo_key.tf.erb +++ b/mmv1/templates/terraform/examples/privateca_certificate_authority_byo_key.tf.erb @@ -6,13 +6,13 @@ resource "google_kms_crypto_key_iam_member" "privateca_sa_keyuser_signerverifier crypto_key_id = "<%= ctx[:vars]['kms_key_name'] %>" role = "roles/cloudkms.signerVerifier" - member = "serviceAccount:${google_project_service_identity.privateca_sa.email}" + member = google_project_service_identity.privateca_sa.member } resource "google_kms_crypto_key_iam_member" "privateca_sa_keyuser_viewer" { crypto_key_id = "<%= ctx[:vars]['kms_key_name'] %>" role = "roles/viewer" - member = 
"serviceAccount:${google_project_service_identity.privateca_sa.email}" + member = google_project_service_identity.privateca_sa.member } resource "google_privateca_certificate_authority" "<%= ctx[:primary_resource_id] %>" { diff --git a/mmv1/templates/terraform/examples/sql_instance_cmek.tf.erb b/mmv1/templates/terraform/examples/sql_instance_cmek.tf.erb index 83236aa87fea..a11d4cba34da 100644 --- a/mmv1/templates/terraform/examples/sql_instance_cmek.tf.erb +++ b/mmv1/templates/terraform/examples/sql_instance_cmek.tf.erb @@ -21,7 +21,7 @@ resource "google_kms_crypto_key_iam_member" "crypto_key" { crypto_key_id = google_kms_crypto_key.key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}" + member = google_project_service_identity.gcp_sa_cloud_sql.member } resource "google_sql_database_instance" "mysql_instance_with_cmek" { diff --git a/mmv1/templates/terraform/examples/sql_instance_iam_condition.tf.erb b/mmv1/templates/terraform/examples/sql_instance_iam_condition.tf.erb index dba3989e5014..0165439da3c5 100644 --- a/mmv1/templates/terraform/examples/sql_instance_iam_condition.tf.erb +++ b/mmv1/templates/terraform/examples/sql_instance_iam_condition.tf.erb @@ -10,7 +10,7 @@ data "google_iam_policy" "sql_iam_policy" { binding { role = "roles/cloudsql.client" members = [ - "serviceAccount:${google_project_service_identity.gcp_sa_cloud_sql.email}", + google_project_service_identity.gcp_sa_cloud_sql.member, ] condition { expression = "resource.name == 'google_sql_database_instance.default.id' && resource.service == 'sqladmin.googleapis.com'" diff --git a/mmv1/third_party/terraform/services/apigee/go/resource_apigee_environment_type_test.go.tmpl b/mmv1/third_party/terraform/services/apigee/go/resource_apigee_environment_type_test.go.tmpl index f28376428e1e..b284e49aee61 100644 --- a/mmv1/third_party/terraform/services/apigee/go/resource_apigee_environment_type_test.go.tmpl +++ 
b/mmv1/third_party/terraform/services/apigee/go/resource_apigee_environment_type_test.go.tmpl @@ -142,7 +142,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/third_party/terraform/services/apigee/resource_apigee_environment_type_test.go.erb b/mmv1/third_party/terraform/services/apigee/resource_apigee_environment_type_test.go.erb index 676d1465df77..103b51f86bfe 100644 --- a/mmv1/third_party/terraform/services/apigee/resource_apigee_environment_type_test.go.erb +++ b/mmv1/third_party/terraform/services/apigee/resource_apigee_environment_type_test.go.erb @@ -143,7 +143,7 @@ resource "google_kms_crypto_key_iam_member" "apigee_sa_keyuser" { crypto_key_id = google_kms_crypto_key.apigee_key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.apigee_sa.email}" + member = google_project_service_identity.apigee_sa.member } resource "google_apigee_organization" "apigee_org" { diff --git a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_tls_inspection_policy_test.go.tmpl b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_tls_inspection_policy_test.go.tmpl index 51820210982d..c6224fa2d480 100644 --- a/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_tls_inspection_policy_test.go.tmpl +++ b/mmv1/third_party/terraform/services/networksecurity/go/resource_network_security_tls_inspection_policy_test.go.tmpl @@ -119,7 +119,7 @@ resource "google_privateca_ca_pool_iam_member" "default" { provider = google-beta ca_pool = google_privateca_ca_pool.default.id role = 
"roles/privateca.certificateManager" - member = "serviceAccount:${google_project_service_identity.default.email}" + member = google_project_service_identity.default.member } resource "google_certificate_manager_trust_config" "default" { @@ -317,14 +317,14 @@ resource "google_privateca_ca_pool_iam_member" "default" { provider = google-beta ca_pool = google_privateca_ca_pool.default.id role = "roles/privateca.certificateManager" - member = "serviceAccount:${google_project_service_identity.default.email}" + member = google_project_service_identity.default.member } resource "google_privateca_ca_pool_iam_member" "default_updated" { provider = google-beta ca_pool = google_privateca_ca_pool.default_updated.id role = "roles/privateca.certificateManager" - member = "serviceAccount:${google_project_service_identity.default.email}" + member = google_project_service_identity.default.member } resource "google_certificate_manager_trust_config" "default" { diff --git a/mmv1/third_party/terraform/services/networksecurity/resource_network_security_tls_inspection_policy_test.go.erb b/mmv1/third_party/terraform/services/networksecurity/resource_network_security_tls_inspection_policy_test.go.erb index f59072ffa828..996e26d2beba 100644 --- a/mmv1/third_party/terraform/services/networksecurity/resource_network_security_tls_inspection_policy_test.go.erb +++ b/mmv1/third_party/terraform/services/networksecurity/resource_network_security_tls_inspection_policy_test.go.erb @@ -120,7 +120,7 @@ resource "google_privateca_ca_pool_iam_member" "default" { provider = google-beta ca_pool = google_privateca_ca_pool.default.id role = "roles/privateca.certificateManager" - member = "serviceAccount:${google_project_service_identity.default.email}" + member = google_project_service_identity.default.member } resource "google_certificate_manager_trust_config" "default" { @@ -318,14 +318,14 @@ resource "google_privateca_ca_pool_iam_member" "default" { provider = google-beta ca_pool = 
google_privateca_ca_pool.default.id role = "roles/privateca.certificateManager" - member = "serviceAccount:${google_project_service_identity.default.email}" + member = google_project_service_identity.default.member } resource "google_privateca_ca_pool_iam_member" "default_updated" { provider = google-beta ca_pool = google_privateca_ca_pool.default_updated.id role = "roles/privateca.certificateManager" - member = "serviceAccount:${google_project_service_identity.default.email}" + member = google_project_service_identity.default.member } resource "google_certificate_manager_trust_config" "default" { diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity.go.tmpl b/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity.go.tmpl index e2dfd8624d81..d96fdee6682e 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity.go.tmpl +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity.go.tmpl @@ -46,6 +46,11 @@ func ResourceProjectServiceIdentity() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "member": { + Type: schema.TypeString, + Computed: true, + Description: `The Identity of the Google managed service account in the form 'serviceAccount:{email}'. 
This value is often used to refer to the service account in order to grant IAM permissions.`, + }, }, UseJSONNumber: true, } @@ -113,6 +118,9 @@ func resourceProjectServiceIdentityCreate(d *schema.ResourceData, meta interface if err := d.Set("email", email); err != nil { return fmt.Errorf("Error setting email: %s", err) } + if err := d.Set("member", "serviceAccount:"+email); err != nil { + return fmt.Errorf("Error setting member: %s", err) + } } return nil } diff --git a/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity_test.go.tmpl b/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity_test.go.tmpl index c759bc47aee2..5b99a60ffd9f 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity_test.go.tmpl +++ b/mmv1/third_party/terraform/services/resourcemanager/go/resource_project_service_identity_test.go.tmpl @@ -27,9 +27,19 @@ func TestAccProjectServiceIdentity_basic(t *testing.T) { } return fmt.Errorf("hc_sa service identity email value was %s, expected a valid email", value) }), + // Member field for healthcare service account should be non-empty, start with "serviceAccount:" and contain at least an "@". 
+ resource.TestCheckResourceAttrWith("google_project_service_identity.hc_sa", "member", func(value string) error { + if strings.HasPrefix(value, "serviceAccount:") && strings.Contains(value, "@") { + return nil + } + return fmt.Errorf("hc_sa service identity member value was %s, expected a valid email with prefix serviceAccount:", value) + }), // Email field for logging service identity will be empty for as long as // `gcloud beta services identity create --service=logging.googleapis.com` doesn't return an email address resource.TestCheckNoResourceAttr("google_project_service_identity.log_sa", "email"), + // Member field for logging service identity will be empty for as long as + // `gcloud beta services identity create --service=logging.googleapis.com` doesn't return an email address + resource.TestCheckNoResourceAttr("google_project_service_identity.log_sa", "member"), ), }, }, diff --git a/mmv1/third_party/terraform/services/resourcemanager/resource_project_service_identity.go.erb b/mmv1/third_party/terraform/services/resourcemanager/resource_project_service_identity.go.erb index 351fa637401c..ba4e090ef9d9 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/resource_project_service_identity.go.erb +++ b/mmv1/third_party/terraform/services/resourcemanager/resource_project_service_identity.go.erb @@ -47,6 +47,11 @@ func ResourceProjectServiceIdentity() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "member": { + Type: schema.TypeString, + Computed: true, + Description: `The Identity of the Google managed service account in the form 'serviceAccount:{email}'. 
This value is often used to refer to the service account in order to grant IAM permissions.`, + }, }, UseJSONNumber: true, } @@ -114,6 +119,9 @@ func resourceProjectServiceIdentityCreate(d *schema.ResourceData, meta interface if err := d.Set("email", email); err != nil { return fmt.Errorf("Error setting email: %s", err) } + if err := d.Set("member", "serviceAccount:"+email); err != nil { + return fmt.Errorf("Error setting member: %s", err) + } } return nil } diff --git a/mmv1/third_party/terraform/services/resourcemanager/resource_project_service_identity_test.go.erb b/mmv1/third_party/terraform/services/resourcemanager/resource_project_service_identity_test.go.erb index 2e2308058981..8fb16c27eb5e 100644 --- a/mmv1/third_party/terraform/services/resourcemanager/resource_project_service_identity_test.go.erb +++ b/mmv1/third_party/terraform/services/resourcemanager/resource_project_service_identity_test.go.erb @@ -28,9 +28,19 @@ func TestAccProjectServiceIdentity_basic(t *testing.T) { } return fmt.Errorf("hc_sa service identity email value was %s, expected a valid email", value) }), + // Member field for healthcare service account should be non-empty, start with "serviceAccount:" and contain at least an "@". 
+ resource.TestCheckResourceAttrWith("google_project_service_identity.hc_sa", "member", func(value string) error { + if strings.HasPrefix(value, "serviceAccount:") && strings.Contains(value, "@") { + return nil + } + return fmt.Errorf("hc_sa service identity member value was %s, expected a valid email with prefix serviceAccount:", value) + }), // Email field for logging service identity will be empty for as long as // `gcloud beta services identity create --service=logging.googleapis.com` doesn't return an email address resource.TestCheckNoResourceAttr("google_project_service_identity.log_sa", "email"), + // Member field for logging service identity will be empty for as long as + // `gcloud beta services identity create --service=logging.googleapis.com` doesn't return an email address + resource.TestCheckNoResourceAttr("google_project_service_identity.log_sa", "member"), ), }, }, diff --git a/mmv1/third_party/terraform/services/spanner/go/resource_spanner_database_test.go.tmpl b/mmv1/third_party/terraform/services/spanner/go/resource_spanner_database_test.go.tmpl index 7fc2cecbf66e..083688b518b1 100644 --- a/mmv1/third_party/terraform/services/spanner/go/resource_spanner_database_test.go.tmpl +++ b/mmv1/third_party/terraform/services/spanner/go/resource_spanner_database_test.go.tmpl @@ -590,7 +590,7 @@ resource "google_kms_crypto_key_iam_member" "crypto-key-binding" { crypto_key_id = google_kms_crypto_key.example-key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.ck_sa.email}" + member = google_project_service_identity.ck_sa.member } data "google_project" "project" { @@ -605,4 +605,4 @@ resource "google_project_service_identity" "ck_sa" { `, context) } -{{- end }} \ No newline at end of file +{{- end }} diff --git a/mmv1/third_party/terraform/services/spanner/resource_spanner_database_test.go.erb b/mmv1/third_party/terraform/services/spanner/resource_spanner_database_test.go.erb index 
75f625c37217..6043d461b8c2 100644 --- a/mmv1/third_party/terraform/services/spanner/resource_spanner_database_test.go.erb +++ b/mmv1/third_party/terraform/services/spanner/resource_spanner_database_test.go.erb @@ -591,7 +591,7 @@ resource "google_kms_crypto_key_iam_member" "crypto-key-binding" { crypto_key_id = google_kms_crypto_key.example-key.id role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" - member = "serviceAccount:${google_project_service_identity.ck_sa.email}" + member = google_project_service_identity.ck_sa.member } data "google_project" "project" { @@ -606,4 +606,4 @@ resource "google_project_service_identity" "ck_sa" { `, context) } -<% end -%> \ No newline at end of file +<% end -%> diff --git a/mmv1/third_party/terraform/website/docs/r/project_service_identity.html.markdown b/mmv1/third_party/terraform/website/docs/r/project_service_identity.html.markdown index 327090b410cc..fe9f8b04aebc 100644 --- a/mmv1/third_party/terraform/website/docs/r/project_service_identity.html.markdown +++ b/mmv1/third_party/terraform/website/docs/r/project_service_identity.html.markdown @@ -37,7 +37,7 @@ resource "google_project_service_identity" "hc_sa" { resource "google_project_iam_member" "hc_sa_bq_jobuser" { project = data.google_project.project.project_id role = "roles/bigquery.jobUser" - member = "serviceAccount:${google_project_service_identity.hc_sa.email}" + member = google_project_service_identity.hc_sa.member } ``` @@ -59,6 +59,7 @@ The following arguments are supported: In addition to the arguments listed above, the following computed attributes are exported: * `email` - The email address of the Google managed service account. +* `member` - The Identity of the Google managed service account in the form 'serviceAccount:{email}'. This value is often used to refer to the service account in order to grant IAM permissions. 
## Import From b8a463edcd5a423613ca5aa1adf95f59095c2d73 Mon Sep 17 00:00:00 2001 From: Swamita Gupta <55314843+swamitagupta@users.noreply.github.com> Date: Wed, 10 Jul 2024 23:12:05 +0530 Subject: [PATCH 295/356] Support Delayed Deletion for Vmwareengine Private Cloud (#10764) --- mmv1/products/vmwareengine/PrivateCloud.yaml | 17 ++- .../vmwareengine_private_cloud.go.erb | 33 +++++ .../vmwareengine_private_cloud_type.go.erb | 6 - .../vmware_engine_private_cloud_basic.tf.erb | 1 - .../vmware_engine_private_cloud_full.tf.erb | 3 +- .../post_delete/private_cloud.go.erb | 7 + .../vmwareengine_private_cloud.go.erb | 8 ++ ..._vmwareengine_external_access_rule_test.go | 2 + ...urce_vmwareengine_external_address_test.go | 2 + ...esource_vmwareengine_private_cloud_test.go | 129 +++++++++++------- .../resource_vmwareengine_subnet_test.go | 2 + 11 files changed, 149 insertions(+), 61 deletions(-) create mode 100644 mmv1/templates/terraform/constants/vmwareengine_private_cloud.go.erb delete mode 100644 mmv1/templates/terraform/constants/vmwareengine_private_cloud_type.go.erb create mode 100644 mmv1/templates/terraform/pre_delete/vmwareengine_private_cloud.go.erb diff --git a/mmv1/products/vmwareengine/PrivateCloud.yaml b/mmv1/products/vmwareengine/PrivateCloud.yaml index 3d7e3db1381f..3910ae34e454 100644 --- a/mmv1/products/vmwareengine/PrivateCloud.yaml +++ b/mmv1/products/vmwareengine/PrivateCloud.yaml @@ -15,7 +15,7 @@ name: 'PrivateCloud' base_url: 'projects/{{project}}/locations/{{location}}/privateClouds' self_link: 'projects/{{project}}/locations/{{location}}/privateClouds/{{name}}' -delete_url: 'projects/{{project}}/locations/{{location}}/privateClouds/{{name}}?delay_hours=0' +delete_url: 'projects/{{project}}/locations/{{location}}/privateClouds/{{name}}' create_url: 'projects/{{project}}/locations/{{location}}/privateClouds?privateCloudId={{name}}' update_verb: :PATCH references: !ruby/object:Api::Resource::ReferenceLinks @@ -45,11 +45,12 @@ async: 
!ruby/object:Api::OpAsync import_format: ["projects/{{project}}/locations/{{location}}/privateClouds/{{name}}"] autogen_async: true custom_code: !ruby/object:Provider::Terraform::CustomCode - post_delete: "templates/terraform/post_delete/private_cloud.go.erb" + constants: templates/terraform/constants/vmwareengine_private_cloud.go.erb decoder: "templates/terraform/decoders/private_cloud.go.erb" + pre_delete: templates/terraform/pre_delete/vmwareengine_private_cloud.go.erb + post_delete: "templates/terraform/post_delete/private_cloud.go.erb" post_update: "templates/terraform/post_update/private_cloud.go.erb" update_encoder: "templates/terraform/update_encoder/private_cloud.go.erb" - constants: templates/terraform/constants/vmwareengine_private_cloud_type.go.erb examples: - !ruby/object:Provider::Terraform::Examples name: "vmware_engine_private_cloud_basic" @@ -90,6 +91,16 @@ parameters: description: | The ID of the PrivateCloud. +virtual_fields: + - !ruby/object:Api::Type::Integer + name: "deletion_delay_hours" + description: | + The number of hours to delay this request. You can set this value to an hour between 0 to 8, where setting it to 0 starts the deletion request immediately. If no value is set, a default value is set at the API Level. + - !ruby/object:Api::Type::Boolean + name: "send_deletion_delay_hours_if_zero" + description: | + While set true, deletion_delay_hours value will be sent in the request even for zero value of the field. This field is only useful for setting 0 value to the deletion_delay_hours field. It can be used both alone and together with deletion_delay_hours. 
+ properties: - !ruby/object:Api::Type::String name: 'description' diff --git a/mmv1/templates/terraform/constants/vmwareengine_private_cloud.go.erb b/mmv1/templates/terraform/constants/vmwareengine_private_cloud.go.erb new file mode 100644 index 000000000000..b2941d68a57c --- /dev/null +++ b/mmv1/templates/terraform/constants/vmwareengine_private_cloud.go.erb @@ -0,0 +1,33 @@ +func vmwareenginePrivateCloudStandardTypeDiffSuppressFunc(_, old, new string, d *schema.ResourceData) bool { + if (old == "STANDARD" && new == "") || (old == "" && new == "STANDARD") { + return true + } + if (isMultiNodePrivateCloud(d) && old == "TIME_LIMITED" && new == "STANDARD") { + log.Printf("[DEBUG] Multinode Private Cloud found, facilitating TYPE change to STANDARD") + return true + } + return false +} + +func isMultiNodePrivateCloud(d *schema.ResourceData) bool { + nodeConfigMap := d.Get("management_cluster.0.node_type_configs").(*schema.Set).List() + totalNodeCount := 0 + for _, nodeConfig := range nodeConfigMap { + configMap, ok := nodeConfig.(map[string]interface{}) + if !ok { + log.Printf("[DEBUG] Invalid node configuration format for private cloud.") + continue + } + nodeCount, ok := configMap["node_count"].(int) + if !ok { + log.Printf("[DEBUG] Invalid node_count format for private cloud.") + continue + } + totalNodeCount += nodeCount + } + log.Printf("[DEBUG] The node count of the private cloud is found to be %v nodes.", totalNodeCount) + if totalNodeCount > 2 { + return true + } + return false +} diff --git a/mmv1/templates/terraform/constants/vmwareengine_private_cloud_type.go.erb b/mmv1/templates/terraform/constants/vmwareengine_private_cloud_type.go.erb deleted file mode 100644 index 7ad51797adc9..000000000000 --- a/mmv1/templates/terraform/constants/vmwareengine_private_cloud_type.go.erb +++ /dev/null @@ -1,6 +0,0 @@ -func vmwareenginePrivateCloudStandardTypeDiffSuppressFunc(_, old, new string, _ *schema.ResourceData) bool { - if (old == "STANDARD" && new == "") || (old 
== "" && new == "STANDARD") { - return true - } - return false -} \ No newline at end of file diff --git a/mmv1/templates/terraform/examples/vmware_engine_private_cloud_basic.tf.erb b/mmv1/templates/terraform/examples/vmware_engine_private_cloud_basic.tf.erb index 03e7508c7bb3..697f1d713f2d 100644 --- a/mmv1/templates/terraform/examples/vmware_engine_private_cloud_basic.tf.erb +++ b/mmv1/templates/terraform/examples/vmware_engine_private_cloud_basic.tf.erb @@ -6,7 +6,6 @@ resource "google_vmwareengine_private_cloud" "<%= ctx[:primary_resource_id] %>" management_cidr = "192.168.30.0/24" vmware_engine_network = google_vmwareengine_network.pc-nw.id } - management_cluster { cluster_id = "<%= ctx[:vars]['management_cluster_id'] %>" node_type_configs { diff --git a/mmv1/templates/terraform/examples/vmware_engine_private_cloud_full.tf.erb b/mmv1/templates/terraform/examples/vmware_engine_private_cloud_full.tf.erb index ee12d6bfdd24..20ada7f3f902 100644 --- a/mmv1/templates/terraform/examples/vmware_engine_private_cloud_full.tf.erb +++ b/mmv1/templates/terraform/examples/vmware_engine_private_cloud_full.tf.erb @@ -7,7 +7,6 @@ resource "google_vmwareengine_private_cloud" "<%= ctx[:primary_resource_id] %>" management_cidr = "192.168.30.0/24" vmware_engine_network = google_vmwareengine_network.pc-nw.id } - management_cluster { cluster_id = "<%= ctx[:vars]['management_cluster_id'] %>" node_type_configs { @@ -16,6 +15,8 @@ resource "google_vmwareengine_private_cloud" "<%= ctx[:primary_resource_id] %>" custom_core_count = 32 } } + deletion_delay_hours = 0 + send_deletion_delay_hours_if_zero = true } resource "google_vmwareengine_network" "pc-nw" { diff --git a/mmv1/templates/terraform/post_delete/private_cloud.go.erb b/mmv1/templates/terraform/post_delete/private_cloud.go.erb index 669377a2f9ab..b7bad0c11c37 100644 --- a/mmv1/templates/terraform/post_delete/private_cloud.go.erb +++ b/mmv1/templates/terraform/post_delete/private_cloud.go.erb @@ -29,6 +29,13 @@ 
privateCloudPollRead := func(d *schema.ResourceData, meta interface{}) transport if err != nil { return res, err } + // if resource exists but is marked for deletion + log.Printf("[DEBUG] Fetching state of the private cloud.") + v, ok := res["state"] + if ok && v.(string) == "DELETED" { + log.Printf("[DEBUG] The Private cloud has been successfully marked for delayed deletion.") + return nil, nil + } return res, nil } } diff --git a/mmv1/templates/terraform/pre_delete/vmwareengine_private_cloud.go.erb b/mmv1/templates/terraform/pre_delete/vmwareengine_private_cloud.go.erb new file mode 100644 index 000000000000..459238f69f00 --- /dev/null +++ b/mmv1/templates/terraform/pre_delete/vmwareengine_private_cloud.go.erb @@ -0,0 +1,8 @@ +// Delay deletion of the Private Cloud if delationDelayHours value is set +delationDelayHours := d.Get("deletion_delay_hours").(int) +if delationDelayHours > 0 || (delationDelayHours == 0 && d.Get("send_deletion_delay_hours_if_zero").(bool) == true) { + log.Printf("[DEBUG] Triggering delete of the Private Cloud with a delay of %v hours.\n", delationDelayHours) + url = url + "?delay_hours=" + fmt.Sprintf("%v", delationDelayHours) +} else { + log.Printf("[DEBUG] No deletion delay provided, triggering DELETE API without setting delay hours.\n") +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_access_rule_test.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_access_rule_test.go index a7af23b2a88c..f28b2970c404 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_access_rule_test.go +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_access_rule_test.go @@ -9,6 +9,8 @@ import ( ) func TestAccVmwareengineExternalAccessRule_vmwareEngineExternalAccessRuleUpdate(t *testing.T) { + // Temporarily skipping so that this test does not run and consume resources during PR 
pushes. It is bound to fail and is being fixed by PR #10992 + acctest.SkipIfVcr(t) t.Parallel() context := map[string]interface{}{ diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_address_test.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_address_test.go index 3d81180a0a93..0a7681e81649 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_address_test.go +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_external_address_test.go @@ -14,6 +14,8 @@ import ( ) func TestAccVmwareengineExternalAddress_vmwareEngineExternalAddressUpdate(t *testing.T) { + // Temporarily skipping so that this test does not run and consume resources during PR pushes. It is bound to fail and is being fixed by PR #10992 + acctest.SkipIfVcr(t) t.Parallel() context := map[string]interface{}{ diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go index 162cad8d35c5..4423b9106d93 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_private_cloud_test.go @@ -18,7 +18,7 @@ func TestAccVmwareenginePrivateCloud_vmwareEnginePrivateCloudUpdate(t *testing.T t.Parallel() context := map[string]interface{}{ - "region": "southamerica-west1", + "region": "me-west1", "random_suffix": acctest.RandString(t, 10), "org_id": envvar.GetTestOrgFromEnv(t), "billing_account": envvar.GetTestBillingAccountFromEnv(t), @@ -33,82 +33,66 @@ func TestAccVmwareenginePrivateCloud_vmwareEnginePrivateCloudUpdate(t *testing.T CheckDestroy: testAccCheckVmwareenginePrivateCloudDestroyProducer(t), Steps: []resource.TestStep{ { - Config: testPrivateCloudUpdateConfig(context, "description1", 1), + Config: 
testPrivateCloudCreateConfig(context), Check: resource.ComposeTestCheckFunc( - acctest.CheckDataSourceStateMatchesResourceStateWithIgnores("data.google_vmwareengine_private_cloud.ds", "google_vmwareengine_private_cloud.vmw-engine-pc", map[string]struct{}{"type": {}}), + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( + "data.google_vmwareengine_private_cloud.ds", + "google_vmwareengine_private_cloud.vmw-engine-pc", + map[string]struct{}{ + "type": {}, + "deletion_delay_hours": {}, + "send_deletion_delay_hours_if_zero": {}, + }), testAccCheckGoogleVmwareengineNsxCredentialsMeta("data.google_vmwareengine_nsx_credentials.nsx-ds"), testAccCheckGoogleVmwareengineVcenterCredentialsMeta("data.google_vmwareengine_vcenter_credentials.vcenter-ds"), ), }, + { ResourceName: "google_vmwareengine_private_cloud.vmw-engine-pc", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "update_time", "type"}, - }, - { - Config: testPrivateCloudUpdateConfig(context, "description2", 4), // Expand PC - }, - { - ResourceName: "google_vmwareengine_private_cloud.vmw-engine-pc", - ImportState: true, - ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", "update_time", "type"}, + ImportStateVerifyIgnore: []string{"location", "name", "update_time", "type", "deletion_parameters"}, }, { - Config: testPrivateCloudUpdateConfig(context, "description2", 3), // Shrink PC + Config: testPrivateCloudUpdateConfig(context), + Check: resource.ComposeTestCheckFunc( + acctest.CheckDataSourceStateMatchesResourceStateWithIgnores( + "data.google_vmwareengine_private_cloud.ds", + "google_vmwareengine_private_cloud.vmw-engine-pc", + map[string]struct{}{ + "type": {}, + "deletion_delay_hours": {}, + "send_deletion_delay_hours_if_zero": {}, + }), + ), }, + { ResourceName: "google_vmwareengine_private_cloud.vmw-engine-pc", ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"location", "name", 
"update_time", "type"}, + ImportStateVerifyIgnore: []string{"location", "name", "update_time", "type", "deletion_parameters"}, }, }, }) } -func testPrivateCloudUpdateConfig(context map[string]interface{}, description string, nodeCount int) string { - context["node_count"] = nodeCount - context["description"] = description - +func testPrivateCloudCreateConfig(context map[string]interface{}) string { return acctest.Nprintf(` -resource "google_project" "project" { - project_id = "tf-test%{random_suffix}" - name = "tf-test%{random_suffix}" - org_id = "%{org_id}" - billing_account = "%{billing_account}" -} - -resource "google_project_service" "vmwareengine" { - project = google_project.project.project_id - service = "vmwareengine.googleapis.com" -} - -resource "time_sleep" "sleep" { - create_duration = "1m" - depends_on = [ - google_project_service.vmwareengine, - ] -} - resource "google_vmwareengine_network" "default-nw" { - project = google_project.project.project_id name = "tf-test-pc-nw-%{random_suffix}" location = "global" type = "STANDARD" description = "PC network description." 
- depends_on = [ - time_sleep.sleep # Sleep allows permissions in the new project to propagate - ] } resource "google_vmwareengine_private_cloud" "vmw-engine-pc" { - project = google_project.project.project_id - location = "%{region}-a" + location = "%{region}-b" name = "tf-test-sample-pc%{random_suffix}" - description = "%{description}" + description = "test description" type = "TIME_LIMITED" + deletion_delay_hours = 1 network_config { management_cidr = "192.168.30.0/24" vmware_engine_network = google_vmwareengine_network.default-nw.id @@ -117,15 +101,14 @@ resource "google_vmwareengine_private_cloud" "vmw-engine-pc" { cluster_id = "tf-test-sample-mgmt-cluster-custom-core-count%{random_suffix}" node_type_configs { node_type_id = "standard-72" - node_count = "%{node_count}" + node_count = 1 custom_core_count = 32 } } } data "google_vmwareengine_private_cloud" "ds" { - project = google_project.project.project_id - location = "%{region}-a" + location = "%{region}-b" name = "tf-test-sample-pc%{random_suffix}" depends_on = [ google_vmwareengine_private_cloud.vmw-engine-pc, @@ -144,6 +127,46 @@ data "google_vmwareengine_vcenter_credentials" "vcenter-ds" { `, context) } +func testPrivateCloudUpdateConfig(context map[string]interface{}) string { + return acctest.Nprintf(` +resource "google_vmwareengine_network" "default-nw" { + name = "tf-test-pc-nw-%{random_suffix}" + location = "global" + type = "STANDARD" + description = "PC network description." 
+} + +resource "google_vmwareengine_private_cloud" "vmw-engine-pc" { + location = "%{region}-b" + name = "tf-test-sample-pc%{random_suffix}" + description = "updated description" + type = "STANDARD" + deletion_delay_hours = 0 + send_deletion_delay_hours_if_zero = true + network_config { + management_cidr = "192.168.30.0/24" + vmware_engine_network = google_vmwareengine_network.default-nw.id + } + management_cluster { + cluster_id = "tf-test-sample-mgmt-cluster-custom-core-count%{random_suffix}" + node_type_configs { + node_type_id = "standard-72" + node_count = 3 + custom_core_count = 32 + } + } +} + +data "google_vmwareengine_private_cloud" "ds" { + location = "%{region}-b" + name = "tf-test-sample-pc%{random_suffix}" + depends_on = [ + google_vmwareengine_private_cloud.vmw-engine-pc, + ] +} +`, context) +} + func testAccCheckGoogleVmwareengineNsxCredentialsMeta(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -198,7 +221,7 @@ func testAccCheckVmwareenginePrivateCloudDestroyProducer(t *testing.T) func(s *t if config.BillingProject != "" { billingProject = config.BillingProject } - _, err = transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ + res, err := transport_tpg.SendRequest(transport_tpg.SendRequestOptions{ Config: config, Method: "GET", Project: billingProject, @@ -206,7 +229,13 @@ func testAccCheckVmwareenginePrivateCloudDestroyProducer(t *testing.T) func(s *t UserAgent: config.UserAgent, }) if err == nil { - return fmt.Errorf("VmwareenginePrivateCloud still exists at %s", url) + pcState, ok := res["state"] + if !ok { + return fmt.Errorf("Unable to fetch state for existing VmwareenginePrivateCloud %s", url) + } + if pcState.(string) != "DELETED" { + return fmt.Errorf("VmwareenginePrivateCloud still exists at %s", url) + } } } return nil diff --git a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_subnet_test.go 
b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_subnet_test.go index 1ad5612d5a28..dfe7c476e6cd 100644 --- a/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_subnet_test.go +++ b/mmv1/third_party/terraform/services/vmwareengine/resource_vmwareengine_subnet_test.go @@ -8,6 +8,8 @@ import ( ) func TestAccVmwareengineSubnet_vmwareEngineUserDefinedSubnetUpdate(t *testing.T) { + // Temporarily skipping so that this test does not run and consume resources during PR pushes. It is bound to fail and is being fixed by PR #10992 + acctest.SkipIfVcr(t) t.Parallel() context := map[string]interface{}{ From 2f6cbf5265638f0c185d61b9a3530d441a118977 Mon Sep 17 00:00:00 2001 From: Sarah French <15078782+SarahFrench@users.noreply.github.com> Date: Wed, 10 Jul 2024 18:43:10 +0100 Subject: [PATCH 296/356] TeamCity v6.0.0 feature branch testing (#11104) --- .../components/builds/build_triggers.kt | 16 ++- .../FEATURE-BRANCH-major-release-6.0.0.kt | 103 ++++++++++++++++++ .../projects/google_beta_subproject.kt | 11 +- .../projects/google_ga_subproject.kt | 11 +- .../projects/project_sweeper_project.kt | 2 +- .../components/projects/reused/mm_upstream.kt | 8 +- .../projects/reused/nightly_tests.kt | 11 +- .../components/projects/root_project.kt | 5 + .../.teamcity/components/unique_id.kt | 4 +- .../terraform/.teamcity/tests/sweepers.kt | 4 +- 10 files changed, 145 insertions(+), 30 deletions(-) create mode 100644 mmv1/third_party/terraform/.teamcity/components/projects/feature_branches/FEATURE-BRANCH-major-release-6.0.0.kt diff --git a/mmv1/third_party/terraform/.teamcity/components/builds/build_triggers.kt b/mmv1/third_party/terraform/.teamcity/components/builds/build_triggers.kt index 14628a112b21..2880d5015abd 100644 --- a/mmv1/third_party/terraform/.teamcity/components/builds/build_triggers.kt +++ b/mmv1/third_party/terraform/.teamcity/components/builds/build_triggers.kt @@ -18,10 +18,20 @@ import 
jetbrains.buildServer.configs.kotlin.triggers.schedule class NightlyTriggerConfiguration( val branch: String = DefaultBranchName, val nightlyTestsEnabled: Boolean = true, - val startHour: Int = DefaultStartHour, - val daysOfWeek: String = DefaultDaysOfWeek, + var startHour: Int = DefaultStartHour, + var daysOfWeek: String = DefaultDaysOfWeek, val daysOfMonth: String = DefaultDaysOfMonth -) +){ + fun clone(): NightlyTriggerConfiguration{ + return NightlyTriggerConfiguration( + this.branch, + this.nightlyTestsEnabled, + this.startHour, + this.daysOfWeek, + this.daysOfMonth + ) + } +} fun Triggers.runNightly(config: NightlyTriggerConfiguration) { diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/feature_branches/FEATURE-BRANCH-major-release-6.0.0.kt b/mmv1/third_party/terraform/.teamcity/components/projects/feature_branches/FEATURE-BRANCH-major-release-6.0.0.kt new file mode 100644 index 000000000000..f34904d69718 --- /dev/null +++ b/mmv1/third_party/terraform/.teamcity/components/projects/feature_branches/FEATURE-BRANCH-major-release-6.0.0.kt @@ -0,0 +1,103 @@ +/* + * Copyright (c) HashiCorp, Inc. 
+ * SPDX-License-Identifier: MPL-2.0 + */ + +// This file is controlled by MMv1, any changes made here will be overwritten + +package projects.feature_branches + +import ProviderNameBeta +import ProviderNameGa +import builds.* +import jetbrains.buildServer.configs.kotlin.Project +import jetbrains.buildServer.configs.kotlin.vcs.GitVcsRoot +import projects.reused.nightlyTests +import replaceCharsId + +const val branchName = "FEATURE-BRANCH-major-release-6.0.0" + +// VCS Roots specifically for pulling code from the feature branches in the downstream repos + +object HashicorpVCSRootGa_featureBranchMajorRelease600: GitVcsRoot({ + name = "VCS root for the hashicorp/terraform-provider-${ProviderNameGa} repo @ refs/heads/${branchName}" + url = "https://github.com/hashicorp/terraform-provider-${ProviderNameGa}" + branch = "refs/heads/${branchName}" + branchSpec = """ + +:FEATURE-BRANCH-major-release-6* + """.trimIndent() +}) + +object HashicorpVCSRootBeta_featureBranchMajorRelease600: GitVcsRoot({ + name = "VCS root for the hashicorp/terraform-provider-${ProviderNameBeta} repo @ refs/heads/${branchName}" + url = "https://github.com/hashicorp/terraform-provider-${ProviderNameBeta}" + branch = "refs/heads/${branchName}" + branchSpec = """ + +:FEATURE-BRANCH-major-release-6* + """.trimIndent() +}) + +fun featureBranchMajorRelease600_Project(allConfig: AllContextParameters): Project { + + val projectId = replaceCharsId(branchName) + val gaProjectId = replaceCharsId(projectId + "_GA") + val betaProjectId= replaceCharsId(projectId + "_BETA") + + // Get config for using the GA and Beta identities + val gaConfig = getGaAcceptanceTestConfig(allConfig) + val betaConfig = getBetaAcceptanceTestConfig(allConfig) + + return Project{ + id(projectId) + name = "6.0.0 Major Release Testing" + description = "Subproject for testing feature branch $branchName" + + // Register feature branch-specific VCS roots in the project + vcsRoot(HashicorpVCSRootGa_featureBranchMajorRelease600) + 
vcsRoot(HashicorpVCSRootBeta_featureBranchMajorRelease600) + + // Nested Nightly Test project that uses hashicorp/terraform-provider-google + subProject( + Project{ + id(gaProjectId) + name = "Google" + subProject( + nightlyTests( + gaProjectId, + ProviderNameGa, + HashicorpVCSRootGa_featureBranchMajorRelease600, + gaConfig, + NightlyTriggerConfiguration( + branch = branchName, // Make triggered builds use the feature branch + daysOfWeek = "5" // Thursday for GA, TeamCity numbers days Sun=1...Sat=7 + ), + ) + ) + } + ) + + // Nested Nightly Test project that uses hashicorp/terraform-provider-google-beta + subProject( + Project { + id(betaProjectId) + name = "Google Beta" + subProject( + nightlyTests( + betaProjectId, + ProviderNameBeta, + HashicorpVCSRootBeta_featureBranchMajorRelease600, + betaConfig, + NightlyTriggerConfiguration( + branch = branchName, // Make triggered builds use the feature branch + daysOfWeek="6" // Friday for Beta, TeamCity numbers days Sun=1...Sat=7 + ), + ) + ) + } + ) + + params { + readOnlySettings() + } + } +} \ No newline at end of file diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt b/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt index 6b6e694f1866..ca9c45d7c8c7 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/google_beta_subproject.kt @@ -8,10 +8,7 @@ package projects import ProviderNameBeta -import builds.AllContextParameters -import builds.getBetaAcceptanceTestConfig -import builds.getVcrAcceptanceTestConfig -import builds.readOnlySettings +import builds.* import jetbrains.buildServer.configs.kotlin.Project import projects.reused.mmUpstream import projects.reused.nightlyTests @@ -23,7 +20,7 @@ import vcs_roots.ModularMagicianVCSRootBeta // googleSubProjectBeta returns a subproject that is used for testing terraform-provider-google-beta 
(Beta) fun googleSubProjectBeta(allConfig: AllContextParameters): Project { - var betaId = replaceCharsId("GOOGLE_BETA") + val betaId = replaceCharsId("GOOGLE_BETA") // Get config for using the Beta and VCR identities val betaConfig = getBetaAcceptanceTestConfig(allConfig) @@ -35,10 +32,10 @@ fun googleSubProjectBeta(allConfig: AllContextParameters): Project { description = "Subproject containing builds for testing the Beta version of the Google provider" // Nightly Test project that uses hashicorp/terraform-provider-google-beta - subProject(nightlyTests(betaId, ProviderNameBeta, HashiCorpVCSRootBeta, betaConfig)) + subProject(nightlyTests(betaId, ProviderNameBeta, HashiCorpVCSRootBeta, betaConfig, NightlyTriggerConfiguration(daysOfWeek="1-5,7"))) // All nights except Friday (6) for Beta; feature branch testing happens on Fridays and TeamCity numbers days Sun=1...Sat=7 // MM Upstream project that uses modular-magician/terraform-provider-google-beta - subProject(mmUpstream(betaId, ProviderNameBeta, ModularMagicianVCSRootBeta, HashiCorpVCSRootBeta, vcrConfig)) + subProject(mmUpstream(betaId, ProviderNameBeta, ModularMagicianVCSRootBeta, HashiCorpVCSRootBeta, vcrConfig, NightlyTriggerConfiguration())) // VCR recording project that allows VCR recordings to be made using hashicorp/terraform-provider-google-beta OR modular-magician/terraform-provider-google-beta // This is only present for the Beta provider, as only TPGB VCR recordings are used. 
diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt b/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt index 0f0605766ea6..9e7e2caa2844 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/google_ga_subproject.kt @@ -8,10 +8,7 @@ package projects import ProviderNameGa -import builds.AllContextParameters -import builds.getGaAcceptanceTestConfig -import builds.getVcrAcceptanceTestConfig -import builds.readOnlySettings +import builds.* import jetbrains.buildServer.configs.kotlin.Project import projects.reused.mmUpstream import projects.reused.nightlyTests @@ -22,7 +19,7 @@ import vcs_roots.ModularMagicianVCSRootGa // googleSubProjectGa returns a subproject that is used for testing terraform-provider-google (GA) fun googleSubProjectGa(allConfig: AllContextParameters): Project { - var gaId = replaceCharsId("GOOGLE") + val gaId = replaceCharsId("GOOGLE") // Get config for using the GA and VCR identities val gaConfig = getGaAcceptanceTestConfig(allConfig) @@ -34,10 +31,10 @@ fun googleSubProjectGa(allConfig: AllContextParameters): Project { description = "Subproject containing builds for testing the GA version of the Google provider" // Nightly Test project that uses hashicorp/terraform-provider-google - subProject(nightlyTests(gaId, ProviderNameGa, HashiCorpVCSRootGa, gaConfig)) + subProject(nightlyTests(gaId, ProviderNameGa, HashiCorpVCSRootGa, gaConfig, NightlyTriggerConfiguration(daysOfWeek="1-4,6-7"))) // All nights except Thursday (5) for GA; feature branch testing happens on Thursdays and TeamCity numbers days Sun=1...Sat=7 // MM Upstream project that uses modular-magician/terraform-provider-google - subProject(mmUpstream(gaId, ProviderNameGa, ModularMagicianVCSRootGa, HashiCorpVCSRootGa, vcrConfig)) + subProject(mmUpstream(gaId, ProviderNameGa, ModularMagicianVCSRootGa, HashiCorpVCSRootGa, 
vcrConfig, NightlyTriggerConfiguration())) params { readOnlySettings() diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/project_sweeper_project.kt b/mmv1/third_party/terraform/.teamcity/components/projects/project_sweeper_project.kt index 2a484f583f9b..1d54e841d015 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/project_sweeper_project.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/project_sweeper_project.kt @@ -23,7 +23,7 @@ fun projectSweeperSubProject(allConfig: AllContextParameters): Project { val projectId = replaceCharsId("PROJECT_SWEEPER") - // Get config for using the GA identity (arbitrary choice as sweeper isn't confined by GA/Beta etc) + // Get config for using the GA identity (arbitrary choice as sweeper isn't confined by GA/Beta etc.) val gaConfig = getGaAcceptanceTestConfig(allConfig) // List of ALL shared resources; avoid clashing with any other running build diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt b/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt index 83efcf91f109..288df583bf25 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/reused/mm_upstream.kt @@ -25,7 +25,7 @@ import jetbrains.buildServer.configs.kotlin.Project import jetbrains.buildServer.configs.kotlin.vcs.GitVcsRoot import replaceCharsId -fun mmUpstream(parentProject: String, providerName: String, vcsRoot: GitVcsRoot, cronSweeperVcsRoot: GitVcsRoot, config: AccTestConfiguration): Project { +fun mmUpstream(parentProject: String, providerName: String, vcsRoot: GitVcsRoot, cronSweeperVcsRoot: GitVcsRoot, config: AccTestConfiguration, cron: NightlyTriggerConfiguration): Project { // Create unique ID for the dynamically-created project var projectId = "${parentProject}_${MMUpstreamProjectId}" @@ -45,11 +45,13 @@ fun mmUpstream(parentProject: String, 
providerName: String, vcsRoot: GitVcsRoot, ProviderNameBeta -> sweepersList = SweepersListBeta else -> throw Exception("Provider name not supplied when generating a nightly test subproject") } + + // This build is for manually-initiated runs of sweepers, to test changes to sweepers from the upstream repo val serviceSweeperManualConfig = BuildConfigurationForServiceSweeper(providerName, ServiceSweeperManualName, sweepersList, projectId, vcsRoot, sharedResources, config) + // This build runs on a schedule to do actual sweeping of the VCR project, using the downstream repo's code val serviceSweeperCronConfig = BuildConfigurationForServiceSweeper(providerName, ServiceSweeperCronName, sweepersList, projectId, cronSweeperVcsRoot, sharedResources, config) - val trigger = NightlyTriggerConfiguration(startHour=12) - serviceSweeperCronConfig.addTrigger(trigger) // Only the sweeper is on a schedule in this project + serviceSweeperCronConfig.addTrigger(cron) return Project { id(projectId) diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/reused/nightly_tests.kt b/mmv1/third_party/terraform/.teamcity/components/projects/reused/nightly_tests.kt index 75fd33a9d529..90fa2d49947d 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/reused/nightly_tests.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/reused/nightly_tests.kt @@ -20,7 +20,7 @@ import jetbrains.buildServer.configs.kotlin.Project import jetbrains.buildServer.configs.kotlin.vcs.GitVcsRoot import replaceCharsId -fun nightlyTests(parentProject:String, providerName: String, vcsRoot: GitVcsRoot, config: AccTestConfiguration): Project { +fun nightlyTests(parentProject:String, providerName: String, vcsRoot: GitVcsRoot, config: AccTestConfiguration, cron: NightlyTriggerConfiguration): Project { // Create unique ID for the dynamically-created project var projectId = "${parentProject}_${NightlyTestsProjectId}" @@ -36,11 +36,11 @@ fun nightlyTests(parentProject:String, 
providerName: String, vcsRoot: GitVcsRoot } // Create build configs to run acceptance tests for each package defined in packages.kt and services.kt files + // and add cron trigger to them all val allPackages = getAllPackageInProviderVersion(providerName) val packageBuildConfigs = BuildConfigurationsForPackages(allPackages, providerName, projectId, vcsRoot, sharedResources, config) - val accTestTrigger = NightlyTriggerConfiguration() packageBuildConfigs.forEach { buildConfiguration -> - buildConfiguration.addTrigger(accTestTrigger) + buildConfiguration.addTrigger(cron) } // Create build config for sweeping the nightly test project @@ -51,8 +51,9 @@ fun nightlyTests(parentProject:String, providerName: String, vcsRoot: GitVcsRoot else -> throw Exception("Provider name not supplied when generating a nightly test subproject") } val serviceSweeperConfig = BuildConfigurationForServiceSweeper(providerName, ServiceSweeperName, sweepersList, projectId, vcsRoot, sharedResources, config) - val sweeperTrigger = NightlyTriggerConfiguration(startHour=11) // Override hour - serviceSweeperConfig.addTrigger(sweeperTrigger) + val sweeperCron = cron.clone() + sweeperCron.startHour += 5 // Ensure triggered after the package test builds are triggered + serviceSweeperConfig.addTrigger(sweeperCron) return Project { id(projectId) diff --git a/mmv1/third_party/terraform/.teamcity/components/projects/root_project.kt b/mmv1/third_party/terraform/.teamcity/components/projects/root_project.kt index 7130a9c35ea8..208cfc9617da 100644 --- a/mmv1/third_party/terraform/.teamcity/components/projects/root_project.kt +++ b/mmv1/third_party/terraform/.teamcity/components/projects/root_project.kt @@ -18,6 +18,8 @@ import generated.ServicesListBeta import generated.ServicesListGa import jetbrains.buildServer.configs.kotlin.Project import jetbrains.buildServer.configs.kotlin.sharedResource +import projects.feature_branches.featureBranchMajorRelease600_Project + // googleCloudRootProject returns a root 
project that contains a subprojects for the GA and Beta version of the // Google provider. There are also resources to help manage the test projects used for acceptance tests. @@ -62,6 +64,9 @@ fun googleCloudRootProject(allConfig: AllContextParameters): Project { subProject(googleSubProjectBeta(allConfig)) subProject(projectSweeperSubProject(allConfig)) + // Feature branch testing + subProject(featureBranchMajorRelease600_Project(allConfig)) // FEATURE-BRANCH-major-release-6.0.0 + params { readOnlySettings() } diff --git a/mmv1/third_party/terraform/.teamcity/components/unique_id.kt b/mmv1/third_party/terraform/.teamcity/components/unique_id.kt index b9a56d762186..ec9e1890d7b5 100644 --- a/mmv1/third_party/terraform/.teamcity/components/unique_id.kt +++ b/mmv1/third_party/terraform/.teamcity/components/unique_id.kt @@ -6,8 +6,8 @@ // This file is maintained in the GoogleCloudPlatform/magic-modules repository and copied into the downstream provider repositories. Any changes to this file in the downstream will be overwritten. 
fun replaceCharsId(id: String): String{ - var newId = id.replace("-", "") - newId = newId.replace(" ", "_") + // ID should start with a latin letter and contain only latin letters, digits and underscores + var newId = id.replace("-", "").replace(" ", "_").replace(".", "_") newId = newId.uppercase() return newId diff --git a/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt b/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt index 727675e09731..f6babaa4807a 100644 --- a/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt +++ b/mmv1/third_party/terraform/.teamcity/tests/sweepers.kt @@ -151,7 +151,7 @@ class SweeperTests { val cronBeta = stBeta.schedulingPolicy as ScheduleTrigger.SchedulingPolicy.Cron val stProject = projectSweeper.triggers.items[0] as ScheduleTrigger val cronProject = stProject.schedulingPolicy as ScheduleTrigger.SchedulingPolicy.Cron - assertTrue("Service sweeper for the GA Nightly Test project should be triggered at an earlier hour than the project sweeper", cronGa.hours.toString() < cronProject.hours.toString()) // Values are strings like "11", "12" - assertTrue("Service sweeper for the Beta Nightly Test project should be triggered at an earlier hour than the project sweeper", cronBeta.hours.toString() < cronProject.hours.toString() ) + assertTrue("Service sweeper for the GA Nightly Test project should be triggered at an earlier hour than the project sweeper", cronGa.hours.toString().toInt() < cronProject.hours.toString().toInt()) // Converting nullable strings to ints + assertTrue("Service sweeper for the Beta Nightly Test project should be triggered at an earlier hour than the project sweeper", cronBeta.hours.toString().toInt() < cronProject.hours.toString().toInt() ) } } From 07fdf0f0a750f542ba8efff38fb1bae1401965ff Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Wed, 10 Jul 2024 13:06:33 -0500 Subject: [PATCH 297/356] Update membership.go (#11133) --- .ci/magician/github/membership.go | 5 +++++ 1 file changed, 5 
insertions(+) diff --git a/.ci/magician/github/membership.go b/.ci/magician/github/membership.go index 3255fd85eb96..0bfa56f83a30 100644 --- a/.ci/magician/github/membership.go +++ b/.ci/magician/github/membership.go @@ -87,6 +87,11 @@ var ( startDate: newDate(2024, 7, 5, pdtLoc), endDate: newDate(2024, 7, 16, pdtLoc), }, + { + id: "c2thorn", + startDate: newDate(2024, 7, 10, pdtLoc), + endDate: newDate(2024, 7, 16, pdtLoc), + }, } ) From a1b89f3af9a89fc7081e1d5db81f054aea7894ba Mon Sep 17 00:00:00 2001 From: nifflets <5343516+nifflets@users.noreply.github.com> Date: Wed, 10 Jul 2024 11:08:13 -0700 Subject: [PATCH 298/356] Add buildServiceAccount field to cloudfunctions (#11100) --- .../cloudfunctions/CloudFunction.yaml | 4 + .../resource_cloudfunctions_function.go | 19 +++ ...source_cloudfunctions_function_test.go.erb | 112 ++++++++++++++++++ .../r/cloudfunctions_function.html.markdown | 4 +- 4 files changed, 138 insertions(+), 1 deletion(-) diff --git a/mmv1/products/cloudfunctions/CloudFunction.yaml b/mmv1/products/cloudfunctions/CloudFunction.yaml index 03d28a182c84..c4b92107f2fa 100644 --- a/mmv1/products/cloudfunctions/CloudFunction.yaml +++ b/mmv1/products/cloudfunctions/CloudFunction.yaml @@ -118,6 +118,10 @@ properties: name: 'serviceAccountEmail' output: true description: 'The email of the service account for this function.' 
+ - !ruby/object:Api::Type::String + name: 'buildServiceAccount' + default_from_api: true + description: 'The fully-qualified name of the service account to be used for the build step of deploying this function' - !ruby/object:Api::Type::String name: 'updateTime' output: true diff --git a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go index 4c3c1489dd66..0be8a90f4676 100644 --- a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go +++ b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function.go @@ -298,6 +298,13 @@ func ResourceCloudFunctionsFunction() *schema.Resource { Description: ` If provided, the self-provided service account to run the function with.`, }, + "build_service_account": { + Type: schema.TypeString, + Optional: true, + Computed: true, + Description: `The fully-qualified name of the service account to be used for the build step of deploying this function`, + }, + "vpc_connector": { Type: schema.TypeString, Optional: true, @@ -627,6 +634,10 @@ func resourceCloudFunctionsCreate(d *schema.ResourceData, meta interface{}) erro function.MinInstances = int64(v.(int)) } + if v, ok := d.GetOk("build_service_account"); ok { + function.BuildServiceAccount = v.(string) + } + log.Printf("[DEBUG] Creating cloud function: %s", function.Name) // We retry the whole create-and-wait because Cloud Functions @@ -714,6 +725,9 @@ func resourceCloudFunctionsRead(d *schema.ResourceData, meta interface{}) error if err := d.Set("service_account_email", function.ServiceAccountEmail); err != nil { return fmt.Errorf("Error setting service_account_email: %s", err) } + if err := d.Set("build_service_account", function.BuildServiceAccount); err != nil { + return fmt.Errorf("Error setting build_service_account: %s", err) + } if err := d.Set("environment_variables", 
function.EnvironmentVariables); err != nil { return fmt.Errorf("Error setting environment_variables: %s", err) } @@ -945,6 +959,11 @@ func resourceCloudFunctionsUpdate(d *schema.ResourceData, meta interface{}) erro updateMaskArr = append(updateMaskArr, "minInstances") } + if d.HasChange("build_service_account") { + function.BuildServiceAccount = d.Get("build_service_account").(string) + updateMaskArr = append(updateMaskArr, "buildServiceAccount") + } + if len(updateMaskArr) > 0 { log.Printf("[DEBUG] Send Patch CloudFunction Configuration request: %#v", function) updateMask := strings.Join(updateMaskArr, ",") diff --git a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_test.go.erb b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_test.go.erb index b63b04ab3b57..ec2f194d7184 100644 --- a/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_test.go.erb +++ b/mmv1/third_party/terraform/services/cloudfunctions/resource_cloudfunctions_function_test.go.erb @@ -531,6 +531,54 @@ func TestAccCloudFunctionsFunction_secretMount(t *testing.T) { }) } +func TestAccCloudFunctionsFunction_buildServiceAccount(t *testing.T) { + t.Parallel() + + funcResourceName := "google_cloudfunctions_function.function" + functionName := fmt.Sprintf("tf-test-%s", acctest.RandString(t, 10)) + bucketName := fmt.Sprintf("tf-test-bucket-%d", acctest.RandInt(t)) + zipFilePath := acctest.CreateZIPArchiveForCloudFunctionSource(t, testHTTPTriggerPath) + proj := envvar.GetTestProjectFromEnv() + serviceAccount1 := fmt.Sprintf("tf-test-build1-%s", acctest.RandString(t, 10)) + serviceAccount2 := fmt.Sprintf("tf-test-build2-%s", acctest.RandString(t, 10)) + defer os.Remove(zipFilePath) // clean up + + acctest.VcrTest(t, resource.TestCase{ + PreCheck: func() { acctest.AccTestPreCheck(t) }, + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories(t), + ExternalProviders: map[string]resource.ExternalProvider{ 
+ "time": {}, + }, + CheckDestroy: testAccCheckCloudFunctionsFunctionDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudFunctionsFunction_buildServiceAccount("sa1", functionName, bucketName, zipFilePath, serviceAccount1), + Check: resource.TestCheckResourceAttr(funcResourceName, + "build_service_account", + fmt.Sprintf("projects/%[1]s/serviceAccounts/%s@%[1]s.iam.gserviceaccount.com", proj, serviceAccount1)), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + { + Config: testAccCloudFunctionsFunction_buildServiceAccount("sa2", functionName, bucketName, zipFilePath, serviceAccount2), + Check: resource.TestCheckResourceAttr(funcResourceName, + "build_service_account", + fmt.Sprintf("projects/%[1]s/serviceAccounts/%s@%[1]s.iam.gserviceaccount.com", proj, serviceAccount2)), + }, + { + ResourceName: funcResourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"build_environment_variables"}, + }, + }, + }) +} + func testAccCheckCloudFunctionsFunctionDestroyProducer(t *testing.T) func(s *terraform.State) error { return func(s *terraform.State) error { config := acctest.GoogleProviderConfig(t) @@ -1292,3 +1340,67 @@ resource "google_cloudfunctions_function" "function" { } `, accountId, secretName, versionName, bucketName, zipFilePath, functionName, projectNumber, versionNumber) } + +func testAccCloudFunctionsFunction_buildServiceAccount(saName, functionName, bucketName, zipFilePath, serviceAccount string) string { + return fmt.Sprintf(` +data "google_project" "project" {} + +resource "google_storage_bucket" "bucket" { + name = "%s" + location = "US" + uniform_bucket_level_access = true +} + +resource "google_storage_bucket_object" "archive" { + name = "index.zip" + bucket = google_storage_bucket.bucket.name + source = "%s" +} + +resource "google_service_account" 
"cloud_function_build_account_%[3]s" { + account_id = "%s" + display_name = "Testing Cloud Function build service account" +} + +resource "google_project_iam_member" "storage_object_admin_%[3]s" { + project = data.google_project.project.project_id + role = "roles/storage.objectAdmin" + member = "serviceAccount:${google_service_account.cloud_function_build_account_%[3]s.email}" +} + +resource "google_project_iam_member" "artifact_registry_writer_%[3]s" { + project = data.google_project.project.project_id + role = "roles/artifactregistry.writer" + member = "serviceAccount:${google_service_account.cloud_function_build_account_%[3]s.email}" +} + +resource "google_project_iam_member" "log_writer_%[3]s" { + project = data.google_project.project.project_id + role = "roles/logging.logWriter" + member = "serviceAccount:${google_service_account.cloud_function_build_account_%[3]s.email}" +} + +resource "time_sleep" "wait_iam_roles_%[3]s" { + depends_on = [ + google_project_iam_member.storage_object_admin_%[3]s, + google_project_iam_member.artifact_registry_writer_%[3]s, + google_project_iam_member.log_writer_%[3]s, + ] + create_duration = "60s" +} + +resource "google_cloudfunctions_function" "function" { + depends_on = [time_sleep.wait_iam_roles_%[3]s] + name = "%[5]s" + runtime = "nodejs10" + + source_archive_bucket = google_storage_bucket.bucket.name + source_archive_object = google_storage_bucket_object.archive.name + + build_service_account = google_service_account.cloud_function_build_account_%[3]s.name + + trigger_http = true + entry_point = "helloGET" +} +`, bucketName, zipFilePath, saName, serviceAccount, functionName) +} diff --git a/mmv1/third_party/terraform/website/docs/r/cloudfunctions_function.html.markdown b/mmv1/third_party/terraform/website/docs/r/cloudfunctions_function.html.markdown index daba06bc397d..6fae33d41e1c 100644 --- a/mmv1/third_party/terraform/website/docs/r/cloudfunctions_function.html.markdown +++ 
b/mmv1/third_party/terraform/website/docs/r/cloudfunctions_function.html.markdown @@ -145,6 +145,8 @@ Please refer to the field 'effective_labels' for all of the labels present on th * `service_account_email` - (Optional) If provided, the self-provided service account to run the function with. +* `build_service_account` - (Optional) If provided, the self-provided service account to use to build the function. The format of this field is `projects/{project}/serviceAccounts/{serviceAccountEmail}` + * `environment_variables` - (Optional) A set of key/value environment variable pairs to assign to the function. * `build_environment_variables` - (Optional) A set of key/value environment variable pairs available during build time. @@ -160,7 +162,7 @@ Please refer to the field 'effective_labels' for all of the labels present on th * `source_archive_object` - (Optional) The source archive object (file) in archive bucket. * `source_repository` - (Optional) Represents parameters related to source repository where a function is hosted. - Cannot be set alongside `source_archive_bucket` or `source_archive_object`. Structure is [documented below](#nested_source_repository). It must match the pattern `projects/{project}/locations/{location}/repositories/{repository}`.* + Cannot be set alongside `source_archive_bucket` or `source_archive_object`. Structure is [documented below](#nested_source_repository). It must match the pattern `projects/{project}/locations/{location}/repositories/{repository}`.* * `docker_registry` - (Optional) Docker Registry to use for storing the function's Docker images. Allowed values are ARTIFACT_REGISTRY (default) and CONTAINER_REGISTRY. 
From aea5962758932577f12adf740223d2fcb98912cf Mon Sep 17 00:00:00 2001 From: Cameron Thornton Date: Wed, 10 Jul 2024 13:51:24 -0500 Subject: [PATCH 299/356] cloudrunv2 and dialogflowcx (#11132) --- mmv1/products/alloydb/go_Backup.yaml | 1 + mmv1/products/alloydb/go_Cluster.yaml | 8 + mmv1/products/alloydb/go_Instance.yaml | 1 + mmv1/products/cloudrunv2/go_Job.yaml | 744 +++++++++++ mmv1/products/cloudrunv2/go_Service.yaml | 1018 +++++++++++++++ mmv1/products/cloudrunv2/go_product.yaml | 24 + .../compute/go_NetworkAttachment.yaml | 2 +- mmv1/products/compute/go_ResourcePolicy.yaml | 3 +- mmv1/products/dialogflowcx/go_Agent.yaml | 216 ++++ mmv1/products/dialogflowcx/go_EntityType.yaml | 141 ++ .../products/dialogflowcx/go_Environment.yaml | 119 ++ mmv1/products/dialogflowcx/go_Flow.yaml | 586 +++++++++ mmv1/products/dialogflowcx/go_Intent.yaml | 193 +++ mmv1/products/dialogflowcx/go_Page.yaml | 1149 +++++++++++++++++ .../dialogflowcx/go_SecuritySettings.yaml | 166 +++ mmv1/products/dialogflowcx/go_TestCase.yaml | 448 +++++++ mmv1/products/dialogflowcx/go_Version.yaml | 146 +++ mmv1/products/dialogflowcx/go_Webhook.yaml | 144 +++ mmv1/products/dialogflowcx/go_product.yaml | 22 + ...vileged_access_manager_entitlement.go.tmpl | 2 +- .../datasource_iam.html.markdown.tmpl | 5 +- .../go/cloudrunv2_job_run_job.tf.tmpl | 1 + ...ervice_autoscaling_no_limit_config.tf.tmpl | 1 + .../network_attachment_instance_usage.tf.tmpl | 4 + ...d_access_manager_entitlement_basic.tf.tmpl | 21 +- ..._project_notification_config_basic.tf.tmpl | 14 + mmv1/templates/terraform/iam_policy.go.tmpl | 6 +- .../pre_create/dialogflow_set_location.go.erb | 17 +- ...lowcx_set_location_skip_default_obj.go.erb | 1 - .../go/dialogflow_set_location.go.tmpl | 17 +- ...owcx_set_location_skip_default_obj.go.tmpl | 1 - ...lowcx_set_location_skip_default_obj.go.erb | 1 - ...owcx_set_location_skip_default_obj.go.tmpl | 1 - mmv1/templates/terraform/resource.go.tmpl | 5 +- 
.../terraform/resource_iam.html.markdown.tmpl | 2 +- .../go/resource_compute_disk_test.go.tmpl | 65 + ...ce_gke_hub_feature_membership_test.go.tmpl | 28 +- ...ileged_access_manager_entitlement_test.go} | 7 +- 38 files changed, 5270 insertions(+), 60 deletions(-) create mode 100644 mmv1/products/cloudrunv2/go_Job.yaml create mode 100644 mmv1/products/cloudrunv2/go_Service.yaml create mode 100644 mmv1/products/cloudrunv2/go_product.yaml create mode 100644 mmv1/products/dialogflowcx/go_Agent.yaml create mode 100644 mmv1/products/dialogflowcx/go_EntityType.yaml create mode 100644 mmv1/products/dialogflowcx/go_Environment.yaml create mode 100644 mmv1/products/dialogflowcx/go_Flow.yaml create mode 100644 mmv1/products/dialogflowcx/go_Intent.yaml create mode 100644 mmv1/products/dialogflowcx/go_Page.yaml create mode 100644 mmv1/products/dialogflowcx/go_SecuritySettings.yaml create mode 100644 mmv1/products/dialogflowcx/go_TestCase.yaml create mode 100644 mmv1/products/dialogflowcx/go_Version.yaml create mode 100644 mmv1/products/dialogflowcx/go_Webhook.yaml create mode 100644 mmv1/products/dialogflowcx/go_product.yaml create mode 100644 mmv1/templates/terraform/examples/go/scc_project_notification_config_basic.tf.tmpl rename mmv1/third_party/terraform/services/privilegedaccessmanager/go/{resource_privileged_access_manager_entitlement_test.go.tmpl => resource_privileged_access_manager_entitlement_test.go} (96%) diff --git a/mmv1/products/alloydb/go_Backup.yaml b/mmv1/products/alloydb/go_Backup.yaml index 301727973cb8..c5ad9794c13b 100644 --- a/mmv1/products/alloydb/go_Backup.yaml +++ b/mmv1/products/alloydb/go_Backup.yaml @@ -33,6 +33,7 @@ timeouts: delete_minutes: 10 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' diff --git a/mmv1/products/alloydb/go_Cluster.yaml b/mmv1/products/alloydb/go_Cluster.yaml index e88025f5671b..0e8c4a2cd6c1 100644 --- a/mmv1/products/alloydb/go_Cluster.yaml +++ 
b/mmv1/products/alloydb/go_Cluster.yaml @@ -24,6 +24,13 @@ docs: Users can promote a secondary cluster to a primary cluster with the help of `cluster_type`. To promote, users have to set the `cluster_type` property as `PRIMARY` and remove the `secondary_config` field from cluster configuration. [See Example](https://github.com/hashicorp/terraform-provider-google/pull/16413). + + Switchover is supported in terraform by refreshing the state of the terraform configurations. + The switchover operation still needs to be called outside of terraform. + After the switchover operation is completed successfully: + 1. Refresh the state of the AlloyDB resources by running `terraform apply -refresh-only --auto-approve` . + 2. Manually update the terraform configuration file(s) to match the actual state of the resources by modifying the `cluster_type` and `secondary_config` fields. + 3. Verify the sync of terraform state by running `terraform plan` and ensure that the infrastructure matches the configuration and no changes are required. 
base_url: 'projects/{{project}}/locations/{{location}}/clusters' self_link: 'projects/{{project}}/locations/{{location}}/clusters/{{cluster_id}}' create_url: 'projects/{{project}}/locations/{{location}}/clusters?clusterId={{cluster_id}}' @@ -38,6 +45,7 @@ timeouts: delete_minutes: 30 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' diff --git a/mmv1/products/alloydb/go_Instance.yaml b/mmv1/products/alloydb/go_Instance.yaml index b6d15291e136..897e937f92ad 100644 --- a/mmv1/products/alloydb/go_Instance.yaml +++ b/mmv1/products/alloydb/go_Instance.yaml @@ -36,6 +36,7 @@ timeouts: delete_minutes: 120 autogen_async: true async: + actions: ['create', 'delete', 'update'] type: 'OpAsync' operation: base_url: '{{op_id}}' diff --git a/mmv1/products/cloudrunv2/go_Job.yaml b/mmv1/products/cloudrunv2/go_Job.yaml new file mode 100644 index 000000000000..ed2b78d7a96f --- /dev/null +++ b/mmv1/products/cloudrunv2/go_Job.yaml @@ -0,0 +1,744 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Job' +description: | + A Cloud Run Job resource that references a container image which is run to completion. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/run/docs/' + api: 'https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.jobs' +docs: +id_format: 'projects/{{project}}/locations/{{location}}/jobs/{{name}}' +base_url: 'projects/{{project}}/locations/{{location}}/jobs' +self_link: 'projects/{{project}}/locations/{{location}}/jobs/{{name}}' +create_url: 'projects/{{project}}/locations/{{location}}/jobs?jobId={{name}}' +update_verb: 'PATCH' +import_format: + - 'projects/{{project}}/locations/{{location}}/jobs/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'name' + base_url: 'projects/{{project}}/locations/{{location}}/jobs/{{name}}' + example_config_body: 'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/jobs/{{name}}' + - '{{name}}' +custom_code: +taint_resource_on_failed_create: true +examples: + - name: 'cloudrunv2_job_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' + vars: + cloud_run_job_name: 'cloudrun-job' + - name: 'cloudrunv2_job_limits' + primary_resource_id: 'default' + vars: + cloud_run_job_name: 'cloudrun-job' + - name: 'cloudrunv2_job_sql' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' + vars: + cloud_run_job_name: 'cloudrun-job' + secret_id: 'secret' + cloud_run_sql_name: 'cloudrun-sql' + deletion_protection: 'true' + test_vars_overrides: + 'deletion_protection': 'false' + - name: 'cloudrunv2_job_vpcaccess' + 
primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' + vars: + cloud_run_job_name: 'cloudrun-job' + vpc_access_connector_name: 'run-vpc' + vpc_compute_subnetwork_name: 'run-subnetwork' + compute_network_name: 'run-network' + - name: 'cloudrunv2_job_directvpc' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' + vars: + cloud_run_job_name: 'cloudrun-job' + - name: 'cloudrunv2_job_secret' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' + vars: + cloud_run_job_name: 'cloudrun-job' + secret_id: 'secret' + - name: 'cloudrunv2_job_emptydir' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' + min_version: 'beta' + vars: + cloud_run_job_name: 'cloudrun-job' + - name: 'cloudrunv2_job_run_job' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-job%s", context["random_suffix"])' + min_version: 'beta' + vars: + cloud_run_job_name: 'cloudrun-job' +parameters: + - name: 'location' + type: String + description: The location of the cloud run job + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Name of the Job. + url_param_only: true + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resource_from_self_link.go.tmpl' + - name: 'uid' + type: String + description: | + Server assigned unique identifier for the Execution. The value is a UUID4 string and guaranteed to remain unchanged until the resource is deleted. 
+ output: true + - name: 'generation' + type: String + description: | + A number that monotonically increases every time the user modifies the desired state. + output: true + - name: 'labels' + type: KeyValueLabels + description: |- + Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, + environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. + + Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. + All system labels in v1 now have a corresponding field in v2 Job. + immutable: false + - name: 'annotations' + type: KeyValueAnnotations + description: |- + Unstructured key value map that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. + + Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected on new resources. + All system annotations in v1 now have a corresponding field in v2 Job. + + This field follows Kubernetes annotations' namespacing, limits, and rules. + - name: 'createTime' + type: Time + description: |- + The creation time. + output: true + - name: 'updateTime' + type: Time + description: |- + The last-modified time. + output: true + - name: 'deleteTime' + type: Time + description: |- + The deletion time. + output: true + - name: 'expireTime' + type: Time + description: |- + For a deleted resource, the time after which it will be permanently deleted. 
+ output: true + - name: 'creator' + type: String + description: |- + Email address of the authenticated creator. + output: true + - name: 'lastModifier' + type: String + description: |- + Email address of the last authenticated modifier. + output: true + - name: 'client' + type: String + description: | + Arbitrary identifier for the API client. + - name: 'clientVersion' + type: String + description: | + Arbitrary version identifier for the API client. + - name: 'launchStage' + type: Enum + description: | + The launch stage as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/products#product-launch-stages). Cloud Run supports ALPHA, BETA, and GA. + If no value is specified, GA is assumed. Set the launch stage to a preview stage on input to allow use of preview features in that stage. On read (or output), describes whether the resource uses preview features. + + For example, if ALPHA is provided as input, but only BETA and GA-level features are used, this field will be BETA on output. + default_from_api: true + enum_values: + - 'UNIMPLEMENTED' + - 'PRELAUNCH' + - 'EARLY_ACCESS' + - 'ALPHA' + - 'BETA' + - 'GA' + - 'DEPRECATED' + - name: 'binaryAuthorization' + type: NestedObject + description: | + Settings for the Binary Authorization feature. + properties: + - name: 'breakglassJustification' + type: String + description: | + If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass + - name: 'useDefault' + type: Boolean + description: | + If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. + - name: 'startExecutionToken' + type: String + description: |- + A unique string used as a suffix creating a new execution upon job create or update. The Job will become ready when the execution is successfully started. 
+ The sum of job name and token length must be fewer than 63 characters. + min_version: 'beta' + conflicts: + - run_execution_token + - name: 'runExecutionToken' + type: String + description: |- + A unique string used as a suffix creating a new execution upon job create or update. The Job will become ready when the execution is successfully completed. + The sum of job name and token length must be fewer than 63 characters. + min_version: 'beta' + conflicts: + - start_execution_token + - name: 'template' + type: NestedObject + description: | + The template used to create executions for this Job. + required: true + properties: + - name: 'labels' + type: KeyValuePairs + description: |- + Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, + or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or + https://cloud.google.com/run/docs/configuring/labels. + + Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. + All system labels in v1 now have a corresponding field in v2 ExecutionTemplate. + - name: 'annotations' + type: KeyValuePairs + description: |- + Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. + + Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. + All system annotations in v1 now have a corresponding field in v2 ExecutionTemplate. + + This field follows Kubernetes annotations' namespacing, limits, and rules. 
+ - name: 'parallelism' + type: Integer + description: |- + Specifies the maximum desired number of tasks the execution should run at given time. Must be <= taskCount. When the job is run, if this field is 0 or unset, the maximum possible value will be used for that execution. The actual number of tasks running in steady state will be less than this number when there are fewer tasks waiting to be completed remaining, i.e. when the work left to do is less than max parallelism. + default_from_api: true + - name: 'taskCount' + type: Integer + description: |- + Specifies the desired number of tasks the execution should run. Setting to 1 means that parallelism is limited to 1 and the success of that task signals the success of the execution. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + default_from_api: true + - name: 'template' + type: NestedObject + description: | + Describes the task(s) that will be created when executing an execution + required: true + properties: + - name: 'containers' + type: Array + description: |- + Holds the single container that defines the unit of execution for this task. + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + Name of the container specified as a DNS_LABEL. + - name: 'image' + type: String + description: |- + URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images + required: true + - name: 'command' + type: Array + description: |- + Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). 
Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + item_type: + type: String + - name: 'args' + type: Array + description: |- + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + item_type: + type: String + - name: 'env' + type: Array + description: |- + List of environment variables to set in the container. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + Name of the environment variable. Must be a C_IDENTIFIER, and may not exceed 32768 characters. + required: true + - name: 'value' + type: String + description: |- + Variable references $(VAR_NAME) are expanded using the previously defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes + # exactly_one_of: + # - template.0.template.0.containers.0.env.0.value + # - template.0.template.0.containers.0.env.0.valueSource + - name: 'valueSource' + type: NestedObject + description: |- + Source for the environment variable's value. 
+ # exactly_one_of: + # - template.0.template.0.containers.0.env.0.value + # - template.0.template.0.containers.0.env.0.valueSource + properties: + - name: 'secretKeyRef' + type: NestedObject + description: |- + Selects a secret and a specific version from Cloud Secret Manager. + properties: + - name: 'secret' + type: String + description: |- + The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project. + required: true + - name: 'version' + type: String + description: |- + The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. + required: true + - name: 'resources' + type: NestedObject + description: |- + Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + default_from_api: true + properties: + - name: 'limits' + type: KeyValuePairs + description: |- + Only memory and CPU are supported. Use key `cpu` for CPU limit and `memory` for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + default_from_api: true + - name: 'ports' + type: Array + description: |- + List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. + + If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + If specified, used to specify which protocol to use. 
Allowed values are "http1" and "h2c". + - name: 'containerPort' + type: Integer + description: |- + Port number the container listens on. This must be a valid TCP port number, 0 < containerPort < 65536. + - name: 'volumeMounts' + type: Array + description: |- + Volume to mount into the container's filesystem. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + This must match the Name of a Volume. + required: true + - name: 'mountPath' + type: String + description: |- + Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run + required: true + - name: 'workingDir' + type: String + description: |- + Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. + - name: 'volumes' + type: Array + description: |- + A list of Volumes to make available to containers. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + Volume's name. + required: true + - name: 'secret' + type: NestedObject + description: |- + Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + # exactly_one_of: + # - template.0.template.0.volumes.0.secret + # - template.0.template.0.volumes.0.cloudSqlInstance + # - template.0.template.0.volumes.0.emptyDir + # - template.0.volumes.0.gcs + # - template.0.volumes.0.nfs + properties: + - name: 'secret' + type: String + description: |- + The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. 
+ required: true + - name: 'defaultMode' + type: Integer + description: |- + Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. + - name: 'items' + type: Array + description: |- + If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. + item_type: + type: NestedObject + properties: + - name: 'path' + type: String + description: |- + The relative path of the secret in the container. + required: true + - name: 'version' + type: String + description: |- + The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version + required: true + - name: 'mode' + type: Integer + description: |- + Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. + - name: 'cloudSqlInstance' + type: NestedObject + description: |- + For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. + # exactly_one_of: + # - template.0.template.0.volumes.0.secret + # - template.0.template.0.volumes.0.cloudSqlInstance + # - template.0.template.0.volumes.0.emptyDir + # - template.0.volumes.0.gcs + # - template.0.volumes.0.nfs + properties: + - name: 'instances' + type: Array + description: |- + The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. 
Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} + item_type: + type: String + - name: 'emptyDir' + type: NestedObject + description: |- + Ephemeral storage used as a shared volume. + min_version: 'beta' + properties: + - name: 'medium' + type: Enum + description: |- + The different types of medium supported for EmptyDir. + default_value: "MEMORY" + enum_values: + - 'MEMORY' + - name: 'sizeLimit' + type: String + description: |- + Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + - name: 'gcs' + type: NestedObject + description: |- + Cloud Storage bucket mounted as a volume using GCSFuse. This feature requires the launch stage to be set to ALPHA or BETA. + min_version: 'beta' + properties: + - name: 'bucket' + type: String + description: |- + Name of the cloud storage bucket to back the volume. The resource service account must have permission to access the bucket. + required: true + - name: 'readOnly' + type: Boolean + description: |- + If true, mount this volume as read-only in all mounts. If false, mount this volume as read-write. + - name: 'nfs' + type: NestedObject + description: |- + NFS share mounted as a volume. This feature requires the launch stage to be set to ALPHA or BETA. + min_version: 'beta' + properties: + - name: 'server' + type: String + description: |- + Hostname or IP address of the NFS server. 
+ required: true + - name: 'path' + type: String + description: |- + Path that is exported by the NFS server. + - name: 'readOnly' + type: Boolean + description: |- + If true, mount this volume as read-only in all mounts. + - name: 'timeout' + type: String + description: |- + Max allowed time duration the Task may be active before the system will actively try to mark it failed and kill associated containers. This applies per attempt of a task, meaning each retry can run for the full timeout. + + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + default_from_api: true + validation: + regex: '^[0-9]+(?:\.[0-9]{1,9})?s$' + - name: 'serviceAccount' + type: String + description: |- + Email address of the IAM service account associated with the Task of a Job. The service account represents the identity of the running task, and determines what permissions the task has. If not provided, the task will use the project's default service account. + default_from_api: true + - name: 'executionEnvironment' + type: Enum + description: |- + The execution environment being used to host this Task. + default_from_api: true + enum_values: + - 'EXECUTION_ENVIRONMENT_GEN1' + - 'EXECUTION_ENVIRONMENT_GEN2' + - name: 'encryptionKey' + type: String + description: |- + A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek + - name: 'vpcAccess' + type: NestedObject + description: |- + VPC Access configuration to use for this Task. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. + properties: + - name: 'connector' + type: String + description: |- + VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. + - name: 'egress' + type: Enum + description: |- + Traffic VPC egress settings. 
+ default_from_api: true + enum_values: + - 'ALL_TRAFFIC' + - 'PRIVATE_RANGES_ONLY' + - name: 'networkInterfaces' + type: Array + description: |- + Direct VPC egress settings. Currently only single network interface is supported. + item_type: + type: NestedObject + properties: + - name: 'network' + type: String + description: |- + The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both + network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If network is not specified, it will be + looked up from the subnetwork. + default_from_api: true + - name: 'subnetwork' + type: String + description: |- + The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both + network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the + subnetwork with the same name with the network will be used. + default_from_api: true + - name: 'tags' + type: Array + description: |- + Network tags applied to this Cloud Run job. + item_type: + type: String + - name: 'maxRetries' + type: Integer + description: |- + Number of retries allowed per Task, before marking this Task failed. + send_empty_value: true + default_value: 3 + - name: 'observedGeneration' + type: String + description: | + The generation of this Job. See comments in reconciling for additional information on reconciliation process in Cloud Run. + output: true + - name: 'terminalCondition' + type: NestedObject + description: | + The Condition of this Job, containing its readiness status, and detailed error information in case it did not reach the desired state + output: true + properties: + - name: 'type' + type: String + description: |- + type is used to communicate the status of the reconciliation process. 
See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. + output: true + - name: 'state' + type: String + description: |- + State of the condition. + output: true + - name: 'message' + type: String + description: |- + Human readable message indicating details about the current status. + output: true + - name: 'lastTransitionTime' + type: Time + description: |- + Last time the condition transitioned from one status to another. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'severity' + type: String + description: |- + How to interpret failures of this condition, one of Error, Warning, Info + output: true + - name: 'reason' + type: String + description: |- + A common (service-level) reason for this condition. + output: true + - name: 'revisionReason' + type: String + description: |- + A reason for the revision condition. + output: true + - name: 'executionReason' + type: String + description: |- + A reason for the execution condition. + output: true + - name: 'conditions' + type: Array + description: |- + The Conditions of all other associated sub-resources. They contain additional diagnostics information in case the Job does not reach its desired state. See comments in reconciling for additional information on `reconciliation` process in Cloud Run. + output: true + item_type: + type: NestedObject + properties: + - name: 'type' + type: String + description: |- + type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. 
+ output: true + - name: 'state' + type: String + description: |- + State of the condition. + output: true + - name: 'message' + type: String + description: |- + Human readable message indicating details about the current status. + output: true + - name: 'lastTransitionTime' + type: Time + description: |- + Last time the condition transitioned from one status to another. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'severity' + type: String + description: |- + How to interpret failures of this condition, one of Error, Warning, Info + output: true + - name: 'reason' + type: String + description: |- + A common (service-level) reason for this condition. + output: true + - name: 'revisionReason' + type: String + description: |- + A reason for the revision condition. + output: true + - name: 'executionReason' + type: String + description: |- + A reason for the execution condition. + output: true + - name: 'executionCount' + type: Integer + description: | + Number of executions created for this job. + output: true + - name: 'latestCreatedExecution' + type: NestedObject + description: | + Name of the last created execution. + output: true + properties: + - name: 'name' + type: String + description: | + Name of the execution. + output: true + - name: 'createTime' + type: Time + description: | + Creation timestamp of the execution. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + output: true + - name: 'completionTime' + type: Time + description: | + Completion timestamp of the execution. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
+ output: true + - name: 'reconciling' + type: Boolean + description: | + Returns true if the Job is currently being acted upon by the system to bring it into the desired state. + + When a new Job is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Job to the desired state. This process is called reconciliation. While reconciliation is in process, observedGeneration and latest_succeeded_execution, will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the state matches the Job, or there was an error, and reconciliation failed. This state can be found in terminalCondition.state. + + If reconciliation succeeded, the following fields will match: observedGeneration and generation, latest_succeeded_execution and latestCreatedExecution. + + If reconciliation failed, observedGeneration and latest_succeeded_execution will have the state of the last succeeded execution or empty for newly created Job. Additional information on the failure can be found in terminalCondition and conditions + output: true + - name: 'etag' + type: String + description: | + A system-generated fingerprint for this version of the resource. May be used to detect modification conflict during updates. + output: true diff --git a/mmv1/products/cloudrunv2/go_Service.yaml b/mmv1/products/cloudrunv2/go_Service.yaml new file mode 100644 index 000000000000..e93c0ae54380 --- /dev/null +++ b/mmv1/products/cloudrunv2/go_Service.yaml @@ -0,0 +1,1018 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Service' +description: | + Service acts as a top-level container that manages a set of configurations and revision templates which implement a network service. Service exists to provide a singular abstraction which can be access controlled, reasoned about, and which encapsulates software lifecycle decisions such as rollout policy and team resource ownership. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/run/docs/' + api: 'https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.services' +docs: +id_format: 'projects/{{project}}/locations/{{location}}/services/{{name}}' +base_url: 'projects/{{project}}/locations/{{location}}/services' +self_link: 'projects/{{project}}/locations/{{location}}/services/{{name}}' +create_url: 'projects/{{project}}/locations/{{location}}/services?serviceId={{name}}' +update_verb: 'PATCH' +import_format: + - 'projects/{{project}}/locations/{{location}}/services/{{name}}' +timeouts: + insert_minutes: 20 + update_minutes: 20 + delete_minutes: 20 +autogen_async: true +async: + actions: ['create', 'delete', 'update'] + type: 'OpAsync' + operation: + base_url: '{{op_id}}' + path: 'name' + wait_ms: 1000 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +iam_policy: + method_name_separator: ':' + parent_resource_attribute: 'name' + base_url: 'projects/{{project}}/locations/{{location}}/services/{{name}}' + example_config_body: 
'templates/terraform/iam/go/iam_attributes.go.tmpl' + import_format: + - 'projects/{{project}}/locations/{{location}}/services/{{name}}' + - '{{name}}' +custom_code: +taint_resource_on_failed_create: true +examples: + - name: 'cloudrunv2_service_basic' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service%s", context["random_suffix"])' + vars: + cloud_run_service_name: 'cloudrun-service' + - name: 'cloudrunv2_service_limits' + primary_resource_id: 'default' + vars: + cloud_run_service_name: 'cloudrun-service' + - name: 'cloudrunv2_service_sql' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' + vars: + cloud_run_service_name: 'cloudrun-service' + secret_id: 'secret-1' + cloud_run_sql_name: 'cloudrun-sql' + deletion_protection: 'true' + test_vars_overrides: + 'deletion_protection': 'false' + - name: 'cloudrunv2_service_vpcaccess' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' + vars: + cloud_run_service_name: 'cloudrun-service' + vpc_access_connector_name: 'run-vpc' + vpc_compute_subnetwork_name: 'run-subnetwork' + compute_network_name: 'run-network' + - name: 'cloudrunv2_service_directvpc' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' + vars: + cloud_run_service_name: 'cloudrun-service' + - name: 'cloudrunv2_service_probes' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' + vars: + cloud_run_service_name: 'cloudrun-service' + - name: 'cloudrunv2_service_secret' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-srv%s", context["random_suffix"])' + vars: + cloud_run_service_name: 'cloudrun-service' + secret_id: 'secret-1' + - name: 'cloudrunv2_service_multicontainer' + 
primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service%s", context["random_suffix"])' + min_version: 'beta' + vars: + cloud_run_service_name: 'cloudrun-service' + - name: 'cloudrunv2_service_mount_gcs' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service-%s", context["random_suffix"])' + vars: + cloud_run_service_name: 'cloudrun-service' + - name: 'cloudrunv2_service_mount_nfs' + primary_resource_id: 'default' + primary_resource_name: 'fmt.Sprintf("tf-test-cloudrun-service-%s", context["random_suffix"])' + vars: + cloud_run_service_name: 'cloudrun-service' +parameters: + - name: 'location' + type: String + description: The location of the cloud run service + url_param_only: true + required: true + immutable: true +properties: + - name: 'name' + type: String + description: | + Name of the Service. + url_param_only: true + required: true + immutable: true + diff_suppress_func: 'tpgresource.CompareSelfLinkOrResourceName' + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/resource_from_self_link.go.tmpl' + - name: 'description' + type: String + description: | + User-provided description of the Service. This field currently has a 512-character limit. + - name: 'uid' + type: String + description: | + Server assigned unique identifier for the trigger. The value is a UUID4 string and guaranteed to remain unchanged until the resource is deleted. + output: true + - name: 'generation' + type: String + description: | + A number that monotonically increases every time the user modifies the desired state. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer. + output: true + - name: 'labels' + type: KeyValueLabels + description: |- + Unstructured key value map that can be used to organize and categorize objects. 
User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, + environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. + + Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. + All system labels in v1 now have a corresponding field in v2 Service. + immutable: false + - name: 'annotations' + type: KeyValueAnnotations + description: |- + Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. + + Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected in new resources. + All system annotations in v1 now have a corresponding field in v2 Service. + + This field follows Kubernetes annotations' namespacing, limits, and rules. + - name: 'createTime' + type: Time + description: |- + The creation time. + output: true + - name: 'updateTime' + type: Time + description: |- + The last-modified time. + output: true + - name: 'deleteTime' + type: Time + description: |- + The deletion time. + output: true + - name: 'expireTime' + type: Time + description: |- + For a deleted resource, the time after which it will be permamently deleted. + output: true + - name: 'creator' + type: String + description: |- + Email address of the authenticated creator. + output: true + - name: 'lastModifier' + type: String + description: |- + Email address of the last authenticated modifier. + output: true + - name: 'client' + type: String + description: | + Arbitrary identifier for the API client. 
+ - name: 'clientVersion' + type: String + description: | + Arbitrary version identifier for the API client. + - name: 'ingress' + type: Enum + description: | + Provides the ingress settings for this Service. On output, returns the currently observed ingress settings, or INGRESS_TRAFFIC_UNSPECIFIED if no revision is active. + default_from_api: true + enum_values: + - 'INGRESS_TRAFFIC_ALL' + - 'INGRESS_TRAFFIC_INTERNAL_ONLY' + - 'INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER' + - name: 'launchStage' + type: Enum + description: | + The launch stage as defined by [Google Cloud Platform Launch Stages](https://cloud.google.com/products#product-launch-stages). Cloud Run supports ALPHA, BETA, and GA. + If no value is specified, GA is assumed. Set the launch stage to a preview stage on input to allow use of preview features in that stage. On read (or output), describes whether the resource uses preview features. + + For example, if ALPHA is provided as input, but only BETA and GA-level features are used, this field will be BETA on output. + default_from_api: true + enum_values: + - 'UNIMPLEMENTED' + - 'PRELAUNCH' + - 'EARLY_ACCESS' + - 'ALPHA' + - 'BETA' + - 'GA' + - 'DEPRECATED' + - name: 'binaryAuthorization' + type: NestedObject + description: | + Settings for the Binary Authorization feature. + properties: + - name: 'breakglassJustification' + type: String + description: | + If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass + - name: 'useDefault' + type: Boolean + description: | + If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. + - name: 'customAudiences' + type: Array + description: | + One or more custom audiences that you want this service to support. Specify each custom audience as the full URL in a string. 
The custom audiences are encoded in the token and used to authenticate requests. + For more information, see https://cloud.google.com/run/docs/configuring/custom-audiences. + item_type: + type: String + - name: 'scaling' + type: NestedObject + description: | + Scaling settings that apply to the whole service + min_version: 'beta' + properties: + - name: 'minInstanceCount' + type: Integer + description: | + Minimum number of instances for the service, to be divided among all revisions receiving traffic. + - name: 'defaultUriDisabled' + type: Boolean + description: |- + Disables public resolution of the default URI of this service. + min_version: 'beta' + - name: 'template' + type: NestedObject + description: | + The template used to create revisions for this Service. + required: true + properties: + - name: 'revision' + type: String + description: |- + The unique name for the revision. If this field is omitted, it will be automatically generated based on the Service name. + - name: 'labels' + type: KeyValuePairs + description: |- + Unstructured key value map that can be used to organize and categorize objects. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. + For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels. + + Cloud Run API v2 does not support labels with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. + All system labels in v1 now have a corresponding field in v2 RevisionTemplate. + - name: 'annotations' + type: KeyValuePairs + description: |- + Unstructured key value map that may be set by external tools to store and arbitrary metadata. They are not queryable and should be preserved when modifying objects. 
+ + Cloud Run API v2 does not support annotations with `run.googleapis.com`, `cloud.googleapis.com`, `serving.knative.dev`, or `autoscaling.knative.dev` namespaces, and they will be rejected. + All system annotations in v1 now have a corresponding field in v2 RevisionTemplate. + + This field follows Kubernetes annotations' namespacing, limits, and rules. + - name: 'scaling' + type: NestedObject + description: | + Scaling settings for this Revision. + default_from_api: true + properties: + - name: 'minInstanceCount' + type: Integer + description: |- + Minimum number of serving instances that this resource should have. + - name: 'maxInstanceCount' + type: Integer + description: |- + Maximum number of serving instances that this resource should have. + - name: 'vpcAccess' + type: NestedObject + description: |- + VPC Access configuration to use for this Task. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. + properties: + - name: 'connector' + type: String + description: |- + VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. + - name: 'egress' + type: Enum + description: |- + Traffic VPC egress settings. + default_from_api: true + enum_values: + - 'ALL_TRAFFIC' + - 'PRIVATE_RANGES_ONLY' + - name: 'networkInterfaces' + type: Array + description: |- + Direct VPC egress settings. Currently only single network interface is supported. + item_type: + type: NestedObject + properties: + - name: 'network' + type: String + description: |- + The VPC network that the Cloud Run resource will be able to send traffic to. At least one of network or subnetwork must be specified. If both + network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If network is not specified, it will be + looked up from the subnetwork. 
+ default_from_api: true + - name: 'subnetwork' + type: String + description: |- + The VPC subnetwork that the Cloud Run resource will get IPs from. At least one of network or subnetwork must be specified. If both + network and subnetwork are specified, the given VPC subnetwork must belong to the given VPC network. If subnetwork is not specified, the + subnetwork with the same name with the network will be used. + default_from_api: true + - name: 'tags' + type: Array + description: |- + Network tags applied to this Cloud Run service. + item_type: + type: String + - name: 'timeout' + type: String + description: |- + Max allowed time for an instance to respond to a request. + + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + default_from_api: true + validation: + regex: '^[0-9]+(?:\.[0-9]{1,9})?s$' + - name: 'serviceAccount' + type: String + description: |- + Email address of the IAM service account associated with the revision of the service. The service account represents the identity of the running revision, and determines what permissions the revision has. If not provided, the revision will use the project's default service account. + default_from_api: true + - name: 'containers' + type: Array + description: |- + Holds the containers that define the unit of execution for this Service. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + Name of the container specified as a DNS_LABEL. + - name: 'image' + type: String + description: |- + URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images + required: true + - name: 'command' + type: Array + description: |- + Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + item_type: + type: String + - name: 'args' + type: Array + description: |- + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + item_type: + type: String + - name: 'env' + type: Array + description: |- + List of environment variables to set in the container. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + Name of the environment variable. Must be a C_IDENTIFIER, and mnay not exceed 32768 characters. + required: true + - name: 'value' + type: String + description: |- + Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. 
Defaults to "", and the maximum length is 32768 bytes + # exactly_one_of: + # - template.0.containers.0.env.0.value + # - template.0.containers.0.env.0.valueSource + - name: 'valueSource' + type: NestedObject + description: |- + Source for the environment variable's value. + # exactly_one_of: + # - template.0.containers.0.env.0.value + # - template.0.containers.0.env.0.valueSource + properties: + - name: 'secretKeyRef' + type: NestedObject + description: |- + Selects a secret and a specific version from Cloud Secret Manager. + properties: + - name: 'secret' + type: String + description: |- + The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project. + required: true + - name: 'version' + type: String + description: |- + The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. + - name: 'resources' + type: NestedObject + description: |- + Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + default_from_api: true + properties: + - name: 'limits' + type: KeyValuePairs + description: |- + Only memory and CPU are supported. Use key `cpu` for CPU limit and `memory` for memory limit. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + default_from_api: true + - name: 'cpuIdle' + type: Boolean + description: |- + Determines whether CPU is only allocated during requests. True by default if the parent `resources` field is not set. However, if + `resources` is set, this field must be explicitly set to true to preserve the default behavior. 
+ - name: 'startupCpuBoost' + type: Boolean + description: |- + Determines whether CPU should be boosted on startup of a new container instance above the requested CPU threshold, this can help reduce cold-start latency. + - name: 'ports' + type: Array + description: |- + List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. + + If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + default_from_api: true + - name: 'containerPort' + type: Integer + description: |- + Port number the container listens on. This must be a valid TCP port number, 0 < containerPort < 65536. + max_size: 1 + - name: 'volumeMounts' + type: Array + description: |- + Volume to mount into the container's filesystem. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + This must match the Name of a Volume. + required: true + - name: 'mountPath' + type: String + description: |- + Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run + required: true + - name: 'workingDir' + type: String + description: |- + Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. 
+ - name: 'livenessProbe' + type: NestedObject + description: |- + Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + default_from_api: true + properties: + - name: 'initialDelaySeconds' + type: Integer + description: |- + Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + default_value: 0 + - name: 'timeoutSeconds' + type: Integer + description: |- + Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + default_value: 1 + - name: 'periodSeconds' + type: Integer + description: |- + How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds + default_value: 10 + - name: 'failureThreshold' + type: Integer + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + default_value: 3 + - name: 'httpGet' + type: NestedObject + description: |- + HTTPGet specifies the http request to perform. + send_empty_value: true + allow_empty_object: true + properties: + - name: 'path' + type: String + description: |- + Path to access on the HTTP server. Defaults to '/'. + default_value: "/" + - name: 'port' + type: Integer + description: |- + Port number to access on the container. Number must be in the range 1 to 65535. 
+ If not specified, defaults to the same value as container.ports[0].containerPort. + default_from_api: true + - name: 'httpHeaders' + type: Array + description: |- + Custom headers to set in the request. HTTP allows repeated headers. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + The header field name + required: true + - name: 'value' + type: String + description: |- + The header field value + send_empty_value: true + default_value: "" + - name: 'grpc' + type: NestedObject + description: |- + GRPC specifies an action involving a GRPC port. + send_empty_value: true + allow_empty_object: true + properties: + - name: 'port' + type: Integer + description: |- + Port number to access on the container. Number must be in the range 1 to 65535. + If not specified, defaults to the same value as container.ports[0].containerPort. + default_from_api: true + - name: 'service' + type: String + description: |- + The name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + If this is not specified, the default behavior is defined by gRPC. + - name: 'tcpSocket' + type: NestedObject + description: TCPSocketAction describes an action based on opening a socket + properties: + - name: 'port' + type: Integer + description: |- + Port number to access on the container. Must be in the range 1 to 65535. + If not specified, defaults to the exposed port of the container, which + is the value of container.ports[0].containerPort. + required: true + - name: 'startupProbe' + type: NestedObject + description: |- + Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + default_from_api: true + properties: + - name: 'initialDelaySeconds' + type: Integer + description: |- + Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + default_value: 0 + - name: 'timeoutSeconds' + type: Integer + description: |- + Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + default_value: 1 + - name: 'periodSeconds' + type: Integer + description: |- + How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds + default_value: 10 + - name: 'failureThreshold' + type: Integer + description: |- + Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1. + default_value: 3 + - name: 'httpGet' + type: NestedObject + description: |- + HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified. + send_empty_value: true + allow_empty_object: true + properties: + - name: 'path' + type: String + description: |- + Path to access on the HTTP server. Defaults to '/'. + default_value: "/" + - name: 'port' + type: Integer + description: |- + Port number to access on the container. Must be in the range 1 to 65535. + If not specified, defaults to the same value as container.ports[0].containerPort. 
+ default_from_api: true + - name: 'httpHeaders' + type: Array + description: |- + Custom headers to set in the request. HTTP allows repeated headers. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + The header field name + required: true + - name: 'value' + type: String + description: |- + The header field value + send_empty_value: true + default_value: "" + - name: 'tcpSocket' + type: NestedObject + description: |- + TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified. + send_empty_value: true + allow_empty_object: true + properties: + - name: 'port' + type: Integer + description: |- + Port number to access on the container. Must be in the range 1 to 65535. + If not specified, defaults to the same value as container.ports[0].containerPort. + default_from_api: true + - name: 'grpc' + type: NestedObject + description: |- + GRPC specifies an action involving a GRPC port. + send_empty_value: true + allow_empty_object: true + properties: + - name: 'port' + type: Integer + description: |- + Port number to access on the container. Number must be in the range 1 to 65535. + If not specified, defaults to the same value as container.ports[0].containerPort. + default_from_api: true + - name: 'service' + type: String + description: |- + The name of the service to place in the gRPC HealthCheckRequest + (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). + If this is not specified, the default behavior is defined by gRPC. + - name: 'dependsOn' + type: Array + description: |- + Containers which should be started before this container. If specified the container will wait to start until all containers with the listed names are healthy. + item_type: + type: String + - name: 'volumes' + type: Array + description: |- + A list of Volumes to make available to containers. 
+ item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: |- + Volume's name. + required: true + - name: 'secret' + type: NestedObject + description: |- + Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + # exactly_one_of: + # - template.0.volumes.0.secret + # - template.0.volumes.0.cloudSqlInstance + # - template.0.volumes.0.emptyDir + properties: + - name: 'secret' + type: String + description: |- + The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. + required: true + - name: 'defaultMode' + type: Integer + description: |- + Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. + - name: 'items' + type: Array + description: |- + If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. + item_type: + type: NestedObject + properties: + - name: 'path' + type: String + description: |- + The relative path of the secret in the container. + required: true + - name: 'version' + type: String + description: |- + The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version + - name: 'mode' + type: Integer + description: |- + Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. 
+ - name: 'cloudSqlInstance' + type: NestedObject + description: |- + For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. + # exactly_one_of: + # - template.0.volumes.0.secret + # - template.0.volumes.0.cloudSqlInstance + # - template.0.volumes.0.emptyDir + # - template.0.volumes.0.gcs + properties: + - name: 'instances' + type: Array + description: |- + The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} + is_set: true + item_type: + type: String + - name: 'emptyDir' + type: NestedObject + description: |- + Ephemeral storage used as a shared volume. + min_version: 'beta' + properties: + - name: 'medium' + type: Enum + description: |- + The different types of medium supported for EmptyDir. + default_value: "MEMORY" + enum_values: + - 'MEMORY' + - name: 'sizeLimit' + type: String + description: |- + Limit on the storage usable by this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. This field's values are of the 'Quantity' k8s type: https://kubernetes.io/docs/reference/kubernetes-api/common-definitions/quantity/. The default is nil which means that the limit is undefined. More info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir. + - name: 'gcs' + type: NestedObject + description: |- + Cloud Storage bucket mounted as a volume using GCSFuse. This feature is only supported in the gen2 execution environment and requires launch-stage to be set to ALPHA or BETA. 
+ # exactly_one_of: + # - template.0.volumes.0.secret + # - template.0.volumes.0.cloudSqlInstance + # - template.0.volumes.0.emptyDir + # - template.0.volumes.0.gcs + properties: + - name: 'bucket' + type: String + description: GCS Bucket name + required: true + - name: 'readOnly' + type: Boolean + description: If true, mount the GCS bucket as read-only + required: false + - name: 'nfs' + type: NestedObject + description: Represents an NFS mount. + properties: + - name: 'server' + type: String + description: Hostname or IP address of the NFS server + required: true + - name: 'path' + type: String + description: Path that is exported by the NFS server. + required: true + - name: 'readOnly' + type: Boolean + description: If true, mount the NFS volume as read only + required: false + - name: 'executionEnvironment' + type: Enum + description: |- + The sandbox environment to host this Revision. + enum_values: + - 'EXECUTION_ENVIRONMENT_GEN1' + - 'EXECUTION_ENVIRONMENT_GEN2' + - name: 'encryptionKey' + type: String + description: |- + A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek + - name: 'maxInstanceRequestConcurrency' + type: Integer + description: |- + Sets the maximum number of requests that each serving instance can receive. + default_from_api: true + - name: 'sessionAffinity' + type: Boolean + description: |- + Enables session affinity. For more information, go to https://cloud.google.com/run/docs/configuring/session-affinity + - name: 'traffic' + type: Array + description: |- + Specifies how to distribute traffic over a collection of Revisions belonging to the Service. If traffic is empty or not provided, defaults to 100% traffic to the latest Ready Revision. + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'type' + type: Enum + description: | + The allocation type for this traffic target. 
+ enum_values: + - 'TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST' + - 'TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION' + - name: 'revision' + type: String + description: | + Revision to which to send this portion of traffic, if traffic allocation is by revision. + - name: 'percent' + type: Integer + description: | + Specifies percent of the traffic to this Revision. This defaults to zero if unspecified. + default_from_api: true + - name: 'tag' + type: String + description: | + Indicates a string to be part of the URI to exclusively reference this target. + - name: 'observedGeneration' + type: String + description: | + The generation of this Service currently serving traffic. See comments in reconciling for additional information on reconciliation process in Cloud Run. Please note that unlike v1, this is an int64 value. As with most Google APIs, its JSON representation will be a string instead of an integer. + output: true + - name: 'terminalCondition' + type: NestedObject + description: | + The Condition of this Service, containing its readiness status, and detailed error information in case it did not reach a serving state. See comments in reconciling for additional information on reconciliation process in Cloud Run. + output: true + properties: + - name: 'type' + type: String + description: |- + type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. + output: true + - name: 'state' + type: String + description: |- + State of the condition. + output: true + - name: 'message' + type: String + description: |- + Human readable message indicating details about the current status. + output: true + - name: 'lastTransitionTime' + type: Time + description: |- + Last time the condition transitioned from one status to another. 
+ output: true + - name: 'severity' + type: String + description: |- + How to interpret failures of this condition, one of Error, Warning, Info + output: true + - name: 'reason' + type: String + description: |- + A common (service-level) reason for this condition. + output: true + - name: 'revisionReason' + type: String + description: |- + A reason for the revision condition. + output: true + - name: 'executionReason' + type: String + description: |- + A reason for the execution condition. + output: true + - name: 'conditions' + type: Array + description: |- + The Conditions of all other associated sub-resources. They contain additional diagnostics information in case the Service does not reach its Serving state. See comments in reconciling for additional information on reconciliation process in Cloud Run. + output: true + item_type: + type: NestedObject + properties: + - name: 'type' + type: String + description: |- + type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. + output: true + - name: 'state' + type: String + description: |- + State of the condition. + output: true + - name: 'message' + type: String + description: |- + Human readable message indicating details about the current status. + output: true + - name: 'lastTransitionTime' + type: Time + description: |- + Last time the condition transitioned from one status to another. + + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". 
+ output: true + - name: 'severity' + type: String + description: |- + How to interpret failures of this condition, one of Error, Warning, Info + output: true + - name: 'reason' + type: String + description: |- + A common (service-level) reason for this condition. + output: true + - name: 'revisionReason' + type: String + description: |- + A reason for the revision condition. + output: true + - name: 'executionReason' + type: String + description: |- + A reason for the execution condition. + output: true + - name: 'latestReadyRevision' + type: String + description: | + Name of the latest revision that is serving traffic. See comments in reconciling for additional information on reconciliation process in Cloud Run. + output: true + - name: 'latestCreatedRevision' + type: String + description: | + Name of the last created revision. See comments in reconciling for additional information on reconciliation process in Cloud Run. + output: true + - name: 'trafficStatuses' + type: Array + description: |- + Detailed status information for corresponding traffic targets. See comments in reconciling for additional information on reconciliation process in Cloud Run. + output: true + item_type: + type: NestedObject + properties: + - name: 'type' + type: String + description: |- + The allocation type for this traffic target. + output: true + - name: 'revision' + type: String + description: |- + Revision to which this traffic is sent. + output: true + - name: 'percent' + type: Integer + description: |- + Specifies percent of the traffic to this Revision. + output: true + - name: 'tag' + type: String + description: |- + Indicates the string used in the URI to exclusively reference this target. + output: true + - name: 'uri' + type: String + description: |- + Displays the target URI. + output: true + - name: 'uri' + type: String + description: | + The main URI in which this Service is serving traffic. 
+ output: true + - name: 'reconciling' + type: Boolean + description: | + Returns true if the Service is currently being acted upon by the system to bring it into the desired state. + + When a new Service is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Service to the desired serving state. This process is called reconciliation. While reconciliation is in process, observedGeneration, latest_ready_revison, trafficStatuses, and uri will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the serving state matches the Service, or there was an error, and reconciliation failed. This state can be found in terminalCondition.state. + + If reconciliation succeeded, the following fields will match: traffic and trafficStatuses, observedGeneration and generation, latestReadyRevision and latestCreatedRevision. + + If reconciliation failed, trafficStatuses, observedGeneration, and latestReadyRevision will have the state of the last serving revision, or empty for newly created Services. Additional information on the failure can be found in terminalCondition and conditions. + output: true + - name: 'etag' + type: String + description: | + A system-generated fingerprint for this version of the resource. May be used to detect modification conflict during updates. + output: true diff --git a/mmv1/products/cloudrunv2/go_product.yaml b/mmv1/products/cloudrunv2/go_product.yaml new file mode 100644 index 000000000000..f565c2f94e2e --- /dev/null +++ b/mmv1/products/cloudrunv2/go_product.yaml @@ -0,0 +1,24 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'CloudRunV2' +display_name: 'Cloud Run (v2 API)' +versions: + - name: 'ga' + base_url: 'https://run.googleapis.com/v2/' + - name: 'beta' + base_url: 'https://run.googleapis.com/v2/' +scopes: + - 'https://www.googleapis.com/auth/cloud-platform' diff --git a/mmv1/products/compute/go_NetworkAttachment.yaml b/mmv1/products/compute/go_NetworkAttachment.yaml index 1afc76217e11..635bee8c5862 100644 --- a/mmv1/products/compute/go_NetworkAttachment.yaml +++ b/mmv1/products/compute/go_NetworkAttachment.yaml @@ -57,8 +57,8 @@ examples: org_id: 'ORG_ID' billing_account: 'BILLING_ACCT' - name: 'network_attachment_instance_usage' - min_version: beta primary_resource_id: 'default' + min_version: 'beta' vars: resource_name: 'basic-network-attachment' network_name: 'basic-network' diff --git a/mmv1/products/compute/go_ResourcePolicy.yaml b/mmv1/products/compute/go_ResourcePolicy.yaml index c0cd3c3bee7c..38a7877c8d83 100644 --- a/mmv1/products/compute/go_ResourcePolicy.yaml +++ b/mmv1/products/compute/go_ResourcePolicy.yaml @@ -23,7 +23,8 @@ references: docs: base_url: 'projects/{{project}}/regions/{{region}}/resourcePolicies' has_self_link: true -immutable: true +update_url: 'projects/{{project}}/regions/{{region}}/resourcePolicies/{{name}}' +update_verb: 'PATCH' timeouts: insert_minutes: 20 update_minutes: 20 diff --git a/mmv1/products/dialogflowcx/go_Agent.yaml b/mmv1/products/dialogflowcx/go_Agent.yaml new file mode 100644 index 000000000000..9fe47dc0d496 --- /dev/null +++ 
b/mmv1/products/dialogflowcx/go_Agent.yaml @@ -0,0 +1,216 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Agent' +description: | + Agents are best described as Natural Language Understanding (NLU) modules that transform user requests into actionable data. You can include agents in your app, product, or service to determine user intent and respond to the user in a natural way. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/dialogflow/cx/docs' + api: 'https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents' +docs: +id_format: 'projects/{{project}}/locations/{{location}}/agents/{{name}}' +base_url: 'projects/{{project}}/locations/{{location}}/agents' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/agents/{{name}}' +timeouts: + insert_minutes: 40 + update_minutes: 40 + delete_minutes: 20 +custom_code: +skip_sweeper: true +examples: + - name: 'dialogflowcx_agent_full' + primary_resource_id: 'full_agent' + vars: + agent_name: 'dialogflowcx-agent' + bucket_name: 'dialogflowcx-bucket' + ignore_read_extra: + - 'git_integration_settings.0.github_settings.0.access_token' +parameters: +properties: + - name: 'name' + type: String + description: | + The unique identifier of the agent. 
+ output: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'location' + type: String + description: | + The name of the location this agent is located in. + + ~> **Note:** The first time you are deploying an Agent in your project you must configure location settings. + This is a one time step but at the moment you can only [configure location settings](https://cloud.google.com/dialogflow/cx/docs/concept/region#location-settings) via the Dialogflow CX console. + Another options is to use global location so you don't need to manually configure location settings. + url_param_only: true + required: true + immutable: true + - name: 'displayName' + type: String + description: | + The human-readable name of the agent, unique within the location. + required: true + - name: 'defaultLanguageCode' + type: String + description: | + The default language of the agent as a language tag. [See Language Support](https://cloud.google.com/dialogflow/cx/docs/reference/language) + for a list of the currently supported language codes. This field cannot be updated after creation. + required: true + immutable: true + - name: 'supportedLanguageCodes' + type: Array + description: | + The list of all languages supported by this agent (except for the default_language_code). + item_type: + type: String + - name: 'timeZone' + type: String + description: | + The time zone of this agent from the [time zone database](https://www.iana.org/time-zones), e.g., America/New_York, + Europe/Paris. + required: true + - name: 'description' + type: String + description: | + The description of this agent. The maximum length is 500 characters. If exceeded, the request is rejected. + validation: + function: 'validation.StringLenBetween(0, 500)' + - name: 'avatarUri' + type: String + description: | + The URI of the agent's avatar. Avatars are used throughout the Dialogflow console and in the self-hosted Web Demo integration. 
+ - name: 'speechToTextSettings' + type: NestedObject + description: | + Settings related to speech recognition. + properties: + - name: 'enableSpeechAdaptation' + type: Boolean + description: | + Whether to use speech adaptation for speech recognition. + - name: 'startFlow' + type: String + description: | + Name of the start flow in this agent. A start flow will be automatically created when the agent is created, and can only be deleted by deleting the agent. Format: projects//locations//agents//flows/. + output: true + - name: 'securitySettings' + type: String + description: | + Name of the SecuritySettings reference for the agent. Format: projects//locations//securitySettings/. + - name: 'enableStackdriverLogging' + type: Boolean + description: | + Determines whether this agent should log conversation queries. + - name: 'enableSpellCorrection' + type: Boolean + description: | + Indicates if automatic spell correction is enabled in detect intent requests. + - name: 'advancedSettings' + type: NestedObject + description: | + Hierarchical advanced settings for this agent. The settings exposed at the lower level overrides the settings exposed at the higher level. + Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. + default_from_api: true + properties: + - name: 'audioExportGcsDestination' + type: NestedObject + description: | + If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: + * Agent level + * Flow level + properties: + - name: 'uri' + type: String + description: | + The Google Cloud Storage URI for the exported objects. Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation. + Format: gs://bucket/object-name-or-prefix + - name: 'dtmfSettings' + type: NestedObject + description: | + Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. 
DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: + * Agent level + * Flow level + * Page level + * Parameter level + properties: + - name: 'enabled' + type: Boolean + description: | + If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). + - name: 'maxDigits' + type: Integer + description: | + Max length of DTMF digits. + - name: 'finishDigit' + type: String + description: | + The digit that terminates a DTMF digit sequence. + - name: 'gitIntegrationSettings' + type: NestedObject + description: | + Git integration settings for this agent. + allow_empty_object: true + properties: + - name: 'githubSettings' + type: NestedObject + description: | + Settings of integration with GitHub. + custom_flatten: 'templates/terraform/custom_flatten/go/dialogflowcx_agent_git_integration_settings_github_settings.go.tmpl' + properties: + - name: 'displayName' + type: String + description: | + The unique repository display name for the GitHub repository. + - name: 'repositoryUri' + type: String + description: | + The GitHub repository URI related to the agent. + - name: 'trackingBranch' + type: String + description: | + The branch of the GitHub repository tracked for this agent. + - name: 'accessToken' + type: String + description: | + The access token used to authenticate the access to the GitHub repository. + ignore_read: true + sensitive: true + - name: 'branches' + type: Array + description: | + A list of branches configured to be used from Dialogflow. + item_type: + type: String + - name: 'textToSpeechSettings' + type: NestedObject + description: | + Settings related to speech synthesizing. 
+ allow_empty_object: true + properties: + - name: 'synthesizeSpeechConfigs' + type: String + description: | + Configuration of how speech should be synthesized, mapping from [language](https://cloud.google.com/dialogflow/cx/docs/reference/language) to [SynthesizeSpeechConfig](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents#synthesizespeechconfig). + These settings affect: + * The phone gateway synthesize configuration set via Agent.text_to_speech_settings. + * How speech is synthesized when invoking session APIs. `Agent.text_to_speech_settings` only applies if `OutputAudioConfig.synthesize_speech_config` is not specified. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' diff --git a/mmv1/products/dialogflowcx/go_EntityType.yaml b/mmv1/products/dialogflowcx/go_EntityType.yaml new file mode 100644 index 000000000000..b82842e7ccbc --- /dev/null +++ b/mmv1/products/dialogflowcx/go_EntityType.yaml @@ -0,0 +1,141 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'EntityType' +description: | + Entities are extracted from user input and represent parameters that are meaningful to your application. + For example, a date range, a proper name such as a geographic location or landmark, and so on. Entities represent actionable data for your application. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/dialogflow/cx/docs' + api: 'https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.entityTypes' +docs: +id_format: '{{parent}}/entityTypes/{{name}}' +base_url: '{{parent}}/entityTypes' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{parent}}/entityTypes/{{name}}' +timeouts: + insert_minutes: 40 + update_minutes: 40 + delete_minutes: 20 +custom_code: + pre_create: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_read: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/dialogflowcx_entity_type.go.tmpl' +skip_sweeper: true +examples: + - name: 'dialogflowcx_entity_type_full' + primary_resource_id: 'basic_entity_type' + vars: + agent_name: 'dialogflowcx-agent' +parameters: + - name: 'parent' + type: String + description: | + The agent to create a entity type for. + Format: projects//locations//agents/. + url_param_only: true + immutable: true + - name: 'languageCode' + type: String + description: | + The language of the following fields in entityType: + EntityType.entities.value + EntityType.entities.synonyms + EntityType.excluded_phrases.value + If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used. 
+ url_param_only: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The unique identifier of the entity type. + Format: projects//locations//agents//entityTypes/. + output: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'displayName' + type: String + description: | + The human-readable name of the entity type, unique within the agent. + required: true + validation: + function: 'validation.StringLenBetween(0, 64)' + - name: 'kind' + type: Enum + description: | + Indicates whether the entity type can be automatically expanded. + * KIND_MAP: Map entity types allow mapping of a group of synonyms to a canonical value. + * KIND_LIST: List entity types contain a set of entries that do not map to canonical values. However, list entity types can contain references to other entity types (with or without aliases). + * KIND_REGEXP: Regexp entity types allow to specify regular expressions in entries values. + required: true + enum_values: + - 'KIND_MAP' + - 'KIND_LIST' + - 'KIND_REGEXP' + - name: 'autoExpansionMode' + type: Enum + description: | + Represents kinds of entities. + * AUTO_EXPANSION_MODE_UNSPECIFIED: Auto expansion disabled for the entity. + * AUTO_EXPANSION_MODE_DEFAULT: Allows an agent to recognize values that have not been explicitly listed in the entity. + enum_values: + - 'AUTO_EXPANSION_MODE_DEFAULT' + - 'AUTO_EXPANSION_MODE_UNSPECIFIED' + - name: 'entities' + type: Array + description: | + The collection of entity entries associated with the entity type. + required: true + item_type: + type: NestedObject + properties: + - name: 'value' + type: String + description: | + The primary value associated with this entity entry. For example, if the entity type is vegetable, the value could be scallions. + For KIND_MAP entity types: A canonical value to be used in place of synonyms. 
+ For KIND_LIST entity types: A string that can contain references to other entity types (with or without aliases). + - name: 'synonyms' + type: Array + description: | + A collection of value synonyms. For example, if the entity type is vegetable, and value is scallions, a synonym could be green onions. + For KIND_LIST entity types: This collection must contain exactly one synonym equal to value. + item_type: + type: String + - name: 'excludedPhrases' + type: Array + description: | + Collection of exceptional words and phrases that shouldn't be matched. For example, if you have a size entity type with entry giant(an adjective), you might consider adding giants(a noun) as an exclusion. + If the kind of entity type is KIND_MAP, then the phrases specified by entities and excluded phrases should be mutually exclusive. + item_type: + type: NestedObject + properties: + - name: 'value' + type: String + description: | + The word or phrase to be excluded. + - name: 'enableFuzzyExtraction' + type: Boolean + description: | + Enables fuzzy entity extraction during classification. + - name: 'redact' + type: Boolean + description: | + Indicates whether parameters of the entity type should be redacted in log. If redaction is enabled, page parameters and intent parameters referring to the entity type will be replaced by parameter name when logging. diff --git a/mmv1/products/dialogflowcx/go_Environment.yaml b/mmv1/products/dialogflowcx/go_Environment.yaml new file mode 100644 index 000000000000..c9b61f4288fe --- /dev/null +++ b/mmv1/products/dialogflowcx/go_Environment.yaml @@ -0,0 +1,119 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Environment' +description: | + Represents an environment for an agent. You can create multiple versions of your agent and publish them to separate environments. + When you edit an agent, you are editing the draft agent. At any point, you can save the draft agent as an agent version, which is an immutable snapshot of your agent. + When you save the draft agent, it is published to the default environment. When you create agent versions, you can publish them to custom environments. You can create a variety of custom environments for testing, development, production, etc. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/dialogflow/cx/docs' + api: 'https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.environments' +docs: +id_format: '{{parent}}/environments/{{name}}' +base_url: '{{parent}}/environments' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{parent}}/environments/{{name}}' +timeouts: + insert_minutes: 40 + update_minutes: 40 + delete_minutes: 20 +async: + actions: ['create', 'update'] + type: 'OpAsync' + operation: + full_url: 'https://{{location}}-dialogflow.googleapis.com/v3/{{op_id}}' + path: 'name' + wait_ms: 1000 + timeouts: + insert_minutes: 60 + update_minutes: 60 + delete_minutes: 20 + result: + path: 'response' + resource_inside_response: true + error: + path: 'error' + message: 'message' +custom_code: + pre_create: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_read: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/dialogflowcx_environment.go.tmpl' +custom_diff: + - 'tpgresource.DefaultProviderProject' +skip_sweeper: true +examples: + - name: 'dialogflowcx_environment_full' + primary_resource_id: 'development' + vars: + agent_name: 'dialogflowcx-agent' + - name: 'dialogflowcx_environment_regional' + primary_resource_id: 'development' + vars: + agent_name: 'issue-12880' + skip_docs: true +parameters: + - name: 'parent' + type: String + description: | + The Agent to create an Environment for. + Format: projects//locations//agents/. + url_param_only: true + immutable: true +properties: + - name: 'name' + type: String + description: | + The name of the environment. 
+ output: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'displayName' + type: String + description: | + The human-readable name of the environment (unique in an agent). Limit of 64 characters. + required: true + validation: + function: 'validation.StringLenBetween(0, 64)' + - name: 'description' + type: String + description: | + The human-readable description of the environment. The maximum length is 500 characters. If exceeded, the request is rejected. + validation: + function: 'validation.StringLenBetween(0, 500)' + - name: 'versionConfigs' + type: Array + description: | + A list of configurations for flow versions. You should include version configs for all flows that are reachable from [Start Flow][Agent.start_flow] in the agent. Otherwise, an error will be returned. + required: true + item_type: + type: NestedObject + properties: + - name: 'version' + type: String + description: | + Format: projects/{{project}}/locations/{{location}}/agents/{{agent}}/flows/{{flow}}/versions/{{version}}. + required: true + - name: 'updateTime' + type: Time + description: + 'Update time of this environment. A timestamp in RFC3339 UTC "Zulu" + format, with nanosecond resolution and up to nine fractional digits. + Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".' + output: true diff --git a/mmv1/products/dialogflowcx/go_Flow.yaml b/mmv1/products/dialogflowcx/go_Flow.yaml new file mode 100644 index 000000000000..d06bb82ed8b1 --- /dev/null +++ b/mmv1/products/dialogflowcx/go_Flow.yaml @@ -0,0 +1,586 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Flow' +description: | + Flows represents the conversation flows when you build your chatbot agent. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/dialogflow/cx/docs' + api: 'https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.flows' +docs: +id_format: '{{parent}}/flows/{{name}}' +base_url: '{{parent}}/flows' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{parent}}/flows/{{name}}' +timeouts: + insert_minutes: 40 + update_minutes: 40 + delete_minutes: 20 +custom_code: + pre_create: 'templates/terraform/pre_create/go/dialogflowcx_set_location_skip_default_obj.go.tmpl' + pre_read: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/dialogflowcx_set_location_skip_default_obj.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/dialogflowcx_flow.go.tmpl' +skip_sweeper: true +examples: + - name: 'dialogflowcx_flow_basic' + primary_resource_id: 'basic_flow' + vars: + agent_name: 'dialogflowcx-agent' + - name: 'dialogflowcx_flow_full' + primary_resource_id: 'basic_flow' + vars: + agent_name: 'dialogflowcx-agent' + bucket_name: 'dialogflowcx-bucket' + - name: 'dialogflowcx_flow_default_start_flow' + primary_resource_id: 'default_start_flow' + vars: + agent_name: 'dialogflowcx-agent' + skip_docs: true +virtual_fields: + - name: 
'is_default_start_flow' + description: | + Marks this as the [Default Start Flow](https://cloud.google.com/dialogflow/cx/docs/concept/flow#start) for an agent. When you create an agent, the Default Start Flow is created automatically. + The Default Start Flow cannot be deleted; deleting the `google_dialogflow_cx_flow` resource does nothing to the underlying GCP resources. + + ~> Avoid having multiple `google_dialogflow_cx_flow` resources linked to the same agent with `is_default_start_flow = true` because they will compete to control a single Default Start Flow resource in GCP. + type: Boolean + immutable: true +parameters: + - name: 'parent' + type: String + description: | + The agent to create a flow for. + Format: projects//locations//agents/. + url_param_only: true + immutable: true + - name: 'languageCode' + type: String + description: | + The language of the following fields in flow: + Flow.event_handlers.trigger_fulfillment.messages + Flow.event_handlers.trigger_fulfillment.conditional_cases + Flow.transition_routes.trigger_fulfillment.messages + Flow.transition_routes.trigger_fulfillment.conditional_cases + If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used. + immutable: true +properties: + - name: 'name' + type: String + description: | + The unique identifier of the flow. + Format: projects//locations//agents//flows/. + output: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'displayName' + type: String + description: | + The human-readable name of the flow. + required: true + - name: 'description' + type: String + description: | + The description of the flow. The maximum length is 500 characters. If exceeded, the request is rejected. 
+ validation: + function: 'validation.StringLenBetween(0, 500)' + - name: 'transitionRoutes' + type: Array + description: | + A flow's transition routes serve two purposes: + They are responsible for matching the user's first utterances in the flow. + They are inherited by every page's [transition routes][Page.transition_routes] and can support use cases such as the user saying "help" or "can I talk to a human?", which can be handled in a common way regardless of the current page. Transition routes defined in the page have higher priority than those defined in the flow. + + TransitionRoutes are evalauted in the following order: + TransitionRoutes with intent specified. + TransitionRoutes with only condition specified. + TransitionRoutes with intent specified are inherited by pages in the flow. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The unique identifier of this transition route. + output: true + - name: 'intent' + type: String + description: | + The unique identifier of an Intent. + Format: projects//locations//agents//intents/. Indicates that the transition can only happen when the given intent is matched. At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled. + - name: 'condition' + type: String + description: | + The condition to evaluate against form parameters or session parameters. + At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled. + - name: 'triggerFulfillment' + type: NestedObject + description: | + The fulfillment to call when the condition is satisfied. At least one of triggerFulfillment and target must be specified. When both are defined, triggerFulfillment is executed first. 
+ properties: + - name: 'messages' + type: Array + description: | + The list of rich message responses to present to the user. + item_type: + type: NestedObject + properties: + - name: 'channel' + type: String + description: | + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + - name: 'text' + type: NestedObject + description: | + The text response message. + properties: + - name: 'text' + type: Array + description: | + A collection of text responses. + item_type: + type: String + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + # This can be an arbitrary json blob, so we use a string instead of a NestedObject. + output: true + - name: 'payload' + type: String + description: | + A custom, platform-specific payload. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conversationSuccess' + type: NestedObject + description: | + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. 
+ * In a webhook response when you determine that you handled the customer issue. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'outputAudioText' + type: NestedObject + description: | + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + properties: + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'text' + type: String + description: | + The raw text to be synthesized. + - name: 'ssml' + type: String + description: | + The SSML text to be synthesized. For more information, see SSML. + - name: 'liveAgentHandoff' + type: NestedObject + description: | + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. 
+ state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'playAudio' + type: NestedObject + description: | + Specifies an audio clip to be played by the client as part of the response. + properties: + - name: 'audioUri' + type: String + description: | + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + required: true + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'telephonyTransferCall' + type: NestedObject + description: | + Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. + properties: + - name: 'phoneNumber' + type: String + description: | + Transfer the call to a phone number in E.164 format. + required: true + - name: 'webhook' + type: String + description: | + The webhook to call. Format: projects//locations//agents//webhooks/. + - name: 'returnPartialResponses' + type: Boolean + description: | + Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. + - name: 'tag' + type: String + description: | + The tag used by the webhook to identify which fulfillment is being called. 
This field is required if webhook is specified. + - name: 'setParameterActions' + type: Array + description: | + Set parameter values before executing the webhook. + item_type: + type: NestedObject + properties: + - name: 'parameter' + type: String + description: | + Display name of the parameter. + - name: 'value' + type: String + description: | + The new JSON-encoded value of the parameter. A null value clears the parameter. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conditionalCases' + type: Array + description: | + Conditional cases for this fulfillment. + item_type: + type: NestedObject + properties: + - name: 'cases' + type: String + description: | + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'targetPage' + type: String + description: | + The target page to transition to. + Format: projects//locations//agents//flows//pages/. + - name: 'targetFlow' + type: String + description: | + The target flow to transition to. + Format: projects//locations//agents//flows/. + - name: 'eventHandlers' + type: Array + description: | + A flow's event handlers serve two purposes: + They are responsible for handling events (e.g. no match, webhook errors) in the flow. 
+ They are inherited by every page's [event handlers][Page.event_handlers], which can be used to handle common events regardless of the current page. Event handlers defined in the page have higher priority than those defined in the flow. + Unlike transitionRoutes, these handlers are evaluated on a first-match basis. The first one that matches the event get executed, with the rest being ignored. + default_from_api: true + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The unique identifier of this event handler. + output: true + - name: 'event' + type: String + description: | + The name of the event to handle. + - name: 'triggerFulfillment' + type: NestedObject + description: | + The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. It is invalid to specify such fulfillment for a handler handling webhooks. + properties: + - name: 'messages' + type: Array + description: | + The list of rich message responses to present to the user. + item_type: + type: NestedObject + properties: + - name: 'channel' + type: String + description: | + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + - name: 'text' + type: NestedObject + description: | + The text response message. + properties: + - name: 'text' + type: Array + description: | + A collection of text responses. + item_type: + type: String + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + # This can be an arbitrary json blob, so we use a string instead of a NestedObject. + output: true + - name: 'payload' + type: String + description: | + A custom, platform-specific payload. 
+ state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conversationSuccess' + type: NestedObject + description: | + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'outputAudioText' + type: NestedObject + description: | + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + properties: + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'text' + type: String + description: | + The raw text to be synthesized. 
+ - name: 'ssml' + type: String + description: | + The SSML text to be synthesized. For more information, see SSML. + - name: 'liveAgentHandoff' + type: NestedObject + description: | + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'playAudio' + type: NestedObject + description: | + Specifies an audio clip to be played by the client as part of the response. + properties: + - name: 'audioUri' + type: String + description: | + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + required: true + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'telephonyTransferCall' + type: NestedObject + description: | + Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. 
+ properties: + - name: 'phoneNumber' + type: String + description: | + Transfer the call to a phone number in E.164 format. + required: true + - name: 'webhook' + type: String + description: | + The webhook to call. Format: projects//locations//agents//webhooks/. + - name: 'returnPartialResponses' + type: Boolean + description: | + Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. + - name: 'tag' + type: String + description: | + The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. + - name: 'setParameterActions' + type: Array + description: | + Set parameter values before executing the webhook. + item_type: + type: NestedObject + properties: + - name: 'parameter' + type: String + description: | + Display name of the parameter. + - name: 'value' + type: String + description: | + The new JSON-encoded value of the parameter. A null value clears the parameter. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conditionalCases' + type: Array + description: | + Conditional cases for this fulfillment. + item_type: + type: NestedObject + properties: + - name: 'cases' + type: String + description: | + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. 
The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'targetPage' + type: String + description: | + The target page to transition to. + Format: projects//locations//agents//flows//pages/. + - name: 'targetFlow' + type: String + description: | + The target flow to transition to. + Format: projects//locations//agents//flows/. + - name: 'transitionRouteGroups' + type: Array + description: | + A flow's transition route group serve two purposes: + They are responsible for matching the user's first utterances in the flow. + They are inherited by every page's [transition route groups][Page.transition_route_groups]. Transition route groups defined in the page have higher priority than those defined in the flow. + Format:projects//locations//agents//flows//transitionRouteGroups/. + item_type: + type: String + - name: 'nluSettings' + type: NestedObject + description: | + NLU related settings of the flow. + properties: + - name: 'modelType' + type: Enum + description: | + Indicates the type of NLU model. + * MODEL_TYPE_STANDARD: Use standard NLU model. + * MODEL_TYPE_ADVANCED: Use advanced NLU model. + enum_values: + - 'MODEL_TYPE_STANDARD' + - 'MODEL_TYPE_ADVANCED' + - name: 'classificationThreshold' + type: Double + description: | + To filter out false positive results and still get variety in matched natural language inputs for your agent, you can tune the machine learning classification threshold. + If the returned score value is less than the threshold value, then a no-match event will be triggered. 
The score values range from 0.0 (completely uncertain) to 1.0 (completely certain). If set to 0.0, the default of 0.3 is used. + - name: 'modelTrainingMode' + type: Enum + description: | + Indicates NLU model training mode. + * MODEL_TRAINING_MODE_AUTOMATIC: NLU model training is automatically triggered when a flow gets modified. User can also manually trigger model training in this mode. + * MODEL_TRAINING_MODE_MANUAL: User needs to manually trigger NLU model training. Best for large flows whose models take long time to train. + enum_values: + - 'MODEL_TRAINING_MODE_AUTOMATIC' + - 'MODEL_TRAINING_MODE_MANUAL' + - name: 'advancedSettings' + type: NestedObject + description: | + Hierarchical advanced settings for this flow. The settings exposed at the lower level overrides the settings exposed at the higher level. + Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. + properties: + - name: 'audioExportGcsDestination' + type: NestedObject + description: | + If present, incoming audio is exported by Dialogflow to the configured Google Cloud Storage destination. Exposed at the following levels: + * Agent level + * Flow level + properties: + - name: 'uri' + type: String + description: | + The Google Cloud Storage URI for the exported objects. Whether a full object name, or just a prefix, its usage depends on the Dialogflow operation. + Format: gs://bucket/object-name-or-prefix + - name: 'dtmfSettings' + type: NestedObject + description: | + Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: + * Agent level + * Flow level + * Page level + * Parameter level + properties: + - name: 'enabled' + type: Boolean + description: | + If true, incoming audio is processed for DTMF (dual tone multi frequency) events. 
For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). + - name: 'maxDigits' + type: Integer + description: | + Max length of DTMF digits. + - name: 'finishDigit' + type: String + description: | + The digit that terminates a DTMF digit sequence. diff --git a/mmv1/products/dialogflowcx/go_Intent.yaml b/mmv1/products/dialogflowcx/go_Intent.yaml new file mode 100644 index 000000000000..7d027647d9e3 --- /dev/null +++ b/mmv1/products/dialogflowcx/go_Intent.yaml @@ -0,0 +1,193 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Intent' +description: | + An intent represents a user's intent to interact with a conversational agent. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/dialogflow/cx/docs' + api: 'https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.intents' +docs: +id_format: '{{parent}}/intents/{{name}}' +base_url: '{{parent}}/intents' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{parent}}/intents/{{name}}' +timeouts: + insert_minutes: 40 + update_minutes: 40 + delete_minutes: 20 +custom_code: + pre_create: 'templates/terraform/pre_create/go/dialogflowcx_set_location_skip_default_obj.go.tmpl' + pre_read: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_delete: 'templates/terraform/pre_delete/go/dialogflowcx_set_location_skip_default_obj.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/dialogflowcx_intent.go.tmpl' +skip_sweeper: true +examples: + - name: 'dialogflowcx_intent_full' + primary_resource_id: 'basic_intent' + vars: + agent_name: 'dialogflowcx-agent' + - name: 'dialogflowcx_intent_default_negative_intent' + primary_resource_id: 'default_negative_intent' + vars: + agent_name: 'dialogflowcx-agent' + skip_docs: true + - name: 'dialogflowcx_intent_default_welcome_intent' + primary_resource_id: 'default_welcome_intent' + vars: + agent_name: 'dialogflowcx-agent' + skip_docs: true +virtual_fields: + - name: 'is_default_welcome_intent' + description: | + Marks this as the [Default Welcome Intent](https://cloud.google.com/dialogflow/cx/docs/concept/intent#welcome) for an agent. When you create an agent, a Default Welcome Intent is created automatically. + The Default Welcome Intent cannot be deleted; deleting the `google_dialogflow_cx_intent` resource does nothing to the underlying GCP resources. 
+ + ~> Avoid having multiple `google_dialogflow_cx_intent` resources linked to the same agent with `is_default_welcome_intent = true` because they will compete to control a single Default Welcome Intent resource in GCP. + type: Boolean + immutable: true + - name: 'is_default_negative_intent' + description: | + Marks this as the [Default Negative Intent](https://cloud.google.com/dialogflow/cx/docs/concept/intent#negative) for an agent. When you create an agent, a Default Negative Intent is created automatically. + The Default Negative Intent cannot be deleted; deleting the `google_dialogflow_cx_intent` resource does nothing to the underlying GCP resources. + + ~> Avoid having multiple `google_dialogflow_cx_intent` resources linked to the same agent with `is_default_negative_intent = true` because they will compete to control a single Default Negative Intent resource in GCP. + type: Boolean + immutable: true +parameters: + - name: 'parent' + type: String + description: | + The agent to create an intent for. + Format: projects//locations//agents/. + url_param_only: true + immutable: true + - name: 'languageCode' + type: String + description: | + The language of the following fields in intent: + Intent.training_phrases.parts.text + If not specified, the agent's default language is used. Many languages are supported. Note: languages must be enabled in the agent before they can be used. + immutable: true +properties: + - name: 'name' + type: String + description: | + The unique identifier of the intent. + Format: projects//locations//agents//intents/. + output: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'displayName' + type: String + description: | + The human-readable name of the intent, unique within the agent. 
+ required: true + validation: + function: 'validation.StringLenBetween(0, 64)' + - name: 'trainingPhrases' + type: Array + description: | + The collection of training phrases the agent is trained on to identify the intent. + item_type: + type: NestedObject + properties: + - name: 'id' + type: String + description: | + The unique identifier of the training phrase. + output: true + - name: 'parts' + type: Array + description: | + The ordered list of training phrase parts. The parts are concatenated in order to form the training phrase. + Note: The API does not automatically annotate training phrases like the Dialogflow Console does. + Note: Do not forget to include whitespace at part boundaries, so the training phrase is well formatted when the parts are concatenated. + If the training phrase does not need to be annotated with parameters, you just need a single part with only the Part.text field set. + If you want to annotate the training phrase, you must create multiple parts, where the fields of each part are populated in one of two ways: + Part.text is set to a part of the phrase that has no parameters. + Part.text is set to a part of the phrase that you want to annotate, and the parameterId field is set. + required: true + item_type: + type: NestedObject + properties: + - name: 'text' + type: String + description: | + The text for this part. + required: true + - name: 'parameterId' + type: String + description: | + The parameter used to annotate this part of the training phrase. This field is required for annotated parts of the training phrase. + - name: 'repeatCount' + type: Integer + description: | + Indicates how many times this example was added to the intent. + - name: 'parameters' + type: Array + description: | + The collection of parameters associated with the intent. + item_type: + type: NestedObject + properties: + - name: 'id' + type: String + description: | + The unique identifier of the parameter. 
This field is used by training phrases to annotate their parts. + required: true + - name: 'entityType' + type: String + description: | + The entity type of the parameter. + Format: projects/-/locations/-/agents/-/entityTypes/ for system entity types (for example, projects/-/locations/-/agents/-/entityTypes/sys.date), or projects//locations//agents//entityTypes/ for developer entity types. + required: true + - name: 'isList' + type: Boolean + description: | + Indicates whether the parameter represents a list of values. + - name: 'redact' + type: Boolean + description: | + Indicates whether the parameter content should be redacted in log. If redaction is enabled, the parameter content will be replaced by parameter name during logging. + Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled. + - name: 'priority' + type: Integer + description: | + The priority of this intent. Higher numbers represent higher priorities. + If the supplied value is unspecified or 0, the service translates the value to 500,000, which corresponds to the Normal priority in the console. + If the supplied value is negative, the intent is ignored in runtime detect intent requests. + - name: 'isFallback' + type: Boolean + description: | + Indicates whether this is a fallback intent. Currently only default fallback intent is allowed in the agent, which is added upon agent creation. + Adding training phrases to fallback intent is useful in the case of requests that are mistakenly matched, since training phrases assigned to fallback intents act as negative examples that triggers no-match event. + To manage the fallback intent, set `is_default_negative_intent = true` + - name: 'labels' + type: KeyValueLabels + description: | + The key/value metadata to label an intent. Labels can contain lowercase letters, digits and the symbols '-' and '_'. International characters are allowed, including letters from unicase alphabets. 
Keys must start with a letter. Keys and values can be no longer than 63 characters and no more than 128 bytes. + Prefix "sys-" is reserved for Dialogflow defined labels. Currently allowed Dialogflow defined labels include: * sys-head * sys-contextual The above labels do not require value. "sys-head" means the intent is a head intent. "sys.contextual" means the intent is a contextual intent. + An object containing a list of "key": value pairs. Example: { "name": "wrench", "mass": "1.3kg", "count": "3" }. + immutable: false + - name: 'description' + type: String + description: | + Human readable description for better understanding an intent like its scope, content, result etc. Maximum character limit: 140 characters. + validation: + function: 'validation.StringLenBetween(0, 140)' diff --git a/mmv1/products/dialogflowcx/go_Page.yaml b/mmv1/products/dialogflowcx/go_Page.yaml new file mode 100644 index 000000000000..11ea0268d8d7 --- /dev/null +++ b/mmv1/products/dialogflowcx/go_Page.yaml @@ -0,0 +1,1149 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'Page' +description: | + A Dialogflow CX conversation (session) can be described and visualized as a state machine. The states of a CX session are represented by pages. 
+references: + guides: + 'Official Documentation': 'https://cloud.google.com/dialogflow/cx/docs' + api: 'https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.agents.flows.pages' +docs: +id_format: '{{parent}}/pages/{{name}}' +base_url: '{{parent}}/pages' +update_verb: 'PATCH' +update_mask: true +import_format: + - '{{parent}}/pages/{{name}}' +timeouts: + insert_minutes: 40 + update_minutes: 40 + delete_minutes: 20 +custom_code: + pre_create: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_read: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_update: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + pre_delete: 'templates/terraform/pre_create/go/dialogflow_set_location.go.tmpl' + custom_import: 'templates/terraform/custom_import/go/dialogflowcx_page.go.tmpl' +skip_sweeper: true +examples: + - name: 'dialogflowcx_page_full' + primary_resource_id: 'basic_page' + vars: + agent_name: 'dialogflowcx-agent' +parameters: + - name: 'parent' + type: String + description: | + The flow to create a page for. + Format: projects//locations//agents//flows/. + url_param_only: true + immutable: true + - name: 'languageCode' + type: String + description: | + The language of the following fields in page: + + Page.entry_fulfillment.messages + Page.entry_fulfillment.conditional_cases + Page.event_handlers.trigger_fulfillment.messages + Page.event_handlers.trigger_fulfillment.conditional_cases + Page.form.parameters.fill_behavior.initial_prompt_fulfillment.messages + Page.form.parameters.fill_behavior.initial_prompt_fulfillment.conditional_cases + Page.form.parameters.fill_behavior.reprompt_event_handlers.messages + Page.form.parameters.fill_behavior.reprompt_event_handlers.conditional_cases + Page.transition_routes.trigger_fulfillment.messages + Page.transition_routes.trigger_fulfillment.conditional_cases + If not specified, the agent's default language is used. 
Many languages are supported. Note: languages must be enabled in the agent before they can be used. + immutable: true +properties: + - name: 'name' + type: String + description: | + The unique identifier of the page. + Format: projects//locations//agents//flows//pages/. + output: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'displayName' + type: String + description: | + The human-readable name of the page, unique within the agent. + required: true + validation: + function: 'validation.StringLenBetween(0, 64)' + - name: 'entryFulfillment' + type: NestedObject + description: | + The fulfillment to call when the session is entering the page. + properties: + - name: 'messages' + type: Array + description: | + The list of rich message responses to present to the user. + item_type: + type: NestedObject + properties: + - name: 'channel' + type: String + description: | + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + - name: 'text' + type: NestedObject + description: | + The text response message. + properties: + - name: 'text' + type: Array + description: | + A collection of text responses. + item_type: + type: String + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + # This can be an arbitrary json blob, so we use a string instead of a NestedObject. + output: true + - name: 'payload' + type: String + description: | + A custom, platform-specific payload. 
+ state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conversationSuccess' + type: NestedObject + description: | + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'outputAudioText' + type: NestedObject + description: | + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + properties: + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'text' + type: String + description: | + The raw text to be synthesized. 
+            - name: 'ssml'
+              type: String
+              description: |
+                The SSML text to be synthesized. For more information, see SSML.
+            - name: 'liveAgentHandoff'
+              type: NestedObject
+              description: |
+                Indicates that the conversation should be handed off to a live agent.
+                Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures.
+                You may set this, for example:
+                * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation.
+                * In a webhook response when you determine that the customer issue can only be handled by a human.
+              properties:
+                - name: 'metadata'
+                  type: String
+                  description: |
+                    Custom metadata. Dialogflow doesn't impose any structure on this.
+                  state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }'
+                  custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl'
+                  custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl'
+                  validation:
+                    function: 'validation.StringIsJSON'
+            - name: 'playAudio'
+              type: NestedObject
+              description: |
+                Specifies an audio clip to be played by the client as part of the response.
+              properties:
+                - name: 'audioUri'
+                  type: String
+                  description: |
+                    URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it.
+                  required: true
+                - name: 'allowPlaybackInterruption'
+                  type: Boolean
+                  description: |
+                    Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request.
+                  output: true
+            - name: 'telephonyTransferCall'
+              type: NestedObject
+              description: |
+                Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint.
+ properties: + - name: 'phoneNumber' + type: String + description: | + Transfer the call to a phone number in E.164 format. + required: true + - name: 'webhook' + type: String + description: | + The webhook to call. Format: projects//locations//agents//webhooks/. + - name: 'returnPartialResponses' + type: Boolean + description: | + Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. + - name: 'tag' + type: String + description: | + The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. + - name: 'setParameterActions' + type: Array + description: | + Set parameter values before executing the webhook. + item_type: + type: NestedObject + properties: + - name: 'parameter' + type: String + description: | + Display name of the parameter. + - name: 'value' + type: String + description: | + The new JSON-encoded value of the parameter. A null value clears the parameter. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conditionalCases' + type: Array + description: | + Conditional cases for this fulfillment. + item_type: + type: NestedObject + properties: + - name: 'cases' + type: String + description: | + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. 
The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'form' + type: NestedObject + description: | + The form associated with the page, used for collecting parameters relevant to the page. + properties: + - name: 'parameters' + type: Array + description: | + Parameters to collect from the user. + item_type: + type: NestedObject + properties: + - name: 'displayName' + type: String + description: | + The human-readable name of the parameter, unique within the form. + - name: 'required' + type: Boolean + description: | + Indicates whether the parameter is required. Optional parameters will not trigger prompts; however, they are filled if the user specifies them. + Required parameters must be filled before form filling concludes. + - name: 'entityType' + type: String + description: | + The entity type of the parameter. + Format: projects/-/locations/-/agents/-/entityTypes/ for system entity types (for example, projects/-/locations/-/agents/-/entityTypes/sys.date), or projects//locations//agents//entityTypes/ for developer entity types. + - name: 'isList' + type: Boolean + description: | + Indicates whether the parameter represents a list of values. + - name: 'fillBehavior' + type: NestedObject + description: | + Defines fill behavior for the parameter. + properties: + - name: 'initialPromptFulfillment' + type: NestedObject + description: | + The fulfillment to provide the initial prompt that the agent can present to the user in order to fill the parameter. 
+ properties: + - name: 'messages' + type: Array + description: | + The list of rich message responses to present to the user. + item_type: + type: NestedObject + properties: + - name: 'channel' + type: String + description: | + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + - name: 'text' + type: NestedObject + description: | + The text response message. + properties: + - name: 'text' + type: Array + description: | + A collection of text responses. + item_type: + type: String + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + # This can be an arbitrary json blob, so we use a string instead of a NestedObject. + output: true + - name: 'payload' + type: String + description: | + A custom, platform-specific payload. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conversationSuccess' + type: NestedObject + description: | + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. 
+ * In a webhook response when you determine that you handled the customer issue. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'outputAudioText' + type: NestedObject + description: | + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + properties: + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'text' + type: String + description: | + The raw text to be synthesized. + - name: 'ssml' + type: String + description: | + The SSML text to be synthesized. For more information, see SSML. + - name: 'liveAgentHandoff' + type: NestedObject + description: | + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. 
+ state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'playAudio' + type: NestedObject + description: | + Specifies an audio clip to be played by the client as part of the response. + properties: + - name: 'audioUri' + type: String + description: | + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + required: true + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'telephonyTransferCall' + type: NestedObject + description: | + Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. + properties: + - name: 'phoneNumber' + type: String + description: | + Transfer the call to a phone number in E.164 format. + required: true + - name: 'webhook' + type: String + description: | + The webhook to call. Format: projects//locations//agents//webhooks/. + - name: 'returnPartialResponses' + type: Boolean + description: | + Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. + - name: 'tag' + type: String + description: | + The tag used by the webhook to identify which fulfillment is being called. 
This field is required if webhook is specified. + - name: 'setParameterActions' + type: Array + description: | + Set parameter values before executing the webhook. + item_type: + type: NestedObject + properties: + - name: 'parameter' + type: String + description: | + Display name of the parameter. + - name: 'value' + type: String + description: | + The new JSON-encoded value of the parameter. A null value clears the parameter. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conditionalCases' + type: Array + description: | + Conditional cases for this fulfillment. + item_type: + type: NestedObject + properties: + - name: 'cases' + type: String + description: | + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'repromptEventHandlers' + type: Array + description: | + The handlers for parameter-level events, used to provide reprompt for the parameter or transition to a different page/flow. The supported events are: + * sys.no-match-, where N can be from 1 to 6 + * sys.no-match-default + * sys.no-input-, where N can be from 1 to 6 + * sys.no-input-default + * sys.invalid-parameter + [initialPromptFulfillment][initialPromptFulfillment] provides the first prompt for the parameter. 
+ If the user's response does not fill the parameter, a no-match/no-input event will be triggered, and the fulfillment associated with the sys.no-match-1/sys.no-input-1 handler (if defined) will be called to provide a prompt. The sys.no-match-2/sys.no-input-2 handler (if defined) will respond to the next no-match/no-input event, and so on. + A sys.no-match-default or sys.no-input-default handler will be used to handle all following no-match/no-input events after all numbered no-match/no-input handlers for the parameter are consumed. + A sys.invalid-parameter handler can be defined to handle the case where the parameter values have been invalidated by webhook. For example, if the user's response fill the parameter, however the parameter was invalidated by webhook, the fulfillment associated with the sys.invalid-parameter handler (if defined) will be called to provide a prompt. + If the event handler for the corresponding event can't be found on the parameter, initialPromptFulfillment will be re-prompted. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The unique identifier of this event handler. + output: true + - name: 'event' + type: String + description: | + The name of the event to handle. + - name: 'triggerFulfillment' + type: NestedObject + description: | + The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. It is invalid to specify such fulfillment for a handler handling webhooks. + properties: + - name: 'messages' + type: Array + description: | + The list of rich message responses to present to the user. + item_type: + type: NestedObject + properties: + - name: 'channel' + type: String + description: | + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. 
+ - name: 'text' + type: NestedObject + description: | + The text response message. + properties: + - name: 'text' + type: Array + description: | + A collection of text responses. + item_type: + type: String + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + # This can be an arbitrary json blob, so we use a string instead of a NestedObject. + output: true + - name: 'payload' + type: String + description: | + A custom, platform-specific payload. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conversationSuccess' + type: NestedObject + description: | + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. 
+ state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'outputAudioText' + type: NestedObject + description: | + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + properties: + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'text' + type: String + description: | + The raw text to be synthesized. + - name: 'ssml' + type: String + description: | + The SSML text to be synthesized. For more information, see SSML. + - name: 'liveAgentHandoff' + type: NestedObject + description: | + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. 
+ state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'playAudio' + type: NestedObject + description: | + Specifies an audio clip to be played by the client as part of the response. + properties: + - name: 'audioUri' + type: String + description: | + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + required: true + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'telephonyTransferCall' + type: NestedObject + description: | + Represents the signal that telles the client to transfer the phone call connected to the agent to a third-party endpoint. + properties: + - name: 'phoneNumber' + type: String + description: | + Transfer the call to a phone number in E.164 format. + required: true + - name: 'webhook' + type: String + description: | + The webhook to call. Format: projects//locations//agents//webhooks/. + - name: 'returnPartialResponses' + type: Boolean + description: | + Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. + - name: 'tag' + type: String + description: | + The tag used by the webhook to identify which fulfillment is being called. 
This field is required if webhook is specified. + - name: 'setParameterActions' + type: Array + description: | + Set parameter values before executing the webhook. + item_type: + type: NestedObject + properties: + - name: 'parameter' + type: String + description: | + Display name of the parameter. + - name: 'value' + type: String + description: | + The new JSON-encoded value of the parameter. A null value clears the parameter. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conditionalCases' + type: Array + description: | + Conditional cases for this fulfillment. + item_type: + type: NestedObject + properties: + - name: 'cases' + type: String + description: | + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'targetPage' + type: String + description: | + The target page to transition to. + Format: projects//locations//agents//flows//pages/. + - name: 'targetFlow' + type: String + description: | + The target flow to transition to. + Format: projects//locations//agents//flows/. + # This can be an arbitrary value, so we use a string instead of a NestedObject. + - name: 'defaultValue' + type: String + description: | + The default value of an optional parameter. 
If the parameter is required, the default value will be ignored. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'redact' + type: Boolean + description: | + Indicates whether the parameter content should be redacted in log. + If redaction is enabled, the parameter content will be replaced by parameter name during logging. Note: the parameter content is subject to redaction if either parameter level redaction or entity type level redaction is enabled. + - name: 'advancedSettings' + type: NestedObject + description: | + Hierarchical advanced settings for this parameter. The settings exposed at the lower level overrides the settings exposed at the higher level. + Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. + properties: + - name: 'dtmfSettings' + type: NestedObject + description: | + Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: + * Agent level + * Flow level + * Page level + * Parameter level + properties: + - name: 'enabled' + type: Boolean + description: | + If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). + - name: 'maxDigits' + type: Integer + description: | + Max length of DTMF digits. + - name: 'finishDigit' + type: String + description: | + The digit that terminates a DTMF digit sequence. 
+  - name: 'transitionRouteGroups'
+    type: Array
+    description: |
+      Ordered list of TransitionRouteGroups associated with the page. Transition route groups must be unique within a page.
+      If multiple transition routes within a page scope refer to the same intent, then the precedence order is: page's transition route -> page's transition route group -> flow's transition routes.
+      If multiple transition route groups within a page contain the same intent, then the first group in the ordered list takes precedence.
+      Format: projects//locations//agents//flows//transitionRouteGroups/.
+    item_type:
+      type: String
+  - name: 'transitionRoutes'
+    type: Array
+    description: |
+      A list of transitions for the transition rules of this page. They route the conversation to another page in the same flow, or another flow.
+      When we are in a certain page, the TransitionRoutes are evaluated in the following order:
+      TransitionRoutes defined in the page with intent specified.
+      TransitionRoutes defined in the transition route groups with intent specified.
+      TransitionRoutes defined in flow with intent specified.
+      TransitionRoutes defined in the transition route groups with intent specified.
+      TransitionRoutes defined in the page with only condition specified.
+      TransitionRoutes defined in the transition route groups with only condition specified.
+    item_type:
+      type: NestedObject
+      properties:
+        - name: 'name'
+          type: String
+          description: |
+            The unique identifier of this transition route.
+          output: true
+        - name: 'intent'
+          type: String
+          description: |
+            The unique identifier of an Intent.
+            Format: projects//locations//agents//intents/. Indicates that the transition can only happen when the given intent is matched. At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled.
+ - name: 'condition' + type: String + description: | + The condition to evaluate against form parameters or session parameters. + At least one of intent or condition must be specified. When both intent and condition are specified, the transition can only happen when both are fulfilled. + - name: 'triggerFulfillment' + type: NestedObject + description: | + The fulfillment to call when the condition is satisfied. At least one of triggerFulfillment and target must be specified. When both are defined, triggerFulfillment is executed first. + properties: + - name: 'messages' + type: Array + description: | + The list of rich message responses to present to the user. + item_type: + type: NestedObject + properties: + - name: 'channel' + type: String + description: | + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + - name: 'text' + type: NestedObject + description: | + The text response message. + properties: + - name: 'text' + type: Array + description: | + A collection of text responses. + item_type: + type: String + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + # This can be an arbitrary json blob, so we use a string instead of a NestedObject. + output: true + - name: 'payload' + type: String + description: | + A custom, platform-specific payload. 
+ state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conversationSuccess' + type: NestedObject + description: | + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'outputAudioText' + type: NestedObject + description: | + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + properties: + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'text' + type: String + description: | + The raw text to be synthesized. 
+ - name: 'ssml' + type: String + description: | + The SSML text to be synthesized. For more information, see SSML. + - name: 'liveAgentHandoff' + type: NestedObject + description: | + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'playAudio' + type: NestedObject + description: | + Specifies an audio clip to be played by the client as part of the response. + properties: + - name: 'audioUri' + type: String + description: | + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + required: true + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'telephonyTransferCall' + type: NestedObject + description: | + Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. 
+ properties: + - name: 'phoneNumber' + type: String + description: | + Transfer the call to a phone number in E.164 format. + required: true + - name: 'webhook' + type: String + description: | + The webhook to call. Format: projects//locations//agents//webhooks/. + - name: 'returnPartialResponses' + type: Boolean + description: | + Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. + - name: 'tag' + type: String + description: | + The tag used by the webhook to identify which fulfillment is being called. This field is required if webhook is specified. + - name: 'setParameterActions' + type: Array + description: | + Set parameter values before executing the webhook. + item_type: + type: NestedObject + properties: + - name: 'parameter' + type: String + description: | + Display name of the parameter. + - name: 'value' + type: String + description: | + The new JSON-encoded value of the parameter. A null value clears the parameter. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conditionalCases' + type: Array + description: | + Conditional cases for this fulfillment. + item_type: + type: NestedObject + properties: + - name: 'cases' + type: String + description: | + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. 
The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'targetPage' + type: String + description: | + The target page to transition to. + Format: projects//locations//agents//flows//pages/. + - name: 'targetFlow' + type: String + description: | + The target flow to transition to. + Format: projects//locations//agents//flows/. + - name: 'eventHandlers' + type: Array + description: | + Handlers associated with the page to handle events such as webhook errors, no match or no input. + item_type: + type: NestedObject + properties: + - name: 'name' + type: String + description: | + The unique identifier of this event handler. + output: true + - name: 'event' + type: String + description: | + The name of the event to handle. + - name: 'triggerFulfillment' + type: NestedObject + description: | + The fulfillment to call when the event occurs. Handling webhook errors with a fulfillment enabled with webhook could cause infinite loop. It is invalid to specify such fulfillment for a handler handling webhooks. + properties: + - name: 'messages' + type: Array + description: | + The list of rich message responses to present to the user. + item_type: + type: NestedObject + properties: + - name: 'channel' + type: String + description: | + The channel which the response is associated with. Clients can specify the channel via QueryParameters.channel, and only associated channel response will be returned. + - name: 'text' + type: NestedObject + description: | + The text response message. 
+ properties: + - name: 'text' + type: Array + description: | + A collection of text responses. + item_type: + type: String + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + # This can be an arbitrary json blob, so we use a string instead of a NestedObject. + output: true + - name: 'payload' + type: String + description: | + A custom, platform-specific payload. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conversationSuccess' + type: NestedObject + description: | + Indicates that the conversation succeeded, i.e., the bot handled the issue that the customer talked to it about. + Dialogflow only uses this to determine which conversations should be counted as successful and doesn't process the metadata in this message in any way. Note that Dialogflow also considers conversations that get to the conversation end page as successful even if they don't return ConversationSuccess. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates that the conversation succeeded. + * In a webhook response when you determine that you handled the customer issue. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. 
+ state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'outputAudioText' + type: NestedObject + description: | + A text or ssml response that is preferentially used for TTS output audio synthesis, as described in the comment on the ResponseMessage message. + properties: + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'text' + type: String + description: | + The raw text to be synthesized. + - name: 'ssml' + type: String + description: | + The SSML text to be synthesized. For more information, see SSML. + - name: 'liveAgentHandoff' + type: NestedObject + description: | + Indicates that the conversation should be handed off to a live agent. + Dialogflow only uses this to determine which conversations were handed off to a human agent for measurement purposes. What else to do with this signal is up to you and your handoff procedures. + You may set this, for example: + * In the entryFulfillment of a Page if entering the page indicates something went extremely wrong in the conversation. + * In a webhook response when you determine that the customer issue can only be handled by a human. + properties: + - name: 'metadata' + type: String + description: | + Custom metadata. Dialogflow doesn't impose any structure on this. 
+ state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_schema.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'playAudio' + type: NestedObject + description: | + Specifies an audio clip to be played by the client as part of the response. + properties: + - name: 'audioUri' + type: String + description: | + URI of the audio clip. Dialogflow does not impose any validation on this value. It is specific to the client that reads it. + required: true + - name: 'allowPlaybackInterruption' + type: Boolean + description: | + Whether the playback of this message can be interrupted by the end user's speech and the client can then starts the next Dialogflow request. + output: true + - name: 'telephonyTransferCall' + type: NestedObject + description: | + Represents the signal that tells the client to transfer the phone call connected to the agent to a third-party endpoint. + properties: + - name: 'phoneNumber' + type: String + description: | + Transfer the call to a phone number in E.164 format. + required: true + - name: 'webhook' + type: String + description: | + The webhook to call. Format: projects//locations//agents//webhooks/. + - name: 'returnPartialResponses' + type: Boolean + description: | + Whether Dialogflow should return currently queued fulfillment response messages in streaming APIs. If a webhook is specified, it happens before Dialogflow invokes webhook. Warning: 1) This flag only affects streaming API. Responses are still queued and returned once in non-streaming API. 2) The flag can be enabled in any fulfillment but only the first 3 partial responses will be returned. You may only want to apply it to fulfillments that have slow webhooks. + - name: 'tag' + type: String + description: | + The tag used by the webhook to identify which fulfillment is being called. 
This field is required if webhook is specified. + - name: 'setParameterActions' + type: Array + description: | + Set parameter values before executing the webhook. + item_type: + type: NestedObject + properties: + - name: 'parameter' + type: String + description: | + Display name of the parameter. + - name: 'value' + type: String + description: | + The new JSON-encoded value of the parameter. A null value clears the parameter. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'conditionalCases' + type: Array + description: | + Conditional cases for this fulfillment. + item_type: + type: NestedObject + properties: + - name: 'cases' + type: String + description: | + A JSON encoded list of cascading if-else conditions. Cases are mutually exclusive. The first one with a matching condition is selected, all the rest ignored. + See [Case](https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/Fulfillment#case) for the schema. + state_func: 'func(v interface{}) string { s, _ := structure.NormalizeJsonString(v); return s }' + custom_flatten: 'templates/terraform/custom_flatten/go/json_schema.tmpl' + custom_expand: 'templates/terraform/custom_expand/go/json_value.tmpl' + validation: + function: 'validation.StringIsJSON' + - name: 'targetPage' + type: String + description: | + The target page to transition to. + Format: projects//locations//agents//flows//pages/. + - name: 'targetFlow' + type: String + description: | + The target flow to transition to. + Format: projects//locations//agents//flows/. + - name: 'advancedSettings' + type: NestedObject + description: | + Hierarchical advanced settings for this page. The settings exposed at the lower level overrides the settings exposed at the higher level. 
+ Hierarchy: Agent->Flow->Page->Fulfillment/Parameter. + properties: + - name: 'dtmfSettings' + type: NestedObject + description: | + Define behaviors for DTMF (dual tone multi frequency). DTMF settings does not override each other. DTMF settings set at different levels define DTMF detections running in parallel. Exposed at the following levels: + * Agent level + * Flow level + * Page level + * Parameter level + properties: + - name: 'enabled' + type: Boolean + description: | + If true, incoming audio is processed for DTMF (dual tone multi frequency) events. For example, if the caller presses a button on their telephone keypad and DTMF processing is enabled, Dialogflow will detect the event (e.g. a "3" was pressed) in the incoming audio and pass the event to the bot to drive business logic (e.g. when 3 is pressed, return the account balance). + - name: 'maxDigits' + type: Integer + description: | + Max length of DTMF digits. + - name: 'finishDigit' + type: String + description: | + The digit that terminates a DTMF digit sequence. diff --git a/mmv1/products/dialogflowcx/go_SecuritySettings.yaml b/mmv1/products/dialogflowcx/go_SecuritySettings.yaml new file mode 100644 index 000000000000..de4efc950040 --- /dev/null +++ b/mmv1/products/dialogflowcx/go_SecuritySettings.yaml @@ -0,0 +1,166 @@ +# Copyright 2024 Google Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Warning: This is a temporary file, and should not be edited directly +--- +name: 'SecuritySettings' +description: | + Represents the settings related to security issues, such as data redaction and data retention. It may take hours for updates on the settings to propagate to all the related components and take effect. + Multiple security settings can be configured in each location. Each agent can specify the security settings to apply, and each setting can be applied to multiple agents in the same project and location. +references: + guides: + 'Official Documentation': 'https://cloud.google.com/dialogflow/cx/docs' + api: 'https://cloud.google.com/dialogflow/cx/docs/reference/rest/v3/projects.locations.securitySettings' +docs: +id_format: 'projects/{{project}}/locations/{{location}}/securitySettings/{{name}}' +base_url: 'projects/{{project}}/locations/{{location}}/securitySettings' +update_verb: 'PATCH' +update_mask: true +import_format: + - 'projects/{{project}}/locations/{{location}}/securitySettings/{{name}}' +timeouts: + insert_minutes: 40 + update_minutes: 40 + delete_minutes: 20 +custom_code: +examples: + - name: 'dialogflowcx_security_settings_basic' + primary_resource_id: 'basic_security_settings' + vars: + settings_name: 'dialogflowcx-security-settings' + - name: 'dialogflowcx_security_settings_full' + primary_resource_id: 'basic_security_settings' + vars: + inspect_name: 'dialogflowcx-inspect-template' + deidentify_name: 'dialogflowcx-deidentify-template' + settings_name: 'dialogflowcx-security-settings' + bucket_name: 'dialogflowcx-bucket' + test_env_vars: + project: 'PROJECT_NAME' +parameters: +properties: + - name: 'name' + type: String + description: | + The unique identifier of the settings. + Format: projects//locations//securitySettings/. + output: true + custom_flatten: 'templates/terraform/custom_flatten/go/name_from_self_link.tmpl' + - name: 'location' + type: String + description: | + The location these settings are located in. 
Settings can only be applied to an agent in the same location. + See [Available Regions](https://cloud.google.com/dialogflow/cx/docs/concept/region#avail) for a list of supported locations. + url_param_only: true + required: true + immutable: true + - name: 'displayName' + type: String + description: | + The human-readable name of the security settings, unique within the location. + required: true + - name: 'redactionStrategy' + type: Enum + description: | + Defines how we redact data. If not set, defaults to not redacting. + * REDACT_WITH_SERVICE: Call redaction service to clean up the data to be persisted. + enum_values: + - 'REDACT_WITH_SERVICE' + - name: 'redactionScope' + type: Enum + description: | + Defines what types of data to redact. If not set, defaults to not redacting any kind of data. + * REDACT_DISK_STORAGE: On data to be written to disk or similar devices that are capable of holding data even if power is disconnected. This includes data that are temporarily saved on disk. + enum_values: + - 'REDACT_DISK_STORAGE' + - name: 'inspectTemplate' + type: String + description: | + [DLP](https://cloud.google.com/dlp/docs) inspect template name. Use this template to define inspect base settings. If empty, we use the default DLP inspect config. + Note: inspectTemplate must be located in the same region as the SecuritySettings. + Format: projects//locations//inspectTemplates/